diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 61ecaa3624..741eed6490 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1,6 +1,58 @@
 {
 	"ImportPath": "github.com/docker/machine",
 	"GoVersion": "go1.5",
+	"Packages": [
+		"github.com/docker/machine",
+		"github.com/docker/machine/cli",
+		"github.com/docker/machine/cmd",
+		"github.com/docker/machine/commands",
+		"github.com/docker/machine/commands/mcndirs",
+		"github.com/docker/machine/drivers/amazonec2",
+		"github.com/docker/machine/drivers/amazonec2/amz",
+		"github.com/docker/machine/drivers/azure",
+		"github.com/docker/machine/drivers/digitalocean",
+		"github.com/docker/machine/drivers/errdriver",
+		"github.com/docker/machine/drivers/exoscale",
+		"github.com/docker/machine/drivers/fakedriver",
+		"github.com/docker/machine/drivers/generic",
+		"github.com/docker/machine/drivers/google",
+		"github.com/docker/machine/drivers/hyperv",
+		"github.com/docker/machine/drivers/none",
+		"github.com/docker/machine/drivers/openstack",
+		"github.com/docker/machine/drivers/rackspace",
+		"github.com/docker/machine/drivers/softlayer",
+		"github.com/docker/machine/drivers/virtualbox",
+		"github.com/docker/machine/drivers/vmwarefusion",
+		"github.com/docker/machine/drivers/vmwarevcloudair",
+		"github.com/docker/machine/drivers/vmwarevsphere",
+		"github.com/docker/machine/drivers/vmwarevsphere/errors",
+		"github.com/docker/machine/libmachine",
+		"github.com/docker/machine/libmachine/auth",
+		"github.com/docker/machine/libmachine/cert",
+		"github.com/docker/machine/libmachine/drivers",
+		"github.com/docker/machine/libmachine/drivers/plugin",
+		"github.com/docker/machine/libmachine/drivers/plugin/localbinary",
+		"github.com/docker/machine/libmachine/drivers/rpc",
+		"github.com/docker/machine/libmachine/engine",
+		"github.com/docker/machine/libmachine/examples",
+		"github.com/docker/machine/libmachine/host",
+		"github.com/docker/machine/libmachine/hosttest",
+		"github.com/docker/machine/libmachine/log",
+		"github.com/docker/machine/libmachine/mcnerror",
+		"github.com/docker/machine/libmachine/mcnflag",
+		"github.com/docker/machine/libmachine/mcnutils",
+		"github.com/docker/machine/libmachine/persist",
+		"github.com/docker/machine/libmachine/persisttest",
+		"github.com/docker/machine/libmachine/provider",
+		"github.com/docker/machine/libmachine/provision",
+		"github.com/docker/machine/libmachine/provision/pkgaction",
+		"github.com/docker/machine/libmachine/provision/serviceaction",
+		"github.com/docker/machine/libmachine/ssh",
+		"github.com/docker/machine/libmachine/state",
+		"github.com/docker/machine/libmachine/swarm",
+		"github.com/docker/machine/libmachine/version",
+		"github.com/docker/machine/version"
+	],
 	"Deps": [
 		{
 			"ImportPath": "code.google.com/p/goauth2/oauth",
@@ -16,95 +68,16 @@
 			"ImportPath": "github.com/cenkalti/backoff",
 			"Rev": "9831e1e25c874e0a0601b6dc43641071414eec7a"
 		},
-		{
-			"ImportPath": "github.com/codegangsta/cli",
-			"Comment": "1.2.0-64-ge1712f3",
-			"Rev": "e1712f381785e32046927f64a7c86fe569203196"
-		},
 		{
 			"ImportPath": "github.com/digitalocean/godo",
 			"Comment": "v0.5.0",
 			"Rev": "5478aae80694de1d2d0e02c386bbedd201266234"
 		},
-		{
-			"ImportPath": "github.com/docker/docker/dockerversion",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/engine",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/archive",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/fileutils",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/ioutils",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/mflag",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/parsers",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/pools",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/promise",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/system",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/term",
 			"Comment": "v1.5.0",
 			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
 		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/timeutils",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/units",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/version",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar",
-			"Comment": "v1.5.0",
-			"Rev": "a8a31eff10544860d2188dddabdee4d727545796"
-		},
-		{
-			"ImportPath": "github.com/docker/libtrust",
-			"Rev": "c54fbb67c1f1e68d7d6f8d2ad7c9360404616a41"
-		},
 		{
 			"ImportPath": "github.com/google/go-querystring/query",
 			"Rev": "30f7a39f4a218feb5325f3aebc60c32a572a8274"
@@ -113,9 +86,13 @@
 			"ImportPath": "github.com/mitchellh/mapstructure",
 			"Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf"
 		},
+		{
+			"ImportPath": "github.com/pyr/egoscale/src/egoscale",
+			"Rev": "bbaa67324aeeacc90430c1fe0a9c620d3929512e"
+		},
 		{
 			"ImportPath": "github.com/rackspace/gophercloud",
-			"Comment": "v1.0.0-558-ce0f487",
+			"Comment": "v1.0.0-558-gce0f487",
 			"Rev": "ce0f487f6747ab43c4e4404722df25349385bebd"
 		},
 		{
@@ -130,10 +107,6 @@
 			"ImportPath": "github.com/stretchr/testify/assert",
 			"Rev": "e4ec8152c15fc46bd5056ce65997a07c7d415325"
 		},
-		{
-			"ImportPath": "github.com/pyr/egoscale/src/egoscale",
-			"Rev": "bbaa67324aeeacc90430c1fe0a9c620d3929512e"
-		},
 		{
 			"ImportPath": "github.com/tent/http-link-go",
 			"Rev": "ac974c61c2f990f4115b119354b5e0b47550e888"
@@ -151,6 +124,10 @@
 			"ImportPath": "golang.org/x/net/context",
 			"Rev": "d9558e5c97f85372afee28cf2b6059d7d3818919"
 		},
+		{
+			"ImportPath": "golang.org/x/oauth2",
+			"Rev": "038cb4adce85ed41e285c2e7cc6221a92bfa44aa"
+		},
 		{
 			"ImportPath": "google.golang.org/api/compute/v1",
 			"Rev": "a09229c13c2f13bbdedf7b31b506cad4c83ef3bf"
@@ -160,11 +137,11 @@
 			"Rev": "a09229c13c2f13bbdedf7b31b506cad4c83ef3bf"
 		},
 		{
-			"ImportPath": "golang.org/x/oauth2",
-			"Rev": "038cb4adce85ed41e285c2e7cc6221a92bfa44aa"
+			"ImportPath": "google.golang.org/cloud/compute/metadata",
+			"Rev": "2400193c85c3561d13880d34e0e10c4315bb02af"
 		},
 		{
-			"ImportPath": "google.golang.org/cloud/compute",
+			"ImportPath": "google.golang.org/cloud/internal",
 			"Rev": "2400193c85c3561d13880d34e0e10c4315bb02af"
 		}
 	]
diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore
deleted file mode 100644
index f037d684ef..0000000000
--- a/Godeps/_workspace/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/pkg
-/bin
diff --git a/Godeps/_workspace/src/github.com/MSOpenTech/azure-sdk-for-go/core/http/cgi/testdata/test.cgi b/Godeps/_workspace/src/github.com/MSOpenTech/azure-sdk-for-go/core/http/cgi/testdata/test.cgi
deleted file mode 100644
index 3214df6f00..0000000000
--- a/Godeps/_workspace/src/github.com/MSOpenTech/azure-sdk-for-go/core/http/cgi/testdata/test.cgi
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/perl
-# Copyright 2011 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-#
-# Test script run as a child process under cgi_test.go
-
-use strict;
-use Cwd;
-
-binmode STDOUT;
-
-my $q = MiniCGI->new;
-my $params = $q->Vars;
-
-if ($params->{"loc"}) {
-    print "Location: $params->{loc}\r\n\r\n";
-    exit(0);
-}
-
-print "Content-Type: text/html\r\n";
-print "X-CGI-Pid: $$\r\n";
-print "X-Test-Header: X-Test-Value\r\n";
-print "\r\n";
-
-if ($params->{"bigresponse"}) {
-    # 17 MB, for OS X: golang.org/issue/4958
-    for (1..(17 * 1024)) {
-        print "A" x 1024, "\r\n";
-    }
-    exit 0;
-}
-
-print "test=Hello CGI\r\n";
-
-foreach my $k (sort keys %$params) {
-    print "param-$k=$params->{$k}\r\n";
-}
-
-foreach my $k (sort keys %ENV) {
-    my $clean_env = $ENV{$k};
-    $clean_env =~ s/[\n\r]//g;
-    print "env-$k=$clean_env\r\n";
-}
-
-# NOTE: msys perl returns /c/go/src/... not C:\go\....
-my $dir = getcwd();
-if ($^O eq 'MSWin32' || $^O eq 'msys') {
-    if ($dir =~ /^.:/) {
-        $dir =~ s!/!\\!g;
-    } else {
-        my $cmd = $ENV{'COMSPEC'} || 'c:\\windows\\system32\\cmd.exe';
-        $cmd =~ s!\\!/!g;
-        $dir = `$cmd /c cd`;
-        chomp $dir;
-    }
-}
-print "cwd=$dir\r\n";
-
-# A minimal version of CGI.pm, for people without the perl-modules
-# package installed. (CGI.pm used to be part of the Perl core, but
-# some distros now bundle perl-base and perl-modules separately...)
-package MiniCGI;
-
-sub new {
-    my $class = shift;
-    return bless {}, $class;
-}
-
-sub Vars {
-    my $self = shift;
-    my $pairs;
-    if ($ENV{CONTENT_LENGTH}) {
-        $pairs = do { local $/; <STDIN> };
-    } else {
-        $pairs = $ENV{QUERY_STRING};
-    }
-    my $vars = {};
-    foreach my $kv (split(/&/, $pairs)) {
-        my ($k, $v) = split(/=/, $kv, 2);
-        $vars->{_urldecode($k)} = _urldecode($v);
-    }
-    return $vars;
-}
-
-sub _urldecode {
-    my $v = shift;
-    $v =~ tr/+/ /;
-    $v =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
-    return $v;
-}
diff --git a/Godeps/_workspace/src/github.com/MSOpenTech/azure-sdk-for-go/core/http/testdata/file b/Godeps/_workspace/src/github.com/MSOpenTech/azure-sdk-for-go/core/http/testdata/file
deleted file mode 100644
index 11f11f9be3..0000000000
--- a/Godeps/_workspace/src/github.com/MSOpenTech/azure-sdk-for-go/core/http/testdata/file
+++ /dev/null
@@ -1 +0,0 @@
-0123456789
diff --git a/Godeps/_workspace/src/github.com/MSOpenTech/azure-sdk-for-go/core/http/testdata/index.html b/Godeps/_workspace/src/github.com/MSOpenTech/azure-sdk-for-go/core/http/testdata/index.html
deleted file mode 100644
index da8e1e93d1..0000000000
--- a/Godeps/_workspace/src/github.com/MSOpenTech/azure-sdk-for-go/core/http/testdata/index.html
+++ /dev/null
@@ -1 +0,0 @@
-index.html says hello
diff --git a/Godeps/_workspace/src/github.com/MSOpenTech/azure-sdk-for-go/core/http/testdata/style.css b/Godeps/_workspace/src/github.com/MSOpenTech/azure-sdk-for-go/core/http/testdata/style.css
deleted file mode 100644
index 208d16d421..0000000000
--- a/Godeps/_workspace/src/github.com/MSOpenTech/azure-sdk-for-go/core/http/testdata/style.css
+++ /dev/null
@@ -1 +0,0 @@
-body {}
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml b/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml
deleted file mode 100644
index baf46abc6f..0000000000
--- a/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-language: go
-go: 1.1
-
-script:
-- go vet ./...
-- go test -v ./...
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE b/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE
deleted file mode 100644
index 5515ccfb71..0000000000
--- a/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-Copyright (C) 2013 Jeremy Saenz
-All Rights Reserved.
-
-MIT LICENSE
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/README.md b/Godeps/_workspace/src/github.com/codegangsta/cli/README.md
deleted file mode 100644
index 0e8327b8b3..0000000000
--- a/Godeps/_workspace/src/github.com/codegangsta/cli/README.md
+++ /dev/null
@@ -1,298 +0,0 @@
-[![Build Status](https://travis-ci.org/codegangsta/cli.png?branch=master)](https://travis-ci.org/codegangsta/cli)
-
-# cli.go
-cli.go is a simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way.
-
-You can view the API docs here:
-http://godoc.org/github.com/codegangsta/cli
-
-## Overview
-Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app.
-
-**This is where cli.go comes into play.** cli.go makes command line programming fun, organized, and expressive!
-
-## Installation
-Make sure you have a working Go environment (go 1.1 is *required*). [See the install instructions](http://golang.org/doc/install.html).
-
-To install `cli.go`, simply run:
-```
-$ go get github.com/codegangsta/cli
-```
-
-Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can be easily used:
-```
-export PATH=$PATH:$GOPATH/bin
-```
-
-## Getting Started
-One of the philosophies behind cli.go is that an API should be playful and full of discovery. So a cli.go app can be as little as one line of code in `main()`.
-
-``` go
-package main
-
-import (
-  "os"
-  "github.com/codegangsta/cli"
-)
-
-func main() {
-  cli.NewApp().Run(os.Args)
-}
-```
-
-This app will run and show help text, but is not very useful. Let's give an action to execute and some help documentation:
-
-``` go
-package main
-
-import (
-  "os"
-  "github.com/codegangsta/cli"
-)
-
-func main() {
-  app := cli.NewApp()
-  app.Name = "boom"
-  app.Usage = "make an explosive entrance"
-  app.Action = func(c *cli.Context) {
-    println("boom! I say!")
-  }
-
-  app.Run(os.Args)
-}
-```
-
-Running this already gives you a ton of functionality, plus support for things like subcommands and flags, which are covered below.
-
-## Example
-
-Being a programmer can be a lonely job. Thankfully by the power of automation that is not the case! Let's create a greeter app to fend off our demons of loneliness!
-
-Start by creating a directory named `greet`, and within it, add a file, `greet.go` with the following code in it:
-
-``` go
-package main
-
-import (
-  "os"
-  "github.com/codegangsta/cli"
-)
-
-func main() {
-  app := cli.NewApp()
-  app.Name = "greet"
-  app.Usage = "fight the loneliness!"
-  app.Action = func(c *cli.Context) {
-    println("Hello friend!")
-  }
-
-  app.Run(os.Args)
-}
-```
-
-Install our command to the `$GOPATH/bin` directory:
-
-```
-$ go install
-```
-
-Finally run our new command:
-
-```
-$ greet
-Hello friend!
-```
-
-cli.go also generates some bitchass help text:
-```
-$ greet help
-NAME:
-    greet - fight the loneliness!
-
-USAGE:
-    greet [global options] command [command options] [arguments...]
-
-VERSION:
-    0.0.0
-
-COMMANDS:
-    help, h  Shows a list of commands or help for one command
-
-GLOBAL OPTIONS
-    --version  Shows version information
-```
-
-### Arguments
-You can lookup arguments by calling the `Args` function on `cli.Context`.
-
-``` go
-...
-app.Action = func(c *cli.Context) {
-  println("Hello", c.Args()[0])
-}
-...
-```
-
-### Flags
-Setting and querying flags is simple.
-``` go
-...
-app.Flags = []cli.Flag {
-  cli.StringFlag{
-    Name: "lang",
-    Value: "english",
-    Usage: "language for the greeting",
-  },
-}
-app.Action = func(c *cli.Context) {
-  name := "someone"
-  if len(c.Args()) > 0 {
-    name = c.Args()[0]
-  }
-  if c.String("lang") == "spanish" {
-    println("Hola", name)
-  } else {
-    println("Hello", name)
-  }
-}
-...
-```
-
-#### Alternate Names
-
-You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g.
-
-``` go
-app.Flags = []cli.Flag {
-  cli.StringFlag{
-    Name: "lang, l",
-    Value: "english",
-    Usage: "language for the greeting",
-  },
-}
-```
-
-That flag can then be set with `--lang spanish` or `-l spanish`. Note that giving two different forms of the same flag in the same command invocation is an error.
-
-#### Values from the Environment
-
-You can also have the default value set from the environment via `EnvVar`. e.g.
-
-``` go
-app.Flags = []cli.Flag {
-  cli.StringFlag{
-    Name: "lang, l",
-    Value: "english",
-    Usage: "language for the greeting",
-    EnvVar: "APP_LANG",
-  },
-}
-```
-
-The `EnvVar` may also be given as a comma-delimited "cascade", where the first environment variable that resolves is used as the default.
-
-``` go
-app.Flags = []cli.Flag {
-  cli.StringFlag{
-    Name: "lang, l",
-    Value: "english",
-    Usage: "language for the greeting",
-    EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG",
-  },
-}
-```
-
-### Subcommands
-
-Subcommands can be defined for a more git-like command line app.
-```go
-...
-app.Commands = []cli.Command{
-  {
-    Name:      "add",
-    ShortName: "a",
-    Usage:     "add a task to the list",
-    Action: func(c *cli.Context) {
-      println("added task: ", c.Args().First())
-    },
-  },
-  {
-    Name:      "complete",
-    ShortName: "c",
-    Usage:     "complete a task on the list",
-    Action: func(c *cli.Context) {
-      println("completed task: ", c.Args().First())
-    },
-  },
-  {
-    Name:      "template",
-    ShortName: "r",
-    Usage:     "options for task templates",
-    Subcommands: []cli.Command{
-      {
-        Name:  "add",
-        Usage: "add a new template",
-        Action: func(c *cli.Context) {
-          println("new task template: ", c.Args().First())
-        },
-      },
-      {
-        Name:  "remove",
-        Usage: "remove an existing template",
-        Action: func(c *cli.Context) {
-          println("removed task template: ", c.Args().First())
-        },
-      },
-    },
-  },
-}
-...
-```
-
-### Bash Completion
-
-You can enable completion commands by setting the `EnableBashCompletion`
-flag on the `App` object. By default, this setting will only auto-complete to
-show an app's subcommands, but you can write your own completion methods for
-the App or its subcommands.
-```go
-...
-var tasks = []string{"cook", "clean", "laundry", "eat", "sleep", "code"}
-app := cli.NewApp()
-app.EnableBashCompletion = true
-app.Commands = []cli.Command{
-  {
-    Name:      "complete",
-    ShortName: "c",
-    Usage:     "complete a task on the list",
-    Action: func(c *cli.Context) {
-      println("completed task: ", c.Args().First())
-    },
-    BashComplete: func(c *cli.Context) {
-      // This will complete if no args are passed
-      if len(c.Args()) > 0 {
-        return
-      }
-      for _, t := range tasks {
-        fmt.Println(t)
-      }
-    },
-  }
-}
-...
-```
-
-#### To Enable
-
-Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while
-setting the `PROG` variable to the name of your program:
-
-`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete`
-
-
-## Contribution Guidelines
-Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch.
-
-If you have contributed something significant to the project, I will most likely add you as a collaborator. As a collaborator you are given the ability to merge others' pull requests. It is very important that new code does not break existing code, so be careful about what code you do choose to merge. If you have any questions feel free to link @codegangsta to the issue in question and we can review it together.
-
-If you feel like you have contributed to the project but have not yet been added as a collaborator, I probably forgot to add you. Hit @codegangsta up over email and we will get it figured out.
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/app.go b/Godeps/_workspace/src/github.com/codegangsta/cli/app.go
deleted file mode 100644
index 6422345dc2..0000000000
--- a/Godeps/_workspace/src/github.com/codegangsta/cli/app.go
+++ /dev/null
@@ -1,275 +0,0 @@
-package cli
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"text/tabwriter"
-	"text/template"
-	"time"
-)
-
-// App is the main structure of a cli application. It is recommended that
-// an app be created with the cli.NewApp() function
-type App struct {
-	// The name of the program. Defaults to os.Args[0]
-	Name string
-	// Description of the program.
-	Usage string
-	// Version of the program
-	Version string
-	// List of commands to execute
-	Commands []Command
-	// List of flags to parse
-	Flags []Flag
-	// Boolean to enable bash completion commands
-	EnableBashCompletion bool
-	// Boolean to hide built-in help command
-	HideHelp bool
-	// Boolean to hide built-in version flag
-	HideVersion bool
-	// An action to execute when the bash-completion flag is set
-	BashComplete func(context *Context)
-	// An action to execute before any subcommands are run, but after the context is ready
-	// If a non-nil error is returned, no subcommands are run
-	Before func(context *Context) error
-	// The action to execute when no subcommands are specified
-	Action func(context *Context)
-	// Execute this function if the proper command cannot be found
-	CommandNotFound func(context *Context, command string)
-	// Compilation date
-	Compiled time.Time
-	// Author
-	Author string
-	// Author e-mail
-	Email string
-	// Writer writer to write output to
-	Writer io.Writer
-}
-
-// Tries to find out when this binary was compiled.
-// Returns the current time if it fails to find it.
-func compileTime() time.Time {
-	info, err := os.Stat(os.Args[0])
-	if err != nil {
-		return time.Now()
-	}
-	return info.ModTime()
-}
-
-// Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action.
-func NewApp() *App {
-	return &App{
-		Name:         os.Args[0],
-		Usage:        "A new cli application",
-		Version:      "0.0.0",
-		BashComplete: DefaultAppComplete,
-		Action:       helpCommand.Action,
-		Compiled:     compileTime(),
-		Author:       "Author",
-		Email:        "unknown@email",
-		Writer:       os.Stdout,
-	}
-}
-
-// Entry point to the cli app. Parses the arguments slice and routes to the proper flag/args combination
-func (a *App) Run(arguments []string) error {
-	if HelpPrinter == nil {
-		defer func() {
-			HelpPrinter = nil
-		}()
-
-		HelpPrinter = func(templ string, data interface{}) {
-			w := tabwriter.NewWriter(a.Writer, 0, 8, 1, '\t', 0)
-			t := template.Must(template.New("help").Parse(templ))
-			err := t.Execute(w, data)
-			if err != nil {
-				panic(err)
-			}
-			w.Flush()
-		}
-	}
-
-	// append help to commands
-	if a.Command(helpCommand.Name) == nil && !a.HideHelp {
-		a.Commands = append(a.Commands, helpCommand)
-		if (HelpFlag != BoolFlag{}) {
-			a.appendFlag(HelpFlag)
-		}
-	}
-
-	//append version/help flags
-	if a.EnableBashCompletion {
-		a.appendFlag(BashCompletionFlag)
-	}
-
-	if !a.HideVersion {
-		a.appendFlag(VersionFlag)
-	}
-
-	// parse flags
-	set := flagSet(a.Name, a.Flags)
-	set.SetOutput(ioutil.Discard)
-	err := set.Parse(arguments[1:])
-	nerr := normalizeFlags(a.Flags, set)
-	if nerr != nil {
-		fmt.Fprintln(a.Writer, nerr)
-		context := NewContext(a, set, set)
-		ShowAppHelp(context)
-		fmt.Fprintln(a.Writer)
-		return nerr
-	}
-	context := NewContext(a, set, set)
-
-	if err != nil {
-		fmt.Fprintf(a.Writer, "Incorrect Usage.\n\n")
-		ShowAppHelp(context)
-		fmt.Fprintln(a.Writer)
-		return err
-	}
-
-	if checkCompletions(context) {
-		return nil
-	}
-
-	if checkHelp(context) {
-		return nil
-	}
-
-	if checkVersion(context) {
-		return nil
-	}
-
-	if a.Before != nil {
-		err := a.Before(context)
-		if err != nil {
-			return err
-		}
-	}
-
-	args := context.Args()
-	if args.Present() {
-		name := args.First()
-		c := a.Command(name)
-		if c != nil {
-			return c.Run(context)
-		}
-	}
-
-	// Run default Action
-	a.Action(context)
-	return nil
-}
-
-// Another entry point to the cli app, takes care of passing arguments and error handling
-func (a *App) RunAndExitOnError() {
-	if err := a.Run(os.Args); err != nil {
-		fmt.Fprintln(os.Stderr, err)
-		os.Exit(1)
-	}
-}
-
-// Invokes the subcommand given the context, parses ctx.Args() to generate command-specific flags
-func (a *App) RunAsSubcommand(ctx *Context) error {
-	// append help to commands
-	if len(a.Commands) > 0 {
-		if a.Command(helpCommand.Name) == nil && !a.HideHelp {
-			a.Commands = append(a.Commands, helpCommand)
-			if (HelpFlag != BoolFlag{}) {
-				a.appendFlag(HelpFlag)
-			}
-		}
-	}
-
-	// append flags
-	if a.EnableBashCompletion {
-		a.appendFlag(BashCompletionFlag)
-	}
-
-	// parse flags
-	set := flagSet(a.Name, a.Flags)
-	set.SetOutput(ioutil.Discard)
-	err := set.Parse(ctx.Args().Tail())
-	nerr := normalizeFlags(a.Flags, set)
-	context := NewContext(a, set, ctx.globalSet)
-
-	if nerr != nil {
-		fmt.Fprintln(a.Writer, nerr)
-		if len(a.Commands) > 0 {
-			ShowSubcommandHelp(context)
-		} else {
-			ShowCommandHelp(ctx, context.Args().First())
-		}
-		fmt.Fprintln(a.Writer)
-		return nerr
-	}
-
-	if err != nil {
-		fmt.Fprintf(a.Writer, "Incorrect Usage.\n\n")
-		ShowSubcommandHelp(context)
-		return err
-	}
-
-	if checkCompletions(context) {
-		return nil
-	}
-
-	if len(a.Commands) > 0 {
-		if checkSubcommandHelp(context) {
-			return nil
-		}
-	} else {
-		if checkCommandHelp(ctx, context.Args().First()) {
-			return nil
-		}
-	}
-
-	if a.Before != nil {
-		err := a.Before(context)
-		if err != nil {
-			return err
-		}
-	}
-
-	args := context.Args()
-	if args.Present() {
-		name := args.First()
-		c := a.Command(name)
-		if c != nil {
-			return c.Run(context)
-		}
-	}
-
-	// Run default Action
-	a.Action(context)
-
-	return nil
-}
-
-// Returns the named command on App. Returns nil if the command does not exist
-func (a *App) Command(name string) *Command {
-	for _, c := range a.Commands {
-		if c.HasName(name) {
-			return &c
-		}
-	}
-
-	return nil
-}
-
-func (a *App) hasFlag(flag Flag) bool {
-	for _, f := range a.Flags {
-		if flag == f {
-			return true
-		}
-	}
-
-	return false
-}
-
-func (a *App) appendFlag(flag Flag) {
-	if !a.hasFlag(flag) {
-		a.Flags = append(a.Flags, flag)
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go b/Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go
deleted file mode 100644
index 2cbb0e3a17..0000000000
--- a/Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go
+++ /dev/null
@@ -1,554 +0,0 @@
-package cli_test
-
-import (
-	"flag"
-	"fmt"
-	"os"
-	"testing"
-
-	"github.com/codegangsta/cli"
-)
-
-func ExampleApp() {
-	// set args for examples sake
-	os.Args = []string{"greet", "--name", "Jeremy"}
-
-	app := cli.NewApp()
-	app.Name = "greet"
-	app.Flags = []cli.Flag{
-		cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
-	}
-	app.Action = func(c *cli.Context) {
-		fmt.Printf("Hello %v\n", c.String("name"))
-	}
-	app.Run(os.Args)
-	// Output:
-	// Hello Jeremy
-}
-
-func ExampleAppSubcommand() {
-	// set args for examples sake
-	os.Args = []string{"say", "hi", "english", "--name", "Jeremy"}
-	app := cli.NewApp()
-	app.Name = "say"
-	app.Commands = []cli.Command{
-		{
-			Name:        "hello",
-			ShortName:   "hi",
-			Usage:       "use it to see a description",
-			Description: "This is how we describe hello the function",
-			Subcommands: []cli.Command{
-				{
-					Name:        "english",
-					ShortName:   "en",
-					Usage:       "sends a greeting in english",
-					Description: "greets someone in english",
-					Flags: []cli.Flag{
-						cli.StringFlag{
-							Name:  "name",
-							Value: "Bob",
-							Usage: "Name of the person to greet",
-						},
-					},
-					Action: func(c *cli.Context) {
-						fmt.Println("Hello,", c.String("name"))
-					},
-				},
-			},
-		},
-	}
-
-	app.Run(os.Args)
-	// Output:
-	// Hello, Jeremy
-}
-
-func ExampleAppHelp() {
-	// set args for examples sake
-	os.Args = []string{"greet", "h", "describeit"}
-
-	app := cli.NewApp()
-	app.Name = "greet"
-	app.Flags = []cli.Flag{
-		cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
-	}
-	app.Commands = []cli.Command{
-		{
-			Name:        "describeit",
-			ShortName:   "d",
-			Usage:       "use it to see a description",
-			Description: "This is how we describe describeit the function",
-			Action: func(c *cli.Context) {
-				fmt.Printf("i like to describe things")
-			},
-		},
-	}
-	app.Run(os.Args)
-	// Output:
-	// NAME:
-	//    describeit - use it to see a description
-	//
-	// USAGE:
-	//    command describeit [arguments...]
-	//
-	// DESCRIPTION:
-	//    This is how we describe describeit the function
-}
-
-func ExampleAppBashComplete() {
-	// set args for examples sake
-	os.Args = []string{"greet", "--generate-bash-completion"}
-
-	app := cli.NewApp()
-	app.Name = "greet"
-	app.EnableBashCompletion = true
-	app.Commands = []cli.Command{
-		{
-			Name:        "describeit",
-			ShortName:   "d",
-			Usage:       "use it to see a description",
-			Description: "This is how we describe describeit the function",
-			Action: func(c *cli.Context) {
-				fmt.Printf("i like to describe things")
-			},
-		}, {
-			Name:        "next",
-			Usage:       "next example",
-			Description: "more stuff to see when generating bash completion",
-			Action: func(c *cli.Context) {
-				fmt.Printf("the next example")
-			},
-		},
-	}
-
-	app.Run(os.Args)
-	// Output:
-	// describeit
-	// d
-	// next
-	// help
-	// h
-}
-
-func TestApp_Run(t *testing.T) {
-	s := ""
-
-	app := cli.NewApp()
-	app.Action = func(c *cli.Context) {
-		s = s + c.Args().First()
-	}
-
-	err := app.Run([]string{"command", "foo"})
-	expect(t, err, nil)
-	err = app.Run([]string{"command", "bar"})
-	expect(t, err, nil)
-	expect(t, s, "foobar")
-}
-
-var commandAppTests = []struct {
-	name     string
-	expected bool
-}{
-	{"foobar", true},
-	{"batbaz", true},
-	{"b", true},
-	{"f", true},
-	{"bat", false},
-	{"nothing", false},
-}
-
-func TestApp_Command(t *testing.T) {
-	app := cli.NewApp()
-	fooCommand := cli.Command{Name: "foobar", ShortName: "f"}
-	batCommand := cli.Command{Name: "batbaz", ShortName: "b"}
-	app.Commands = []cli.Command{
-		fooCommand,
-		batCommand,
-	}
-
-	for _, test := range commandAppTests {
-		expect(t, app.Command(test.name) != nil, test.expected)
-	}
-}
-
-func TestApp_CommandWithArgBeforeFlags(t *testing.T) {
-	var parsedOption, firstArg string
-
-	app := cli.NewApp()
-	command := cli.Command{
-		Name: "cmd",
-		Flags: []cli.Flag{
-			cli.StringFlag{Name: "option", Value: "", Usage: "some option"},
-		},
-		Action: func(c *cli.Context) {
-			parsedOption = c.String("option")
-			firstArg = c.Args().First()
-		},
-	}
-	app.Commands = []cli.Command{command}
-
-	app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"})
-
-	expect(t, parsedOption, "my-option")
-	expect(t, firstArg, "my-arg")
-}
-
-func TestApp_RunAsSubcommandParseFlags(t *testing.T) {
-	var context *cli.Context
-
-	a := cli.NewApp()
-	a.Commands = []cli.Command{
-		{
-			Name: "foo",
-			Action: func(c *cli.Context) {
-				context = c
-			},
-			Flags: []cli.Flag{
-				cli.StringFlag{
-					Name:  "lang",
-					Value: "english",
-					Usage: "language for the greeting",
-				},
-			},
-			Before: func(_ *cli.Context) error { return nil },
-		},
-	}
-	a.Run([]string{"", "foo", "--lang", "spanish", "abcd"})
-
-	expect(t, context.Args().Get(0), "abcd")
-	expect(t, context.String("lang"), "spanish")
-}
-
-func TestApp_CommandWithFlagBeforeTerminator(t *testing.T) {
-	var parsedOption string
-	var args []string
-
-	app := cli.NewApp()
-	command := cli.Command{
-		Name: "cmd",
-		Flags: []cli.Flag{
-			cli.StringFlag{Name: "option", Value: "", Usage: "some option"},
-		},
-		Action: func(c *cli.Context) {
-			parsedOption = c.String("option")
-			args = c.Args()
-		},
-	}
-	app.Commands = []cli.Command{command}
-
-	app.Run([]string{"", "cmd", "my-arg", "--option", "my-option", "--", "--notARealFlag"})
-
-	expect(t, parsedOption, "my-option")
-	expect(t, args[0], "my-arg")
-	expect(t, args[1], "--")
-	expect(t, args[2], "--notARealFlag")
-}
-
-func TestApp_CommandWithNoFlagBeforeTerminator(t *testing.T) {
-	var args []string
-
-	app := cli.NewApp()
-	command := cli.Command{
-		Name: "cmd",
-		Action: func(c *cli.Context) {
-			args = c.Args()
-		},
-	}
-	app.Commands = []cli.Command{command}
-
-	app.Run([]string{"", "cmd", "my-arg", "--", "notAFlagAtAll"})
-
-	expect(t, args[0], "my-arg")
-	expect(t, args[1], "--")
-	expect(t, args[2], "notAFlagAtAll")
-}
-
-func TestApp_Float64Flag(t *testing.T) {
-	var meters float64
-
-	app := cli.NewApp()
-	app.Flags = []cli.Flag{
-		cli.Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"},
-	}
-	app.Action = func(c *cli.Context) {
-		meters = c.Float64("height")
-	}
-
-	app.Run([]string{"", "--height", "1.93"})
-	expect(t, meters, 1.93)
-}
-
-func TestApp_ParseSliceFlags(t *testing.T) {
-	var parsedOption, firstArg string
-	var parsedIntSlice []int
-	var parsedStringSlice []string
-
-	app := cli.NewApp()
-	command := cli.Command{
-		Name: "cmd",
-		Flags: []cli.Flag{
-			cli.IntSliceFlag{Name: "p", Value: &cli.IntSlice{}, Usage: "set one or more ip addr"},
-			cli.StringSliceFlag{Name: "ip", Value: &cli.StringSlice{}, Usage: "set one or more ports to open"},
-		},
-		Action: func(c *cli.Context) {
-			parsedIntSlice = c.IntSlice("p")
-			parsedStringSlice = c.StringSlice("ip")
-			parsedOption = c.String("option")
-			firstArg = c.Args().First()
-		},
-	}
-	app.Commands = []cli.Command{command}
-
-	app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"})
-
-	IntsEquals := func(a, b []int) bool {
-		if len(a) != len(b) {
-			return false
-		}
-		for i, v := range a {
-			if v != b[i] {
-				return false
-			}
-		}
-		return true
-	}
-
-	StrsEquals := func(a, b []string) bool {
-		if len(a) != len(b) {
-			return false
-		}
-		for i, v := range a {
-			if v != b[i] {
-				return false
-			}
-		}
-		return true
-	}
-	var expectedIntSlice = []int{22, 80}
-	var expectedStringSlice = []string{"8.8.8.8", "8.8.4.4"}
-
-	if !IntsEquals(parsedIntSlice, expectedIntSlice) {
-		t.Errorf("%v does not match %v", parsedIntSlice, expectedIntSlice)
-	}
-
-	if !StrsEquals(parsedStringSlice, expectedStringSlice) {
-		t.Errorf("%v does not match %v", parsedStringSlice, expectedStringSlice)
-	}
-}
-
-func TestApp_DefaultStdout(t *testing.T) {
-	app := cli.NewApp()
-
-	if app.Writer != os.Stdout {
-		t.Error("Default output writer not set.")
-	}
-}
-
-type mockWriter struct {
-	written []byte
-}
-
-func (fw *mockWriter) Write(p []byte) (n int, err error) {
-	if fw.written == nil {
-		fw.written = p
-	} else {
-		fw.written = append(fw.written, p...)
-	}
-
-	return len(p), nil
-}
-
-func (fw *mockWriter) GetWritten() (b []byte) {
-	return fw.written
-}
-
-func TestApp_SetStdout(t *testing.T) {
-	w := &mockWriter{}
-
-	app := cli.NewApp()
-	app.Name = "test"
-	app.Writer = w
-
-	err := app.Run([]string{"help"})
-
-	if err != nil {
-		t.Fatalf("Run error: %s", err)
-	}
-
-	if len(w.written) == 0 {
-		t.Error("App did not write output to desired writer.")
-	}
-}
-
-func TestApp_BeforeFunc(t *testing.T) {
-	beforeRun, subcommandRun := false, false
-	beforeError := fmt.Errorf("fail")
-	var err error
-
-	app := cli.NewApp()
-
-	app.Before = func(c *cli.Context) error {
-		beforeRun = true
-		s := c.String("opt")
-		if s == "fail" {
-			return beforeError
-		}
-
-		return nil
-	}
-
-	app.Commands = []cli.Command{
-		cli.Command{
-			Name: "sub",
-			Action: func(c *cli.Context) {
-				subcommandRun = true
-			},
-		},
-	}
-
-	app.Flags = []cli.Flag{
-		cli.StringFlag{Name: "opt"},
-	}
-
-	// run with the Before() func succeeding
-	err = app.Run([]string{"command", "--opt", "succeed", "sub"})
-
-	if err != nil {
-		t.Fatalf("Run error: %s", err)
-	}
-
-	if beforeRun == false {
-		t.Errorf("Before() not executed when expected")
-	}
-
-	if subcommandRun == false {
-		t.Errorf("Subcommand not executed when expected")
-	}
-
-	// reset
-	beforeRun, subcommandRun = false, false
-
-	// run with the Before() func failing
-	err = app.Run([]string{"command", "--opt", "fail", "sub"})
-
-	// should be the same error produced by the Before func
-	if err != beforeError {
-		t.Errorf("Run error expected, but not received")
-	}
-
-	if beforeRun == false {
-		t.Errorf("Before() not executed when expected")
-	}
-
-	if subcommandRun == true {
-		t.Errorf("Subcommand executed when NOT expected")
-	}
-
-}
-
-func TestAppNoHelpFlag(t *testing.T) {
-	oldFlag := cli.HelpFlag
-	defer func() {
-		cli.HelpFlag = oldFlag
-	}()
-
-	cli.HelpFlag = cli.BoolFlag{}
-
-	app := cli.NewApp()
-	err := app.Run([]string{"test", "-h"})
-
-	if err != flag.ErrHelp {
-		t.Errorf("expected error about missing help flag, but got: %s (%T)", err, err)
-	}
-}
-
-func TestAppHelpPrinter(t *testing.T) {
-	oldPrinter := cli.HelpPrinter
-	defer func() {
-		cli.HelpPrinter = oldPrinter
-	}()
-
-	var wasCalled = false
-	cli.HelpPrinter = func(template string, data interface{}) {
-		wasCalled = true
-	}
-
-	app := cli.NewApp()
-	app.Run([]string{"-h"})
-
-	if wasCalled == false {
-		t.Errorf("Help printer expected to be called, but was not")
-	}
-}
-
-func TestAppVersionPrinter(t *testing.T) {
-	oldPrinter := cli.VersionPrinter
-	defer func() {
-		cli.VersionPrinter = oldPrinter
-	}()
-
-	var wasCalled = false
-	cli.VersionPrinter = func(c *cli.Context) {
-		wasCalled = true
-	}
-
-	app := cli.NewApp()
-	ctx := cli.NewContext(app, nil, nil)
-	cli.ShowVersion(ctx)
-
-	if wasCalled == false {
-		t.Errorf("Version printer expected to be called, but was not")
-	}
-}
-
-func TestAppCommandNotFound(t *testing.T) {
-	beforeRun, subcommandRun := false, false
-	app := cli.NewApp()
-
-	app.CommandNotFound = func(c *cli.Context, command string) {
-		beforeRun = true
-	}
-
-	app.Commands = []cli.Command{
-		cli.Command{
-			Name: "bar",
-			Action: func(c *cli.Context) {
-				subcommandRun = true
-			},
-		},
-	}
-
-	app.Run([]string{"command", "foo"})
-
-	expect(t, beforeRun, true)
-	expect(t, subcommandRun, false)
-}
-
-func TestGlobalFlagsInSubcommands(t *testing.T) {
-	subcommandRun := false
-	app := cli.NewApp()
-
-	app.Flags = []cli.Flag{
-		cli.BoolFlag{Name: "debug, d", Usage: "Enable debugging"},
-	}
-
-	app.Commands = []cli.Command{
-		cli.Command{
-			Name: "foo",
-			Subcommands: []cli.Command{
-				{
-					Name: "bar",
-					Action: func(c *cli.Context) {
-						if c.GlobalBool("debug") {
-							subcommandRun = true
-						}
-					},
-				},
-			},
-		},
-	}
-
-	app.Run([]string{"command", "-d", "foo", "bar"})
-
-	expect(t, subcommandRun, true)
-}
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete b/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete
deleted file mode 100644
index 9b55dd990c..0000000000
--- a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete
+++ /dev/null
@@ -1,13 +0,0 @@
-#! /bin/bash
-
-_cli_bash_autocomplete() {
-  local cur prev opts base
-  COMPREPLY=()
-  cur="${COMP_WORDS[COMP_CWORD]}"
-  prev="${COMP_WORDS[COMP_CWORD-1]}"
-  opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
-  COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
-  return 0
- }
-
- complete -F _cli_bash_autocomplete $PROG
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete b/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete
deleted file mode 100644
index 5430a18f95..0000000000
--- a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete
+++ /dev/null
@@ -1,5 +0,0 @@
-autoload -U compinit && compinit
-autoload -U bashcompinit && bashcompinit
-
-script_dir=$(dirname $0)
-source ${script_dir}/bash_autocomplete
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go b/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go
deleted file mode 100644
index b742545812..0000000000
--- a/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Package cli provides a minimal framework for creating and organizing command line
-// Go applications. cli is designed to be easy to understand and write; the simplest
-// cli application can be written as follows:
-//   func main() {
-//     cli.NewApp().Run(os.Args)
-//   }
-//
-// Of course this application does not do much, so let's make this an actual application:
-//   func main() {
-//     app := cli.NewApp()
-//     app.Name = "greet"
-//     app.Usage = "say a greeting"
-//     app.Action = func(c *cli.Context) {
-//       println("Greetings")
-//     }
-//
-//     app.Run(os.Args)
-//   }
-package cli
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go b/Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go
deleted file mode 100644
index 879a793dc2..0000000000
--- a/Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package cli_test
-
-import (
-	"os"
-
-	"github.com/codegangsta/cli"
-)
-
-func Example() {
-	app := cli.NewApp()
-	app.Name = "todo"
-	app.Usage = "task list on the command line"
-	app.Commands = []cli.Command{
-		{
-			Name:      "add",
-			ShortName: "a",
-			Usage:     "add a task to the list",
-			Action: func(c *cli.Context) {
-				println("added task: ", c.Args().First())
-			},
-		},
-		{
-			Name:      "complete",
-			ShortName: "c",
-			Usage:     "complete a task on the list",
-			Action: func(c *cli.Context) {
-				println("completed task: ", c.Args().First())
-			},
-		},
-	}
-
-	app.Run(os.Args)
-}
-
-func ExampleSubcommand() {
-	app := cli.NewApp()
-	app.Name = "say"
-	app.Commands = []cli.Command{
-		{
-			Name:        "hello",
-			ShortName:   "hi",
-			Usage:       "use it to see a description",
-			Description: "This is how we describe hello the function",
-			Subcommands: []cli.Command{
-				{
-					Name:        "english",
-					ShortName:   "en",
-					Usage:       "sends a greeting in english",
-					Description: "greets someone in english",
-					Flags: []cli.Flag{
-						cli.StringFlag{
-							Name:  "name",
-							Value: "Bob",
-							Usage: "Name of the person to greet",
-						},
-					},
-					Action: func(c *cli.Context) {
-						println("Hello, ", c.String("name"))
-					},
-				}, {
-					Name:      "spanish",
-					ShortName: "sp",
-					Usage:     "sends a greeting in spanish",
-					Flags: []cli.Flag{
-						cli.StringFlag{
-							Name:  "surname",
-							Value: "Jones",
-							Usage: "Surname of the person to greet",
-						},
-					},
-					Action: func(c *cli.Context) {
-						println("Hola, ", c.String("surname"))
-					},
-				}, {
-					Name:      "french",
-					ShortName: "fr",
-					Usage:     "sends a greeting in french",
-					Flags: []cli.Flag{
-						cli.StringFlag{
-							Name:  "nickname",
-							Value: "Stevie",
-							Usage: "Nickname of the person to greet",
-						},
-					},
-					Action: func(c *cli.Context) {
-						println("Bonjour, ", c.String("nickname"))
-					},
-				},
-			},
-		}, {
-			Name:  "bye",
-			Usage: "says goodbye",
-			Action: func(c *cli.Context) {
-				println("bye")
-			},
-		},
-	}
-
-	app.Run(os.Args)
-}
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/command.go b/Godeps/_workspace/src/github.com/codegangsta/cli/command.go
deleted file mode 100644
index ffd3ef81d3..0000000000
--- a/Godeps/_workspace/src/github.com/codegangsta/cli/command.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package cli
-
-import (
-	"fmt"
-	"io/ioutil"
-	"strings"
-)
-
-// Command is a subcommand for a cli.App.
-type Command struct {
-	// The name of the command
-	Name string
-	// short name of the command. Typically one character
-	ShortName string
-	// A short description of the usage of this command
-	Usage string
-	// A longer explanation of how the command works
-	Description string
-	// The function to call when checking for bash command completions
-	BashComplete func(context *Context)
-	// An action to execute before any sub-subcommands are run, but after the context is ready
-	// If a non-nil error is returned, no sub-subcommands are run
-	Before func(context *Context) error
-	// The function to call when this command is invoked
-	Action func(context *Context)
-	// List of child commands
-	Subcommands []Command
-	// List of flags to parse
-	Flags []Flag
-	// Treat all flags as normal arguments if true
-	SkipFlagParsing bool
-	// Boolean to hide built-in help command
-	HideHelp bool
-}
-
-// Invokes the command given the context, parses ctx.Args() to generate command-specific flags
-func (c Command) Run(ctx *Context) error {
-
-	if len(c.Subcommands) > 0 || c.Before != nil {
-		return c.startApp(ctx)
-	}
-
-	if !c.HideHelp && (HelpFlag != BoolFlag{}) {
-		// append help to flags
-		c.Flags = append(
-			c.Flags,
-			HelpFlag,
-		)
-	}
-
-	if ctx.App.EnableBashCompletion {
-		c.Flags = append(c.Flags, BashCompletionFlag)
-	}
-
-	set := flagSet(c.Name, c.Flags)
-	set.SetOutput(ioutil.Discard)
-
-	firstFlagIndex := -1
-	terminatorIndex := -1
-	for index, arg := range ctx.Args() {
-		if arg == "--" {
-			terminatorIndex = index
-			break
-		} else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 {
-			firstFlagIndex = index
-		}
-	}
-
-	var err error
-	if firstFlagIndex > -1 && !c.SkipFlagParsing {
-		args := ctx.Args()
-		regularArgs := make([]string, len(args[1:firstFlagIndex]))
-		copy(regularArgs, args[1:firstFlagIndex])
-
-		var flagArgs []string
-		if terminatorIndex > -1 {
-			flagArgs = args[firstFlagIndex:terminatorIndex]
-			regularArgs = append(regularArgs, args[terminatorIndex:]...)
-		} else {
-			flagArgs = args[firstFlagIndex:]
-		}
-
-		err = set.Parse(append(flagArgs, regularArgs...))
-	} else {
-		err = set.Parse(ctx.Args().Tail())
-	}
-
-	if err != nil {
-		fmt.Fprint(ctx.App.Writer, "Incorrect Usage.\n\n")
-		ShowCommandHelp(ctx, c.Name)
-		fmt.Fprintln(ctx.App.Writer)
-		return err
-	}
-
-	nerr := normalizeFlags(c.Flags, set)
-	if nerr != nil {
-		fmt.Fprintln(ctx.App.Writer, nerr)
-		fmt.Fprintln(ctx.App.Writer)
-		ShowCommandHelp(ctx, c.Name)
-		fmt.Fprintln(ctx.App.Writer)
-		return nerr
-	}
-	context := NewContext(ctx.App, set, ctx.globalSet)
-
-	if checkCommandCompletions(context, c.Name) {
-		return nil
-	}
-
-	if checkCommandHelp(context, c.Name) {
-		return nil
-	}
-	context.Command = c
-	c.Action(context)
-	return nil
-}
-
-// Returns true if Command.Name or Command.ShortName matches given name
-func (c Command) HasName(name string) bool {
-	return c.Name == name || c.ShortName == name
-}
-
-func (c Command) startApp(ctx *Context) error {
-	app := NewApp()
-
-	// set the name and usage
-	app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name)
-	if c.Description != "" {
-		app.Usage = c.Description
-	} else {
-		app.Usage = c.Usage
-	}
-
-	// set CommandNotFound
-	app.CommandNotFound = ctx.App.CommandNotFound
-
-	// set the flags and commands
-	app.Commands = c.Subcommands
-	app.Flags = c.Flags
-	app.HideHelp = c.HideHelp
-
-	// bash completion
-	app.EnableBashCompletion = ctx.App.EnableBashCompletion
-	if c.BashComplete != nil {
-		app.BashComplete = c.BashComplete
-	}
-
-	// set the actions
-	app.Before = c.Before
-	if c.Action != nil {
-		app.Action = c.Action
-	} else {
-		app.Action = helpSubcommand.Action
-	}
-
-	return app.RunAsSubcommand(ctx)
-}
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go b/Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go
deleted file mode 100644
index c0f556ad24..0000000000
--- a/Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package cli_test
-
-import (
-	"flag"
-	"testing"
-
-	"github.com/codegangsta/cli"
-)
-
-func TestCommandDoNotIgnoreFlags(t *testing.T) {
-	app := cli.NewApp()
-	set := flag.NewFlagSet("test", 0)
-	test := []string{"blah", "blah", "-break"}
-	set.Parse(test)
-
-	c := cli.NewContext(app, set, set)
-
-	command := cli.Command{
-		Name:        "test-cmd",
-		ShortName:   "tc",
-		Usage:       "this is for testing",
-		Description: "testing",
-		Action:      func(_ *cli.Context) {},
-	}
-	err := command.Run(c)
-
-	expect(t, err.Error(), "flag provided but not defined: -break")
-}
-
-func TestCommandIgnoreFlags(t *testing.T) {
-	app := cli.NewApp()
-	set := flag.NewFlagSet("test", 0)
-	test := []string{"blah", "blah"}
-	set.Parse(test)
-
-	c := cli.NewContext(app, set, set)
-
-	command := cli.Command{
-		Name:            "test-cmd",
-		ShortName:       "tc",
-		Usage:           "this is for testing",
-		Description:     "testing",
-		Action:          func(_ *cli.Context) {},
-		SkipFlagParsing: true,
-	}
-	err := command.Run(c)
-
-	expect(t, err, nil)
-}
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/context.go b/Godeps/_workspace/src/github.com/codegangsta/cli/context.go
deleted file mode 100644
index c9f645b189..0000000000
--- a/Godeps/_workspace/src/github.com/codegangsta/cli/context.go
+++ /dev/null
@@ -1,339 +0,0 @@
-package cli
-
-import (
-	"errors"
-	"flag"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// Context is a type that is passed through to
-// each Handler action in a cli application. Context
-// can be used to retrieve context-specific Args and
-// parsed command-line options.
-type Context struct { - App *App - Command Command - flagSet *flag.FlagSet - globalSet *flag.FlagSet - setFlags map[string]bool - globalSetFlags map[string]bool -} - -// Creates a new context. For use in when invoking an App or Command action. -func NewContext(app *App, set *flag.FlagSet, globalSet *flag.FlagSet) *Context { - return &Context{App: app, flagSet: set, globalSet: globalSet} -} - -// Looks up the value of a local int flag, returns 0 if no int flag exists -func (c *Context) Int(name string) int { - return lookupInt(name, c.flagSet) -} - -// Looks up the value of a local time.Duration flag, returns 0 if no time.Duration flag exists -func (c *Context) Duration(name string) time.Duration { - return lookupDuration(name, c.flagSet) -} - -// Looks up the value of a local float64 flag, returns 0 if no float64 flag exists -func (c *Context) Float64(name string) float64 { - return lookupFloat64(name, c.flagSet) -} - -// Looks up the value of a local bool flag, returns false if no bool flag exists -func (c *Context) Bool(name string) bool { - return lookupBool(name, c.flagSet) -} - -// Looks up the value of a local boolT flag, returns false if no bool flag exists -func (c *Context) BoolT(name string) bool { - return lookupBoolT(name, c.flagSet) -} - -// Looks up the value of a local string flag, returns "" if no string flag exists -func (c *Context) String(name string) string { - return lookupString(name, c.flagSet) -} - -// Looks up the value of a local string slice flag, returns nil if no string slice flag exists -func (c *Context) StringSlice(name string) []string { - return lookupStringSlice(name, c.flagSet) -} - -// Looks up the value of a local int slice flag, returns nil if no int slice flag exists -func (c *Context) IntSlice(name string) []int { - return lookupIntSlice(name, c.flagSet) -} - -// Looks up the value of a local generic flag, returns nil if no generic flag exists -func (c *Context) Generic(name string) interface{} { - return lookupGeneric(name, c.flagSet) -} - -// Looks up the value of a global int flag, returns 0 if no int flag exists -func (c *Context) GlobalInt(name string) int { - return lookupInt(name, c.globalSet) -} - -// Looks up the value of a global time.Duration flag, returns 0 if no time.Duration flag exists -func (c *Context) GlobalDuration(name string) time.Duration { - return lookupDuration(name, c.globalSet) -} - -// Looks up the value of a global bool flag, returns false if no bool flag exists -func (c *Context) GlobalBool(name string) bool { - return lookupBool(name, c.globalSet) -} - -// Looks up the value of a global string flag, returns "" if no string flag exists -func (c *Context) GlobalString(name string) string { - return lookupString(name, c.globalSet) -} - -// Looks up the value of a global string slice flag, returns nil if no string slice flag exists -func (c *Context) GlobalStringSlice(name string) []string { - return lookupStringSlice(name, c.globalSet) -} - -// Looks up the value of a global int slice flag, returns nil if no int slice flag exists -func (c *Context) GlobalIntSlice(name string) []int { - return lookupIntSlice(name, c.globalSet) -} - -// Looks up the value of a global generic flag, returns nil if no generic flag exists -func (c *Context) GlobalGeneric(name string) interface{} { - return lookupGeneric(name, c.globalSet) -} - -// Determines if the flag was actually set -func (c *Context) IsSet(name string) bool { - if c.setFlags == nil { - c.setFlags = make(map[string]bool) - c.flagSet.Visit(func(f *flag.Flag) { - 
c.setFlags[f.Name] = true - }) - } - return c.setFlags[name] == true -} - -// Determines if the global flag was actually set -func (c *Context) GlobalIsSet(name string) bool { - if c.globalSetFlags == nil { - c.globalSetFlags = make(map[string]bool) - c.globalSet.Visit(func(f *flag.Flag) { - c.globalSetFlags[f.Name] = true - }) - } - return c.globalSetFlags[name] == true -} - -// Returns a slice of flag names used in this context. -func (c *Context) FlagNames() (names []string) { - for _, flag := range c.Command.Flags { - name := strings.Split(flag.getName(), ",")[0] - if name == "help" { - continue - } - names = append(names, name) - } - return -} - -// Returns a slice of global flag names used by the app. -func (c *Context) GlobalFlagNames() (names []string) { - for _, flag := range c.App.Flags { - name := strings.Split(flag.getName(), ",")[0] - if name == "help" || name == "version" { - continue - } - names = append(names, name) - } - return -} - -type Args []string - -// Returns the command line arguments associated with the context. -func (c *Context) Args() Args { - args := Args(c.flagSet.Args()) - return args -} - -// Returns the nth argument, or else a blank string -func (a Args) Get(n int) string { - if len(a) > n { - return a[n] - } - return "" -} - -// Returns the first argument, or else a blank string -func (a Args) First() string { - return a.Get(0) -} - -// Return the rest of the arguments (not the first one) -// or else an empty string slice -func (a Args) Tail() []string { - if len(a) >= 2 { - return []string(a)[1:] - } - return []string{} -} - -// Checks if there are any arguments present -func (a Args) Present() bool { - return len(a) != 0 -} - -// Swaps arguments at the given indexes -func (a Args) Swap(from, to int) error { - if from >= len(a) || to >= len(a) { - return errors.New("index out of range") - } - a[from], a[to] = a[to], a[from] - return nil -} - -func lookupInt(name string, set *flag.FlagSet) int { - f := set.Lookup(name) - if f != nil { - val, err := strconv.Atoi(f.Value.String()) - if err != nil { - return 0 - } - return val - } - - return 0 -} - -func lookupDuration(name string, set *flag.FlagSet) time.Duration { - f := set.Lookup(name) - if f != nil { - val, err := time.ParseDuration(f.Value.String()) - if err == nil { - return val - } - } - - return 0 -} - -func lookupFloat64(name string, set *flag.FlagSet) float64 { - f := set.Lookup(name) - if f != nil { - val, err := strconv.ParseFloat(f.Value.String(), 64) - if err != nil { - return 0 - } - return val - } - - return 0 -} - -func lookupString(name string, set *flag.FlagSet) string { - f := set.Lookup(name) - if f != nil { - return f.Value.String() - } - - return "" -} - -func lookupStringSlice(name string, set *flag.FlagSet) []string { - f := set.Lookup(name) - if f != nil { - return (f.Value.(*StringSlice)).Value() - - } - - return nil -} - -func lookupIntSlice(name string, set *flag.FlagSet) []int { - f := set.Lookup(name) - if f != nil { - return (f.Value.(*IntSlice)).Value() - - } - - return nil -} - -func lookupGeneric(name string, set *flag.FlagSet) interface{} { - f := set.Lookup(name) - if f != nil { - return f.Value - } - return nil -} - -func lookupBool(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - val, err := strconv.ParseBool(f.Value.String()) - if err != nil { - return false - } - return val - } - - return false -} - -func lookupBoolT(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - val, err := 
strconv.ParseBool(f.Value.String()) - if err != nil { - return true - } - return val - } - - return false -} - -func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { - switch ff.Value.(type) { - case *StringSlice: - default: - set.Set(name, ff.Value.String()) - } -} - -func normalizeFlags(flags []Flag, set *flag.FlagSet) error { - visited := make(map[string]bool) - set.Visit(func(f *flag.Flag) { - visited[f.Name] = true - }) - for _, f := range flags { - parts := strings.Split(f.getName(), ",") - if len(parts) == 1 { - continue - } - var ff *flag.Flag - for _, name := range parts { - name = strings.Trim(name, " ") - if visited[name] { - if ff != nil { - return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) - } - ff = set.Lookup(name) - } - } - if ff == nil { - continue - } - for _, name := range parts { - name = strings.Trim(name, " ") - if !visited[name] { - copyFlag(name, ff, set) - } - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go b/Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go deleted file mode 100644 index 7c9a4436fc..0000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package cli_test - -import ( - "flag" - "testing" - "time" - - "github.com/codegangsta/cli" -) - -func TestNewContext(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Int("myflag", 12, "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Int("myflag", 42, "doc") - command := cli.Command{Name: "mycommand"} - c := cli.NewContext(nil, set, globalSet) - c.Command = command - expect(t, c.Int("myflag"), 12) - expect(t, c.GlobalInt("myflag"), 42) - expect(t, c.Command.Name, "mycommand") -} - -func TestContext_Int(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Int("myflag", 12, "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.Int("myflag"), 12) -} - -func TestContext_Duration(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Duration("myflag", time.Duration(12*time.Second), "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.Duration("myflag"), time.Duration(12*time.Second)) -} - -func TestContext_String(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.String("myflag", "hello world", "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.String("myflag"), "hello world") -} - -func TestContext_Bool(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.Bool("myflag"), false) -} - -func TestContext_BoolT(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", true, "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.BoolT("myflag"), true) -} - -func TestContext_Args(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - c := cli.NewContext(nil, set, set) - set.Parse([]string{"--myflag", "bat", "baz"}) - expect(t, len(c.Args()), 2) - expect(t, c.Bool("myflag"), true) -} - -func TestContext_IsSet(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - set.String("otherflag", "hello world", "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Bool("myflagGlobal", true, "doc") - c := cli.NewContext(nil, set, globalSet) - set.Parse([]string{"--myflag", "bat", "baz"}) - globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) - expect(t, c.IsSet("myflag"), true) - expect(t, c.IsSet("otherflag"), false) - expect(t, 
c.IsSet("bogusflag"), false) - expect(t, c.IsSet("myflagGlobal"), false) -} - -func TestContext_GlobalIsSet(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - set.String("otherflag", "hello world", "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Bool("myflagGlobal", true, "doc") - globalSet.Bool("myflagGlobalUnset", true, "doc") - c := cli.NewContext(nil, set, globalSet) - set.Parse([]string{"--myflag", "bat", "baz"}) - globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) - expect(t, c.GlobalIsSet("myflag"), false) - expect(t, c.GlobalIsSet("otherflag"), false) - expect(t, c.GlobalIsSet("bogusflag"), false) - expect(t, c.GlobalIsSet("myflagGlobal"), true) - expect(t, c.GlobalIsSet("myflagGlobalUnset"), false) - expect(t, c.GlobalIsSet("bogusGlobal"), false) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go b/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go deleted file mode 100644 index 251158667b..0000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go +++ /dev/null @@ -1,454 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "os" - "strconv" - "strings" - "time" -) - -// This flag enables bash-completion for all commands and subcommands -var BashCompletionFlag = BoolFlag{ - Name: "generate-bash-completion", -} - -// This flag prints the version for the application -var VersionFlag = BoolFlag{ - Name: "version, v", - Usage: "print the version", -} - -// This flag prints the help for all commands and subcommands -// Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand -// unless HideHelp is set to true) -var HelpFlag = BoolFlag{ - Name: "help, h", - Usage: "show help", -} - -// Flag is a common interface related to parsing flags in cli. -// For more advanced flag parsing techniques, it is recomended that -// this interface be implemented. 
-type Flag interface { - fmt.Stringer - // Apply Flag settings to the given flag set - Apply(*flag.FlagSet) - getName() string -} - -func flagSet(name string, flags []Flag) *flag.FlagSet { - set := flag.NewFlagSet(name, flag.ContinueOnError) - - for _, f := range flags { - f.Apply(set) - } - return set -} - -func eachName(longName string, fn func(string)) { - parts := strings.Split(longName, ",") - for _, name := range parts { - name = strings.Trim(name, " ") - fn(name) - } -} - -// Generic is a generic parseable type identified by a specific flag -type Generic interface { - Set(value string) error - String() string -} - -// GenericFlag is the flag type for types implementing Generic -type GenericFlag struct { - Name string - Value Generic - Usage string - EnvVar string -} - -// String returns the string representation of the generic flag to display the -// help text to the user (uses the String() method of the generic flag to show -// the value) -func (f GenericFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s%s \"%v\"\t%v", prefixFor(f.Name), f.Name, f.Value, f.Usage)) -} - -// Apply takes the flagset and calls Set on the generic flag with the value -// provided by the user for parsing by the flag -func (f GenericFlag) Apply(set *flag.FlagSet) { - val := f.Value - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - val.Set(envVal) - break - } - } - } - - eachName(f.Name, func(name string) { - set.Var(f.Value, name, f.Usage) - }) -} - -func (f GenericFlag) getName() string { - return f.Name -} - -type StringSlice []string - -func (f *StringSlice) Set(value string) error { - *f = append(*f, value) - return nil -} - -func (f *StringSlice) String() string { - return fmt.Sprintf("%s", *f) -} - -func (f *StringSlice) Value() []string { - return *f -} - -type StringSliceFlag struct { - Name string - Value *StringSlice - Usage string - EnvVar string -} - -func (f StringSliceFlag) String() string { - firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") - pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) -} - -func (f StringSliceFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - newVal := &StringSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - newVal.Set(s) - } - f.Value = newVal - break - } - } - } - - eachName(f.Name, func(name string) { - set.Var(f.Value, name, f.Usage) - }) -} - -func (f StringSliceFlag) getName() string { - return f.Name -} - -type IntSlice []int - -func (f *IntSlice) Set(value string) error { - - tmp, err := strconv.Atoi(value) - if err != nil { - return err - } else { - *f = append(*f, tmp) - } - return nil -} - -func (f *IntSlice) String() string { - return fmt.Sprintf("%d", *f) -} - -func (f *IntSlice) Value() []int { - return *f -} - -type IntSliceFlag struct { - Name string - Value *IntSlice - Usage string - EnvVar string -} - -func (f IntSliceFlag) String() string { - firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") - pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) -} - -func (f IntSliceFlag) 
Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - newVal := &IntSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - err := newVal.Set(s) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - } - } - f.Value = newVal - break - } - } - } - - eachName(f.Name, func(name string) { - set.Var(f.Value, name, f.Usage) - }) -} - -func (f IntSliceFlag) getName() string { - return f.Name -} - -type BoolFlag struct { - Name string - Usage string - EnvVar string -} - -func (f BoolFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) -} - -func (f BoolFlag) Apply(set *flag.FlagSet) { - val := false - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool - } - break - } - } - } - - eachName(f.Name, func(name string) { - set.Bool(name, val, f.Usage) - }) -} - -func (f BoolFlag) getName() string { - return f.Name -} - -type BoolTFlag struct { - Name string - Usage string - EnvVar string -} - -func (f BoolTFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) -} - -func (f BoolTFlag) Apply(set *flag.FlagSet) { - val := true - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool - break - } - } - } - } - - eachName(f.Name, func(name string) { - set.Bool(name, val, f.Usage) - }) -} - -func (f BoolTFlag) getName() string { - return f.Name -} - -type StringFlag struct { - Name string - Value string - Usage string - EnvVar string -} - -func (f StringFlag) String() string { - var fmtString string - fmtString = "%s %v\t%v" - - if len(f.Value) > 0 { - fmtString = "%s \"%v\"\t%v" - } else { - fmtString = "%s %v\t%v" - } - - return withEnvHint(f.EnvVar, fmt.Sprintf(fmtString, prefixedNames(f.Name), f.Value, f.Usage)) -} - -func (f StringFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - f.Value = envVal - break - } - } - } - - eachName(f.Name, func(name string) { - set.String(name, f.Value, f.Usage) - }) -} - -func (f StringFlag) getName() string { - return f.Name -} - -type IntFlag struct { - Name string - Value int - Usage string - EnvVar string -} - -func (f IntFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) -} - -func (f IntFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValInt, err := strconv.ParseInt(envVal, 0, 64) - if err == nil { - f.Value = int(envValInt) - break - } - } - } - } - - eachName(f.Name, func(name string) { - set.Int(name, f.Value, f.Usage) - }) -} - -func (f IntFlag) getName() string { - return f.Name -} - -type DurationFlag struct { - Name string - Value time.Duration - Usage string - EnvVar string -} - -func (f DurationFlag) String() string { - return 
withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) -} - -func (f DurationFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValDuration, err := time.ParseDuration(envVal) - if err == nil { - f.Value = envValDuration - break - } - } - } - } - - eachName(f.Name, func(name string) { - set.Duration(name, f.Value, f.Usage) - }) -} - -func (f DurationFlag) getName() string { - return f.Name -} - -type Float64Flag struct { - Name string - Value float64 - Usage string - EnvVar string -} - -func (f Float64Flag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) -} - -func (f Float64Flag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValFloat, err := strconv.ParseFloat(envVal, 10) - if err == nil { - f.Value = float64(envValFloat) - } - } - } - } - - eachName(f.Name, func(name string) { - set.Float64(name, f.Value, f.Usage) - }) -} - -func (f Float64Flag) getName() string { - return f.Name -} - -func prefixFor(name string) (prefix string) { - if len(name) == 1 { - prefix = "-" - } else { - prefix = "--" - } - - return -} - -func prefixedNames(fullName string) (prefixed string) { - parts := strings.Split(fullName, ",") - for i, name := range parts { - name = strings.Trim(name, " ") - prefixed += prefixFor(name) + name - if i < len(parts)-1 { - prefixed += ", " - } - } - return -} - -func withEnvHint(envVar, str string) string { - envText := "" - if envVar != "" { - envText = fmt.Sprintf(" [$%s]", strings.Join(strings.Split(envVar, ","), ", $")) - } - return str + envText -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go b/Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go deleted file mode 100644 index f0f096a2d5..0000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go +++ /dev/null @@ -1,742 +0,0 @@ -package cli_test - -import ( - "fmt" - "os" - "reflect" - "strings" - "testing" - - "github.com/codegangsta/cli" -) - -var boolFlagTests = []struct { - name string - expected string -}{ - {"help", "--help\t"}, - {"h", "-h\t"}, -} - -func TestBoolFlagHelpOutput(t *testing.T) { - - for _, test := range boolFlagTests { - flag := cli.BoolFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -var stringFlagTests = []struct { - name string - value string - expected string -}{ - {"help", "", "--help \t"}, - {"h", "", "-h \t"}, - {"h", "", "-h \t"}, - {"test", "Something", "--test \"Something\"\t"}, -} - -func TestStringFlagHelpOutput(t *testing.T) { - - for _, test := range stringFlagTests { - flag := cli.StringFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestStringFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_FOO", "derp") - for _, test := range stringFlagTests { - flag := cli.StringFlag{Name: test.name, Value: test.value, EnvVar: "APP_FOO"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_FOO]") { - t.Errorf("%s does not end with [$APP_FOO]", output) - } - } -} - -var 
stringSliceFlagTests = []struct { - name string - value *cli.StringSlice - expected string -}{ - {"help", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("") - return s - }(), "--help [--help option --help option]\t"}, - {"h", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("") - return s - }(), "-h [-h option -h option]\t"}, - {"h", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("") - return s - }(), "-h [-h option -h option]\t"}, - {"test", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("Something") - return s - }(), "--test [--test option --test option]\t"}, -} - -func TestStringSliceFlagHelpOutput(t *testing.T) { - - for _, test := range stringSliceFlagTests { - flag := cli.StringSliceFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestStringSliceFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_QWWX", "11,4") - for _, test := range stringSliceFlagTests { - flag := cli.StringSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_QWWX"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_QWWX]") { - t.Errorf("%q does not end with [$APP_QWWX]", output) - } - } -} - -var intFlagTests = []struct { - name string - expected string -}{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, -} - -func TestIntFlagHelpOutput(t *testing.T) { - - for _, test := range intFlagTests { - flag := cli.IntFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestIntFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_BAR", "2") - for _, test := range intFlagTests { - flag := cli.IntFlag{Name: test.name, EnvVar: "APP_BAR"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAR]") { - t.Errorf("%s does not end with [$APP_BAR]", output) - } - } -} - -var durationFlagTests = []struct { - name string - expected string -}{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, -} - -func TestDurationFlagHelpOutput(t *testing.T) { - - for _, test := range durationFlagTests { - flag := cli.DurationFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestDurationFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_BAR", "2h3m6s") - for _, test := range durationFlagTests { - flag := cli.DurationFlag{Name: test.name, EnvVar: "APP_BAR"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAR]") { - t.Errorf("%s does not end with [$APP_BAR]", output) - } - } -} - -var intSliceFlagTests = []struct { - name string - value *cli.IntSlice - expected string -}{ - {"help", &cli.IntSlice{}, "--help [--help option --help option]\t"}, - {"h", &cli.IntSlice{}, "-h [-h option -h option]\t"}, - {"h", &cli.IntSlice{}, "-h [-h option -h option]\t"}, - {"test", func() *cli.IntSlice { - i := &cli.IntSlice{} - i.Set("9") - return i - }(), "--test [--test option --test option]\t"}, -} - -func TestIntSliceFlagHelpOutput(t *testing.T) { - - for _, test := range intSliceFlagTests { - flag := cli.IntSliceFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestIntSliceFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() 
- os.Setenv("APP_SMURF", "42,3") - for _, test := range intSliceFlagTests { - flag := cli.IntSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_SMURF]") { - t.Errorf("%q does not end with [$APP_SMURF]", output) - } - } -} - -var float64FlagTests = []struct { - name string - expected string -}{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, -} - -func TestFloat64FlagHelpOutput(t *testing.T) { - - for _, test := range float64FlagTests { - flag := cli.Float64Flag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestFloat64FlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_BAZ", "99.4") - for _, test := range float64FlagTests { - flag := cli.Float64Flag{Name: test.name, EnvVar: "APP_BAZ"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAZ]") { - t.Errorf("%s does not end with [$APP_BAZ]", output) - } - } -} - -var genericFlagTests = []struct { - name string - value cli.Generic - expected string -}{ - {"test", &Parser{"abc", "def"}, "--test \"abc,def\"\ttest flag"}, - {"t", &Parser{"abc", "def"}, "-t \"abc,def\"\ttest flag"}, -} - -func TestGenericFlagHelpOutput(t *testing.T) { - - for _, test := range genericFlagTests { - flag := cli.GenericFlag{Name: test.name, Value: test.value, Usage: "test flag"} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestGenericFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_ZAP", "3") - for _, test := range genericFlagTests { - flag := cli.GenericFlag{Name: test.name, EnvVar: "APP_ZAP"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_ZAP]") { - t.Errorf("%s does not end with [$APP_ZAP]", output) - } - } -} - -func TestParseMultiString(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.StringFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.String("serve") != "10" { - t.Errorf("main name not set") - } - if ctx.String("s") != "10" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10"}) -} - -func TestParseMultiStringFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_COUNT", "20") - (&cli.App{ - Flags: []cli.Flag{ - cli.StringFlag{Name: "count, c", EnvVar: "APP_COUNT"}, - }, - Action: func(ctx *cli.Context) { - if ctx.String("count") != "20" { - t.Errorf("main name not set") - } - if ctx.String("c") != "20" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_COUNT", "20") - (&cli.App{ - Flags: []cli.Flag{ - cli.StringFlag{Name: "count, c", EnvVar: "COMPAT_COUNT,APP_COUNT"}, - }, - Action: func(ctx *cli.Context) { - if ctx.String("count") != "20" { - t.Errorf("main name not set") - } - if ctx.String("c") != "20" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringSlice(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "serve, s", Value: &cli.StringSlice{}}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.StringSlice("serve"), []string{"10", "20"}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.StringSlice("s"), []string{"10", "20"}) { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10", "-s", 
"20"}) -} - -func TestParseMultiStringSliceFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "intervals, i", Value: &cli.StringSlice{}, EnvVar: "APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringSliceFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "intervals, i", Value: &cli.StringSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiInt(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Int("serve") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("s") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10"}) -} - -func TestParseMultiIntFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "10") - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Int("timeout") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("t") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiIntFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "10") - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Int("timeout") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("t") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiIntSlice(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "serve, s", Value: &cli.IntSlice{}}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.IntSlice("serve"), []int{10, 20}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.IntSlice("s"), []int{10, 20}) { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10", "-s", "20"}) -} - -func TestParseMultiIntSliceFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "intervals, i", Value: &cli.IntSlice{}, EnvVar: "APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiIntSliceFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "intervals, i", Value: 
&cli.IntSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiFloat64(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Float64("serve") != 10.2 { - t.Errorf("main name not set") - } - if ctx.Float64("s") != 10.2 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10.2"}) -} - -func TestParseMultiFloat64FromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "15.5") - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Float64("timeout") != 15.5 { - t.Errorf("main name not set") - } - if ctx.Float64("t") != 15.5 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiFloat64FromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "15.5") - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Float64("timeout") != 15.5 { - t.Errorf("main name not set") - } - if ctx.Float64("t") != 15.5 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBool(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Bool("serve") != true { - t.Errorf("main name not set") - } - if ctx.Bool("s") != true { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "--serve"}) -} - -func TestParseMultiBoolFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "1") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Bool("debug") != true { - t.Errorf("main name not set from env") - } - if ctx.Bool("d") != true { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBoolFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "1") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Bool("debug") != true { - t.Errorf("main name not set from env") - } - if ctx.Bool("d") != true { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBoolT(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.BoolT("serve") != true { - t.Errorf("main name not set") - } - if ctx.BoolT("s") != true { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "--serve"}) -} - -func TestParseMultiBoolTFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "0") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if ctx.BoolT("debug") != false { - t.Errorf("main name not set from env") - } - if ctx.BoolT("d") != false { - t.Errorf("short name not set from env") - } - }, - } - 
a.Run([]string{"run"}) -} - -func TestParseMultiBoolTFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "0") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if ctx.BoolT("debug") != false { - t.Errorf("main name not set from env") - } - if ctx.BoolT("d") != false { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -type Parser [2]string - -func (p *Parser) Set(value string) error { - parts := strings.Split(value, ",") - if len(parts) != 2 { - return fmt.Errorf("invalid format") - } - - (*p)[0] = parts[0] - (*p)[1] = parts[1] - - return nil -} - -func (p *Parser) String() string { - return fmt.Sprintf("%s,%s", p[0], p[1]) -} - -func TestParseGeneric(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.GenericFlag{Name: "serve, s", Value: &Parser{}}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"10", "20"}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"10", "20"}) { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10,20"}) -} - -func TestParseGenericFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_SERVE", "20,30") - a := cli.App{ - Flags: []cli.Flag{ - cli.GenericFlag{Name: "serve, s", Value: &Parser{}, EnvVar: "APP_SERVE"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"20", "30"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"20", "30"}) { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseGenericFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_FOO", "99,2000") - a := cli.App{ - Flags: []cli.Flag{ - cli.GenericFlag{Name: "foos", Value: &Parser{}, EnvVar: "COMPAT_FOO,APP_FOO"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.Generic("foos"), &Parser{"99", "2000"}) { - t.Errorf("value not set from env") - } - }, - } - a.Run([]string{"run"}) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/help.go b/Godeps/_workspace/src/github.com/codegangsta/cli/help.go deleted file mode 100644 index bfb2788519..0000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/help.go +++ /dev/null @@ -1,211 +0,0 @@ -package cli - -import "fmt" - -// The text template for the Default help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. -var AppHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} - -USAGE: - {{.Name}} {{if .Flags}}[global options] {{end}}command{{if .Flags}} [command options]{{end}} [arguments...] - -VERSION: - {{.Version}}{{if or .Author .Email}} - -AUTHOR:{{if .Author}} - {{.Author}}{{if .Email}} - <{{.Email}}>{{end}}{{else}} - {{.Email}}{{end}}{{end}} - -COMMANDS: - {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} - {{end}}{{if .Flags}} -GLOBAL OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{end}} -` - -// The text template for the command help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. 
-var CommandHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} - -USAGE: - command {{.Name}}{{if .Flags}} [command options]{{end}} [arguments...]{{if .Description}} - -DESCRIPTION: - {{.Description}}{{end}}{{if .Flags}} - -OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{ end }} -` - -// The text template for the subcommand help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. -var SubcommandHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} - -USAGE: - {{.Name}} command{{if .Flags}} [command options]{{end}} [arguments...] - -COMMANDS: - {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} - {{end}}{{if .Flags}} -OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{end}} -` - -var helpCommand = Command{ - Name: "help", - ShortName: "h", - Usage: "Shows a list of commands or help for one command", - Action: func(c *Context) { - args := c.Args() - if args.Present() { - ShowCommandHelp(c, args.First()) - } else { - ShowAppHelp(c) - } - }, -} - -var helpSubcommand = Command{ - Name: "help", - ShortName: "h", - Usage: "Shows a list of commands or help for one command", - Action: func(c *Context) { - args := c.Args() - if args.Present() { - ShowCommandHelp(c, args.First()) - } else { - ShowSubcommandHelp(c) - } - }, -} - -// Prints help for the App -type helpPrinter func(templ string, data interface{}) - -var HelpPrinter helpPrinter = nil - -// Prints version for the App -var VersionPrinter = printVersion - -func ShowAppHelp(c *Context) { - HelpPrinter(AppHelpTemplate, c.App) -} - -// Prints the list of subcommands as the default app completion method -func DefaultAppComplete(c *Context) { - for _, command := range c.App.Commands { - fmt.Fprintln(c.App.Writer, command.Name) - if command.ShortName != "" { - fmt.Fprintln(c.App.Writer, command.ShortName) - } - } -} - -// Prints help for the given command -func ShowCommandHelp(c *Context, command string) { - for _, c := range c.App.Commands { - if c.HasName(command) { - HelpPrinter(CommandHelpTemplate, c) - return - } - } - - if c.App.CommandNotFound != nil { - c.App.CommandNotFound(c, command) - } else { - fmt.Fprintf(c.App.Writer, "No help topic for '%v'\n", command) - } -} - -// Prints help for the given subcommand -func ShowSubcommandHelp(c *Context) { - ShowCommandHelp(c, c.Command.Name) -} - -// Prints the version number of the App -func ShowVersion(c *Context) { - VersionPrinter(c) -} - -func printVersion(c *Context) { - fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) -} - -// Prints the lists of commands within a given context -func ShowCompletions(c *Context) { - a := c.App - if a != nil && a.BashComplete != nil { - a.BashComplete(c) - } -} - -// Prints the custom completions for a given command -func ShowCommandCompletions(ctx *Context, command string) { - c := ctx.App.Command(command) - if c != nil && c.BashComplete != nil { - c.BashComplete(ctx) - } -} - -func checkVersion(c *Context) bool { - if c.GlobalBool("version") { - ShowVersion(c) - return true - } - - return false -} - -func checkHelp(c *Context) bool { - if c.GlobalBool("h") || c.GlobalBool("help") { - ShowAppHelp(c) - return true - } - - return false -} - -func checkCommandHelp(c *Context, name string) bool { - if c.Bool("h") || c.Bool("help") { - ShowCommandHelp(c, name) - return true - } - - return false -} - -func checkSubcommandHelp(c *Context) bool { - if c.GlobalBool("h") || c.GlobalBool("help") { - ShowSubcommandHelp(c) - return true - } - - return false -} - -func 
checkCompletions(c *Context) bool {
-	if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion {
-		ShowCompletions(c)
-		return true
-	}
-
-	return false
-}
-
-func checkCommandCompletions(c *Context, name string) bool {
-	if c.Bool(BashCompletionFlag.Name) && c.App.EnableBashCompletion {
-		ShowCommandCompletions(c, name)
-		return true
-	}
-
-	return false
-}
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go b/Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go
deleted file mode 100644
index cdc4feb2fc..0000000000
--- a/Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package cli_test
-
-import (
-	"reflect"
-	"testing"
-)
-
-/* Test Helpers */
-func expect(t *testing.T, a interface{}, b interface{}) {
-	if a != b {
-		t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
-	}
-}
-
-func refute(t *testing.T, a interface{}, b interface{}) {
-	if a == b {
-		t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/dockerversion/dockerversion.go b/Godeps/_workspace/src/github.com/docker/docker/dockerversion/dockerversion.go
deleted file mode 100644
index 1898d5c61f..0000000000
--- a/Godeps/_workspace/src/github.com/docker/docker/dockerversion/dockerversion.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package dockerversion
-
-// FIXME: this should be embedded in the docker/docker.go,
-// but we can't because distro policy requires us to
-// package a separate dockerinit binary, and that binary needs
-// to know its version too.
-
-var (
-	GITCOMMIT string
-	VERSION   string
-
-	IAMSTATIC string // whether or not Docker itself was compiled statically via ./hack/make.sh binary ("true" or not "true")
-	INITSHA1  string // sha1sum of separate static dockerinit, if Docker itself was compiled dynamically via ./hack/make.sh dynbinary
-	INITPATH  string // custom location to search for a valid dockerinit binary (available for packagers as a last resort escape hatch)
-)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/engine/MAINTAINERS
deleted file mode 100644
index aee10c8421..0000000000
--- a/Godeps/_workspace/src/github.com/docker/docker/engine/MAINTAINERS
+++ /dev/null
@@ -1 +0,0 @@
-Solomon Hykes (@shykes)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/engine.go b/Godeps/_workspace/src/github.com/docker/docker/engine/engine.go
deleted file mode 100644
index 26f9953d66..0000000000
--- a/Godeps/_workspace/src/github.com/docker/docker/engine/engine.go
+++ /dev/null
@@ -1,252 +0,0 @@
-package engine
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"os"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/utils"
-)
-
-// Installer is a standard interface for objects which can "install" themselves
-// on an engine by registering handlers.
-// This can be used as an entrypoint for external plugins etc.
-type Installer interface { - Install(*Engine) error -} - -type Handler func(*Job) Status - -var globalHandlers map[string]Handler - -func init() { - globalHandlers = make(map[string]Handler) -} - -func Register(name string, handler Handler) error { - _, exists := globalHandlers[name] - if exists { - return fmt.Errorf("Can't overwrite global handler for command %s", name) - } - globalHandlers[name] = handler - return nil -} - -func unregister(name string) { - delete(globalHandlers, name) -} - -// The Engine is the core of Docker. -// It acts as a store for *containers*, and allows manipulation of these -// containers by executing *jobs*. -type Engine struct { - handlers map[string]Handler - catchall Handler - hack Hack // data for temporary hackery (see hack.go) - id string - Stdout io.Writer - Stderr io.Writer - Stdin io.Reader - Logging bool - tasks sync.WaitGroup - l sync.RWMutex // lock for shutdown - shutdown bool - onShutdown []func() // shutdown handlers -} - -func (eng *Engine) Register(name string, handler Handler) error { - _, exists := eng.handlers[name] - if exists { - return fmt.Errorf("Can't overwrite handler for command %s", name) - } - eng.handlers[name] = handler - return nil -} - -func (eng *Engine) RegisterCatchall(catchall Handler) { - eng.catchall = catchall -} - -// New initializes a new engine. -func New() *Engine { - eng := &Engine{ - handlers: make(map[string]Handler), - id: utils.RandomString(), - Stdout: os.Stdout, - Stderr: os.Stderr, - Stdin: os.Stdin, - Logging: true, - } - eng.Register("commands", func(job *Job) Status { - for _, name := range eng.commands() { - job.Printf("%s\n", name) - } - return StatusOK - }) - // Copy existing global handlers - for k, v := range globalHandlers { - eng.handlers[k] = v - } - return eng -} - -func (eng *Engine) String() string { - return fmt.Sprintf("%s", eng.id[:8]) -} - -// Commands returns a list of all currently registered commands, -// sorted alphabetically. -func (eng *Engine) commands() []string { - names := make([]string, 0, len(eng.handlers)) - for name := range eng.handlers { - names = append(names, name) - } - sort.Strings(names) - return names -} - -// Job creates a new job which can later be executed. -// This function mimics `Command` from the standard os/exec package. -func (eng *Engine) Job(name string, args ...string) *Job { - job := &Job{ - Eng: eng, - Name: name, - Args: args, - Stdin: NewInput(), - Stdout: NewOutput(), - Stderr: NewOutput(), - env: &Env{}, - closeIO: true, - } - if eng.Logging { - job.Stderr.Add(ioutils.NopWriteCloser(eng.Stderr)) - } - - // Catchall is shadowed by specific Register. - if handler, exists := eng.handlers[name]; exists { - job.handler = handler - } else if eng.catchall != nil && name != "" { - // empty job names are illegal, catchall or not. - job.handler = eng.catchall - } - return job -} - -// OnShutdown registers a new callback to be called by Shutdown. -// This is typically used by services to perform cleanup. -func (eng *Engine) OnShutdown(h func()) { - eng.l.Lock() - eng.onShutdown = append(eng.onShutdown, h) - eng.l.Unlock() -} - -// Shutdown permanently shuts down eng as follows: -// - It refuses all new jobs, permanently. -// - It waits for all active jobs to complete (with no timeout) -// - It calls all shutdown handlers concurrently (if any) -// - It returns when all handlers complete, or after 15 seconds, -// whichever happens first. 
-func (eng *Engine) Shutdown() { - eng.l.Lock() - if eng.shutdown { - eng.l.Unlock() - return - } - eng.shutdown = true - eng.l.Unlock() - // We don't need to protect the rest with a lock, to allow - // for other calls to immediately fail with "shutdown" instead - // of hanging for 15 seconds. - // This requires all concurrent calls to check for shutdown, otherwise - // it might cause a race. - - // Wait for all jobs to complete. - // Timeout after 5 seconds. - tasksDone := make(chan struct{}) - go func() { - eng.tasks.Wait() - close(tasksDone) - }() - select { - case <-time.After(time.Second * 5): - case <-tasksDone: - } - - // Call shutdown handlers, if any. - // Timeout after 10 seconds. - var wg sync.WaitGroup - for _, h := range eng.onShutdown { - wg.Add(1) - go func(h func()) { - defer wg.Done() - h() - }(h) - } - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - select { - case <-time.After(time.Second * 10): - case <-done: - } - return -} - -// IsShutdown returns true if the engine is in the process -// of shutting down, or already shut down. -// Otherwise it returns false. -func (eng *Engine) IsShutdown() bool { - eng.l.RLock() - defer eng.l.RUnlock() - return eng.shutdown -} - -// ParseJob creates a new job from a text description using a shell-like syntax. -// -// The following syntax is used to parse `input`: -// -// * Words are separated using standard whitespaces as separators. -// * Quotes and backslashes are not interpreted. -// * Words of the form 'KEY=[VALUE]' are added to the job environment. -// * All other words are added to the job arguments. -// -// For example: -// -// job, _ := eng.ParseJob("VERBOSE=1 echo hello TEST=true world") -// -// The resulting job will have: -// job.Args={"echo", "hello", "world"} -// job.Env={"VERBOSE":"1", "TEST":"true"} -// -func (eng *Engine) ParseJob(input string) (*Job, error) { - // FIXME: use a full-featured command parser - scanner := bufio.NewScanner(strings.NewReader(input)) - scanner.Split(bufio.ScanWords) - var ( - cmd []string - env Env - ) - for scanner.Scan() { - word := scanner.Text() - kv := strings.SplitN(word, "=", 2) - if len(kv) == 2 { - env.Set(kv[0], kv[1]) - } else { - cmd = append(cmd, word) - } - } - if len(cmd) == 0 { - return nil, fmt.Errorf("empty command: '%s'", input) - } - job := eng.Job(cmd[0], cmd[1:]...) 
- job.Env().Init(&env) - return job, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/engine_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/engine_test.go deleted file mode 100644 index 96c3f0df30..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/engine_test.go +++ /dev/null @@ -1,236 +0,0 @@ -package engine - -import ( - "bytes" - "strings" - "testing" - - "github.com/docker/docker/pkg/ioutils" -) - -func TestRegister(t *testing.T) { - if err := Register("dummy1", nil); err != nil { - t.Fatal(err) - } - - if err := Register("dummy1", nil); err == nil { - t.Fatalf("Expecting error, got none") - } - // Register is global so let's cleanup to avoid conflicts - defer unregister("dummy1") - - eng := New() - - //Should fail because global handlers are copied - //at the engine creation - if err := eng.Register("dummy1", nil); err == nil { - t.Fatalf("Expecting error, got none") - } - - if err := eng.Register("dummy2", nil); err != nil { - t.Fatal(err) - } - - if err := eng.Register("dummy2", nil); err == nil { - t.Fatalf("Expecting error, got none") - } - defer unregister("dummy2") -} - -func TestJob(t *testing.T) { - eng := New() - job1 := eng.Job("dummy1", "--level=awesome") - - if job1.handler != nil { - t.Fatalf("job1.handler should be empty") - } - - h := func(j *Job) Status { - j.Printf("%s\n", j.Name) - return 42 - } - - eng.Register("dummy2", h) - defer unregister("dummy2") - job2 := eng.Job("dummy2", "--level=awesome") - - if job2.handler == nil { - t.Fatalf("job2.handler shouldn't be nil") - } - - if job2.handler(job2) != 42 { - t.Fatalf("handler dummy2 was not found in job2") - } -} - -func TestEngineShutdown(t *testing.T) { - eng := New() - if eng.IsShutdown() { - t.Fatalf("Engine should not show as shutdown") - } - eng.Shutdown() - if !eng.IsShutdown() { - t.Fatalf("Engine should show as shutdown") - } -} - -func TestEngineCommands(t *testing.T) { - eng := New() - handler := func(job *Job) Status { return StatusOK } - eng.Register("foo", handler) - eng.Register("bar", handler) - eng.Register("echo", handler) - eng.Register("die", handler) - var output bytes.Buffer - commands := eng.Job("commands") - commands.Stdout.Add(&output) - commands.Run() - expected := "bar\ncommands\ndie\necho\nfoo\n" - if result := output.String(); result != expected { - t.Fatalf("Unexpected output:\nExpected = %v\nResult = %v\n", expected, result) - } -} - -func TestEngineString(t *testing.T) { - eng1 := New() - eng2 := New() - s1 := eng1.String() - s2 := eng2.String() - if eng1 == eng2 { - t.Fatalf("Different engines should have different names (%v == %v)", s1, s2) - } -} - -func TestParseJob(t *testing.T) { - eng := New() - // Verify that the resulting job calls to the right place - var called bool - eng.Register("echo", func(job *Job) Status { - called = true - return StatusOK - }) - input := "echo DEBUG=1 hello world VERBOSITY=42" - job, err := eng.ParseJob(input) - if err != nil { - t.Fatal(err) - } - if job.Name != "echo" { - t.Fatalf("Invalid job name: %v", job.Name) - } - if strings.Join(job.Args, ":::") != "hello:::world" { - t.Fatalf("Invalid job args: %v", job.Args) - } - if job.Env().Get("DEBUG") != "1" { - t.Fatalf("Invalid job env: %v", job.Env) - } - if job.Env().Get("VERBOSITY") != "42" { - t.Fatalf("Invalid job env: %v", job.Env) - } - if len(job.Env().Map()) != 2 { - t.Fatalf("Invalid job env: %v", job.Env) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - if !called { - t.Fatalf("Job was not called") - } -} - -func 
TestCatchallEmptyName(t *testing.T) { - eng := New() - var called bool - eng.RegisterCatchall(func(job *Job) Status { - called = true - return StatusOK - }) - err := eng.Job("").Run() - if err == nil { - t.Fatalf("Engine.Job(\"\").Run() should return an error") - } - if called { - t.Fatalf("Engine.Job(\"\").Run() should return an error") - } -} - -// Ensure that a job within a job both using the same underlying standard -// output writer does not close the output of the outer job when the inner -// job's stdout is wrapped with a NopCloser. When not wrapped, it should -// close the outer job's output. -func TestNestedJobSharedOutput(t *testing.T) { - var ( - outerHandler Handler - innerHandler Handler - wrapOutput bool - ) - - outerHandler = func(job *Job) Status { - job.Stdout.Write([]byte("outer1")) - - innerJob := job.Eng.Job("innerJob") - - if wrapOutput { - innerJob.Stdout.Add(ioutils.NopWriteCloser(job.Stdout)) - } else { - innerJob.Stdout.Add(job.Stdout) - } - - if err := innerJob.Run(); err != nil { - t.Fatal(err) - } - - // If wrapOutput was *false* this write will do nothing. - // FIXME (jlhawn): It should cause an error to write to - // closed output. - job.Stdout.Write([]byte(" outer2")) - - return StatusOK - } - - innerHandler = func(job *Job) Status { - job.Stdout.Write([]byte(" inner")) - - return StatusOK - } - - eng := New() - eng.Register("outerJob", outerHandler) - eng.Register("innerJob", innerHandler) - - // wrapOutput starts *false* so the expected - // output of running the outer job will be: - // - // "outer1 inner" - // - outBuf := new(bytes.Buffer) - outerJob := eng.Job("outerJob") - outerJob.Stdout.Add(outBuf) - - if err := outerJob.Run(); err != nil { - t.Fatal(err) - } - - expectedOutput := "outer1 inner" - if outBuf.String() != expectedOutput { - t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String()) - } - - // Set wrapOutput to true so that the expected - // output of running the outer job will be: - // - // "outer1 inner outer2" - // - wrapOutput = true - outBuf.Reset() - outerJob = eng.Job("outerJob") - outerJob.Stdout.Add(outBuf) - - if err := outerJob.Run(); err != nil { - t.Fatal(err) - } - - expectedOutput = "outer1 inner outer2" - if outBuf.String() != expectedOutput { - t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String()) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/env.go b/Godeps/_workspace/src/github.com/docker/docker/engine/env.go deleted file mode 100644 index a16dc35cd9..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/env.go +++ /dev/null @@ -1,297 +0,0 @@ -package engine - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "strconv" - "strings" -) - -type Env []string - -// Get returns the last value associated with the given key. If there are no -// values associated with the key, Get returns the empty string. -func (env *Env) Get(key string) (value string) { - // not using Map() because of the extra allocations https://github.com/docker/docker/pull/7488#issuecomment-51638315 - for _, kv := range *env { - if strings.Index(kv, "=") == -1 { - continue - } - parts := strings.SplitN(kv, "=", 2) - if parts[0] != key { - continue - } - if len(parts) < 2 { - value = "" - } else { - value = parts[1] - } - } - return -} - -func (env *Env) Exists(key string) bool { - _, exists := env.Map()[key] - return exists -} - -// Len returns the number of keys in the environment. 
-// Note that len(env) might be different from env.Len(),
-// because the same key might be set multiple times.
-func (env *Env) Len() int {
-	return len(env.Map())
-}
-
-func (env *Env) Init(src *Env) {
-	(*env) = make([]string, 0, len(*src))
-	for _, val := range *src {
-		(*env) = append((*env), val)
-	}
-}
-
-func (env *Env) GetBool(key string) (value bool) {
-	s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
-	if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
-		return false
-	}
-	return true
-}
-
-func (env *Env) SetBool(key string, value bool) {
-	if value {
-		env.Set(key, "1")
-	} else {
-		env.Set(key, "0")
-	}
-}
-
-func (env *Env) GetInt(key string) int {
-	return int(env.GetInt64(key))
-}
-
-func (env *Env) GetInt64(key string) int64 {
-	s := strings.Trim(env.Get(key), " \t")
-	val, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		return 0
-	}
-	return val
-}
-
-func (env *Env) SetInt(key string, value int) {
-	env.Set(key, fmt.Sprintf("%d", value))
-}
-
-func (env *Env) SetInt64(key string, value int64) {
-	env.Set(key, fmt.Sprintf("%d", value))
-}
-
-// Returns nil if key not found
-func (env *Env) GetList(key string) []string {
-	sval := env.Get(key)
-	if sval == "" {
-		return nil
-	}
-	l := make([]string, 0, 1)
-	if err := json.Unmarshal([]byte(sval), &l); err != nil {
-		l = append(l, sval)
-	}
-	return l
-}
-
-func (env *Env) GetSubEnv(key string) *Env {
-	sval := env.Get(key)
-	if sval == "" {
-		return nil
-	}
-	buf := bytes.NewBufferString(sval)
-	var sub Env
-	if err := sub.Decode(buf); err != nil {
-		return nil
-	}
-	return &sub
-}
-
-func (env *Env) SetSubEnv(key string, sub *Env) error {
-	var buf bytes.Buffer
-	if err := sub.Encode(&buf); err != nil {
-		return err
-	}
-	env.Set(key, string(buf.Bytes()))
-	return nil
-}
-
-func (env *Env) GetJson(key string, iface interface{}) error {
-	sval := env.Get(key)
-	if sval == "" {
-		return nil
-	}
-	return json.Unmarshal([]byte(sval), iface)
-}
-
-func (env *Env) SetJson(key string, value interface{}) error {
-	sval, err := json.Marshal(value)
-	if err != nil {
-		return err
-	}
-	env.Set(key, string(sval))
-	return nil
-}
-
-func (env *Env) SetList(key string, value []string) error {
-	return env.SetJson(key, value)
-}
-
-func (env *Env) Set(key, value string) {
-	*env = append(*env, key+"="+value)
-}
-
-func NewDecoder(src io.Reader) *Decoder {
-	return &Decoder{
-		json.NewDecoder(src),
-	}
-}
-
-type Decoder struct {
-	*json.Decoder
-}
-
-func (decoder *Decoder) Decode() (*Env, error) {
-	m := make(map[string]interface{})
-	if err := decoder.Decoder.Decode(&m); err != nil {
-		return nil, err
-	}
-	env := &Env{}
-	for key, value := range m {
-		env.SetAuto(key, value)
-	}
-	return env, nil
-}
-
-// Decode decodes `src` as a json dictionary, and adds
-// each decoded key-value pair to the environment.
-//
-// If `src` cannot be decoded as a json dictionary, an error
-// is returned.
-func (env *Env) Decode(src io.Reader) error {
-	m := make(map[string]interface{})
-	if err := json.NewDecoder(src).Decode(&m); err != nil {
-		return err
-	}
-	for k, v := range m {
-		env.SetAuto(k, v)
-	}
-	return nil
-}
-
-func (env *Env) SetAuto(k string, v interface{}) {
-	// Issue 7941 - if the value in the incoming JSON is null then treat it
-	// as if they never specified the property at all.
-	if v == nil {
-		return
-	}
-
-	// FIXME: we fix-convert float values to int, because
-	// encoding/json decodes integers to float64, but cannot encode them back.
- // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) - if fval, ok := v.(float64); ok { - env.SetInt64(k, int64(fval)) - } else if sval, ok := v.(string); ok { - env.Set(k, sval) - } else if val, err := json.Marshal(v); err == nil { - env.Set(k, string(val)) - } else { - env.Set(k, fmt.Sprintf("%v", v)) - } -} - -func changeFloats(v interface{}) interface{} { - switch v := v.(type) { - case float64: - return int(v) - case map[string]interface{}: - for key, val := range v { - v[key] = changeFloats(val) - } - case []interface{}: - for idx, val := range v { - v[idx] = changeFloats(val) - } - } - return v -} - -func (env *Env) Encode(dst io.Writer) error { - m := make(map[string]interface{}) - for k, v := range env.Map() { - var val interface{} - if err := json.Unmarshal([]byte(v), &val); err == nil { - // FIXME: we fix-convert float values to int, because - // encoding/json decodes integers to float64, but cannot encode them back. - // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) - m[k] = changeFloats(val) - } else { - m[k] = v - } - } - if err := json.NewEncoder(dst).Encode(&m); err != nil { - return err - } - return nil -} - -func (env *Env) WriteTo(dst io.Writer) (n int64, err error) { - // FIXME: return the number of bytes written to respect io.WriterTo - return 0, env.Encode(dst) -} - -func (env *Env) Import(src interface{}) (err error) { - defer func() { - if err != nil { - err = fmt.Errorf("ImportEnv: %s", err) - } - }() - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(src); err != nil { - return err - } - if err := env.Decode(&buf); err != nil { - return err - } - return nil -} - -func (env *Env) Map() map[string]string { - m := make(map[string]string) - for _, kv := range *env { - parts := strings.SplitN(kv, "=", 2) - m[parts[0]] = parts[1] - } - return m -} - -// MultiMap returns a representation of env as a -// map of string arrays, keyed by string. -// This is the same structure as http headers for example, -// which allow each key to have multiple values. -func (env *Env) MultiMap() map[string][]string { - m := make(map[string][]string) - for _, kv := range *env { - parts := strings.SplitN(kv, "=", 2) - m[parts[0]] = append(m[parts[0]], parts[1]) - } - return m -} - -// InitMultiMap removes all values in env, then initializes -// new values from the contents of m. 
-func (env *Env) InitMultiMap(m map[string][]string) { - (*env) = make([]string, 0, len(m)) - for k, vals := range m { - for _, v := range vals { - env.Set(k, v) - } - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/env_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/env_test.go deleted file mode 100644 index b0caca9cbd..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/env_test.go +++ /dev/null @@ -1,324 +0,0 @@ -package engine - -import ( - "bytes" - "encoding/json" - "testing" - - "github.com/docker/docker/pkg/testutils" -) - -func TestEnvLenZero(t *testing.T) { - env := &Env{} - if env.Len() != 0 { - t.Fatalf("%d", env.Len()) - } -} - -func TestEnvLenNotZero(t *testing.T) { - env := &Env{} - env.Set("foo", "bar") - env.Set("ga", "bu") - if env.Len() != 2 { - t.Fatalf("%d", env.Len()) - } -} - -func TestEnvLenDup(t *testing.T) { - env := &Env{ - "foo=bar", - "foo=baz", - "a=b", - } - // len(env) != env.Len() - if env.Len() != 2 { - t.Fatalf("%d", env.Len()) - } -} - -func TestEnvGetDup(t *testing.T) { - env := &Env{ - "foo=bar", - "foo=baz", - "foo=bif", - } - expected := "bif" - if v := env.Get("foo"); v != expected { - t.Fatalf("expect %q, got %q", expected, v) - } -} - -func TestNewJob(t *testing.T) { - job := mkJob(t, "dummy", "--level=awesome") - if job.Name != "dummy" { - t.Fatalf("Wrong job name: %s", job.Name) - } - if len(job.Args) != 1 { - t.Fatalf("Wrong number of job arguments: %d", len(job.Args)) - } - if job.Args[0] != "--level=awesome" { - t.Fatalf("Wrong job arguments: %s", job.Args[0]) - } -} - -func TestSetenv(t *testing.T) { - job := mkJob(t, "dummy") - job.Setenv("foo", "bar") - if val := job.Getenv("foo"); val != "bar" { - t.Fatalf("Getenv returns incorrect value: %s", val) - } - - job.Setenv("bar", "") - if val := job.Getenv("bar"); val != "" { - t.Fatalf("Getenv returns incorrect value: %s", val) - } - if val := job.Getenv("nonexistent"); val != "" { - t.Fatalf("Getenv returns incorrect value: %s", val) - } -} - -func TestSetenvBool(t *testing.T) { - job := mkJob(t, "dummy") - job.SetenvBool("foo", true) - if val := job.GetenvBool("foo"); !val { - t.Fatalf("GetenvBool returns incorrect value: %t", val) - } - - job.SetenvBool("bar", false) - if val := job.GetenvBool("bar"); val { - t.Fatalf("GetenvBool returns incorrect value: %t", val) - } - - if val := job.GetenvBool("nonexistent"); val { - t.Fatalf("GetenvBool returns incorrect value: %t", val) - } -} - -func TestSetenvInt(t *testing.T) { - job := mkJob(t, "dummy") - - job.SetenvInt("foo", -42) - if val := job.GetenvInt("foo"); val != -42 { - t.Fatalf("GetenvInt returns incorrect value: %d", val) - } - - job.SetenvInt("bar", 42) - if val := job.GetenvInt("bar"); val != 42 { - t.Fatalf("GetenvInt returns incorrect value: %d", val) - } - if val := job.GetenvInt("nonexistent"); val != 0 { - t.Fatalf("GetenvInt returns incorrect value: %d", val) - } -} - -func TestSetenvList(t *testing.T) { - job := mkJob(t, "dummy") - - job.SetenvList("foo", []string{"bar"}) - if val := job.GetenvList("foo"); len(val) != 1 || val[0] != "bar" { - t.Fatalf("GetenvList returns incorrect value: %v", val) - } - - job.SetenvList("bar", nil) - if val := job.GetenvList("bar"); val != nil { - t.Fatalf("GetenvList returns incorrect value: %v", val) - } - if val := job.GetenvList("nonexistent"); val != nil { - t.Fatalf("GetenvList returns incorrect value: %v", val) - } -} - -func TestEnviron(t *testing.T) { - job := mkJob(t, "dummy") - job.Setenv("foo", "bar") - val, exists := 
job.Environ()["foo"] - if !exists { - t.Fatalf("foo not found in the environ") - } - if val != "bar" { - t.Fatalf("bar not found in the environ") - } -} - -func TestMultiMap(t *testing.T) { - e := &Env{} - e.Set("foo", "bar") - e.Set("bar", "baz") - e.Set("hello", "world") - m := e.MultiMap() - e2 := &Env{} - e2.Set("old_key", "something something something") - e2.InitMultiMap(m) - if v := e2.Get("old_key"); v != "" { - t.Fatalf("%#v", v) - } - if v := e2.Get("bar"); v != "baz" { - t.Fatalf("%#v", v) - } - if v := e2.Get("hello"); v != "world" { - t.Fatalf("%#v", v) - } -} - -func testMap(l int) [][2]string { - res := make([][2]string, l) - for i := 0; i < l; i++ { - t := [2]string{testutils.RandomString(5), testutils.RandomString(20)} - res[i] = t - } - return res -} - -func BenchmarkSet(b *testing.B) { - fix := testMap(100) - b.ResetTimer() - for i := 0; i < b.N; i++ { - env := &Env{} - for _, kv := range fix { - env.Set(kv[0], kv[1]) - } - } -} - -func BenchmarkSetJson(b *testing.B) { - fix := testMap(100) - type X struct { - f string - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - env := &Env{} - for _, kv := range fix { - if err := env.SetJson(kv[0], X{kv[1]}); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkGet(b *testing.B) { - fix := testMap(100) - env := &Env{} - for _, kv := range fix { - env.Set(kv[0], kv[1]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, kv := range fix { - env.Get(kv[0]) - } - } -} - -func BenchmarkGetJson(b *testing.B) { - fix := testMap(100) - env := &Env{} - type X struct { - f string - } - for _, kv := range fix { - env.SetJson(kv[0], X{kv[1]}) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, kv := range fix { - if err := env.GetJson(kv[0], &X{}); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkEncode(b *testing.B) { - fix := testMap(100) - env := &Env{} - type X struct { - f string - } - // half a json - for i, kv := range fix { - if i%2 != 0 { - if err := env.SetJson(kv[0], X{kv[1]}); err != nil { - b.Fatal(err) - } - continue - } - env.Set(kv[0], kv[1]) - } - var writer bytes.Buffer - b.ResetTimer() - for i := 0; i < b.N; i++ { - env.Encode(&writer) - writer.Reset() - } -} - -func BenchmarkDecode(b *testing.B) { - fix := testMap(100) - env := &Env{} - type X struct { - f string - } - // half a json - for i, kv := range fix { - if i%2 != 0 { - if err := env.SetJson(kv[0], X{kv[1]}); err != nil { - b.Fatal(err) - } - continue - } - env.Set(kv[0], kv[1]) - } - var writer bytes.Buffer - env.Encode(&writer) - denv := &Env{} - reader := bytes.NewReader(writer.Bytes()) - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := denv.Decode(reader) - if err != nil { - b.Fatal(err) - } - reader.Seek(0, 0) - } -} - -func TestLongNumbers(t *testing.T) { - type T struct { - TestNum int64 - } - v := T{67108864} - var buf bytes.Buffer - e := &Env{} - e.SetJson("Test", v) - if err := e.Encode(&buf); err != nil { - t.Fatal(err) - } - res := make(map[string]T) - if err := json.Unmarshal(buf.Bytes(), &res); err != nil { - t.Fatal(err) - } - if res["Test"].TestNum != v.TestNum { - t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum) - } -} - -func TestLongNumbersArray(t *testing.T) { - type T struct { - TestNum []int64 - } - v := T{[]int64{67108864}} - var buf bytes.Buffer - e := &Env{} - e.SetJson("Test", v) - if err := e.Encode(&buf); err != nil { - t.Fatal(err) - } - res := make(map[string]T) - if err := json.Unmarshal(buf.Bytes(), &res); err != nil { - t.Fatal(err) - } - if res["Test"].TestNum[0] != 
v.TestNum[0] { - t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/hack.go b/Godeps/_workspace/src/github.com/docker/docker/engine/hack.go deleted file mode 100644 index be4fadbe6e..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/hack.go +++ /dev/null @@ -1,21 +0,0 @@ -package engine - -type Hack map[string]interface{} - -func (eng *Engine) Hack_GetGlobalVar(key string) interface{} { - if eng.hack == nil { - return nil - } - val, exists := eng.hack[key] - if !exists { - return nil - } - return val -} - -func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) { - if eng.hack == nil { - eng.hack = make(Hack) - } - eng.hack[key] = val -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/helpers_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/helpers_test.go deleted file mode 100644 index cfa11da7cd..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/helpers_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package engine - -import ( - "testing" -) - -var globalTestID string - -func mkJob(t *testing.T, name string, args ...string) *Job { - return New().Job(name, args...) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/http.go b/Godeps/_workspace/src/github.com/docker/docker/engine/http.go deleted file mode 100644 index 7e4dcd7bb4..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/http.go +++ /dev/null @@ -1,42 +0,0 @@ -package engine - -import ( - "net/http" - "path" -) - -// ServeHTTP executes a job as specified by the http request `r`, and sends the -// result as an http response. -// This method allows an Engine instance to be passed as a standard http.Handler interface. -// -// Note that the protocol used in this method is a convenience wrapper and is not the canonical -// implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing, -// and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response -// once data has been written to the body, which makes it inconvenient to return metadata such -// as the exit status. -// -func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var ( - jobName = path.Base(r.URL.Path) - jobArgs, exists = r.URL.Query()["a"] - ) - if !exists { - jobArgs = []string{} - } - w.Header().Set("Job-Name", jobName) - for _, arg := range jobArgs { - w.Header().Add("Job-Args", arg) - } - job := eng.Job(jobName, jobArgs...) - job.Stdout.Add(w) - job.Stderr.Add(w) - // FIXME: distinguish job status from engine error in Run() - // The former should be passed as a special header, the former - // should cause a 500 status - w.WriteHeader(http.StatusOK) - // The exit status cannot be sent reliably with HTTP1, because headers - // can only be sent before the body. - // (we could possibly use http footers via chunked encoding, but I couldn't find - // how to use them in net/http) - job.Run() -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/job.go b/Godeps/_workspace/src/github.com/docker/docker/engine/job.go deleted file mode 100644 index 6c11b13446..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/job.go +++ /dev/null @@ -1,242 +0,0 @@ -package engine - -import ( - "bytes" - "fmt" - "io" - "strings" - "time" - - log "github.com/Sirupsen/logrus" -) - -// A job is the fundamental unit of work in the docker engine. 
-// Everything docker can do should eventually be exposed as a job.
-// For example: execute a process in a container, create a new container,
-// download an archive from the internet, serve the http api, etc.
-//
-// The job API is designed after unix processes: a job has a name, arguments,
-// environment variables, standard streams for input, output and error, and
-// an exit status which can indicate success (0) or error (anything else).
-//
-// A status of 0 indicates success; any other integer indicates an error.
-// Using distinct non-zero values allows for richer error reporting.
-//
-type Job struct {
-    Eng     *Engine
-    Name    string
-    Args    []string
-    env     *Env
-    Stdout  *Output
-    Stderr  *Output
-    Stdin   *Input
-    handler Handler
-    status  Status
-    end     time.Time
-    closeIO bool
-}
-
-type Status int
-
-const (
-    StatusOK       Status = 0
-    StatusErr      Status = 1
-    StatusNotFound Status = 127
-)
-
-// Run executes the job and blocks until the job completes.
-// If the job returns a failure status, an error is returned
-// which includes the status.
-func (job *Job) Run() error {
-    if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") {
-        return fmt.Errorf("engine is shutdown")
-    }
-    // FIXME: this is a temporary workaround to avoid Engine.Shutdown
-    // waiting 5 seconds for server/api.ServeApi to complete (which it never will)
-    // every time the daemon is cleanly restarted.
-    // The permanent fix is to implement Job.Stop and Job.OnStop so that
-    // ServeApi can cooperate and terminate cleanly.
-    if job.Name != "serveapi" {
-        job.Eng.l.Lock()
-        job.Eng.tasks.Add(1)
-        job.Eng.l.Unlock()
-        defer job.Eng.tasks.Done()
-    }
-    // FIXME: make this thread-safe
-    // FIXME: implement wait
-    if !job.end.IsZero() {
-        return fmt.Errorf("%s: job has already completed", job.Name)
-    }
-    // Log beginning and end of the job
-    if job.Eng.Logging {
-        log.Infof("+job %s", job.CallString())
-        defer func() {
-            log.Infof("-job %s%s", job.CallString(), job.StatusString())
-        }()
-    }
-    var errorMessage = bytes.NewBuffer(nil)
-    job.Stderr.Add(errorMessage)
-    if job.handler == nil {
-        job.Errorf("%s: command not found", job.Name)
-        job.status = StatusNotFound
-    } else {
-        job.status = job.handler(job)
-        job.end = time.Now()
-    }
-    if job.closeIO {
-        // Wait for all background tasks to complete
-        if err := job.Stdout.Close(); err != nil {
-            return err
-        }
-        if err := job.Stderr.Close(); err != nil {
-            return err
-        }
-        if err := job.Stdin.Close(); err != nil {
-            return err
-        }
-    }
-    if job.status != 0 {
-        return fmt.Errorf("%s", Tail(errorMessage, 1))
-    }
-
-    return nil
-}
-
-func (job *Job) CallString() string {
-    return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
-}
-
-func (job *Job) StatusString() string {
-    // If the job hasn't completed, the status string is empty
-    if job.end.IsZero() {
-        return ""
-    }
-    var okerr string
-    if job.status == StatusOK {
-        okerr = "OK"
-    } else {
-        okerr = "ERR"
-    }
-    return fmt.Sprintf(" = %s (%d)", okerr, job.status)
-}
-
-// String returns a human-readable description of `job`
-func (job *Job) String() string {
-    return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString())
-}
-
-func (job *Job) Env() *Env {
-    return job.env
-}
-
-func (job *Job) EnvExists(key string) (value bool) {
-    return job.env.Exists(key)
-}
-
-func (job *Job) Getenv(key string) (value string) {
-    return job.env.Get(key)
-}
-
-func (job *Job) GetenvBool(key string) (value bool) {
-    return job.env.GetBool(key)
-}
-
-func (job *Job) SetenvBool(key string, value bool) {
-    job.env.SetBool(key, value)
-}
-
-func (job 
*Job) GetenvSubEnv(key string) *Env { - return job.env.GetSubEnv(key) -} - -func (job *Job) SetenvSubEnv(key string, value *Env) error { - return job.env.SetSubEnv(key, value) -} - -func (job *Job) GetenvInt64(key string) int64 { - return job.env.GetInt64(key) -} - -func (job *Job) GetenvInt(key string) int { - return job.env.GetInt(key) -} - -func (job *Job) SetenvInt64(key string, value int64) { - job.env.SetInt64(key, value) -} - -func (job *Job) SetenvInt(key string, value int) { - job.env.SetInt(key, value) -} - -// Returns nil if key not found -func (job *Job) GetenvList(key string) []string { - return job.env.GetList(key) -} - -func (job *Job) GetenvJson(key string, iface interface{}) error { - return job.env.GetJson(key, iface) -} - -func (job *Job) SetenvJson(key string, value interface{}) error { - return job.env.SetJson(key, value) -} - -func (job *Job) SetenvList(key string, value []string) error { - return job.env.SetJson(key, value) -} - -func (job *Job) Setenv(key, value string) { - job.env.Set(key, value) -} - -// DecodeEnv decodes `src` as a json dictionary, and adds -// each decoded key-value pair to the environment. -// -// If `src` cannot be decoded as a json dictionary, an error -// is returned. -func (job *Job) DecodeEnv(src io.Reader) error { - return job.env.Decode(src) -} - -func (job *Job) EncodeEnv(dst io.Writer) error { - return job.env.Encode(dst) -} - -func (job *Job) ImportEnv(src interface{}) (err error) { - return job.env.Import(src) -} - -func (job *Job) Environ() map[string]string { - return job.env.Map() -} - -func (job *Job) Logf(format string, args ...interface{}) (n int, err error) { - prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n")) - return fmt.Fprintf(job.Stderr, prefixedFormat, args...) -} - -func (job *Job) Printf(format string, args ...interface{}) (n int, err error) { - return fmt.Fprintf(job.Stdout, format, args...) -} - -func (job *Job) Errorf(format string, args ...interface{}) Status { - if format[len(format)-1] != '\n' { - format = format + "\n" - } - fmt.Fprintf(job.Stderr, format, args...) 
- return StatusErr -} - -func (job *Job) Error(err error) Status { - fmt.Fprintf(job.Stderr, "%s\n", err) - return StatusErr -} - -func (job *Job) StatusCode() int { - return int(job.status) -} - -func (job *Job) SetCloseIO(val bool) { - job.closeIO = val -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/job_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/job_test.go deleted file mode 100644 index 67e723988e..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/job_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package engine - -import ( - "bytes" - "fmt" - "testing" -) - -func TestJobStatusOK(t *testing.T) { - eng := New() - eng.Register("return_ok", func(job *Job) Status { return StatusOK }) - err := eng.Job("return_ok").Run() - if err != nil { - t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err) - } -} - -func TestJobStatusErr(t *testing.T) { - eng := New() - eng.Register("return_err", func(job *Job) Status { return StatusErr }) - err := eng.Job("return_err").Run() - if err == nil { - t.Fatalf("When a job returns StatusErr, Run() should return an error") - } -} - -func TestJobStatusNotFound(t *testing.T) { - eng := New() - eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound }) - err := eng.Job("return_not_found").Run() - if err == nil { - t.Fatalf("When a job returns StatusNotFound, Run() should return an error") - } -} - -func TestJobStdoutString(t *testing.T) { - eng := New() - // FIXME: test multiple combinations of output and status - eng.Register("say_something_in_stdout", func(job *Job) Status { - job.Printf("Hello world\n") - return StatusOK - }) - - job := eng.Job("say_something_in_stdout") - var outputBuffer = bytes.NewBuffer(nil) - job.Stdout.Add(outputBuffer) - if err := job.Run(); err != nil { - t.Fatal(err) - } - fmt.Println(outputBuffer) - var output = Tail(outputBuffer, 1) - if expectedOutput := "Hello world"; output != expectedOutput { - t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output) - } -} - -func TestJobStderrString(t *testing.T) { - eng := New() - // FIXME: test multiple combinations of output and status - eng.Register("say_something_in_stderr", func(job *Job) Status { - job.Errorf("Warning, something might happen\nHere it comes!\nOh no...\nSomething happened\n") - return StatusOK - }) - - job := eng.Job("say_something_in_stderr") - var outputBuffer = bytes.NewBuffer(nil) - job.Stderr.Add(outputBuffer) - if err := job.Run(); err != nil { - t.Fatal(err) - } - var output = Tail(outputBuffer, 1) - if expectedOutput := "Something happened"; output != expectedOutput { - t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/shutdown_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/shutdown_test.go deleted file mode 100644 index 13d8049267..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/shutdown_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package engine - -import ( - "testing" - "time" -) - -func TestShutdownEmpty(t *testing.T) { - eng := New() - if eng.IsShutdown() { - t.Fatalf("IsShutdown should be false") - } - eng.Shutdown() - if !eng.IsShutdown() { - t.Fatalf("IsShutdown should be true") - } -} - -func TestShutdownAfterRun(t *testing.T) { - eng := New() - var called bool - eng.Register("foo", func(job *Job) Status { - called = true - return StatusOK - }) - if err := eng.Job("foo").Run(); err != nil { - t.Fatal(err) - } - 
eng.Shutdown()
-    if err := eng.Job("foo").Run(); err == nil {
-        t.Fatalf("%#v", *eng)
-    }
-}
-
-// An approximate and racy, but better-than-nothing test that Shutdown()
-// blocks until running jobs have completed.
-func TestShutdownDuringRun(t *testing.T) {
-    var (
-        jobDelay     time.Duration = 500 * time.Millisecond
-        jobDelayLow  time.Duration = 100 * time.Millisecond
-        jobDelayHigh time.Duration = 700 * time.Millisecond
-    )
-    eng := New()
-    var completed bool
-    eng.Register("foo", func(job *Job) Status {
-        time.Sleep(jobDelay)
-        completed = true
-        return StatusOK
-    })
-    go eng.Job("foo").Run()
-    time.Sleep(50 * time.Millisecond)
-    done := make(chan struct{})
-    var startShutdown time.Time
-    go func() {
-        startShutdown = time.Now()
-        eng.Shutdown()
-        close(done)
-    }()
-    time.Sleep(50 * time.Millisecond)
-    if err := eng.Job("foo").Run(); err == nil {
-        t.Fatalf("run on shutdown should fail: %#v", *eng)
-    }
-    <-done
-    // Verify that Shutdown() blocks for roughly 500ms, instead
-    // of returning almost instantly.
-    //
-    // We use >100ms to leave ample margin for race conditions between
-    // goroutines. It's possible (but unlikely in reasonable testing
-    // conditions) that this test will cause a false positive or false
-    // negative. But it's probably better than not having any test
-    // for the 99.999% of the time where testing conditions are reasonable.
-    if d := time.Since(startShutdown); d.Nanoseconds() < jobDelayLow.Nanoseconds() {
-        t.Fatalf("shutdown did not block long enough: %v", d)
-    } else if d.Nanoseconds() > jobDelayHigh.Nanoseconds() {
-        t.Fatalf("shutdown blocked too long: %v", d)
-    }
-    if !completed {
-        t.Fatalf("job did not complete")
-    }
-}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/streams.go b/Godeps/_workspace/src/github.com/docker/docker/engine/streams.go
deleted file mode 100644
index ec703c96fa..0000000000
--- a/Godeps/_workspace/src/github.com/docker/docker/engine/streams.go
+++ /dev/null
@@ -1,223 +0,0 @@
-package engine
-
-import (
-    "bytes"
-    "fmt"
-    "io"
-    "io/ioutil"
-    "sync"
-)
-
-type Output struct {
-    sync.Mutex
-    dests []io.Writer
-    tasks sync.WaitGroup
-    used  bool
-}
-
-// Tail returns the last n lines of a buffer,
-// stripped of the final \n, if any.
-// If n <= 0, it returns an empty string.
-func Tail(buffer *bytes.Buffer, n int) string {
-    if n <= 0 {
-        return ""
-    }
-    bytes := buffer.Bytes()
-    if len(bytes) > 0 && bytes[len(bytes)-1] == '\n' {
-        bytes = bytes[:len(bytes)-1]
-    }
-    for i := buffer.Len() - 2; i >= 0; i-- {
-        if bytes[i] == '\n' {
-            n--
-            if n == 0 {
-                return string(bytes[i+1:])
-            }
-        }
-    }
-    return string(bytes)
-}
-
-// NewOutput returns a new Output object with no destinations attached.
-// Writing to an empty Output will cause the written data to be discarded.
-func NewOutput() *Output {
-    return &Output{}
-}
-
-// Used returns true if something was written on this output.
-func (o *Output) Used() bool {
-    o.Lock()
-    defer o.Unlock()
-    return o.used
-}
-
-// Add attaches a new destination to the Output. Any data subsequently written
-// to the output will be written to the new destination in addition to all the others.
-// This method is thread-safe.
-func (o *Output) Add(dst io.Writer) {
-    o.Lock()
-    defer o.Unlock()
-    o.dests = append(o.dests, dst)
-}
-
-// Set closes and removes all existing destinations, then attaches a single
-// new destination to the Output. Any data subsequently written to the output
-// will be written to the new destination only. This method is thread-safe.
-func (o *Output) Set(dst io.Writer) {
-    o.Close()
-    o.Lock()
-    defer o.Unlock()
-    o.dests = []io.Writer{dst}
-}
-
-// AddPipe creates an in-memory pipe with io.Pipe(), adds its writing end as a destination,
-// and returns its reading end for consumption by the caller.
-// This is roughly equivalent to Cmd.StdoutPipe() in the standard os/exec package.
-// This method is thread-safe.
-func (o *Output) AddPipe() (io.Reader, error) {
-    r, w := io.Pipe()
-    o.Add(w)
-    return r, nil
-}
-
-// Write writes the same data to all registered destinations.
-// This method is thread-safe.
-func (o *Output) Write(p []byte) (n int, err error) {
-    o.Lock()
-    defer o.Unlock()
-    o.used = true
-    var firstErr error
-    for _, dst := range o.dests {
-        _, err := dst.Write(p)
-        if err != nil && firstErr == nil {
-            firstErr = err
-        }
-    }
-    return len(p), firstErr
-}
-
-// Close unregisters all destinations and waits for all background
-// tasks (such as those started by AddEnv and AddTable) to complete.
-// The Close method of each destination is called if it exists.
-func (o *Output) Close() error {
-    o.Lock()
-    defer o.Unlock()
-    var firstErr error
-    for _, dst := range o.dests {
-        if closer, ok := dst.(io.Closer); ok {
-            err := closer.Close()
-            if err != nil && firstErr == nil {
-                firstErr = err
-            }
-        }
-    }
-    o.tasks.Wait()
-    o.dests = nil
-    return firstErr
-}
-
-type Input struct {
-    src io.Reader
-    sync.Mutex
-}
-
-// NewInput returns a new Input object with no source attached.
-// Reading from an empty Input will return io.EOF.
-func NewInput() *Input {
-    return &Input{}
-}
-
-// Read reads from the input in a thread-safe way.
-func (i *Input) Read(p []byte) (n int, err error) {
-    i.Mutex.Lock()
-    defer i.Mutex.Unlock()
-    if i.src == nil {
-        return 0, io.EOF
-    }
-    return i.src.Read(p)
-}
-
-// Close closes the src if it implements io.Closer.
-// Deliberately not thread-safe.
-func (i *Input) Close() error {
-    if i.src != nil {
-        if closer, ok := i.src.(io.Closer); ok {
-            return closer.Close()
-        }
-    }
-    return nil
-}
-
-// Add attaches a new source to the input.
-// Add can only be called once per input. Subsequent calls will
-// return an error.
-func (i *Input) Add(src io.Reader) error {
-    i.Mutex.Lock()
-    defer i.Mutex.Unlock()
-    if i.src != nil {
-        return fmt.Errorf("Maximum number of sources reached: 1")
-    }
-    i.src = src
-    return nil
-}
-
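// For illustration, a minimal sketch of the fan-out behaviour implemented by
// Output.Add and Output.Write above: a single Write is duplicated to every
// attached destination, and Close forwards to any destination that is an
// io.Closer. It assumes only that the engine package above is importable.
package main

import (
    "bytes"
    "fmt"

    "github.com/docker/docker/engine"
)

func main() {
    o := engine.NewOutput()
    var a, b bytes.Buffer
    o.Add(&a)
    o.Add(&b)
    o.Write([]byte("hello")) // both buffers receive the same bytes
    o.Close()
    fmt.Println(a.String(), b.String()) // hello hello
}

-// AddEnv starts a new goroutine which will decode all subsequent data
-// as a stream of json-encoded objects, and point `dst` to the last
-// decoded object.
-// The result `env` can be queried using the type-neutral Env interface.
-// It is not safe to query `env` until the Output is closed.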
-func (o *Output) AddEnv() (dst *Env, err error) { - src, err := o.AddPipe() - if err != nil { - return nil, err - } - dst = &Env{} - o.tasks.Add(1) - go func() { - defer o.tasks.Done() - decoder := NewDecoder(src) - for { - env, err := decoder.Decode() - if err != nil { - return - } - *dst = *env - } - }() - return dst, nil -} - -func (o *Output) AddListTable() (dst *Table, err error) { - src, err := o.AddPipe() - if err != nil { - return nil, err - } - dst = NewTable("", 0) - o.tasks.Add(1) - go func() { - defer o.tasks.Done() - content, err := ioutil.ReadAll(src) - if err != nil { - return - } - if _, err := dst.ReadListFrom(content); err != nil { - return - } - }() - return dst, nil -} - -func (o *Output) AddTable() (dst *Table, err error) { - src, err := o.AddPipe() - if err != nil { - return nil, err - } - dst = NewTable("", 0) - o.tasks.Add(1) - go func() { - defer o.tasks.Done() - if _, err := dst.ReadFrom(src); err != nil { - return - } - }() - return dst, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/streams_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/streams_test.go deleted file mode 100644 index 5cfd5d0e6c..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/streams_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package engine - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "strings" - "testing" -) - -type sentinelWriteCloser struct { - calledWrite bool - calledClose bool -} - -func (w *sentinelWriteCloser) Write(p []byte) (int, error) { - w.calledWrite = true - return len(p), nil -} - -func (w *sentinelWriteCloser) Close() error { - w.calledClose = true - return nil -} - -func TestOutputAddEnv(t *testing.T) { - input := "{\"foo\": \"bar\", \"answer_to_life_the_universe_and_everything\": 42}" - o := NewOutput() - result, err := o.AddEnv() - if err != nil { - t.Fatal(err) - } - o.Write([]byte(input)) - o.Close() - if v := result.Get("foo"); v != "bar" { - t.Errorf("Expected %v, got %v", "bar", v) - } - if v := result.GetInt("answer_to_life_the_universe_and_everything"); v != 42 { - t.Errorf("Expected %v, got %v", 42, v) - } - if v := result.Get("this-value-doesnt-exist"); v != "" { - t.Errorf("Expected %v, got %v", "", v) - } -} - -func TestOutputAddClose(t *testing.T) { - o := NewOutput() - var s sentinelWriteCloser - o.Add(&s) - if err := o.Close(); err != nil { - t.Fatal(err) - } - // Write data after the output is closed. - // Write should succeed, but no destination should receive it. 
- if _, err := o.Write([]byte("foo bar")); err != nil { - t.Fatal(err) - } - if !s.calledClose { - t.Fatal("Output.Close() didn't close the destination") - } -} - -func TestOutputAddPipe(t *testing.T) { - var testInputs = []string{ - "hello, world!", - "One\nTwo\nThree", - "", - "A line\nThen another nl-terminated line\n", - "A line followed by an empty line\n\n", - } - for _, input := range testInputs { - expectedOutput := input - o := NewOutput() - r, err := o.AddPipe() - if err != nil { - t.Fatal(err) - } - go func(o *Output) { - if n, err := o.Write([]byte(input)); err != nil { - t.Error(err) - } else if n != len(input) { - t.Errorf("Expected %d, got %d", len(input), n) - } - if err := o.Close(); err != nil { - t.Error(err) - } - }(o) - output, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - if string(output) != expectedOutput { - t.Errorf("Last line is not stored as return string.\nExpected: '%s'\nGot: '%s'", expectedOutput, output) - } - } -} - -func TestTail(t *testing.T) { - var tests = make(map[string][]string) - tests["hello, world!"] = []string{ - "", - "hello, world!", - "hello, world!", - "hello, world!", - } - tests["One\nTwo\nThree"] = []string{ - "", - "Three", - "Two\nThree", - "One\nTwo\nThree", - } - for input, outputs := range tests { - for n, expectedOutput := range outputs { - output := Tail(bytes.NewBufferString(input), n) - if output != expectedOutput { - t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", n, expectedOutput, output) - } - } - } -} - -func lastLine(txt string) string { - scanner := bufio.NewScanner(strings.NewReader(txt)) - var lastLine string - for scanner.Scan() { - lastLine = scanner.Text() - } - return lastLine -} - -func TestOutputAdd(t *testing.T) { - o := NewOutput() - b := &bytes.Buffer{} - o.Add(b) - input := "hello, world!" 
-    if n, err := o.Write([]byte(input)); err != nil {
-        t.Fatal(err)
-    } else if n != len(input) {
-        t.Fatalf("Expected %d, got %d", len(input), n)
-    }
-    if output := b.String(); output != input {
-        t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output)
-    }
-}
-
-func TestOutputWriteError(t *testing.T) {
-    o := NewOutput()
-    buf := &bytes.Buffer{}
-    o.Add(buf)
-    r, w := io.Pipe()
-    input := "Hello there"
-    expectedErr := fmt.Errorf("This is an error")
-    r.CloseWithError(expectedErr)
-    o.Add(w)
-    n, err := o.Write([]byte(input))
-    if err != expectedErr {
-        t.Fatalf("Output.Write() should return the first error encountered, if any")
-    }
-    if buf.String() != input {
-        t.Fatalf("Output.Write() should attempt write on all destinations, even after encountering an error")
-    }
-    if n != len(input) {
-        t.Fatalf("Output.Write() should return the size of the input if it successfully writes to at least one destination")
-    }
-}
-
-func TestInputAddEmpty(t *testing.T) {
-    i := NewInput()
-    var b bytes.Buffer
-    if err := i.Add(&b); err != nil {
-        t.Fatal(err)
-    }
-    data, err := ioutil.ReadAll(i)
-    if err != nil {
-        t.Fatal(err)
-    }
-    if len(data) > 0 {
-        t.Fatalf("Read from empty input should yield no data")
-    }
-}
-
-func TestInputAddTwo(t *testing.T) {
-    i := NewInput()
-    var b1 bytes.Buffer
-    // First add should succeed
-    if err := i.Add(&b1); err != nil {
-        t.Fatal(err)
-    }
-    var b2 bytes.Buffer
-    // Second add should fail
-    if err := i.Add(&b2); err == nil {
-        t.Fatalf("Adding a second source should return an error")
-    }
-}
-
-func TestInputAddNotEmpty(t *testing.T) {
-    i := NewInput()
-    b := bytes.NewBufferString("hello world\nabc")
-    expectedResult := b.String()
-    i.Add(b)
-    result, err := ioutil.ReadAll(i)
-    if err != nil {
-        t.Fatal(err)
-    }
-    if string(result) != expectedResult {
-        t.Fatalf("Expected: %v\nReceived: %v", expectedResult, string(result))
-    }
-}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/table.go b/Godeps/_workspace/src/github.com/docker/docker/engine/table.go
deleted file mode 100644
index 4498bdf1ec..0000000000
--- a/Godeps/_workspace/src/github.com/docker/docker/engine/table.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package engine
-
-import (
-    "bytes"
-    "encoding/json"
-    "io"
-    "sort"
-    "strconv"
-)
-
-type Table struct {
-    Data    []*Env
-    sortKey string
-    Chan    chan *Env
-}
-
-func NewTable(sortKey string, sizeHint int) *Table {
-    return &Table{
-        make([]*Env, 0, sizeHint),
-        sortKey,
-        make(chan *Env),
-    }
-}
-
-func (t *Table) SetKey(sortKey string) {
-    t.sortKey = sortKey
-}
-
-func (t *Table) Add(env *Env) {
-    t.Data = append(t.Data, env)
-}
-
-func (t *Table) Len() int {
-    return len(t.Data)
-}
-
-func (t *Table) Less(a, b int) bool {
-    return t.lessBy(a, b, t.sortKey)
-}
-
-func (t *Table) lessBy(a, b int, by string) bool {
-    keyA := t.Data[a].Get(by)
-    keyB := t.Data[b].Get(by)
-    intA, errA := strconv.ParseInt(keyA, 10, 64)
-    intB, errB := strconv.ParseInt(keyB, 10, 64)
-    if errA == nil && errB == nil {
-        return intA < intB
-    }
-    return keyA < keyB
-}
-
-func (t *Table) Swap(a, b int) {
-    tmp := t.Data[a]
-    t.Data[a] = t.Data[b]
-    t.Data[b] = tmp
-}
-
-func (t *Table) Sort() {
-    sort.Sort(t)
-}
-
-func (t *Table) ReverseSort() {
-    sort.Sort(sort.Reverse(t))
-}
-
-func (t *Table) WriteListTo(dst io.Writer) (n int64, err error) {
-    if _, err := dst.Write([]byte{'['}); err != nil {
-        return -1, err
-    }
-    n = 1
-    for i, env := range t.Data {
-        bytes, err := env.WriteTo(dst)
-        if err != nil {
-            return -1, err
-        }
-        n += bytes
-        if i != 
len(t.Data)-1 { - if _, err := dst.Write([]byte{','}); err != nil { - return -1, err - } - n++ - } - } - if _, err := dst.Write([]byte{']'}); err != nil { - return -1, err - } - return n + 1, nil -} - -func (t *Table) ToListString() (string, error) { - buffer := bytes.NewBuffer(nil) - if _, err := t.WriteListTo(buffer); err != nil { - return "", err - } - return buffer.String(), nil -} - -func (t *Table) WriteTo(dst io.Writer) (n int64, err error) { - for _, env := range t.Data { - bytes, err := env.WriteTo(dst) - if err != nil { - return -1, err - } - n += bytes - } - return n, nil -} - -func (t *Table) ReadListFrom(src []byte) (n int64, err error) { - var array []interface{} - - if err := json.Unmarshal(src, &array); err != nil { - return -1, err - } - - for _, item := range array { - if m, ok := item.(map[string]interface{}); ok { - env := &Env{} - for key, value := range m { - env.SetAuto(key, value) - } - t.Add(env) - } - } - - return int64(len(src)), nil -} - -func (t *Table) ReadFrom(src io.Reader) (n int64, err error) { - decoder := NewDecoder(src) - for { - env, err := decoder.Decode() - if err == io.EOF { - return 0, nil - } else if err != nil { - return -1, err - } - t.Add(env) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/table_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/table_test.go deleted file mode 100644 index 9a32ac9cdb..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/table_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package engine - -import ( - "bytes" - "encoding/json" - "testing" -) - -func TestTableWriteTo(t *testing.T) { - table := NewTable("", 0) - e := &Env{} - e.Set("foo", "bar") - table.Add(e) - var buf bytes.Buffer - if _, err := table.WriteTo(&buf); err != nil { - t.Fatal(err) - } - output := make(map[string]string) - if err := json.Unmarshal(buf.Bytes(), &output); err != nil { - t.Fatal(err) - } - if len(output) != 1 { - t.Fatalf("Incorrect output: %v", output) - } - if val, exists := output["foo"]; !exists || val != "bar" { - t.Fatalf("Inccorect output: %v", output) - } -} - -func TestTableSortStringValue(t *testing.T) { - table := NewTable("Key", 0) - - e := &Env{} - e.Set("Key", "A") - table.Add(e) - - e = &Env{} - e.Set("Key", "D") - table.Add(e) - - e = &Env{} - e.Set("Key", "B") - table.Add(e) - - e = &Env{} - e.Set("Key", "C") - table.Add(e) - - table.Sort() - - if len := table.Len(); len != 4 { - t.Fatalf("Expected 4, got %d", len) - } - - if value := table.Data[0].Get("Key"); value != "A" { - t.Fatalf("Expected A, got %s", value) - } - - if value := table.Data[1].Get("Key"); value != "B" { - t.Fatalf("Expected B, got %s", value) - } - - if value := table.Data[2].Get("Key"); value != "C" { - t.Fatalf("Expected C, got %s", value) - } - - if value := table.Data[3].Get("Key"); value != "D" { - t.Fatalf("Expected D, got %s", value) - } -} - -func TestTableReverseSortStringValue(t *testing.T) { - table := NewTable("Key", 0) - - e := &Env{} - e.Set("Key", "A") - table.Add(e) - - e = &Env{} - e.Set("Key", "D") - table.Add(e) - - e = &Env{} - e.Set("Key", "B") - table.Add(e) - - e = &Env{} - e.Set("Key", "C") - table.Add(e) - - table.ReverseSort() - - if len := table.Len(); len != 4 { - t.Fatalf("Expected 4, got %d", len) - } - - if value := table.Data[0].Get("Key"); value != "D" { - t.Fatalf("Expected D, got %s", value) - } - - if value := table.Data[1].Get("Key"); value != "C" { - t.Fatalf("Expected B, got %s", value) - } - - if value := table.Data[2].Get("Key"); value != "B" { - 
t.Fatalf("Expected C, got %s", value) - } - - if value := table.Data[3].Get("Key"); value != "A" { - t.Fatalf("Expected A, got %s", value) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/MAINTAINERS deleted file mode 100644 index 2aac7265d2..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Cristian Staretu (@unclejack) -Tibor Vass (@tiborvass) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md deleted file mode 100644 index 7307d9694f..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md +++ /dev/null @@ -1 +0,0 @@ -This code provides helper functions for dealing with archive files. diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go deleted file mode 100644 index 68e5c1d300..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go +++ /dev/null @@ -1,817 +0,0 @@ -package archive - -import ( - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "syscall" - - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/system" -) - -type ( - Archive io.ReadCloser - ArchiveReader io.Reader - Compression int - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - Name string - } - - // Archiver allows the reuse of most utility functions of this package - // with a pluggable Untar function. - Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - } - - // breakoutError is used to differentiate errors related to breaking out - // When testing archive breakout in the unit tests, this error is expected - // in order for the test to pass. 
- breakoutError error -) - -var ( - ErrNotImplemented = errors.New("Function not implemented") - defaultArchiver = &Archiver{Untar} -) - -const ( - Uncompressed Compression = iota - Bzip2 - Gzip - Xz -) - -func IsArchive(header []byte) bool { - compression := DetectCompression(header) - if compression != Uncompressed { - return true - } - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} - -func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - log.Debugf("Len too short") - continue - } - if bytes.Compare(m, source[:len(m)]) == 0 { - return compression - } - } - return Uncompressed -} - -func xzDecompress(archive io.Reader) (io.ReadCloser, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return CmdStream(exec.Command(args[0], args[1:]...), archive) -} - -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) - bs, err := buf.Peek(10) - if err != nil { - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - readBufWrapper := p.NewReadCloserWrapper(buf, buf) - return readBufWrapper, nil - case Gzip: - gzReader, err := gzip.NewReader(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return readBufWrapper, nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) - return readBufWrapper, nil - case Xz: - xzReader, err := xzDecompress(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return readBufWrapper, nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - } - return "" -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string -} - -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - link := "" - if fi.Mode()&os.ModeSymlink != 0 { - if link, err = os.Readlink(path); err != nil { - return err - } - } - - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return err - } - - if fi.IsDir() && !strings.HasSuffix(name, "/") { - name = name + "/" - } - - hdr.Name = name - - nlink, 
inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) - if err != nil { - return err - } - - // if it's a regular file and has more than 1 link, - // it's hardlinked, so set the type flag accordingly - if fi.Mode().IsRegular() && nlink > 1 { - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg { - file, err := os.Open(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error { - // hdr.Mode is in linux format, which we can use for sycalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. - // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg, tar.TypeRegA: - // Source is regular file - file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= syscall.S_IFBLK - case tar.TypeChar: - mode |= syscall.S_IFCHR - case tar.TypeFifo: - mode |= syscall.S_IFIFO - } - - if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { - return err - } - - case tar.TypeLink: - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. 
/extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - log.Debugf("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) - } - - if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown { - return err - } - - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - return err - } - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} - // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and - if hdr.Typeflag != tar.TypeSymlink { - if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } else { - if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -func escapeName(name string) string { - escaped := make([]byte, 0) - for i, c := range []byte(name) { - if i == 0 && c == '/' { - continue - } - // all printable chars except "-" which is 0x2d - if (0x20 <= c && c <= 0x7E) && c != 0x2d { - escaped = append(escaped, c) - } else { - escaped = append(escaped, fmt.Sprintf("\\%03o", c)...) - } - } - return string(escaped) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(compressWriter), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - } - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. 
a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - if options.IncludeFiles == nil { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - var renamedRelFilePath string // For when tar.Options.Name is set - for _, include := range options.IncludeFiles { - filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { - if err != nil { - log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the root path. Skip in both situations. - return nil - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - skip, err = fileutils.Matches(relFilePath, options.ExcludePatterns) - if err != nil { - log.Debugf("Error matching %s", relFilePath, err) - return err - } - } - - if skip { - if f.IsDir() { - return filepath.SkipDir - } - return nil - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource - if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) { - renamedRelFilePath = relFilePath - } - // Set this to make sure the items underneath also get renamed - if options.Name != "" { - relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - log.Debugf("Can't add file %s to tar: %s", srcPath, err) - } - return nil - }) - } - - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - log.Debugf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - log.Debugf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - log.Debugf("Can't close pipe writer: %s", err) - } - }() - - return pipeReader, nil -} - -func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - - // Iterate through the files in the archive. 
-loop:
-    for {
-        hdr, err := tr.Next()
-        if err == io.EOF {
-            // end of tar archive
-            break
-        }
-        if err != nil {
-            return err
-        }
-
-        // Normalize name, for safety and for a simple is-root check
-        // This keeps "../" as-is, but normalizes "/../" to "/"
-        hdr.Name = filepath.Clean(hdr.Name)
-
-        for _, exclude := range options.ExcludePatterns {
-            if strings.HasPrefix(hdr.Name, exclude) {
-                continue loop
-            }
-        }
-
-        if !strings.HasSuffix(hdr.Name, "/") {
-            // Not the root directory, ensure that the parent directory exists
-            parent := filepath.Dir(hdr.Name)
-            parentPath := filepath.Join(dest, parent)
-            if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
-                err = os.MkdirAll(parentPath, 0777)
-                if err != nil {
-                    return err
-                }
-            }
-        }
-
-        path := filepath.Join(dest, hdr.Name)
-        rel, err := filepath.Rel(dest, path)
-        if err != nil {
-            return err
-        }
-        if strings.HasPrefix(rel, "..") {
-            return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
-        }
-
-        // If the path exists, we almost always just want to remove and replace it.
-        // The only exception is when it is a directory *and* the file from
-        // the layer is also a directory. Then we want to merge them (i.e.
-        // just apply the metadata from the layer).
-        if fi, err := os.Lstat(path); err == nil {
-            if fi.IsDir() && hdr.Name == "." {
-                continue
-            }
-            if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
-                if err := os.RemoveAll(path); err != nil {
-                    return err
-                }
-            }
-        }
-        trBuf.Reset(tr)
-        if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil {
-            return err
-        }
-
-        // Directory mtimes must be handled at the end so that further
-        // file creation inside them does not modify the directory mtime
-        if hdr.Typeflag == tar.TypeDir {
-            dirs = append(dirs, hdr)
-        }
-    }
-
-    for _, hdr := range dirs {
-        path := filepath.Join(dest, hdr.Name)
-        ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
-        if err := syscall.UtimesNano(path, ts); err != nil {
-            return err
-        }
-    }
-    return nil
-}
-
-// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
-// and unpacks it into the directory at `dest`.
-// The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
-// FIXME: specify behavior when target path exists vs. doesn't exist.
-func Untar(archive io.Reader, dest string, options *TarOptions) error {
-    if archive == nil {
-        return fmt.Errorf("Empty archive")
-    }
-    dest = filepath.Clean(dest)
-    if options == nil {
-        options = &TarOptions{}
-    }
-    if options.ExcludePatterns == nil {
-        options.ExcludePatterns = []string{}
-    }
-    decompressedArchive, err := DecompressStream(archive)
-    if err != nil {
-        return err
-    }
-    defer decompressedArchive.Close()
-    return Unpack(decompressedArchive, dest, options)
-}
-
-func (archiver *Archiver) TarUntar(src, dst string) error {
-    log.Debugf("TarUntar(%s %s)", src, dst)
-    archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
-    if err != nil {
-        return err
-    }
-    defer archive.Close()
-    return archiver.Untar(archive, dst, nil)
-}
-
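// For illustration, how Tar and Untar above compose into a round trip; the
// /tmp paths are hypothetical. Because DecompressStream sniffs the stream,
// Untar accepts the gzipped archive without being told its format.
package main

import "github.com/docker/docker/pkg/archive"

func main() {
    // Stream /tmp/src as a gzipped tar, then unpack it into /tmp/dst.
    // Passing nil options lets Untar fill in the defaults shown above.
    rdr, err := archive.Tar("/tmp/src", archive.Gzip)
    if err != nil {
        panic(err)
    }
    defer rdr.Close()
    if err := archive.Untar(rdr, "/tmp/dst", nil); err != nil {
        panic(err)
    }
}

-// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
-// If either Tar or Untar fails, TarUntar aborts and returns the error.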
-func TarUntar(src, dst string) error {
-    return defaultArchiver.TarUntar(src, dst)
-}
-
-func (archiver *Archiver) UntarPath(src, dst string) error {
-    archive, err := os.Open(src)
-    if err != nil {
-        return err
-    }
-    defer archive.Close()
-    if err := archiver.Untar(archive, dst, nil); err != nil {
-        return err
-    }
-    return nil
-}
-
-// UntarPath is a convenience function which looks for an archive
-// at filesystem path `src`, and unpacks it at `dst`.
-func UntarPath(src, dst string) error {
-    return defaultArchiver.UntarPath(src, dst)
-}
-
-func (archiver *Archiver) CopyWithTar(src, dst string) error {
-    srcSt, err := os.Stat(src)
-    if err != nil {
-        return err
-    }
-    if !srcSt.IsDir() {
-        return archiver.CopyFileWithTar(src, dst)
-    }
-    // Create dst, copy src's content into it
-    log.Debugf("Creating dest directory: %s", dst)
-    if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
-        return err
-    }
-    log.Debugf("Calling TarUntar(%s, %s)", src, dst)
-    return archiver.TarUntar(src, dst)
-}
-
-// CopyWithTar creates a tar archive of filesystem path `src`, and
-// unpacks it at filesystem path `dst`.
-// The archive is streamed directly with fixed buffering and no
-// intermediary disk IO.
-func CopyWithTar(src, dst string) error {
-    return defaultArchiver.CopyWithTar(src, dst)
-}
-
-func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
-    log.Debugf("CopyFileWithTar(%s, %s)", src, dst)
-    srcSt, err := os.Stat(src)
-    if err != nil {
-        return err
-    }
-    if srcSt.IsDir() {
-        return fmt.Errorf("Can't copy a directory")
-    }
-    // Clean up the trailing /
-    if dst[len(dst)-1] == '/' {
-        dst = path.Join(dst, filepath.Base(src))
-    }
-    // Create the holding directory if necessary
-    if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
-        return err
-    }
-
-    r, w := io.Pipe()
-    errC := promise.Go(func() error {
-        defer w.Close()
-
-        srcF, err := os.Open(src)
-        if err != nil {
-            return err
-        }
-        defer srcF.Close()
-
-        hdr, err := tar.FileInfoHeader(srcSt, "")
-        if err != nil {
-            return err
-        }
-        hdr.Name = filepath.Base(dst)
-        tw := tar.NewWriter(w)
-        defer tw.Close()
-        if err := tw.WriteHeader(hdr); err != nil {
-            return err
-        }
-        if _, err := io.Copy(tw, srcF); err != nil {
-            return err
-        }
-        return nil
-    })
-    defer func() {
-        // Prefer the error from Untar, if any; otherwise report the
-        // writer goroutine's error.
-        if er := <-errC; err == nil && er != nil {
-            err = er
-        }
-    }()
-    return archiver.Untar(r, filepath.Dir(dst), nil)
-}
-
-// CopyFileWithTar emulates the behavior of the 'cp' command-line tool
-// for a single file. It copies a regular file from path `src` to
-// path `dst`, and preserves all its metadata.
-//
-// If `dst` ends with a trailing slash '/', the final destination path
-// will be `dst/base(src)`.
-func CopyFileWithTar(src, dst string) (err error) {
-    return defaultArchiver.CopyFileWithTar(src, dst)
-}
-
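// For illustration, how CmdStream below is typically consumed (compare
// TestCmdStreamGood in the tests further down): stdout arrives as a stream,
// and a failed command surfaces as an error on the reader, with anything the
// command wrote to stderr folded into the error message.
package main

import (
    "fmt"
    "io/ioutil"
    "os/exec"

    "github.com/docker/docker/pkg/archive"
)

func main() {
    out, err := archive.CmdStream(exec.Command("/bin/sh", "-c", "echo hello"), nil)
    if err != nil {
        panic(err)
    }
    data, err := ioutil.ReadAll(out) // err reports a non-zero exit, if any
    fmt.Printf("%q %v\n", data, err) // "hello\n" <nil>
}

-// CmdStream executes a command, and returns its stdout as a stream.
-// If the command fails to run or doesn't complete successfully, an error
-// will be returned, including anything written on stderr.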
-func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { - if input != nil { - stdin, err := cmd.StdinPipe() - if err != nil { - return nil, err - } - // Write stdin if any - go func() { - io.Copy(stdin, input) - stdin.Close() - }() - } - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - stderr, err := cmd.StderrPipe() - if err != nil { - return nil, err - } - pipeR, pipeW := io.Pipe() - errChan := make(chan []byte) - // Collect stderr, we will use it in case of an error - go func() { - errText, e := ioutil.ReadAll(stderr) - if e != nil { - errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")") - } - errChan <- errText - }() - // Copy stdout to the returned pipe - go func() { - _, err := io.Copy(pipeW, stdout) - if err != nil { - pipeW.CloseWithError(err) - } - errText := <-errChan - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText)) - } else { - pipeW.Close() - } - }() - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, err - } - return pipeR, nil -} - -// NewTempArchive reads the content of src into a temporary file, and returns the contents -// of that file as an archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -func NewTempArchive(src Archive, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") - if err != nil { - return nil, err - } - if _, err := io.Copy(f, src); err != nil { - return nil, err - } - if err = f.Sync(); err != nil { - return nil, err - } - if _, err := f.Seek(0, 0); err != nil { - return nil, err - } - st, err := f.Stat() - if err != nil { - return nil, err - } - size := st.Size() - return &TempArchive{File: f, Size: size}, nil -} - -type TempArchive struct { - *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 - closed bool -} - -// Close closes the underlying file if it's still open, or does a no-op -// to allow callers to try to close the TempArchive multiple times safely. 
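// For illustration, the consume-once behaviour documented above: reading a
// TempArchive to its full Size closes and deletes the backing file (see Read
// below). A minimal sketch, feeding it an in-memory stand-in for a tar stream:
package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "strings"

    "github.com/docker/docker/pkg/archive"
)

func main() {
    src := ioutil.NopCloser(strings.NewReader("pretend this is a tar stream"))
    tmp, err := archive.NewTempArchive(src, "") // "" means the default temp dir
    if err != nil {
        panic(err)
    }
    // Reading exactly Size bytes triggers Close and os.Remove inside Read.
    if _, err := io.CopyN(ioutil.Discard, tmp, tmp.Size); err != nil {
        panic(err)
    }
    if _, err := os.Stat(tmp.Name()); os.IsNotExist(err) {
        fmt.Println("backing file removed after full read")
    }
}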
-func (archive *TempArchive) Close() error { - if archive.closed { - return nil - } - - archive.closed = true - - return archive.File.Close() -} - -func (archive *TempArchive) Read(data []byte) (int, error) { - n, err := archive.File.Read(data) - archive.read += int64(n) - if err != nil || archive.read == archive.Size { - archive.Close() - os.Remove(archive.File.Name()) - } - return n, err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go deleted file mode 100644 index 6cd95d5ad5..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go +++ /dev/null @@ -1,625 +0,0 @@ -package archive - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "syscall" - "testing" - "time" - - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" -) - -func TestCmdStreamLargeStderr(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") - out, err := CmdStream(cmd, nil) - if err != nil { - t.Fatalf("Failed to start command: %s", err) - } - errCh := make(chan error) - go func() { - _, err := io.Copy(ioutil.Discard, out) - errCh <- err - }() - select { - case err := <-errCh: - if err != nil { - t.Fatalf("Command should not have failed (err=%.100s...)", err) - } - case <-time.After(5 * time.Second): - t.Fatalf("Command did not complete in 5 seconds; probable deadlock") - } -} - -func TestCmdStreamBad(t *testing.T) { - badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") - out, err := CmdStream(badCmd, nil) - if err != nil { - t.Fatalf("Failed to start command: %s", err) - } - if output, err := ioutil.ReadAll(out); err == nil { - t.Fatalf("Command should have failed") - } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { - t.Fatalf("Wrong error value (%s)", err) - } else if s := string(output); s != "hello\n" { - t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) - } -} - -func TestCmdStreamGood(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") - out, err := CmdStream(cmd, nil) - if err != nil { - t.Fatal(err) - } - if output, err := ioutil.ReadAll(out); err != nil { - t.Fatalf("Command should not have failed (err=%s)", err) - } else if s := string(output); s != "hello\n" { - t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) - } -} - -func TestTarFiles(t *testing.T) { - // try without hardlinks - if err := checkNoChanges(1000, false); err != nil { - t.Fatal(err) - } - // try with hardlinks - if err := checkNoChanges(1000, true); err != nil { - t.Fatal(err) - } -} - -func checkNoChanges(fileNum int, hardlinks bool) error { - srcDir, err := ioutil.TempDir("", "docker-test-srcDir") - if err != nil { - return err - } - defer os.RemoveAll(srcDir) - - destDir, err := ioutil.TempDir("", "docker-test-destDir") - if err != nil { - return err - } - defer os.RemoveAll(destDir) - - _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) - if err != nil { - return err - } - - err = TarUntar(srcDir, destDir) - if err != nil { - return err - } - - changes, err := ChangesDirs(destDir, srcDir) - if err != nil { - return err - } - if len(changes) > 0 { - return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) - } 
- return nil -} - -func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { - archive, err := TarWithOptions(origin, options) - if err != nil { - t.Fatal(err) - } - defer archive.Close() - - buf := make([]byte, 10) - if _, err := archive.Read(buf); err != nil { - return nil, err - } - wrap := io.MultiReader(bytes.NewReader(buf), archive) - - detectedCompression := DetectCompression(buf) - compression := options.Compression - if detectedCompression.Extension() != compression.Extension() { - return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) - } - - tmp, err := ioutil.TempDir("", "docker-test-untar") - if err != nil { - return nil, err - } - defer os.RemoveAll(tmp) - if err := Untar(wrap, tmp, nil); err != nil { - return nil, err - } - if _, err := os.Stat(tmp); err != nil { - return nil, err - } - - return ChangesDirs(origin, tmp) -} - -func TestTarUntar(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { - t.Fatal(err) - } - - for _, c := range []Compression{ - Uncompressed, - Gzip, - } { - changes, err := tarUntar(t, origin, &TarOptions{ - Compression: c, - ExcludePatterns: []string{"3"}, - }) - - if err != nil { - t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) - } - - if len(changes) != 1 || changes[0].Path != "/3" { - t.Fatalf("Unexpected differences after tarUntar: %v", changes) - } - } -} - -func TestTarWithOptions(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - - cases := []struct { - opts *TarOptions - numChanges int - }{ - {&TarOptions{IncludeFiles: []string{"1"}}, 1}, - {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, - } - for _, testCase := range cases { - changes, err := tarUntar(t, origin, testCase.opts) - if err != nil { - t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) - } - if len(changes) != testCase.numChanges { - t.Errorf("Expected %d changes, got %d for %+v:", - testCase.numChanges, len(changes), testCase.opts) - } - } -} - -// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz -// use PAX Global Extended Headers. -// Failing prevents the archives from being uncompressed during ADD -func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { - hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} - tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true) - if err != nil { - t.Fatal(err) - } -} - -// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. 
-// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. -func TestUntarUstarGnuConflict(t *testing.T) { - f, err := os.Open("testdata/broken.tar") - if err != nil { - t.Fatal(err) - } - found := false - tr := tar.NewReader(f) - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - t.Fatal(err) - } - if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { - found = true - break - } - } - if !found { - t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") - } -} - -func TestTarWithHardLink(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil { - t.Fatal(err) - } - - var i1, i2 uint64 - if i1, err = getNlink(path.Join(origin, "1")); err != nil { - t.Fatal(err) - } - // sanity check that we can hardlink - if i1 != 2 { - t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) - } - - dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dest) - - // we'll do this in two steps to separate failure - fh, err := Tar(origin, Uncompressed) - if err != nil { - t.Fatal(err) - } - - // ensure we can read the whole thing with no error, before writing back out - buf, err := ioutil.ReadAll(fh) - if err != nil { - t.Fatal(err) - } - - bRdr := bytes.NewReader(buf) - err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - if err != nil { - t.Fatal(err) - } - - if i1, err = getInode(path.Join(dest, "1")); err != nil { - t.Fatal(err) - } - if i2, err = getInode(path.Join(dest, "2")); err != nil { - t.Fatal(err) - } - - if i1 != i2 { - t.Errorf("expected matching inodes, but got %d and %d", i1, i2) - } -} - -func getNlink(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - statT, ok := stat.Sys().(*syscall.Stat_t) - if !ok { - return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys()) - } - return statT.Nlink, nil -} - -func getInode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - statT, ok := stat.Sys().(*syscall.Stat_t) - if !ok { - return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys()) - } - return statT.Ino, nil -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} - -func BenchmarkTarUntar(b *testing.B) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - b.Fatal(err) - } - tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") - if err != nil { -
b.Fatal(err) - } - target := path.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin, false) - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(origin) - defer os.RemoveAll(tempDir) - - b.ResetTimer() - b.SetBytes(int64(n)) - for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) - if err != nil { - b.Fatal(err) - } - os.RemoveAll(target) - } -} - -func BenchmarkTarUntarWithLinks(b *testing.B) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - b.Fatal(err) - } - tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") - if err != nil { - b.Fatal(err) - } - target := path.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin, true) - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(origin) - defer os.RemoveAll(tempDir) - - b.ResetTimer() - b.SetBytes(int64(n)) - for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) - if err != nil { - b.Fatal(err) - } - os.RemoveAll(target) - } -} - -func TestUntarInvalidFilenames(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { - { - Name: "../victim/dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { - { - // Note the leading slash - Name: "/../victim/slash-dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestUntarInvalidHardlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeLink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeLink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (hardlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try reading victim/hello (hardlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try removing victim directory (hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestUntarInvalidSymlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeSymlink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeSymlink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try removing victim directory (symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try writing to victim/newdir/newfile with a symlink in the path - { - // this header needs to be before the next one, or else there is an error - Name: "dir/loophole", - Typeflag: tar.TypeSymlink, - Linkname: "../../victim", - Mode: 0755, - }, - { - Name: "dir/loophole/newdir/newfile", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestTempArchiveCloseMultipleTimes(t *testing.T) { - reader := ioutil.NopCloser(strings.NewReader("hello")) - tempArchive, err := NewTempArchive(reader, "") - buf := make([]byte, 10) - n, err := tempArchive.Read(buf) - if n != 5 { - t.Fatalf("Expected to read 5 bytes. Read %d instead", n) - } - for i := 0; i < 3; i++ { - if err = tempArchive.Close(); err != nil { - t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) - } - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go deleted file mode 100644 index c0e8aee93c..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !windows - -package archive - -import ( - "errors" - "syscall" - - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" -) - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - err = errors.New("cannot convert stat value to syscall.Stat_t") - return - } - - nlink = uint32(s.Nlink) - inode = uint64(s.Ino) - - // Currently Go does not fill in the major/minor numbers - if s.Mode&syscall.S_IFBLK == syscall.S_IFBLK || - s.Mode&syscall.S_IFCHR == syscall.S_IFCHR { - hdr.Devmajor = int64(major(uint64(s.Rdev))) - hdr.Devminor = int64(minor(uint64(s.Rdev))) - } - - return -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go deleted file mode 100644 index 3cc2493f6f..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build windows - -package archive - -import ( - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" -) - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { - // do nothing. 
no notion of Rdev, Inode, Nlink in stat on Windows - return -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go deleted file mode 100644 index 85217f6e08..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go +++ /dev/null @@ -1,413 +0,0 @@ -package archive - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" -) - -type ChangeType int - -const ( - ChangeModify = iota - ChangeAdd - ChangeDelete -) - -type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - var kind string - switch change.Kind { - case ChangeModify: - kind = "C" - case ChangeAdd: - kind = "A" - case ChangeDelete: - kind = "D" - } - return fmt.Sprintf("%s %s", kind, change.Path) -} - -// Gnu tar and the go tar writer don't have sub-second mtime -// precision, which is problematic when we apply changes via tar -// files, we handle this by comparing for exact times, *or* same -// second count and either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a == b || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -func sameFsTimeSpec(a, b syscall.Timespec) bool { - return a.Sec == b.Sec && - (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) -} - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func Changes(layers []string, rw string) ([]Change, error) { - var changes []Change - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - path = filepath.Join("/", path) - - // Skip root - if path == "/" { - return nil - } - - // Skip AUFS metadata - if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched { - return err - } - - change := Change{ - Path: path, - } - - // Find out what kind of modification happened - file := filepath.Base(path) - // If there is a whiteout, then the file was removed - if strings.HasPrefix(file, ".wh.") { - originalFile := file[len(".wh."):] - change.Path = filepath.Join(filepath.Dir(path), originalFile) - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified. 
- // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -type FileInfo struct { - parent *FileInfo - name string - stat *system.Stat - children map[string]*FileInfo - capability []byte - added bool -} - -func (root *FileInfo) LookUp(path string) *FileInfo { - parent := root - if path == "/" { - return root - } - - pathElements := strings.Split(path, "/") - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - return "/" - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR == syscall.S_IFDIR -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - - sizeAtEntry := len(*changes) - - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - info.added = true - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild := oldChildren[name] - if oldChild != nil { - // change? - oldStat := oldChild.stat - newStat := newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. The only time this - // breaks down is if some code intentionally hides a change by setting - // back mtime - if oldStat.Mode() != newStat.Mode() || - oldStat.Uid() != newStat.Uid() || - oldStat.Gid() != newStat.Gid() || - oldStat.Rdev() != newStat.Rdev() || - // Don't look at size for dirs, it's not a good measure of change - (oldStat.Size() != newStat.Size() && oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR) || - !sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || - bytes.Compare(oldChild.capability, newChild.capability) != 0 { - change := Change{ - Path: newChild.path(), - Kind: ChangeModify, - } - *changes = append(*changes, change) - newChild.added = true - } - - // Remove from copy so we can detect deletions - delete(oldChildren, name) - } - - newChild.addChanges(oldChild, changes) - } - for _, oldChild := range oldChildren { - // delete - change := Change{ - Path: oldChild.path(), - Kind: ChangeDelete, - } - *changes = append(*changes, change) - } - - // If there were changes inside this directory, we need to add it, even if the directory - // itself wasn't changed. This is needed to properly save and restore filesystem permissions. 
- if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != "/" { - change := Change{ - Path: info.path(), - Kind: ChangeModify, - } - // Let's insert the directory entry before the recently added entries located inside this dir - *changes = append(*changes, change) // just to resize the slice, will be overwritten - copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) - (*changes)[sizeAtEntry] = change - } - -} - -func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { - var changes []Change - - info.addChanges(oldInfo, &changes) - - return changes -} - -func newRootFileInfo() *FileInfo { - root := &FileInfo{ - name: "/", - children: make(map[string]*FileInfo), - } - return root -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - relPath = filepath.Join("/", relPath) - - if relPath == "/" { - return nil - } - - parent := root.LookUp(filepath.Dir(relPath)) - if parent == nil { - return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) - } - - info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, - } - - s, err := system.Lstat(path) - if err != nil { - return err - } - info.stat = s - - info.capability, _ = system.Lgetxattr(path, "security.capability") - - parent.children[info.name] = info - - return nil - }) - if err != nil { - return nil, err - } - return root, nil -} - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// If oldDir is "", then all files in newDir will be Add-Changes. -func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - if oldDir != "" { - oldRoot, err1 = collectFileInfo(oldDir) - } - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, err - } - } - - return newRoot.Changes(oldRoot), nil -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -func ChangesSize(newDir string, changes []Change) int64 { - var size int64 - for _, change := range changes { - if change.Kind == ChangeModify || change.Kind == ChangeAdd { - file := filepath.Join(newDir, change.Path) - fileInfo, _ := os.Lstat(file) - if fileInfo != nil && !fileInfo.IsDir() { - size += fileInfo.Size() - } - } - } - return size -} - -// ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []Change) (Archive, error) { - reader, writer := io.Pipe() - go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - } - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. 
a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - for _, change := range changes { - if change.Kind == ChangeDelete { - whiteOutDir := filepath.Dir(change.Path) - whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) - timestamp := time.Now() - hdr := &tar.Header{ - Name: whiteOut[1:], - Size: 0, - ModTime: timestamp, - AccessTime: timestamp, - ChangeTime: timestamp, - } - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - log.Debugf("Can't write whiteout header: %s", err) - } - } else { - path := filepath.Join(dir, change.Path) - if err := ta.addTarFile(path, change.Path[1:]); err != nil { - log.Debugf("Can't add file %s to tar: %s", path, err) - } - } - } - - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - log.Debugf("Can't close layer: %s", err) - } - if err := writer.Close(); err != nil { - log.Debugf("failed to close Changes writer: %s", err) - } - }() - return reader, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go deleted file mode 100644 index 6b8f2354b8..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go +++ /dev/null @@ -1,301 +0,0 @@ -package archive - -import ( - "io/ioutil" - "os" - "os/exec" - "path" - "sort" - "testing" - "time" -) - -func max(x, y int) int { - if x >= y { - return x - } - return y -} - -func copyDir(src, dst string) error { - cmd := exec.Command("cp", "-a", src, dst) - if err := cmd.Run(); err != nil { - return err - } - return nil -} - -// Helper to sort []Change by path -type byPath struct{ changes []Change } - -func (b byPath) Less(i, j int) bool { return b.changes[i].Path < b.changes[j].Path } -func (b byPath) Len() int { return len(b.changes) } -func (b byPath) Swap(i, j int) { b.changes[i], b.changes[j] = b.changes[j], b.changes[i] } - -type FileType uint32 - -const ( - Regular FileType = iota - Dir - Symlink -) - -type FileData struct { - filetype FileType - path string - contents string - permissions os.FileMode -} - -func createSampleDir(t *testing.T, root string) { - files := []FileData{ - {Regular, "file1", "file1\n", 0600}, - {Regular, "file2", "file2\n", 0666}, - {Regular, "file3", "file3\n", 0404}, - {Regular, "file4", "file4\n", 0600}, - {Regular, "file5", "file5\n", 0600}, - {Regular, "file6", "file6\n", 0600}, - {Regular, "file7", "file7\n", 0600}, - {Dir, "dir1", "", 0740}, - {Regular, "dir1/file1-1", "file1-1\n", 01444}, - {Regular, "dir1/file1-2", "file1-2\n", 0666}, - {Dir, "dir2", "", 0700}, - {Regular, "dir2/file2-1", "file2-1\n", 0666}, - {Regular, "dir2/file2-2", "file2-2\n", 0666}, - {Dir, "dir3", "", 0700}, - {Regular, "dir3/file3-1", "file3-1\n", 0666}, - {Regular, "dir3/file3-2", "file3-2\n", 0666}, - {Dir, "dir4", "", 0700}, - {Regular, "dir4/file3-1", "file4-1\n", 0666}, - {Regular, "dir4/file3-2", "file4-2\n", 0666}, - {Symlink, "symlink1", "target1", 0666}, - {Symlink, "symlink2", "target2", 0666}, - } - - now := time.Now() - for _, info := range files { - p := path.Join(root, info.path) - if info.filetype == Dir { - if err := os.MkdirAll(p, info.permissions); err != nil { - t.Fatal(err) - } - } else if info.filetype == Regular { - if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { - t.Fatal(err) - } - } else if info.filetype == Symlink { - if err := 
os.Symlink(info.contents, p); err != nil { - t.Fatal(err) - } - } - - if info.filetype != Symlink { - // Set a consistent ctime, atime for all files and dirs - if err := os.Chtimes(p, now, now); err != nil { - t.Fatal(err) - } - } - } -} - -// Create a directory, copy it, make sure we report no changes between the two -func TestChangesDirsEmpty(t *testing.T) { - src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - createSampleDir(t, src) - dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } - changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } - - if len(changes) != 0 { - t.Fatalf("Reported changes for identical dirs: %v", changes) - } - os.RemoveAll(src) - os.RemoveAll(dst) -} - -func mutateSampleDir(t *testing.T, root string) { - // Remove a regular file - if err := os.RemoveAll(path.Join(root, "file1")); err != nil { - t.Fatal(err) - } - - // Remove a directory - if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { - t.Fatal(err) - } - - // Remove a symlink - if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { - t.Fatal(err) - } - - // Rewrite a file - if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { - t.Fatal(err) - } - - // Replace a file - if err := os.RemoveAll(path.Join(root, "file3")); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { - t.Fatal(err) - } - - // Touch file - if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { - t.Fatal(err) - } - - // Replace file with dir - if err := os.RemoveAll(path.Join(root, "file5")); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { - t.Fatal(err) - } - - // Create new file - if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { - t.Fatal(err) - } - - // Create new dir - if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { - t.Fatal(err) - } - - // Create a new symlink - if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { - t.Fatal(err) - } - - // Change a symlink - if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { - t.Fatal(err) - } - if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { - t.Fatal(err) - } - - // Replace dir with file - if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { - t.Fatal(err) - } - - // Touch dir - if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { - t.Fatal(err) - } -} - -func TestChangesDirsMutated(t *testing.T) { - src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - createSampleDir(t, src) - dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(src) - defer os.RemoveAll(dst) - - mutateSampleDir(t, dst) - - changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } - - sort.Sort(byPath{changes}) - - expectedChanges := []Change{ - {"/dir1", ChangeDelete}, - {"/dir2", ChangeModify}, - {"/dir3", ChangeModify}, - {"/dirnew", ChangeAdd}, - {"/file1", ChangeDelete}, - {"/file2", ChangeModify}, - {"/file3", ChangeModify}, - {"/file4", ChangeModify}, - 
{"/file5", ChangeModify}, - {"/filenew", ChangeAdd}, - {"/symlink1", ChangeDelete}, - {"/symlink2", ChangeModify}, - {"/symlinknew", ChangeAdd}, - } - - for i := 0; i < max(len(changes), len(expectedChanges)); i++ { - if i >= len(expectedChanges) { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } - if i >= len(changes) { - t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) - } - if changes[i].Path == expectedChanges[i].Path { - if changes[i] != expectedChanges[i] { - t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) - } - } else if changes[i].Path < expectedChanges[i].Path { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } else { - t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) - } - } -} - -func TestApplyLayer(t *testing.T) { - src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - createSampleDir(t, src) - defer os.RemoveAll(src) - dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } - mutateSampleDir(t, dst) - defer os.RemoveAll(dst) - - changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } - - layer, err := ExportChanges(dst, changes) - if err != nil { - t.Fatal(err) - } - - layerCopy, err := NewTempArchive(layer, "") - if err != nil { - t.Fatal(err) - } - - if _, err := ApplyLayer(src, layerCopy); err != nil { - t.Fatal(err) - } - - changes2, err := ChangesDirs(src, dst) - if err != nil { - t.Fatal(err) - } - - if len(changes2) != 0 { - t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go deleted file mode 100644 index ca282071f5..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go +++ /dev/null @@ -1,169 +0,0 @@ -package archive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" -) - -func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) { - tr := tar.NewReader(layer) - trBuf := pools.BufioReader32KPool.Get(tr) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - if !strings.HasSuffix(hdr.Name, "/") { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. 
- parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = os.MkdirAll(parentPath, 0600) - if err != nil { - return 0, err - } - } - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, ".wh..wh.") { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in it so that - // such hardlinks can be resolved. - if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil { - return 0, err - } - } - continue - } - - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return 0, err - } - if strings.HasPrefix(rel, "..") { - return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - base := filepath.Base(path) - - if strings.HasPrefix(base, ".wh.") { - originalBase := base[len(".wh."):] - originalPath := filepath.Join(filepath.Dir(path), originalBase) - if err := os.RemoveAll(originalPath); err != nil { - return 0, err - } - } else { - // If path exists we almost always just want to remove and replace it. - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return 0, err - } - } - } - - trBuf.Reset(tr) - srcData := io.Reader(trBuf) - srcHdr := hdr - - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") - } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - - if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil { - return 0, err - } - - // Directory mtimes must be handled at the end to keep further - // file creation in them from modifying the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} - if err := syscall.UtimesNano(path, ts); err != nil { - return 0, err - } - } - - return size, nil -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. 
-func ApplyLayer(dest string, layer ArchiveReader) (int64, error) { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform - - layer, err = DecompressStream(layer) - if err != nil { - return 0, err - } - return UnpackLayer(dest, layer) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go deleted file mode 100644 index 758c4115d5..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go +++ /dev/null @@ -1,191 +0,0 @@ -package archive - -import ( - "testing" - - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" -) - -func TestApplyLayerInvalidFilenames(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { - { - Name: "../victim/dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { - { - // Note the leading slash - Name: "/../victim/slash-dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestApplyLayerInvalidHardlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeLink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeLink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (hardlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try reading victim/hello (hardlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try removing victim directory (hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestApplyLayerInvalidSymlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeSymlink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeSymlink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try removing victim directory (symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go deleted file mode 100644 index cedd46a408..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build ignore - -// Simple tool to create an archive stream from an old and new directory -// -// By default it will stream the comparison of two temporary directories with junk files -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/archive" -) - -var ( - flDebug = flag.Bool("D", false, "debugging output") - flNewDir = flag.String("newdir", "", "") - flOldDir = flag.String("olddir", "", "") - log = logrus.New() -) - -func main() { - flag.Usage = func() { - fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") - fmt.Printf("%s [OPTIONS]\n", os.Args[0]) - flag.PrintDefaults() - } - flag.Parse() - log.Out = os.Stderr - if (len(os.Getenv("DEBUG")) > 0) || *flDebug { - logrus.SetLevel(logrus.DebugLevel) - } - var newDir, oldDir string - - if len(*flNewDir) == 0 { - var err error - newDir, err = ioutil.TempDir("", "docker-test-newDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(newDir) - if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { - log.Fatal(err) - } - } else { - newDir = *flNewDir - } - - if len(*flOldDir) == 0 { - var err error - oldDir, err = ioutil.TempDir("", "docker-test-oldDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(oldDir) - } else { - oldDir = *flOldDir - } - - changes, err := archive.ChangesDirs(newDir, oldDir) - if err != nil { - log.Fatal(err) - } - - a, err := archive.ExportChanges(newDir, changes) - if err != nil { - log.Fatal(err) - } - defer a.Close() - - i, err := io.Copy(os.Stdout, a) - if err != nil && err != io.EOF { - log.Fatal(err) - } - fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar deleted file mode 100644 index 8f10ea6b87..0000000000 Binary files a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go deleted file mode 100644 index 3448569b1e..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go deleted file mode 100644 index e85aac0540..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux - -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - nsec := int64(0) - if !time.IsZero() { - nsec = time.UnixNano() - } - return syscall.NsecToTimespec(nsec) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go deleted file mode 100644 index 9048027203..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go +++ /dev/null @@ 
-1,167 +0,0 @@ -package archive - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" -) - -var testUntarFns = map[string]func(string, io.Reader) error{ - "untar": func(dest string, r io.Reader) error { - return Untar(r, dest, nil) - }, - "applylayer": func(dest string, r io.Reader) error { - _, err := ApplyLayer(dest, ArchiveReader(r)) - return err - }, -} - -// testBreakout is a helper function that, within the provided `tmpdir` directory, -// creates a `victim` folder with a generated `hello` file in it. -// `untar` extracts to a directory named `dest`, the tar file created from `headers`. -// -// Here are the tested scenarios: -// - removed `victim` folder (write) -// - removed files from `victim` folder (write) -// - new files in `victim` folder (write) -// - modified files in `victim` folder (write) -// - file in `dest` with same content as `victim/hello` (read) -// -// When using testBreakout make sure you cover one of the scenarios listed above. -func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { - tmpdir, err := ioutil.TempDir("", tmpdir) - if err != nil { - return err - } - defer os.RemoveAll(tmpdir) - - dest := filepath.Join(tmpdir, "dest") - if err := os.Mkdir(dest, 0755); err != nil { - return err - } - - victim := filepath.Join(tmpdir, "victim") - if err := os.Mkdir(victim, 0755); err != nil { - return err - } - hello := filepath.Join(victim, "hello") - helloData, err := time.Now().MarshalText() - if err != nil { - return err - } - if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { - return err - } - helloStat, err := os.Stat(hello) - if err != nil { - return err - } - - reader, writer := io.Pipe() - go func() { - t := tar.NewWriter(writer) - for _, hdr := range headers { - t.WriteHeader(hdr) - } - t.Close() - }() - - untar := testUntarFns[untarFn] - if untar == nil { - return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) - } - if err := untar(dest, reader); err != nil { - if _, ok := err.(breakoutError); !ok { - // If untar returns an error unrelated to an archive breakout, - // then consider this an unexpected error and abort. - return err - } - // Here, untar detected the breakout. - // Let's move on verifying that indeed there was no breakout. - fmt.Printf("breakoutError: %v\n", err) - } - - // Check victim folder - f, err := os.Open(victim) - if err != nil { - // codepath taken if victim folder was removed - return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) - } - defer f.Close() - - // Check contents of victim folder - // - // We are only interested in getting 2 files from the victim folder, because if all is well - // we expect only one result, the `hello` file. If there is a second result, it cannot - // hold the same name `hello` and we assume that a new file got created in the victim folder. - // That is enough to detect an archive breakout. 
- names, err := f.Readdirnames(2) - if err != nil { - // codepath taken if victim is not a folder - return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) - } - for _, name := range names { - if name != "hello" { - // codepath taken if new file was created in victim folder - return fmt.Errorf("archive breakout: new file %q", name) - } - } - - // Check victim/hello - f, err = os.Open(hello) - if err != nil { - // codepath taken if read permissions were removed - return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) - } - defer f.Close() - b, err := ioutil.ReadAll(f) - if err != nil { - return err - } - fi, err := f.Stat() - if err != nil { - return err - } - if helloStat.IsDir() != fi.IsDir() || - // TODO: cannot check for fi.ModTime() change - helloStat.Mode() != fi.Mode() || - helloStat.Size() != fi.Size() || - !bytes.Equal(helloData, b) { - // codepath taken if hello has been modified - return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi) - } - - // Check that nothing in dest/ has the same content as victim/hello. - // Since victim/hello was generated with time.Now(), it is safe to assume - // that any file whose content matches exactly victim/hello managed somehow - // to access victim/hello. - return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { - if info.IsDir() { - if err != nil { - // skip directory if error - return filepath.SkipDir - } - // enter directory - return nil - } - if err != nil { - // skip file if error - return nil - } - b, err := ioutil.ReadFile(path) - if err != nil { - // Houston, we have a problem. Aborting (space)walk. - return err - } - if bytes.Equal(helloData, b) { - return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) - } - return nil - }) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go deleted file mode 100644 index b8b60197a3..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package archive - -import ( - "bytes" - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io/ioutil" -) - -// Generate generates a new archive from the content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") - -// -// The above call will return an archive with 2 files: -// * ./foo.txt with content "hello world" -// * ./emptyfile with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (Archive, error) { - files := parseStringPairs(input...) 
- buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return ioutil.NopCloser(buf), nil -} - -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go deleted file mode 100644 index 4e4a91b91a..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go +++ /dev/null @@ -1,26 +0,0 @@ -package fileutils - -import ( - log "github.com/Sirupsen/logrus" - "path/filepath" -) - -// Matches returns true if relFilePath matches any of the patterns -func Matches(relFilePath string, patterns []string) (bool, error) { - for _, exclude := range patterns { - matched, err := filepath.Match(exclude, relFilePath) - if err != nil { - log.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude) - return false, err - } - if matched { - if filepath.Clean(relFilePath) == "." { - log.Errorf("Can't exclude whole path, excluding pattern: %s", exclude) - continue - } - log.Debugf("Skipping excluded path: %s", relFilePath) - return true, nil - } - } - return false, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go deleted file mode 100644 index 22f46fbd92..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go +++ /dev/null @@ -1,114 +0,0 @@ -package ioutils - -import ( - "bytes" - "io" - "sync" -) - -type readCloserWrapper struct { - io.Reader - closer func() error -} - -func (r *readCloserWrapper) Close() error { - return r.closer() -} - -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &readCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -type bufReader struct { - sync.Mutex - buf *bytes.Buffer - reader io.Reader - err error - wait sync.Cond - drainBuf []byte -} - -func NewBufReader(r io.Reader) *bufReader { - reader := &bufReader{ - buf: &bytes.Buffer{}, - drainBuf: make([]byte, 1024), - reader: r, - } - reader.wait.L = &reader.Mutex - go reader.drain() - return reader -} - -func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader { - reader := &bufReader{ - buf: buffer, - drainBuf: drainBuffer, - reader: r, - } - reader.wait.L = &reader.Mutex - go reader.drain() - return reader -} - -func (r *bufReader) drain() { - for { - n, err := r.reader.Read(r.drainBuf) - r.Lock() - if err != nil { - r.err = err - } else { - r.buf.Write(r.drainBuf[0:n]) - } - r.wait.Signal() - 
r.Unlock() - if err != nil { - break - } - } -} - -func (r *bufReader) Read(p []byte) (n int, err error) { - r.Lock() - defer r.Unlock() - for { - n, err = r.buf.Read(p) - if n > 0 { - return n, err - } - if r.err != nil { - return 0, r.err - } - r.wait.Wait() - } -} - -func (r *bufReader) Close() error { - closer, ok := r.reader.(io.ReadCloser) - if !ok { - return nil - } - return closer.Close() -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go deleted file mode 100644 index a7a2dad176..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package ioutils - -import ( - "bytes" - "io" - "io/ioutil" - "testing" -) - -func TestBufReader(t *testing.T) { - reader, writer := io.Pipe() - bufreader := NewBufReader(reader) - - // Write everything down to a Pipe - // Usually, a pipe should block but because of the buffered reader, - // the writes will go through - done := make(chan bool) - go func() { - writer.Write([]byte("hello world")) - writer.Close() - done <- true - }() - - // Drain the reader *after* everything has been written, just to verify - // it is indeed buffering - <-done - output, err := ioutil.ReadAll(bufreader) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(output, []byte("hello world")) { - t.Error(string(output)) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go deleted file mode 100644 index c0b3608fe6..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go +++ /dev/null @@ -1,39 +0,0 @@ -package ioutils - -import "io" - -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -type NopFlusher struct{} - -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE deleted file mode 100644 index ac74d8f049..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/MAINTAINERS
deleted file mode 100644
index e0f18f14f1..0000000000
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/MAINTAINERS
+++ /dev/null
@@ -1 +0,0 @@
-Victor Vieux (@vieux)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/README.md
deleted file mode 100644
index da00efa336..0000000000
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-Package mflag (aka multiple-flag) implements command-line flag parsing.
-It's a **hacky** fork of the [official golang package](http://golang.org/pkg/flag/)
-
-It adds:
-
-* both short and long flag versions
-`./example -s red` `./example --string blue`
-
-* multiple names for the same option
-```
-$>./example -h
-Usage of example:
-  -s, --string="": a simple string
-```
-
-___
-It is very flexible on purpose, so you can do things like:
-```
-$>./example -h
-Usage of example:
-  -s, -string, --string="": a simple string
-```
-
-Or:
-```
-$>./example -h
-Usage of example:
-  -oldflag, --newflag="": a simple string
-```
-
-You can also hide some flags from the usage, so if we want only `--newflag`:
-```
-$>./example -h
-Usage of example:
-  --newflag="": a simple string
-$>./example -oldflag str
-str
-```
-
-See [example.go](example/example.go) for more details.
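Editor's note: for readers skimming this removal, the snippet below is a minimal sketch of how a caller would use the mflag package being deleted here. It relies only on the API defined in the flag.go file later in this diff (String, Bool, Parse, Args); the flag names and usage strings are invented for illustration.

```go
package main

import (
	"fmt"

	flag "github.com/docker/docker/pkg/mflag"
)

func main() {
	// Each flag takes a list of names: "s" is the short form (-s) and
	// "-string" the long form (--string). A name starting with "#" would
	// be accepted but hidden from the usage, as the README above describes.
	str := flag.String([]string{"s", "-string"}, "", "a simple string")
	verbose := flag.Bool([]string{"v", "-verbose"}, false, "verbose output")

	flag.Parse()

	fmt.Printf("string: %s, verbose: %t\n", *str, *verbose)
	fmt.Printf("remaining args: %v\n", flag.Args())
}
```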
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go
deleted file mode 100644
index 2e766dd1e5..0000000000
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package main
-
-import (
-	"fmt"
-
-	flag "github.com/docker/docker/pkg/mflag"
-)
-
-var (
-	i        int
-	str      string
-	b, b2, h bool
-)
-
-func init() {
-	flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp")
-	flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool")
-	flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool")
-	flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool")
-	flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer")
-	flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") // -s, -hidden and --string will work, but -hidden won't be in the usage
-	flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help")
-	flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3")
-	flag.Parse()
-}
-func main() {
-	if h {
-		flag.PrintDefaults()
-	} else {
-		fmt.Printf("s/#hidden/-string: %s\n", str)
-		fmt.Printf("b: %t\n", b)
-		fmt.Printf("-bool: %t\n", b2)
-		fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String())
-		fmt.Printf("ARGS: %v\n", flag.Args())
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go
deleted file mode 100644
index 94cb5b7f76..0000000000
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go
+++ /dev/null
@@ -1,1084 +0,0 @@
-// Copyright 2014-2015 The Docker & Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-	Package flag implements command-line flag parsing.
-
-	Usage:
-
-	Define flags using flag.String(), Bool(), Int(), etc.
-
-	This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int.
-		import flag "github.com/docker/docker/pkg/mflag"
-		var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname")
-	If you like, you can bind the flag to a variable using the Var() functions.
-		var flagvar int
-		func init() {
-			// -flaghidden will work, but will be hidden from the usage
-			flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname")
-		}
-	Or you can create custom flags that satisfy the Value interface (with
-	pointer receivers) and couple them to flag parsing by
-		flag.Var(&flagVal, []string{"name"}, "help message for flagname")
-	For such flags, the default value is just the initial value of the variable.
-
-	You can also add "deprecated" flags: they are still usable, but are not shown
-	in the usage and will display a warning when you try to use them. A `#` before
-	an option means the option is deprecated; if a following option without `#`
-	exists, that one is the replacement, and if not, the option will just be removed:
-		var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname")
-	this will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` or
-	this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.`
-		var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname")
-	will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.`
-	so you can only use `-f`.
-
-	You can also group one letter flags: if you declare
-		var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose")
-		var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow")
-	you will be able to use -vs or -sv
-
-	After all flags are defined, call
-		flag.Parse()
-	to parse the command line into the defined flags.
-
-	Flags may then be used directly. If you're using the flags themselves,
-	they are all pointers; if you bind to variables, they're values.
-		fmt.Println("ip has value ", *ip)
-		fmt.Println("flagvar has value ", flagvar)
-
-	After parsing, the arguments after the flag are available as the
-	slice flag.Args() or individually as flag.Arg(i).
-	The arguments are indexed from 0 through flag.NArg()-1.
-
-	Command line flag syntax:
-		-flag
-		-flag=x
-		-flag="x"
-		-flag='x'
-		-flag x  // non-boolean flags only
-	One or two minus signs may be used; they are equivalent.
-	The last form is not permitted for boolean flags because the
-	meaning of the command
-		cmd -x *
-	will change if there is a file called 0, false, etc. You must
-	use the -flag=false form to turn off a boolean flag.
-
-	Flag parsing stops just before the first non-flag argument
-	("-" is a non-flag argument) or after the terminator "--".
-
-	Integer flags accept 1234, 0664, 0x1234 and may be negative.
-	Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False.
-	Duration flags accept any input valid for time.ParseDuration.
-
-	The default set of command-line flags is controlled by
-	top-level functions. The FlagSet type allows one to define
-	independent sets of flags, such as to implement subcommands
-	in a command-line interface. The methods of FlagSet are
-	analogous to the top-level functions for the command-line
-	flag set.
-*/
-package mflag
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"sort"
-	"strconv"
-	"strings"
-	"text/tabwriter"
-	"time"
-)
-
-// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
-var ErrHelp = errors.New("flag: help requested") - -// ErrRetry is the error returned if you need to try letter by letter -var ErrRetry = errors.New("flag: retry") - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Get() interface{} { return bool(*b) } - -func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Get() interface{} { return int(*i) } - -func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Get() interface{} { return int64(*i) } - -func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Get() interface{} { return uint(*i) } - -func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Get() interface{} { return uint64(*i) } - -func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} - -func (s *stringValue) Get() interface{} { return string(*s) } - -func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Get() interface{} { return float64(*f) } - -func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Get() interface{} { return time.Duration(*d) } - -func (d *durationValue) String() 
string { return (*time.Duration)(d).String() } - -// Value is the interface to the dynamic value stored in a flag. -// (The default value is represented as a string.) -// -// If a Value has an IsBoolFlag() bool method returning true, -// the command-line parser makes -name equivalent to -name=true -// rather than using the next command-line argument. -type Value interface { - String() string - Set(string) error -} - -// Getter is an interface that allows the contents of a Value to be retrieved. -// It wraps the Value interface, rather than being part of it, because it -// appeared after Go 1 and its compatibility rules. All Value types provided -// by this package satisfy the Getter interface. -type Getter interface { - Value - Get() interface{} -} - -// ErrorHandling defines how to handle flag parsing errors. -type ErrorHandling int - -const ( - ContinueOnError ErrorHandling = iota - ExitOnError - PanicOnError -) - -// A FlagSet represents a set of defined flags. The zero value of a FlagSet -// has no name and has ContinueOnError error handling. -type FlagSet struct { - // Usage is the function called when an error occurs while parsing flags. - // The field is a function (not a method) that may be changed to point to - // a custom error handler. - Usage func() - - name string - parsed bool - actual map[string]*Flag - formal map[string]*Flag - args []string // arguments after flags - errorHandling ErrorHandling - output io.Writer // nil means stderr; use Out() accessor - nArgRequirements []nArgRequirement -} - -// A Flag represents the state of a flag. -type Flag struct { - Names []string // name as it appears on command line - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message -} - -type flagSlice []string - -func (p flagSlice) Len() int { return len(p) } -func (p flagSlice) Less(i, j int) bool { - pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-") - lpi, lpj := strings.ToLower(pi), strings.ToLower(pj) - if lpi != lpj { - return lpi < lpj - } - return pi < pj -} -func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// sortFlags returns the flags as a slice in lexicographical sorted order. -func sortFlags(flags map[string]*Flag) []*Flag { - var list flagSlice - - // The sorted list is based on the first name, when flag map might use the other names. - nameMap := make(map[string]string) - - for n, f := range flags { - fName := strings.TrimPrefix(f.Names[0], "#") - nameMap[fName] = n - if len(f.Names) == 1 { - list = append(list, fName) - continue - } - - found := false - for _, name := range list { - if name == fName { - found = true - break - } - } - if !found { - list = append(list, fName) - } - } - sort.Sort(list) - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[nameMap[name]] - } - return result -} - -// Name returns the name of the FlagSet. -func (f *FlagSet) Name() string { - return f.name -} - -// Out returns the destination for usage and error messages. -func (f *FlagSet) Out() io.Writer { - if f.output == nil { - return os.Stderr - } - return f.output -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (f *FlagSet) SetOutput(output io.Writer) { - f.output = output -} - -// VisitAll visits the flags in lexicographical order, calling fn for each. -// It visits all flags, even those not set. 
-func (f *FlagSet) VisitAll(fn func(*Flag)) { - for _, flag := range sortFlags(f.formal) { - fn(flag) - } -} - -// VisitAll visits the command-line flags in lexicographical order, calling -// fn for each. It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order, calling fn for each. -// It visits only those flags that have been set. -func (f *FlagSet) Visit(fn func(*Flag)) { - for _, flag := range sortFlags(f.actual) { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order, calling fn -// for each. It visits only those flags that have been set. -func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) Lookup(name string) *Flag { - return f.formal[name] -} - -// Indicates whether the specified flag was specified at all on the cmd line -func (f *FlagSet) IsSet(name string) bool { - return f.actual[name] != nil -} - -// Lookup returns the Flag structure of the named command-line flag, -// returning nil if none exists. -func Lookup(name string) *Flag { - return CommandLine.formal[name] -} - -// Indicates whether the specified flag was specified at all on the cmd line -func IsSet(name string) bool { - return CommandLine.IsSet(name) -} - -type nArgRequirementType int - -// Indicator used to pass to BadArgs function -const ( - Exact nArgRequirementType = iota - Max - Min -) - -type nArgRequirement struct { - Type nArgRequirementType - N int -} - -// Require adds a requirement about the number of arguments for the FlagSet. -// The first parameter can be Exact, Max, or Min to respectively specify the exact, -// the maximum, or the minimal number of arguments required. -// The actual check is done in FlagSet.CheckArgs(). -func (f *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) { - f.nArgRequirements = append(f.nArgRequirements, nArgRequirement{nArgRequirementType, nArg}) -} - -// CheckArgs uses the requirements set by FlagSet.Require() to validate -// the number of arguments. If the requirements are not met, -// an error message string is returned. -func (f *FlagSet) CheckArgs() (message string) { - for _, req := range f.nArgRequirements { - var arguments string - if req.N == 1 { - arguments = "1 argument" - } else { - arguments = fmt.Sprintf("%d arguments", req.N) - } - - str := func(kind string) string { - return fmt.Sprintf("%q requires %s%s", f.name, kind, arguments) - } - - switch req.Type { - case Exact: - if f.NArg() != req.N { - return str("") - } - case Max: - if f.NArg() > req.N { - return str("a maximum of ") - } - case Min: - if f.NArg() < req.N { - return str("a minimum of ") - } - } - } - return "" -} - -// Set sets the value of the named flag. -func (f *FlagSet) Set(name, value string) error { - flag, ok := f.formal[name] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - err := flag.Value.Set(value) - if err != nil { - return err - } - if f.actual == nil { - f.actual = make(map[string]*Flag) - } - f.actual[name] = flag - return nil -} - -// Set sets the value of the named command-line flag. -func Set(name, value string) error { - return CommandLine.Set(name, value) -} - -// PrintDefaults prints, to standard error unless configured -// otherwise, the default values of all defined flags in the set. 
-func (f *FlagSet) PrintDefaults() { - writer := tabwriter.NewWriter(f.Out(), 20, 1, 3, ' ', 0) - f.VisitAll(func(flag *Flag) { - format := " -%s=%s" - if _, ok := flag.Value.(*stringValue); ok { - // put quotes on the value - format = " -%s=%q" - } - names := []string{} - for _, name := range flag.Names { - if name[0] != '#' { - names = append(names, name) - } - } - if len(names) > 0 { - fmt.Fprintf(writer, format, strings.Join(names, ", -"), flag.DefValue) - for i, line := range strings.Split(flag.Usage, "\n") { - if i != 0 { - line = " " + line - } - fmt.Fprintln(writer, "\t", line) - } - } - }) - writer.Flush() -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. -func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(f *FlagSet) { - if f.name == "" { - fmt.Fprintf(f.Out(), "Usage:\n") - } else { - fmt.Fprintf(f.Out(), "Usage of %s:\n", f.name) - } - f.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -var Usage = func() { - fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// FlagCount returns the number of flags that have been defined. -func (f *FlagSet) FlagCount() int { return len(sortFlags(f.formal)) } - -// FlagCountUndeprecated returns the number of undeprecated flags that have been defined. -func (f *FlagSet) FlagCountUndeprecated() int { - count := 0 - for _, flag := range sortFlags(f.formal) { - for _, name := range flag.Names { - if name[0] != '#' { - count++ - break - } - } - } - return count -} - -// NFlag returns the number of flags that have been set. -func (f *FlagSet) NFlag() int { return len(f.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. -func (f *FlagSet) Arg(i int) string { - if i < 0 || i >= len(f.args) { - return "" - } - return f.args[i] -} - -// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument -// after flags have been processed. -func Arg(i int) string { - return CommandLine.Arg(i) -} - -// NArg is the number of arguments remaining after flags have been processed. -func (f *FlagSet) NArg() int { return len(f.args) } - -// NArg is the number of arguments remaining after flags have been processed. -func NArg() int { return len(CommandLine.args) } - -// Args returns the non-flag arguments. -func (f *FlagSet) Args() []string { return f.args } - -// Args returns the non-flag command-line arguments. -func Args() []string { return CommandLine.args } - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (f *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { - f.Var(newBoolValue(value, p), names, usage) -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. 
-func BoolVar(p *bool, names []string, value bool, usage string) { - CommandLine.Var(newBoolValue(value, p), names, usage) -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func (f *FlagSet) Bool(names []string, value bool, usage string) *bool { - p := new(bool) - f.BoolVar(p, names, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func Bool(names []string, value bool, usage string) *bool { - return CommandLine.Bool(names, value, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (f *FlagSet) IntVar(p *int, names []string, value int, usage string) { - f.Var(newIntValue(value, p), names, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, names []string, value int, usage string) { - CommandLine.Var(newIntValue(value, p), names, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func (f *FlagSet) Int(names []string, value int, usage string) *int { - p := new(int) - f.IntVar(p, names, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(names []string, value int, usage string) *int { - return CommandLine.Int(names, value, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func (f *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { - f.Var(newInt64Value(value, p), names, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func Int64Var(p *int64, names []string, value int64, usage string) { - CommandLine.Var(newInt64Value(value, p), names, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func (f *FlagSet) Int64(names []string, value int64, usage string) *int64 { - p := new(int64) - f.Int64Var(p, names, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func Int64(names []string, value int64, usage string) *int64 { - return CommandLine.Int64(names, value, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { - f.Var(newUintValue(value, p), names, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. 
-// The argument p points to a uint variable in which to store the value of the flag. -func UintVar(p *uint, names []string, value uint, usage string) { - CommandLine.Var(newUintValue(value, p), names, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint(names []string, value uint, usage string) *uint { - p := new(uint) - f.UintVar(p, names, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(names []string, value uint, usage string) *uint { - return CommandLine.Uint(names, value, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (f *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { - f.Var(newUint64Value(value, p), names, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func Uint64Var(p *uint64, names []string, value uint64, usage string) { - CommandLine.Var(newUint64Value(value, p), names, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (f *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64Var(p, names, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(names []string, value uint64, usage string) *uint64 { - return CommandLine.Uint64(names, value, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (f *FlagSet) StringVar(p *string, names []string, value string, usage string) { - f.Var(newStringValue(value, p), names, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func StringVar(p *string, names []string, value string, usage string) { - CommandLine.Var(newStringValue(value, p), names, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (f *FlagSet) String(names []string, value string, usage string) *string { - p := new(string) - f.StringVar(p, names, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(names []string, value string, usage string) *string { - return CommandLine.String(names, value, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. 
-func (f *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { - f.Var(newFloat64Value(value, p), names, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, names []string, value float64, usage string) { - CommandLine.Var(newFloat64Value(value, p), names, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (f *FlagSet) Float64(names []string, value float64, usage string) *float64 { - p := new(float64) - f.Float64Var(p, names, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func Float64(names []string, value float64, usage string) *float64 { - return CommandLine.Float64(names, value, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (f *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { - f.Var(newDurationValue(value, p), names, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { - CommandLine.Var(newDurationValue(value, p), names, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func (f *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVar(p, names, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func Duration(names []string, value time.Duration, usage string) *time.Duration { - return CommandLine.Duration(names, value, usage) -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func (f *FlagSet) Var(value Value, names []string, usage string) { - // Remember the default value as a string; it won't change. 
- flag := &Flag{names, usage, value, value.String()} - for _, name := range names { - name = strings.TrimPrefix(name, "#") - _, alreadythere := f.formal[name] - if alreadythere { - var msg string - if f.name == "" { - msg = fmt.Sprintf("flag redefined: %s", name) - } else { - msg = fmt.Sprintf("%s flag redefined: %s", f.name, name) - } - fmt.Fprintln(f.Out(), msg) - panic(msg) // Happens only if flags are declared with identical names - } - if f.formal == nil { - f.formal = make(map[string]*Flag) - } - f.formal[name] = flag - } -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func Var(value Value, names []string, usage string) { - CommandLine.Var(value, names, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - fmt.Fprintln(f.Out(), err) - if os.Args[0] == f.name { - fmt.Fprintf(f.Out(), "See '%s --help'.\n", os.Args[0]) - } else { - fmt.Fprintf(f.Out(), "See '%s %s --help'.\n", os.Args[0], f.name) - } - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. -func (f *FlagSet) usage() { - if f == CommandLine { - Usage() - } else if f.Usage == nil { - defaultUsage(f) - } else { - f.Usage() - } -} - -func trimQuotes(str string) string { - if len(str) == 0 { - return str - } - type quote struct { - start, end byte - } - - // All valid quote types. - quotes := []quote{ - // Double quotes - { - start: '"', - end: '"', - }, - - // Single quotes - { - start: '\'', - end: '\'', - }, - } - - for _, quote := range quotes { - // Only strip if outermost match. - if str[0] == quote.start && str[len(str)-1] == quote.end { - str = str[1 : len(str)-1] - break - } - } - - return str -} - -// parseOne parses one flag. It reports whether a flag was seen. -func (f *FlagSet) parseOne() (bool, string, error) { - if len(f.args) == 0 { - return false, "", nil - } - s := f.args[0] - if len(s) == 0 || s[0] != '-' || len(s) == 1 { - return false, "", nil - } - if s[1] == '-' && len(s) == 2 { // "--" terminates the flags - f.args = f.args[1:] - return false, "", nil - } - name := s[1:] - if len(name) == 0 || name[0] == '=' { - return false, "", f.failf("bad flag syntax: %s", s) - } - - // it's a flag. does it have an argument? - f.args = f.args[1:] - has_value := false - value := "" - if i := strings.Index(name, "="); i != -1 { - value = trimQuotes(name[i+1:]) - has_value = true - name = name[:i] - } - - m := f.formal - flag, alreadythere := m[name] // BUG - if !alreadythere { - if name == "-help" || name == "help" || name == "h" { // special case for nice help message. 
- f.usage() - return false, "", ErrHelp - } - if len(name) > 0 && name[0] == '-' { - return false, "", f.failf("flag provided but not defined: -%s", name) - } - return false, name, ErrRetry - } - if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg - if has_value { - if err := fv.Set(value); err != nil { - return false, "", f.failf("invalid boolean value %q for -%s: %v", value, name, err) - } - } else { - fv.Set("true") - } - } else { - // It must have a value, which might be the next argument. - if !has_value && len(f.args) > 0 { - // value is the next arg - has_value = true - value, f.args = f.args[0], f.args[1:] - } - if !has_value { - return false, "", f.failf("flag needs an argument: -%s", name) - } - if err := flag.Value.Set(value); err != nil { - return false, "", f.failf("invalid value %q for flag -%s: %v", value, name, err) - } - } - if f.actual == nil { - f.actual = make(map[string]*Flag) - } - f.actual[name] = flag - for i, n := range flag.Names { - if n == fmt.Sprintf("#%s", name) { - replacement := "" - for j := i; j < len(flag.Names); j++ { - if flag.Names[j][0] != '#' { - replacement = flag.Names[j] - break - } - } - if replacement != "" { - fmt.Fprintf(f.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement) - } else { - fmt.Fprintf(f.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) - } - } - } - return true, "", nil -} - -// Parse parses flag definitions from the argument list, which should not -// include the command name. Must be called after all flags in the FlagSet -// are defined and before flags are accessed by the program. -// The return value will be ErrHelp if -help was set but not defined. -func (f *FlagSet) Parse(arguments []string) error { - f.parsed = true - f.args = arguments - for { - seen, name, err := f.parseOne() - if seen { - continue - } - if err == nil { - break - } - if err == ErrRetry { - if len(name) > 1 { - err = nil - for _, letter := range strings.Split(name, "") { - f.args = append([]string{"-" + letter}, f.args...) - seen2, _, err2 := f.parseOne() - if seen2 { - continue - } - if err2 != nil { - err = f.failf("flag provided but not defined: -%s", name) - break - } - } - if err == nil { - continue - } - } else { - err = f.failf("flag provided but not defined: -%s", name) - } - } - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -// Parsed reports whether f.Parse has been called. -func (f *FlagSet) Parsed() bool { - return f.parsed -} - -// Parse parses the command-line flags from os.Args[1:]. Must be called -// after all flags are defined and before flags are accessed by the program. -func Parse() { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.Parse(os.Args[1:]) -} - -// Parsed returns true if the command-line flags have been parsed. -func Parsed() bool { - return CommandLine.Parsed() -} - -// CommandLine is the default set of command-line flags, parsed from os.Args. -// The top-level functions such as BoolVar, Arg, and on are wrappers for the -// methods of CommandLine. -var CommandLine = NewFlagSet(os.Args[0], ExitOnError) - -// NewFlagSet returns a new, empty flag set with the specified name and -// error handling property. 
-func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { - f := &FlagSet{ - name: name, - errorHandling: errorHandling, - } - return f -} - -// Init sets the name and error handling property for a flag set. -// By default, the zero FlagSet uses an empty name and the -// ContinueOnError error handling policy. -func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { - f.name = name - f.errorHandling = errorHandling -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag_test.go deleted file mode 100644 index 85f32c8aa4..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag_test.go +++ /dev/null @@ -1,516 +0,0 @@ -// Copyright 2014-2015 The Docker & Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mflag - -import ( - "bytes" - "fmt" - "os" - "sort" - "strings" - "testing" - "time" -) - -// ResetForTesting clears all flag state and sets the usage function as directed. -// After calling ResetForTesting, parse errors in flag handling will not -// exit the program. -func ResetForTesting(usage func()) { - CommandLine = NewFlagSet(os.Args[0], ContinueOnError) - Usage = usage -} -func boolString(s string) string { - if s == "0" { - return "false" - } - return "true" -} - -func TestEverything(t *testing.T) { - ResetForTesting(nil) - Bool([]string{"test_bool"}, false, "bool value") - Int([]string{"test_int"}, 0, "int value") - Int64([]string{"test_int64"}, 0, "int64 value") - Uint([]string{"test_uint"}, 0, "uint value") - Uint64([]string{"test_uint64"}, 0, "uint64 value") - String([]string{"test_string"}, "0", "string value") - Float64([]string{"test_float64"}, 0, "float64 value") - Duration([]string{"test_duration"}, 0, "time.Duration value") - - m := make(map[string]*Flag) - desired := "0" - visitor := func(f *Flag) { - for _, name := range f.Names { - if len(name) > 5 && name[0:5] == "test_" { - m[name] = f - ok := false - switch { - case f.Value.String() == desired: - ok = true - case name == "test_bool" && f.Value.String() == boolString(desired): - ok = true - case name == "test_duration" && f.Value.String() == desired+"s": - ok = true - } - if !ok { - t.Error("Visit: bad value", f.Value.String(), "for", name) - } - } - } - } - VisitAll(visitor) - if len(m) != 8 { - t.Error("VisitAll misses some flags") - for k, v := range m { - t.Log(k, *v) - } - } - m = make(map[string]*Flag) - Visit(visitor) - if len(m) != 0 { - t.Errorf("Visit sees unset flags") - for k, v := range m { - t.Log(k, *v) - } - } - // Now set all flags - Set("test_bool", "true") - Set("test_int", "1") - Set("test_int64", "1") - Set("test_uint", "1") - Set("test_uint64", "1") - Set("test_string", "1") - Set("test_float64", "1") - Set("test_duration", "1s") - desired = "1" - Visit(visitor) - if len(m) != 8 { - t.Error("Visit fails after set") - for k, v := range m { - t.Log(k, *v) - } - } - // Now test they're visited in sort order. 
- var flagNames []string - Visit(func(f *Flag) { - for _, name := range f.Names { - flagNames = append(flagNames, name) - } - }) - if !sort.StringsAreSorted(flagNames) { - t.Errorf("flag names not sorted: %v", flagNames) - } -} - -func TestGet(t *testing.T) { - ResetForTesting(nil) - Bool([]string{"test_bool"}, true, "bool value") - Int([]string{"test_int"}, 1, "int value") - Int64([]string{"test_int64"}, 2, "int64 value") - Uint([]string{"test_uint"}, 3, "uint value") - Uint64([]string{"test_uint64"}, 4, "uint64 value") - String([]string{"test_string"}, "5", "string value") - Float64([]string{"test_float64"}, 6, "float64 value") - Duration([]string{"test_duration"}, 7, "time.Duration value") - - visitor := func(f *Flag) { - for _, name := range f.Names { - if len(name) > 5 && name[0:5] == "test_" { - g, ok := f.Value.(Getter) - if !ok { - t.Errorf("Visit: value does not satisfy Getter: %T", f.Value) - return - } - switch name { - case "test_bool": - ok = g.Get() == true - case "test_int": - ok = g.Get() == int(1) - case "test_int64": - ok = g.Get() == int64(2) - case "test_uint": - ok = g.Get() == uint(3) - case "test_uint64": - ok = g.Get() == uint64(4) - case "test_string": - ok = g.Get() == "5" - case "test_float64": - ok = g.Get() == float64(6) - case "test_duration": - ok = g.Get() == time.Duration(7) - } - if !ok { - t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name) - } - } - } - } - VisitAll(visitor) -} - -func testParse(f *FlagSet, t *testing.T) { - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - boolFlag := f.Bool([]string{"bool"}, false, "bool value") - bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") - f.Bool([]string{"bool3"}, false, "bool3 value") - bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value") - intFlag := f.Int([]string{"-int"}, 0, "int value") - int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") - uintFlag := f.Uint([]string{"uint"}, 0, "uint value") - uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") - stringFlag := f.String([]string{"string"}, "0", "string value") - f.String([]string{"string2"}, "0", "string2 value") - singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") - doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") - mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") - mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value") - nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value") - nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value") - float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") - durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") - extra := "one-extra-argument" - args := []string{ - "-bool", - "-bool2=true", - "-bool4=false", - "--int", "22", - "--int64", "0x23", - "-uint", "24", - "--uint64", "25", - "-string", "hello", - "-squote='single'", - `-dquote="double"`, - `-mquote='mixed"`, - `-mquote2="mixed2'`, - `-nquote="'single nested'"`, - `-nquote2='"double nested"'`, - "-float64", "2718e28", - "-duration", "2m", - extra, - } - if err := f.Parse(args); err != nil { - t.Fatal(err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *boolFlag != true { - t.Error("bool flag should be true, is ", *boolFlag) - } - if *bool2Flag != true { - t.Error("bool2 flag should be true, is ", *bool2Flag) - } - if !f.IsSet("bool2") { - t.Error("bool2 should be marked as set") - } - if 
f.IsSet("bool3") { - t.Error("bool3 should not be marked as set") - } - if !f.IsSet("bool4") { - t.Error("bool4 should be marked as set") - } - if *bool4Flag != false { - t.Error("bool4 flag should be false, is ", *bool4Flag) - } - if *intFlag != 22 { - t.Error("int flag should be 22, is ", *intFlag) - } - if *int64Flag != 0x23 { - t.Error("int64 flag should be 0x23, is ", *int64Flag) - } - if *uintFlag != 24 { - t.Error("uint flag should be 24, is ", *uintFlag) - } - if *uint64Flag != 25 { - t.Error("uint64 flag should be 25, is ", *uint64Flag) - } - if *stringFlag != "hello" { - t.Error("string flag should be `hello`, is ", *stringFlag) - } - if !f.IsSet("string") { - t.Error("string flag should be marked as set") - } - if f.IsSet("string2") { - t.Error("string2 flag should not be marked as set") - } - if *singleQuoteFlag != "single" { - t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) - } - if *doubleQuoteFlag != "double" { - t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag) - } - if *mixedQuoteFlag != `'mixed"` { - t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag) - } - if *mixed2QuoteFlag != `"mixed2'` { - t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag) - } - if *nestedQuoteFlag != "'single nested'" { - t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag) - } - if *nested2QuoteFlag != `"double nested"` { - t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag) - } - if *float64Flag != 2718e28 { - t.Error("float64 flag should be 2718e28, is ", *float64Flag) - } - if *durationFlag != 2*time.Minute { - t.Error("duration flag should be 2m, is ", *durationFlag) - } - if len(f.Args()) != 1 { - t.Error("expected one argument, got", len(f.Args())) - } else if f.Args()[0] != extra { - t.Errorf("expected argument %q got %q", extra, f.Args()[0]) - } -} - -func testPanic(f *FlagSet, t *testing.T) { - f.Int([]string{"-int"}, 0, "int value") - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - args := []string{ - "-int", "21", - } - f.Parse(args) -} - -func TestParsePanic(t *testing.T) { - ResetForTesting(func() {}) - testPanic(CommandLine, t) -} - -func TestParse(t *testing.T) { - ResetForTesting(func() { t.Error("bad parse") }) - testParse(CommandLine, t) -} - -func TestFlagSetParse(t *testing.T) { - testParse(NewFlagSet("test", ContinueOnError), t) -} - -// Declare a user-defined flag type. -type flagVar []string - -func (f *flagVar) String() string { - return fmt.Sprint([]string(*f)) -} - -func (f *flagVar) Set(value string) error { - *f = append(*f, value) - return nil -} - -func TestUserDefined(t *testing.T) { - var flags FlagSet - flags.Init("test", ContinueOnError) - var v flagVar - flags.Var(&v, []string{"v"}, "usage") - if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil { - t.Error(err) - } - if len(v) != 3 { - t.Fatal("expected 3 args; got ", len(v)) - } - expect := "[1 2 3]" - if v.String() != expect { - t.Errorf("expected value %q got %q", expect, v.String()) - } -} - -// Declare a user-defined boolean flag type. 
-type boolFlagVar struct { - count int -} - -func (b *boolFlagVar) String() string { - return fmt.Sprintf("%d", b.count) -} - -func (b *boolFlagVar) Set(value string) error { - if value == "true" { - b.count++ - } - return nil -} - -func (b *boolFlagVar) IsBoolFlag() bool { - return b.count < 4 -} - -func TestUserDefinedBool(t *testing.T) { - var flags FlagSet - flags.Init("test", ContinueOnError) - var b boolFlagVar - var err error - flags.Var(&b, []string{"b"}, "usage") - if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil { - if b.count < 4 { - t.Error(err) - } - } - - if b.count != 4 { - t.Errorf("want: %d; got: %d", 4, b.count) - } - - if err == nil { - t.Error("expected error; got none") - } -} - -func TestSetOutput(t *testing.T) { - var flags FlagSet - var buf bytes.Buffer - flags.SetOutput(&buf) - flags.Init("test", ContinueOnError) - flags.Parse([]string{"-unknown"}) - if out := buf.String(); !strings.Contains(out, "-unknown") { - t.Logf("expected output mentioning unknown; got %q", out) - } -} - -// This tests that one can reset the flags. This still works but not well, and is -// superseded by FlagSet. -func TestChangingArgs(t *testing.T) { - ResetForTesting(func() { t.Fatal("bad parse") }) - oldArgs := os.Args - defer func() { os.Args = oldArgs }() - os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"} - before := Bool([]string{"before"}, false, "") - if err := CommandLine.Parse(os.Args[1:]); err != nil { - t.Fatal(err) - } - cmd := Arg(0) - os.Args = Args() - after := Bool([]string{"after"}, false, "") - Parse() - args := Args() - - if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { - t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) - } -} - -// Test that -help invokes the usage message and returns ErrHelp. -func TestHelp(t *testing.T) { - var helpCalled = false - fs := NewFlagSet("help test", ContinueOnError) - fs.Usage = func() { helpCalled = true } - var flag bool - fs.BoolVar(&flag, []string{"flag"}, false, "regular flag") - // Regular flag invocation should work - err := fs.Parse([]string{"-flag=true"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - if !flag { - t.Error("flag was not set by -flag") - } - if helpCalled { - t.Error("help called for regular flag") - helpCalled = false // reset for next test - } - // Help flag should work as expected. - err = fs.Parse([]string{"-help"}) - if err == nil { - t.Fatal("error expected") - } - if err != ErrHelp { - t.Fatal("expected ErrHelp; got ", err) - } - if !helpCalled { - t.Fatal("help was not called") - } - // If we define a help flag, that should override. - var help bool - fs.BoolVar(&help, []string{"help"}, false, "help flag") - helpCalled = false - err = fs.Parse([]string{"-help"}) - if err != nil { - t.Fatal("expected no error for defined -help; got ", err) - } - if helpCalled { - t.Fatal("help was called; should not have been for defined help flag") - } -} - -// Test the flag count functions. 
-func TestFlagCounts(t *testing.T) {
-	fs := NewFlagSet("help test", ContinueOnError)
-	var flag bool
-	fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag")
-	fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag")
-	fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag")
-	fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag")
-	fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag")
-	fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag")
-
-	if fs.FlagCount() != 6 {
-		t.Fatal("FlagCount wrong. ", fs.FlagCount())
-	}
-	if fs.FlagCountUndeprecated() != 4 {
-		t.Fatal("FlagCountUndeprecated wrong. ", fs.FlagCountUndeprecated())
-	}
-	if fs.NFlag() != 0 {
-		t.Fatal("NFlag wrong. ", fs.NFlag())
-	}
-	err := fs.Parse([]string{"-fd", "-g", "-flag4"})
-	if err != nil {
-		t.Fatal("expected no error; got ", err)
-	}
-	if fs.NFlag() != 4 {
-		t.Fatal("NFlag wrong. ", fs.NFlag())
-	}
-}
-
-// Expose a bug in sortFlags
-func TestSortFlags(t *testing.T) {
-	fs := NewFlagSet("help TestSortFlags", ContinueOnError)
-
-	var err error
-
-	var b bool
-	fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage")
-
-	err = fs.Parse([]string{"--banana=true"})
-	if err != nil {
-		t.Fatal("expected no error; got ", err)
-	}
-
-	count := 0
-
-	fs.VisitAll(func(flag *Flag) {
-		count++
-		if flag == nil {
-			t.Fatal("VisitAll should not return a nil flag")
-		}
-	})
-	flagcount := fs.FlagCount()
-	if flagcount != count {
-		t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count)
-	}
-	// Make sure it's idempotent
-	if flagcount != fs.FlagCount() {
-		t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount())
-	}
-
-	count = 0
-	fs.Visit(func(flag *Flag) {
-		count++
-		if flag == nil {
-			t.Fatal("Visit should not return a nil flag")
-		}
-	})
-	nflag := fs.NFlag()
-	if nflag != count {
-		t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, count)
-	}
-	if nflag != fs.NFlag() {
-		t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag())
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/MAINTAINERS
deleted file mode 100644
index 8c8902530a..0000000000
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/MAINTAINERS
+++ /dev/null
@@ -1 +0,0 @@
-Erik Hollensbe (@erikh)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go
deleted file mode 100644
index 8b045a3098..0000000000
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package filters
-
-import (
-	"encoding/json"
-	"errors"
-	"regexp"
-	"strings"
-)
-
-type Args map[string][]string
-
-// ParseFlag parses the argument to the filter flag. Like
-//
-//   `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
-//
-// If a prev map is provided, it is appended to and returned. By default a new
-// map is created.
-func ParseFlag(arg string, prev Args) (Args, error) {
-	var filters Args = prev
-	if prev == nil {
-		filters = Args{}
-	}
-	if len(arg) == 0 {
-		return filters, nil
-	}
-
-	if !strings.Contains(arg, "=") {
-		return filters, ErrorBadFormat
-	}
-
-	f := strings.SplitN(arg, "=", 2)
-	name := strings.ToLower(strings.TrimSpace(f[0]))
-	value := strings.TrimSpace(f[1])
-	filters[name] = append(filters[name], value)
-
-	return filters, nil
-}
-
-var ErrorBadFormat = errors.New("bad format of filter (expected name=value)")
-
-// ToParam packs the Args into a string for easy transport from client to server
-func ToParam(a Args) (string, error) {
-	// this way we don't URL encode {}, just empty space
-	if len(a) == 0 {
-		return "", nil
-	}
-
-	buf, err := json.Marshal(a)
-	if err != nil {
-		return "", err
-	}
-	return string(buf), nil
-}
-
-// FromParam unpacks the filter Args
-func FromParam(p string) (Args, error) {
-	args := Args{}
-	if len(p) == 0 {
-		return args, nil
-	}
-	err := json.Unmarshal([]byte(p), &args)
-	if err != nil {
-		return nil, err
-	}
-	return args, nil
-}
-
-func (filters Args) Match(field, source string) bool {
-	fieldValues := filters[field]
-
-	// do not filter if there is no filter set or it cannot be determined
-	if len(fieldValues) == 0 {
-		return true
-	}
-	for _, name2match := range fieldValues {
-		match, err := regexp.MatchString(name2match, source)
-		if err != nil {
-			continue
-		}
-		if match {
-			return true
-		}
-	}
-	return false
-}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go
deleted file mode 100644
index a248350223..0000000000
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package filters
-
-import (
-	"sort"
-	"testing"
-)
-
-func TestParseArgs(t *testing.T) {
-	// equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'`
-	flagArgs := []string{
-		"created=today",
-		"image.name=ubuntu*",
-		"image.name=*untu",
-	}
-	var (
-		args = Args{}
-		err  error
-	)
-	for i := range flagArgs {
-		args, err = ParseFlag(flagArgs[i], args)
-		if err != nil {
-			t.Errorf("failed to parse %s: %s", flagArgs[i], err)
-		}
-	}
-	if len(args["created"]) != 1 {
-		t.Errorf("failed to set this arg")
-	}
-	if len(args["image.name"]) != 2 {
-		t.Errorf("the args should have collapsed")
-	}
-}
-
-func TestParam(t *testing.T) {
-	a := Args{
-		"created":    []string{"today"},
-		"image.name": []string{"ubuntu*", "*untu"},
-	}
-
-	v, err := ToParam(a)
-	if err != nil {
-		t.Errorf("failed to marshal the filters: %s", err)
-	}
-	v1, err := FromParam(v)
-	if err != nil {
-		t.Errorf("%s", err)
-	}
-	for key, vals := range v1 {
-		if _, ok := a[key]; !ok {
-			t.Errorf("could not find key %s in original set", key)
-		}
-		sort.Strings(vals)
-		sort.Strings(a[key])
-		if len(vals) != len(a[key]) {
-			t.Errorf("value lengths ought to match")
-			continue
-		}
-		for i := range vals {
-			if vals[i] != a[key][i] {
-				t.Errorf("expected %s, but got %s", a[key][i], vals[i])
-			}
-		}
-	}
-}
-
-func TestEmpty(t *testing.T) {
-	a := Args{}
-	v, err := ToParam(a)
-	if err != nil {
-		t.Errorf("failed to marshal the filters: %s", err)
-	}
-	v1, err := FromParam(v)
-	if err != nil {
-		t.Errorf("%s", err)
-	}
-	if len(a) != len(v1) {
-		t.Errorf("these should both be empty sets")
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go
b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go deleted file mode 100644 index 70d09003a3..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go +++ /dev/null @@ -1,93 +0,0 @@ -package kernel - -import ( - "bytes" - "errors" - "fmt" -) - -type KernelVersionInfo struct { - Kernel int - Major int - Minor int - Flavor string -} - -func (k *KernelVersionInfo) String() string { - return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) -} - -// Compare two KernelVersionInfo struct. -// Returns -1 if a < b, 0 if a == b, 1 it a > b -func CompareKernelVersion(a, b *KernelVersionInfo) int { - if a.Kernel < b.Kernel { - return -1 - } else if a.Kernel > b.Kernel { - return 1 - } - - if a.Major < b.Major { - return -1 - } else if a.Major > b.Major { - return 1 - } - - if a.Minor < b.Minor { - return -1 - } else if a.Minor > b.Minor { - return 1 - } - - return 0 -} - -func GetKernelVersion() (*KernelVersionInfo, error) { - var ( - err error - ) - - uts, err := uname() - if err != nil { - return nil, err - } - - release := make([]byte, len(uts.Release)) - - i := 0 - for _, c := range uts.Release { - release[i] = byte(c) - i++ - } - - // Remove the \x00 from the release for Atoi to parse correctly - release = release[:bytes.IndexByte(release, 0)] - - return ParseRelease(string(release)) -} - -func ParseRelease(release string) (*KernelVersionInfo, error) { - var ( - kernel, major, minor, parsed int - flavor, partial string - ) - - // Ignore error from Sscanf to allow an empty flavor. Instead, just - // make sure we got all the version numbers. - parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) - if parsed < 2 { - return nil, errors.New("Can't parse kernel version " + release) - } - - // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 - parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) - if parsed < 1 { - flavor = partial - } - - return &KernelVersionInfo{ - Kernel: kernel, - Major: major, - Minor: minor, - Flavor: flavor, - }, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go deleted file mode 100644 index e211a63b7d..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package kernel - -import ( - "testing" -) - -func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) { - var ( - a *KernelVersionInfo - ) - a, _ = ParseRelease(release) - - if r := CompareKernelVersion(a, b); r != result { - t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) - } - if a.Flavor != b.Flavor { - t.Fatalf("Unexpected parsed kernel flavor. 
Found %s, expected %s", a.Flavor, b.Flavor) - } -} - -func TestParseRelease(t *testing.T) { - assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) - assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) - assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) - assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) - assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) - assertParseRelease(t, "3.12-1-amd64", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) -} - -func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) { - if r := CompareKernelVersion(a, b); r != result { - t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) - } -} - -func TestCompareKernelVersion(t *testing.T) { - assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - 0) - assertKernelVersion(t, - &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - -1) - assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, - 1) - assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - 0) - assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - 1) - assertKernelVersion(t, - &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20}, - &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, - -1) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go deleted file mode 100644 index 8ca814c1fb..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -package kernel - -import ( - "syscall" -) - -type Utsname syscall.Utsname - -func uname() (*syscall.Utsname, error) { - uts := &syscall.Utsname{} - - if err := syscall.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go deleted file mode 100644 index 00c5422589..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !linux - -package kernel - -import ( - "errors" -) - -type Utsname struct { - Release [65]byte -} - -func uname() (*Utsname, error) { - return nil, errors.New("Kernel version detection is available only on linux") -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem.go deleted file mode 100644 index af185f9f6b..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem.go +++ /dev/null @@ -1,40 +0,0 @@ -package operatingsystem - -import ( - "bytes" - "errors" - "io/ioutil" -) - -var ( - // file to use to detect 
if the daemon is running in a container - proc1Cgroup = "/proc/1/cgroup" - - // file to check to determine Operating System - etcOsRelease = "/etc/os-release" -) - -func GetOperatingSystem() (string, error) { - b, err := ioutil.ReadFile(etcOsRelease) - if err != nil { - return "", err - } - if i := bytes.Index(b, []byte("PRETTY_NAME")); i >= 0 { - b = b[i+13:] - return string(b[:bytes.IndexByte(b, '"')]), nil - } - return "", errors.New("PRETTY_NAME not found") -} - -func IsContainerized() (bool, error) { - b, err := ioutil.ReadFile(proc1Cgroup) - if err != nil { - return false, err - } - for _, line := range bytes.Split(b, []byte{'\n'}) { - if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) { - return true, nil - } - } - return false, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go deleted file mode 100644 index b7d54cbb1c..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package operatingsystem - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestGetOperatingSystem(t *testing.T) { - var ( - backup = etcOsRelease - ubuntuTrusty = []byte(`NAME="Ubuntu" -VERSION="14.04, Trusty Tahr" -ID=ubuntu -ID_LIKE=debian -PRETTY_NAME="Ubuntu 14.04 LTS" -VERSION_ID="14.04" -HOME_URL="http://www.ubuntu.com/" -SUPPORT_URL="http://help.ubuntu.com/" -BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) - gentoo = []byte(`NAME=Gentoo -ID=gentoo -PRETTY_NAME="Gentoo/Linux" -ANSI_COLOR="1;32" -HOME_URL="http://www.gentoo.org/" -SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" -BUG_REPORT_URL="https://bugs.gentoo.org/" -`) - noPrettyName = []byte(`NAME="Ubuntu" -VERSION="14.04, Trusty Tahr" -ID=ubuntu -ID_LIKE=debian -VERSION_ID="14.04" -HOME_URL="http://www.ubuntu.com/" -SUPPORT_URL="http://help.ubuntu.com/" -BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) - ) - - dir := os.TempDir() - etcOsRelease = filepath.Join(dir, "etcOsRelease") - - defer func() { - os.Remove(etcOsRelease) - etcOsRelease = backup - }() - - for expect, osRelease := range map[string][]byte{ - "Ubuntu 14.04 LTS": ubuntuTrusty, - "Gentoo/Linux": gentoo, - "": noPrettyName, - } { - if err := ioutil.WriteFile(etcOsRelease, osRelease, 0600); err != nil { - t.Fatalf("failed to write to %s: %v", etcOsRelease, err) - } - s, err := GetOperatingSystem() - if s != expect { - if expect == "" { - t.Fatalf("Expected error 'PRETTY_NAME not found', but got %v", err) - } else { - t.Fatalf("Expected '%s', but got '%s'. 
Err=%v", expect, s, err) - } - } - } -} - -func TestIsContainerized(t *testing.T) { - var ( - backup = proc1Cgroup - nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ -13:hugetlb:/ -12:net_prio:/ -11:perf_event:/ -10:bfqio:/ -9:blkio:/ -8:net_cls:/ -7:freezer:/ -6:devices:/ -5:memory:/ -4:cpuacct:/ -3:cpu:/ -2:cpuset:/ -`) - containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -7:net_cls:/ -6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -1:cpuset:/`) - ) - - dir := os.TempDir() - proc1Cgroup = filepath.Join(dir, "proc1Cgroup") - - defer func() { - os.Remove(proc1Cgroup) - proc1Cgroup = backup - }() - - if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { - t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) - } - inContainer, err := IsContainerized() - if err != nil { - t.Fatal(err) - } - if inContainer { - t.Fatal("Wrongly assuming containerized") - } - - if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { - t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) - } - inContainer, err = IsContainerized() - if err != nil { - t.Fatal(err) - } - if !inContainer { - t.Fatal("Wrongly assuming non-containerized") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers.go deleted file mode 100644 index 6563190410..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers.go +++ /dev/null @@ -1,131 +0,0 @@ -package parsers - -import ( - "fmt" - "strconv" - "strings" -) - -// FIXME: Change this not to receive default value as parameter -func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) { - addr = strings.TrimSpace(addr) - if addr == "" { - addr = fmt.Sprintf("unix://%s", defaultUnixAddr) - } - addrParts := strings.Split(addr, "://") - if len(addrParts) == 1 { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return ParseTCPAddr(addrParts[1], defaultTCPAddr) - case "unix": - return ParseUnixAddr(addrParts[1], defaultUnixAddr) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -func ParseUnixAddr(addr string, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, "unix://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("unix://%s", addr), nil -} - -func ParseTCPAddr(addr string, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr) - } - - hostParts := strings.Split(addr, ":") - if len(hostParts) != 2 { - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } - host := hostParts[0] - if host == "" { - host = defaultAddr - } - - p, err := strconv.Atoi(hostParts[1]) 
- if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } - return fmt.Sprintf("tcp://%s:%d", host, p), nil -} - -// Get a repos name and returns the right reposName + tag -// The tag can be confusing because of a port in a repository name. -// Ex: localhost.localdomain:5000/samalba/hipache:latest -func ParseRepositoryTag(repos string) (string, string) { - n := strings.LastIndex(repos, ":") - if n < 0 { - return repos, "" - } - if tag := repos[n+1:]; !strings.Contains(tag, "/") { - return repos[:n], tag - } - return repos, "" -} - -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - -func ParseKeyValueOpt(opt string) (string, string, error) { - parts := strings.SplitN(opt, "=", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) - } - return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil -} - -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers_test.go deleted file mode 100644 index aac1e33e35..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package parsers - -import ( - "strings" - "testing" -) - -func TestParseHost(t *testing.T) { - var ( - defaultHttpHost = "127.0.0.1" - defaultUnix = "/var/run/docker.sock" - ) - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.0"); err == nil { - t.Errorf("tcp 0.0.0.0 address expected error return, but err == nil, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://"); err == nil { - t.Errorf("default tcp:// address expected error return, but err == nil, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" { - t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" { - t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" { - t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" { 
- t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" { - t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" { - t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1"); err == nil { - t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) - } - if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:2375"); err == nil { - t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) - } -} - -func TestParseRepositoryTag(t *testing.T) { - if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) - } - if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) - } - if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) - } -} - -func TestParsePortMapping(t *testing.T) { - data, err := PartParser("ip:public:private", "192.168.1.1:80:8080") - if err != nil { - t.Fatal(err) - } - - if len(data) != 3 { - t.FailNow() - } - if data["ip"] != "192.168.1.1" { - t.Fail() - } - if data["public"] != "80" { - t.Fail() - } - if data["private"] != "8080" { - t.Fail() - } -} - -func TestParsePortRange(t *testing.T) { - if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 { - t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end) - } -} - -func TestParsePortRangeIncorrectRange(t *testing.T) { - if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} - -func TestParsePortRangeIncorrectEndRange(t *testing.T) { - if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } - - if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} - -func TestParsePortRangeIncorrectStartRange(t *testing.T) { - if _, _, err := 
ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } - - if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go deleted file mode 100644 index 5338a0cfb2..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go +++ /dev/null @@ -1,111 +0,0 @@ -// +build go1.3 - -// Package pools provides a collection of pools which provide various -// data types with buffers. These can be used to lower the number of -// memory allocations and reuse buffers. -// -// New pools should be added to this package to allow them to be -// shared across packages. -// -// Utility functions which operate on pools should be added to this -// package to allow them to be reused. -package pools - -import ( - "bufio" - "io" - "sync" - - "github.com/docker/docker/pkg/ioutils" -) - -var ( - // Pool which returns bufio.Reader with a 32K buffer - BufioReader32KPool *BufioReaderPool - // Pool which returns bufio.Writer with a 32K buffer - BufioWriter32KPool *BufioWriterPool -) - -const buffer32K = 32 * 1024 - -type BufioReaderPool struct { - pool sync.Pool -} - -func init() { - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) -} - -// newBufioReaderPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - pool := sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, size) }, - } - return &BufioReaderPool{pool: pool} -} - -// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - buf := bufPool.pool.Get().(*bufio.Reader) - buf.Reset(r) - return buf -} - -// Put puts the bufio.Reader back into the pool. -func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back -// into the pool and closes the reader if it's an io.ReadCloser. -func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - readCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} - -type BufioWriterPool struct { - pool sync.Pool -} - -// newBufioWriterPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - pool := sync.Pool{ - New: func() interface{} { return bufio.NewWriterSize(nil, size) }, - } - return &BufioWriterPool{pool: pool} -} - -// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - buf := bufPool.pool.Get().(*bufio.Writer) - buf.Reset(w) - return buf -} - -// Put puts the bufio.Writer back into the pool. 
-func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.Writecloser. -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - writeCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go deleted file mode 100644 index 48903c2396..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go +++ /dev/null @@ -1,73 +0,0 @@ -// +build !go1.3 - -package pools - -import ( - "bufio" - "io" - - "github.com/docker/docker/pkg/ioutils" -) - -var ( - BufioReader32KPool *BufioReaderPool - BufioWriter32KPool *BufioWriterPool -) - -const buffer32K = 32 * 1024 - -type BufioReaderPool struct { - size int -} - -func init() { - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) -} - -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - return &BufioReaderPool{size: size} -} - -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - return bufio.NewReaderSize(r, bufPool.size) -} - -func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) -} - -func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - return readCloser.Close() - } - return nil - }) -} - -type BufioWriterPool struct { - size int -} - -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - return &BufioWriterPool{size: size} -} - -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - return bufio.NewWriterSize(w, bufPool.size) -} - -func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) -} - -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - return writeCloser.Close() - } - return nil - }) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go deleted file mode 100644 index dd52b9082f..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go +++ /dev/null @@ -1,11 +0,0 @@ -package promise - -// Go is a basic promise implementation: it wraps calls a function in a goroutine, -// and returns a channel which will later return the function's return value. 
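Editor's note: promise.Go, whose one-line body follows, is a one-shot future: the channel buffer of size one lets the worker goroutine send its result and exit even if the caller never receives. A self-contained sketch of the pattern with a typical select-based consumer (the helper name is mine):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// goPromise mirrors the deleted promise.Go: run f in a goroutine and
// deliver its error on a buffered channel. The buffer of 1 matters:
// the goroutine never blocks on send, so it cannot leak.
func goPromise(f func() error) chan error {
	ch := make(chan error, 1)
	go func() { ch <- f() }()
	return ch
}

func main() {
	errc := goPromise(func() error {
		time.Sleep(10 * time.Millisecond)
		return errors.New("done")
	})
	select {
	case err := <-errc:
		fmt.Println("result:", err)
	case <-time.After(time.Second):
		fmt.Println("timed out")
	}
}
```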
-func Go(f func() error) chan error { - ch := make(chan error, 1) - go func() { - ch <- f() - }() - return ch -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/MAINTAINERS deleted file mode 100644 index 68a97d2fc2..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Michael Crosby (@crosbymichael) -Victor Vieux (@vieux) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go deleted file mode 100644 index 63045186fe..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go +++ /dev/null @@ -1,9 +0,0 @@ -package system - -import ( - "errors" -) - -var ( - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") -) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go deleted file mode 100644 index 9ef82d5523..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -func Lstat(path string) (*Stat, error) { - s := &syscall.Stat_t{} - err := syscall.Lstat(path, s) - if err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go deleted file mode 100644 index 9bab4d7b0c..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package system - -import ( - "os" - "testing" -) - -func TestLstat(t *testing.T) { - file, invalid, _, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - statFile, err := Lstat(file) - if err != nil { - t.Fatal(err) - } - if statFile == nil { - t.Fatal("returned empty stat for existing file") - } - - statInvalid, err := Lstat(invalid) - if err == nil { - t.Fatal("did not return error for non-existing file") - } - if statInvalid != nil { - t.Fatal("returned non-nil stat for non-existing file") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go deleted file mode 100644 index 213a7c7ade..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build windows - -package system - -func Lstat(path string) (*Stat, error) { - // should not be called on cli code path - return nil, ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go deleted file mode 100644 index 3b6e947e67..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -// MemInfo contains memory statistics of the host system. -type MemInfo struct { - // Total usable RAM (i.e. physical RAM minus a few reserved bits and the - // kernel binary code). - MemTotal int64 - - // Amount of free memory. - MemFree int64 - - // Total amount of swap space available. - SwapTotal int64 - - // Amount of swap space that is currently unused. 
- SwapFree int64 -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go deleted file mode 100644 index b7de3ff776..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go +++ /dev/null @@ -1,67 +0,0 @@ -package system - -import ( - "bufio" - "errors" - "io" - "os" - "strconv" - "strings" - - "github.com/docker/docker/pkg/units" -) - -var ( - ErrMalformed = errors.New("malformed file") -) - -// Retrieve memory statistics of the host system and parse them into a MemInfo -// type. -func ReadMemInfo() (*MemInfo, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return nil, err - } - defer file.Close() - return parseMemInfo(file) -} - -func parseMemInfo(reader io.Reader) (*MemInfo, error) { - meminfo := &MemInfo{} - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - // Expected format: ["MemTotal:", "1234", "kB"] - parts := strings.Fields(scanner.Text()) - - // Sanity checks: Skip malformed entries. - if len(parts) < 3 || parts[2] != "kB" { - continue - } - - // Convert to bytes. - size, err := strconv.Atoi(parts[1]) - if err != nil { - continue - } - bytes := int64(size) * units.KiB - - switch parts[0] { - case "MemTotal:": - meminfo.MemTotal = bytes - case "MemFree:": - meminfo.MemFree = bytes - case "SwapTotal:": - meminfo.SwapTotal = bytes - case "SwapFree:": - meminfo.SwapFree = bytes - } - - } - - // Handle errors that may have occurred during the reading of the file. - if err := scanner.Err(); err != nil { - return nil, err - } - - return meminfo, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go deleted file mode 100644 index 377405ea69..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package system - -import ( - "strings" - "testing" - - "github.com/docker/docker/pkg/units" -) - -func TestMemInfo(t *testing.T) { - const input = ` - MemTotal: 1 kB - MemFree: 2 kB - SwapTotal: 3 kB - SwapFree: 4 kB - Malformed1: - Malformed2: 1 - Malformed3: 2 MB - Malformed4: X kB - ` - meminfo, err := parseMemInfo(strings.NewReader(input)) - if err != nil { - t.Fatal(err) - } - if meminfo.MemTotal != 1*units.KiB { - t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) - } - if meminfo.MemFree != 2*units.KiB { - t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) - } - if meminfo.SwapTotal != 3*units.KiB { - t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) - } - if meminfo.SwapFree != 4*units.KiB { - t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go deleted file mode 100644 index 63b8b16e05..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux - -package system - -func ReadMemInfo() (*MemInfo, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go deleted file mode 100644 index 06f9c6afbb..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go +++ /dev/null @@ -1,18 
+0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -func Mknod(path string, mode uint32, dev int) error { - return syscall.Mknod(path, mode, dev) -} - -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor -func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go deleted file mode 100644 index b4020c11b6..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build windows - -package system - -func Mknod(path string, mode uint32, dev int) error { - // should not be called on cli code path - return ErrNotSupportedPlatform -} - -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on windows, should not be called on cli code") -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go deleted file mode 100644 index 5d47494d21..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go +++ /dev/null @@ -1,42 +0,0 @@ -package system - -import ( - "syscall" -) - -type Stat struct { - mode uint32 - uid uint32 - gid uint32 - rdev uint64 - size int64 - mtim syscall.Timespec -} - -func (s Stat) Mode() uint32 { - return s.mode -} - -func (s Stat) Uid() uint32 { - return s.uid -} - -func (s Stat) Gid() uint32 { - return s.gid -} - -func (s Stat) Rdev() uint64 { - return s.rdev -} - -func (s Stat) Size() int64 { - return s.size -} - -func (s Stat) Mtim() syscall.Timespec { - return s.mtim -} - -func (s Stat) GetLastModification() syscall.Timespec { - return s.Mtim() -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go deleted file mode 100644 index 47cebef5cf..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go +++ /dev/null @@ -1,14 +0,0 @@ -package system - -import ( - "syscall" -) - -func fromStatT(s *syscall.Stat_t) (*Stat, error) { - return &Stat{size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - rdev: s.Rdev, - mtim: s.Mtim}, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go deleted file mode 100644 index abcc8ea7a6..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package system - -import ( - "os" - "syscall" - "testing" -) - -func TestFromStatT(t *testing.T) { - file, _, _, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - stat := &syscall.Stat_t{} - err := syscall.Lstat(file, stat) - - s, err := fromStatT(stat) - if err != nil { - t.Fatal(err) - } - - if stat.Mode != s.Mode() { - t.Fatal("got invalid mode") - } - if stat.Uid != s.Uid() { - t.Fatal("got invalid uid") - } - if stat.Gid != s.Gid() { - t.Fatal("got invalid gid") - } - if stat.Rdev != s.Rdev() { - t.Fatal("got invalid rdev") - } - if stat.Mtim != s.Mtim() { - t.Fatal("got invalid mtim") - } -} diff --git 
a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go deleted file mode 100644 index c4d53e6cd6..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux,!windows - -package system - -import ( - "syscall" -) - -func fromStatT(s *syscall.Stat_t) (*Stat, error) { - return &Stat{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go deleted file mode 100644 index 584e8940cc..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build windows - -package system - -import ( - "errors" - "syscall" -) - -func fromStatT(s *syscall.Win32FileAttributeData) (*Stat, error) { - return nil, errors.New("fromStatT should not be called on windows path") -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go deleted file mode 100644 index fddbecd390..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -func Umask(newmask int) (oldmask int, err error) { - return syscall.Umask(newmask), nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go deleted file mode 100644 index 3be563f89e..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build windows - -package system - -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go deleted file mode 100644 index 4c6002fe8e..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go +++ /dev/null @@ -1,11 +0,0 @@ -package system - -import "syscall" - -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} - -func UtimesNano(path string, ts []syscall.Timespec) error { - return syscall.UtimesNano(path, ts) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go deleted file mode 100644 index ceaa044c1c..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go +++ /dev/null @@ -1,24 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -func LUtimesNano(path string, ts []syscall.Timespec) error { - var _path *byte - _path, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { - return err - } - - return nil -} - -func UtimesNano(path string, ts []syscall.Timespec) error { - return syscall.UtimesNano(path, ts) -} diff --git 
a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go deleted file mode 100644 index 8f90298271..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go +++ /dev/null @@ -1,28 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -func LUtimesNano(path string, ts []syscall.Timespec) error { - // These are not currently available in syscall - AT_FDCWD := -100 - AT_SYMLINK_NOFOLLOW := 0x100 - - var _path *byte - _path, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS { - return err - } - - return nil -} - -func UtimesNano(path string, ts []syscall.Timespec) error { - return syscall.UtimesNano(path, ts) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go deleted file mode 100644 index 1dea47cc15..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package system - -import ( - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" -) - -func prepareFiles(t *testing.T) (string, string, string, string) { - dir, err := ioutil.TempDir("", "docker-system-test") - if err != nil { - t.Fatal(err) - } - - file := filepath.Join(dir, "exist") - if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { - t.Fatal(err) - } - - invalid := filepath.Join(dir, "doesnt-exist") - - symlink := filepath.Join(dir, "symlink") - if err := os.Symlink(file, symlink); err != nil { - t.Fatal(err) - } - - return file, invalid, symlink, dir -} - -func TestLUtimesNano(t *testing.T) { - file, invalid, symlink, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - before, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - ts := []syscall.Timespec{{0, 0}, {0, 0}} - if err := LUtimesNano(symlink, ts); err != nil { - t.Fatal(err) - } - - symlinkInfo, err := os.Lstat(symlink) - if err != nil { - t.Fatal(err) - } - if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { - t.Fatal("The modification time of the symlink should be different") - } - - fileInfo, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - if before.ModTime().Unix() != fileInfo.ModTime().Unix() { - t.Fatal("The modification time of the file should be same") - } - - if err := LUtimesNano(invalid, ts); err == nil { - t.Fatal("Doesn't return an error on a non-existing file") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go deleted file mode 100644 index adf2734f27..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux,!freebsd,!darwin - -package system - -import "syscall" - -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} - -func UtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go deleted file mode 100644 
index 00edb201b5..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go +++ /dev/null @@ -1,59 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// Returns a nil slice and nil error if the xattr is not set -func Lgetxattr(path string, attr string) ([]byte, error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - - dest := make([]byte, 128) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno == syscall.ENODATA { - return nil, nil - } - if errno == syscall.ERANGE { - dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - } - if errno != 0 { - return nil, errno - } - - return dest[:sz], nil -} - -var _zero uintptr - -func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - } else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go deleted file mode 100644 index 0060c167dc..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux - -package system - -func Lgetxattr(path string, attr string) ([]byte, error) { - return nil, ErrNotSupportedPlatform -} - -func Lsetxattr(path string, attr string, data []byte, flags int) error { - return ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/MAINTAINERS deleted file mode 100644 index 6dde4769d7..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/MAINTAINERS +++ /dev/null @@ -1 +0,0 @@ -Cristian Staretu (@unclejack) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json.go deleted file mode 100644 index 8043d69d18..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json.go +++ /dev/null @@ -1,26 +0,0 @@ -package timeutils - -import ( - "errors" - "time" -) - -const ( - // RFC3339NanoFixed is our own version of RFC339Nano because we want one - // that pads the nano seconds part with zeros to ensure - // the timestamps are aligned in the logs. 
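Editor's note: the comment just above is the whole point of the constant that follows: in Go time layouts, '0' fraction digits pad with zeros while '9' digits (as in time.RFC3339Nano) trim trailing zeros, so only the fixed layout keeps log timestamps column-aligned. A quick illustration, with the layout string copied from the deleted file:

```go
package main

import (
	"fmt"
	"time"
)

// Same layout as the deleted RFC3339NanoFixed: ".000000000" pads
// nanoseconds with zeros instead of trimming them.
const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"

func main() {
	t := time.Date(2015, 6, 1, 12, 0, 0, 500000000, time.UTC)
	fmt.Println(t.Format(time.RFC3339Nano)) // 2015-06-01T12:00:00.5Z
	fmt.Println(t.Format(rfc3339NanoFixed)) // 2015-06-01T12:00:00.500000000Z
}
```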
- RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - // JSONFormat is the format used by FastMarshalJSON - JSONFormat = `"` + time.RFC3339Nano + `"` -) - -// FastMarshalJSON avoids one of the extra allocations that -// time.MarshalJSON is making. -func FastMarshalJSON(t time.Time) (string, error) { - if y := t.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See golang.org/issue/4556#c15 for more discussion. - return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") - } - return t.Format(JSONFormat), nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/MAINTAINERS deleted file mode 100644 index 96abeae570..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Victor Vieux (@vieux) -Jessie Frazelle (@jfrazelle) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go deleted file mode 100644 index cd33121496..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go +++ /dev/null @@ -1,31 +0,0 @@ -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.) -func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours()); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*3 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%f years", d.Hours()/24/365) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go deleted file mode 100644 index a22947402b..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package units - -import ( - "testing" - "time" -) - -func TestHumanDuration(t *testing.T) { - // Useful duration abstractions - day := 24 * time.Hour - week := 7 * day - month := 30 * day - year := 365 * day - - assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) - assertEquals(t, "47 seconds", HumanDuration(47*time.Second)) - assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) - assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute)) - assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) - assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) - 
assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) - assertEquals(t, "2 days", HumanDuration(2*day)) - assertEquals(t, "7 days", HumanDuration(7*day)) - assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) - assertEquals(t, "2 weeks", HumanDuration(2*week)) - assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) - assertEquals(t, "3 weeks", HumanDuration(3*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) - assertEquals(t, "4 weeks", HumanDuration(1*month)) - assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) - assertEquals(t, "8 weeks", HumanDuration(2*month)) - assertEquals(t, "3 months", HumanDuration(3*month+1*week)) - assertEquals(t, "5 months", HumanDuration(5*month+2*week)) - assertEquals(t, "13 months", HumanDuration(13*month)) - assertEquals(t, "23 months", HumanDuration(23*month)) - assertEquals(t, "24 months", HumanDuration(24*month)) - assertEquals(t, "2.010959 years", HumanDuration(24*month+2*week)) - assertEquals(t, "3.164384 years", HumanDuration(3*year+2*month)) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go deleted file mode 100644 index 7cfb57ba51..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go +++ /dev/null @@ -1,91 +0,0 @@ -package units - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[string]int64 - -var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) -) - -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - -// HumanSize returns a human-readable approximation of a size -// using SI standard (eg. "44kB", "17MB") -func HumanSize(size float64) string { - return intToString(float64(size), 1000.0, decimapAbbrs) -} - -func BytesSize(size float64) string { - return intToString(size, 1024.0, binaryAbbrs) -} - -func intToString(size, unit float64, _map []string) string { - i := 0 - for size >= unit { - size = size / unit - i++ - } - return fmt.Sprintf("%.4g %s", size, _map[i]) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB") -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. 
-func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents -func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 3 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - size, err := strconv.ParseInt(matches[1], 10, 0) - if err != nil { - return -1, err - } - - unitPrefix := strings.ToLower(matches[2]) - if mul, ok := uMap[unitPrefix]; ok { - size *= mul - } - - return size, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go deleted file mode 100644 index 67c3b81e6b..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package units - -import ( - "reflect" - "runtime" - "strings" - "testing" -) - -func TestBytesSize(t *testing.T) { - assertEquals(t, "1 KiB", BytesSize(1024)) - assertEquals(t, "1 MiB", BytesSize(1024*1024)) - assertEquals(t, "1 MiB", BytesSize(1048576)) - assertEquals(t, "2 MiB", BytesSize(2*MiB)) - assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) - assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) - assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) -} - -func TestHumanSize(t *testing.T) { - assertEquals(t, "1 kB", HumanSize(1000)) - assertEquals(t, "1.024 kB", HumanSize(1024)) - assertEquals(t, "1 MB", HumanSize(1000000)) - assertEquals(t, "1.049 MB", HumanSize(1048576)) - assertEquals(t, "2 MB", HumanSize(2*MB)) - assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB))) - assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB))) - assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB))) -} - -func TestFromHumanSize(t *testing.T) { - assertSuccessEquals(t, 32, FromHumanSize, "32") - assertSuccessEquals(t, 32, FromHumanSize, "32b") - assertSuccessEquals(t, 32, FromHumanSize, "32B") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") - assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") - assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") - assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") - assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") - - assertError(t, FromHumanSize, "") - assertError(t, FromHumanSize, "hello") - assertError(t, FromHumanSize, "-32") - assertError(t, FromHumanSize, "32.3") - assertError(t, FromHumanSize, " 32 ") - assertError(t, FromHumanSize, "32.3Kb") - assertError(t, FromHumanSize, "32 mb") - assertError(t, FromHumanSize, "32m b") - assertError(t, FromHumanSize, "32bm") -} - -func TestRAMInBytes(t *testing.T) { - assertSuccessEquals(t, 32, RAMInBytes, "32") - assertSuccessEquals(t, 32, RAMInBytes, "32b") - assertSuccessEquals(t, 32, RAMInBytes, "32B") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") - assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") - assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") - assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") - - assertError(t, RAMInBytes, "") - 
assertError(t, RAMInBytes, "hello") - assertError(t, RAMInBytes, "-32") - assertError(t, RAMInBytes, "32.3") - assertError(t, RAMInBytes, " 32 ") - assertError(t, RAMInBytes, "32.3Kb") - assertError(t, RAMInBytes, "32 mb") - assertError(t, RAMInBytes, "32m b") - assertError(t, RAMInBytes, "32bm") -} - -func assertEquals(t *testing.T, expected, actual interface{}) { - if expected != actual { - t.Errorf("Expected '%v' but got '%v'", expected, actual) - } -} - -// func that maps to the parse function signatures as testing abstraction -type parseFn func(string) (int64, error) - -// Define 'String()' for pretty-print -func (fn parseFn) String() string { - fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() - return fnName[strings.LastIndex(fnName, ".")+1:] -} - -func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) { - res, err := fn(arg) - if err != nil || res != expected { - t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) - } -} - -func assertError(t *testing.T, fn parseFn, arg string) { - res, err := fn(arg) - if err == nil && res != -1 { - t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go deleted file mode 100644 index cc802a654c..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go +++ /dev/null @@ -1,63 +0,0 @@ -package version - -import ( - "strconv" - "strings" -) - -// Version provides utility methods for comparing versions. -type Version string - -func (v Version) compareTo(other Version) int { - var ( - currTab = strings.Split(string(v), ".") - otherTab = strings.Split(string(other), ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another version -func (v Version) LessThan(other Version) bool { - return v.compareTo(other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func (v Version) LessThanOrEqualTo(other Version) bool { - return v.compareTo(other) <= 0 -} - -// GreaterThan checks if a version is greater than another one -func (v Version) GreaterThan(other Version) bool { - return v.compareTo(other) == 1 -} - -// GreaterThanOrEqualTo checks ia version is greater than or equal to another -func (v Version) GreaterThanOrEqualTo(other Version) bool { - return v.compareTo(other) >= 0 -} - -// Equal checks if a version is equal to another -func (v Version) Equal(other Version) bool { - return v.compareTo(other) == 0 -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go deleted file mode 100644 index c02ec40fcb..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package version - -import ( - "testing" -) - -func assertVersion(t *testing.T, a, b string, result int) { - if r := Version(a).compareTo(Version(b)); r != result { - t.Fatalf("Unexpected version comparison result. 
Found %d, expected %d", r, result) - } -} - -func TestCompareVersion(t *testing.T) { - assertVersion(t, "1.12", "1.12", 0) - assertVersion(t, "1.0.0", "1", 0) - assertVersion(t, "1", "1.0.0", 0) - assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) - assertVersion(t, "1", "1.0.1", -1) - assertVersion(t, "1.0.1", "1", 1) - assertVersion(t, "1.0.1", "1.0.2", -1) - assertVersion(t, "1.0.2", "1.0.3", -1) - assertVersion(t, "1.0.3", "1.1", -1) - assertVersion(t, "1.1", "1.1.1", -1) - assertVersion(t, "1.1.1", "1.1.2", -1) - assertVersion(t, "1.1.2", "1.2", -1) - -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/daemon.go b/Godeps/_workspace/src/github.com/docker/docker/utils/daemon.go deleted file mode 100644 index 871122ed59..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/daemon.go +++ /dev/null @@ -1,36 +0,0 @@ -package utils - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "strconv" -) - -func CreatePidFile(pidfile string) error { - if pidString, err := ioutil.ReadFile(pidfile); err == nil { - pid, err := strconv.Atoi(string(pidString)) - if err == nil { - if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil { - return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile) - } - } - } - - file, err := os.Create(pidfile) - if err != nil { - return err - } - - defer file.Close() - - _, err = fmt.Fprintf(file, "%d", os.Getpid()) - return err -} - -func RemovePidFile(pidfile string) { - if err := os.Remove(pidfile); err != nil { - log.Printf("Error removing %s: %s", pidfile, err) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/flags.go b/Godeps/_workspace/src/github.com/docker/docker/utils/flags.go deleted file mode 100644 index 33c72279bb..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/flags.go +++ /dev/null @@ -1,45 +0,0 @@ -package utils - -import ( - "fmt" - "os" - - flag "github.com/docker/docker/pkg/mflag" -) - -// ParseFlags is a utility function that adds a help flag if withHelp is true, -// calls cmd.Parse(args) and prints a relevant error message if there are -// incorrect number of arguments. It returns error only if error handling is -// set to ContinueOnError and parsing fails. If error handling is set to -// ExitOnError, it's safe to ignore the return value. -// TODO: move this to a better package than utils -func ParseFlags(cmd *flag.FlagSet, args []string, withHelp bool) error { - var help *bool - if withHelp { - help = cmd.Bool([]string{"#help", "-help"}, false, "Print usage") - } - if err := cmd.Parse(args); err != nil { - return err - } - if help != nil && *help { - cmd.Usage() - // just in case Usage does not exit - os.Exit(0) - } - if str := cmd.CheckArgs(); str != "" { - ReportError(cmd, str, withHelp) - } - return nil -} - -func ReportError(cmd *flag.FlagSet, str string, withHelp bool) { - if withHelp { - if os.Args[0] == cmd.Name() { - str += ". See '" + os.Args[0] + " --help'" - } else { - str += ". 
See '" + os.Args[0] + " " + cmd.Name() + " --help'" - } - } - fmt.Fprintf(cmd.Out(), "docker: %s.\n", str) - os.Exit(1) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/http.go b/Godeps/_workspace/src/github.com/docker/docker/utils/http.go deleted file mode 100644 index 24eaea56bc..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/http.go +++ /dev/null @@ -1,168 +0,0 @@ -package utils - -import ( - "io" - "net/http" - "strings" - - log "github.com/Sirupsen/logrus" -) - -// VersionInfo is used to model entities which has a version. -// It is basically a tupple with name and version. -type VersionInfo interface { - Name() string - Version() string -} - -func validVersion(version VersionInfo) bool { - const stopChars = " \t\r\n/" - name := version.Name() - vers := version.Version() - if len(name) == 0 || strings.ContainsAny(name, stopChars) { - return false - } - if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { - return false - } - return true -} - -// Convert versions to a string and append the string to the string base. -// -// Each VersionInfo will be converted to a string in the format of -// "product/version", where the "product" is get from the Name() method, while -// version is get from the Version() method. Several pieces of verson information -// will be concatinated and separated by space. -func appendVersions(base string, versions ...VersionInfo) string { - if len(versions) == 0 { - return base - } - - verstrs := make([]string, 0, 1+len(versions)) - if len(base) > 0 { - verstrs = append(verstrs, base) - } - - for _, v := range versions { - if !validVersion(v) { - continue - } - verstrs = append(verstrs, v.Name()+"/"+v.Version()) - } - return strings.Join(verstrs, " ") -} - -// HTTPRequestDecorator is used to change an instance of -// http.Request. It could be used to add more header fields, -// change body, etc. -type HTTPRequestDecorator interface { - // ChangeRequest() changes the request accordingly. - // The changed request will be returned or err will be non-nil - // if an error occur. - ChangeRequest(req *http.Request) (newReq *http.Request, err error) -} - -// HTTPUserAgentDecorator appends the product/version to the user agent field -// of a request. -type HTTPUserAgentDecorator struct { - versions []VersionInfo -} - -func NewHTTPUserAgentDecorator(versions ...VersionInfo) HTTPRequestDecorator { - return &HTTPUserAgentDecorator{ - versions: versions, - } -} - -func (h *HTTPUserAgentDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) { - if req == nil { - return req, nil - } - - userAgent := appendVersions(req.UserAgent(), h.versions...) 
- if len(userAgent) > 0 { - req.Header.Set("User-Agent", userAgent) - } - return req, nil -} - -type HTTPMetaHeadersDecorator struct { - Headers map[string][]string -} - -func (h *HTTPMetaHeadersDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) { - if h.Headers == nil { - return req, nil - } - for k, v := range h.Headers { - req.Header[k] = v - } - return req, nil -} - -type HTTPAuthDecorator struct { - login string - password string -} - -func NewHTTPAuthDecorator(login, password string) HTTPRequestDecorator { - return &HTTPAuthDecorator{ - login: login, - password: password, - } -} - -func (self *HTTPAuthDecorator) ChangeRequest(req *http.Request) (*http.Request, error) { - req.SetBasicAuth(self.login, self.password) - return req, nil -} - -// HTTPRequestFactory creates an HTTP request -// and applies a list of decorators on the request. -type HTTPRequestFactory struct { - decorators []HTTPRequestDecorator -} - -func NewHTTPRequestFactory(d ...HTTPRequestDecorator) *HTTPRequestFactory { - return &HTTPRequestFactory{ - decorators: d, - } -} - -func (self *HTTPRequestFactory) AddDecorator(d ...HTTPRequestDecorator) { - self.decorators = append(self.decorators, d...) -} - -func (self *HTTPRequestFactory) GetDecorators() []HTTPRequestDecorator { - return self.decorators -} - -// NewRequest() creates a new *http.Request, -// applies all decorators in the HTTPRequestFactory on the request, -// then applies decorators provided by d on the request. -func (h *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...HTTPRequestDecorator) (*http.Request, error) { - req, err := http.NewRequest(method, urlStr, body) - if err != nil { - return nil, err - } - - // By default, a nil factory should work. - if h == nil { - return req, nil - } - for _, dec := range h.decorators { - req, err = dec.ChangeRequest(req) - if err != nil { - return nil, err - } - } - for _, dec := range d { - req, err = dec.ChangeRequest(req) - if err != nil { - return nil, err - } - } - log.Debugf("%v -- HEADERS: %v", req.URL, req.Header) - return req, err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage.go b/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage.go deleted file mode 100644 index 74d3112719..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage.go +++ /dev/null @@ -1,172 +0,0 @@ -package utils - -import ( - "encoding/json" - "fmt" - "io" - "strings" - "time" - - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/pkg/timeutils" - "github.com/docker/docker/pkg/units" -) - -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -type JSONProgress struct { - terminalFd uintptr - Current int `json:"current,omitempty"` - Total int `json:"total,omitempty"` - Start int64 `json:"start,omitempty"` -} - -func (p *JSONProgress) String() string { - var ( - width = 200 - pbBox string - numbersBox string - timeLeftBox string - ) - - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - width = int(ws.Width) - } - - if p.Current <= 0 && p.Total <= 0 { - return "" - } - current := units.HumanSize(float64(p.Current)) - if p.Total <= 0 { - return fmt.Sprintf("%8v", current) - } - total := units.HumanSize(float64(p.Total)) - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negetive 
gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` //deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` //deprecated -} - -func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { - if jm.Error != nil { - if jm.Error.Code == 401 { - return fmt.Errorf("Authentication is required.") - } - return jm.Error - } - var endl string - if isTerminal && jm.Stream == "" && jm.Progress != nil { - // [2K = erase entire current line - fmt.Fprintf(out, "%c[2K\r", 27) - endl = "\r" - } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal - return nil - } - if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, "(from %s) ", jm.From) - } - if jm.Progress != nil && isTerminal { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { //deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]int) - diff = 0 - ) - for { - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - line = len(ids) - ids[jm.ID] = line - if isTerminal { - fmt.Fprintf(out, "\n") - } - diff = 0 - } else { - diff = len(ids) - line - } - if jm.ID != "" && isTerminal { - // [{diff}A = move cursor up diff rows - fmt.Fprintf(out, "%c[%dA", 27, diff) - } - } - err := jm.Display(out, isTerminal) - if jm.ID != "" && isTerminal { - // [{diff}B = move cursor down diff rows - fmt.Fprintf(out, "%c[%dB", 27, diff) - } - if err != nil { - return err - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage_test.go b/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage_test.go deleted file mode 100644 index b9103da1a4..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package utils - -import ( - "testing" -) - -func TestError(t *testing.T) { - je := JSONError{404, 
"Not found"} - if je.Error() != "Not found" { - t.Fatalf("Expected 'Not found' got '%s'", je.Error()) - } -} - -func TestProgress(t *testing.T) { - jp := JSONProgress{} - if jp.String() != "" { - t.Fatalf("Expected empty string, got '%s'", jp.String()) - } - - expected := " 1 B" - jp2 := JSONProgress{Current: 1} - if jp2.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp2.String()) - } - - expected = "[=========================> ] 50 B/100 B" - jp3 := JSONProgress{Current: 50, Total: 100} - if jp3.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp3.String()) - } - - // this number can't be negetive gh#7136 - expected = "[==================================================>] 50 B/40 B" - jp4 := JSONProgress{Current: 50, Total: 40} - if jp4.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp4.String()) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/progressreader.go b/Godeps/_workspace/src/github.com/docker/docker/utils/progressreader.go deleted file mode 100644 index 87eae8ba73..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/progressreader.go +++ /dev/null @@ -1,55 +0,0 @@ -package utils - -import ( - "io" - "time" -) - -// Reader with progress bar -type progressReader struct { - reader io.ReadCloser // Stream to read from - output io.Writer // Where to send progress bar to - progress JSONProgress - lastUpdate int // How many bytes read at least update - ID string - action string - sf *StreamFormatter - newLine bool -} - -func (r *progressReader) Read(p []byte) (n int, err error) { - read, err := r.reader.Read(p) - r.progress.Current += read - updateEvery := 1024 * 512 //512kB - if r.progress.Total > 0 { - // Update progress for every 1% read if 1% < 512kB - if increment := int(0.01 * float64(r.progress.Total)); increment < updateEvery { - updateEvery = increment - } - } - if r.progress.Current-r.lastUpdate > updateEvery || err != nil { - r.output.Write(r.sf.FormatProgress(r.ID, r.action, &r.progress)) - r.lastUpdate = r.progress.Current - } - // Send newline when complete - if r.newLine && err != nil && read == 0 { - r.output.Write(r.sf.FormatStatus("", "")) - } - return read, err -} -func (r *progressReader) Close() error { - r.progress.Current = r.progress.Total - r.output.Write(r.sf.FormatProgress(r.ID, r.action, &r.progress)) - return r.reader.Close() -} -func ProgressReader(r io.ReadCloser, size int, output io.Writer, sf *StreamFormatter, newline bool, ID, action string) *progressReader { - return &progressReader{ - reader: r, - output: NewWriteFlusher(output), - ID: ID, - action: action, - progress: JSONProgress{Total: size, Start: time.Now().UTC().Unix()}, - sf: sf, - newLine: newline, - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/random.go b/Godeps/_workspace/src/github.com/docker/docker/utils/random.go deleted file mode 100644 index 907f28eec3..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/random.go +++ /dev/null @@ -1,16 +0,0 @@ -package utils - -import ( - "crypto/rand" - "encoding/hex" - "io" -) - -func RandomString() string { - id := make([]byte, 32) - - if _, err := io.ReadFull(rand.Reader, id); err != nil { - panic(err) // This shouldn't happen - } - return hex.EncodeToString(id) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter.go b/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter.go deleted file mode 100644 index d0bc295bb3..0000000000 --- 
a/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter.go +++ /dev/null @@ -1,112 +0,0 @@ -package utils - -import ( - "encoding/json" - "fmt" - "io" -) - -type StreamFormatter struct { - json bool -} - -func NewStreamFormatter(json bool) *StreamFormatter { - return &StreamFormatter{json} -} - -const streamNewline = "\r\n" - -var streamNewlineBytes = []byte(streamNewline) - -func (sf *StreamFormatter) FormatStream(str string) []byte { - if sf.json { - b, err := json.Marshal(&JSONMessage{Stream: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) - } - return []byte(str + "\r") -} - -func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { - str := fmt.Sprintf(format, a...) - if sf.json { - b, err := json.Marshal(&JSONMessage{ID: id, Status: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) - } - return []byte(str + streamNewline) -} - -func (sf *StreamFormatter) FormatError(err error) []byte { - if sf.json { - jsonError, ok := err.(*JSONError) - if !ok { - jsonError = &JSONError{Message: err.Error()} - } - if b, err := json.Marshal(&JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { - return append(b, streamNewlineBytes...) - } - return []byte("{\"error\":\"format error\"}" + streamNewline) - } - return []byte("Error: " + err.Error() + streamNewline) -} - -func (sf *StreamFormatter) FormatProgress(id, action string, progress *JSONProgress) []byte { - if progress == nil { - progress = &JSONProgress{} - } - if sf.json { - - b, err := json.Marshal(&JSONMessage{ - Status: action, - ProgressMessage: progress.String(), - Progress: progress, - ID: id, - }) - if err != nil { - return nil - } - return b - } - endl := "\r" - if progress.String() == "" { - endl += "\n" - } - return []byte(action + " " + progress.String() + endl) -} - -func (sf *StreamFormatter) Json() bool { - return sf.json -} - -type StdoutFormater struct { - io.Writer - *StreamFormatter -} - -func (sf *StdoutFormater) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} - -type StderrFormater struct { - io.Writer - *StreamFormatter -} - -func (sf *StderrFormater) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter_test.go b/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter_test.go deleted file mode 100644 index 20610f6c01..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package utils - -import ( - "encoding/json" - "errors" - "reflect" - "testing" -) - -func TestFormatStream(t *testing.T) { - sf := NewStreamFormatter(true) - res := sf.FormatStream("stream") - if string(res) != `{"stream":"stream"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestFormatStatus(t *testing.T) { - sf := NewStreamFormatter(true) - res := sf.FormatStatus("ID", "%s%d", "a", 1) - if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestFormatSimpleError(t *testing.T) { - sf := NewStreamFormatter(true) - res := 
sf.FormatError(errors.New("Error for formatter")) - if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestFormatJSONError(t *testing.T) { - sf := NewStreamFormatter(true) - err := &JSONError{Code: 50, Message: "Json error"} - res := sf.FormatError(err) - if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestFormatProgress(t *testing.T) { - sf := NewStreamFormatter(true) - progress := &JSONProgress{ - Current: 15, - Total: 30, - Start: 1, - } - res := sf.FormatProgress("id", "action", progress) - msg := &JSONMessage{} - if err := json.Unmarshal(res, msg); err != nil { - t.Fatal(err) - } - if msg.ID != "id" { - t.Fatalf("ID must be 'id', got: %s", msg.ID) - } - if msg.Status != "action" { - t.Fatalf("Status must be 'action', got: %s", msg.Status) - } - if msg.ProgressMessage != progress.String() { - t.Fatalf("ProgressMessage must be %s, got: %s", progress.String(), msg.ProgressMessage) - } - if !reflect.DeepEqual(msg.Progress, progress) { - t.Fatal("Original progress not equals progress from FormatProgress") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/timeoutconn.go b/Godeps/_workspace/src/github.com/docker/docker/utils/timeoutconn.go deleted file mode 100644 index a3231c7ee3..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/timeoutconn.go +++ /dev/null @@ -1,26 +0,0 @@ -package utils - -import ( - "net" - "time" -) - -func NewTimeoutConn(conn net.Conn, timeout time.Duration) net.Conn { - return &TimeoutConn{conn, timeout} -} - -// A net.Conn that sets a deadline for every Read or Write operation -type TimeoutConn struct { - net.Conn - timeout time.Duration -} - -func (c *TimeoutConn) Read(b []byte) (int, error) { - if c.timeout > 0 { - err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout)) - if err != nil { - return 0, err - } - } - return c.Conn.Read(b) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/timeoutconn_test.go b/Godeps/_workspace/src/github.com/docker/docker/utils/timeoutconn_test.go deleted file mode 100644 index d07b96cc06..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/timeoutconn_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package utils - -import ( - "bufio" - "fmt" - "net" - "net/http" - "net/http/httptest" - "testing" - "time" -) - -func TestTimeoutConnRead(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "hello") - })) - defer ts.Close() - conn, err := net.Dial("tcp", ts.URL[7:]) - if err != nil { - t.Fatalf("failed to create connection to %q: %v", ts.URL, err) - } - tconn := NewTimeoutConn(conn, 1*time.Second) - - if _, err = bufio.NewReader(tconn).ReadString('\n'); err == nil { - t.Fatalf("expected timeout error, got none") - } - if _, err := fmt.Fprintf(tconn, "GET / HTTP/1.0\r\n\r\n"); err != nil { - t.Errorf("unexpected error: %v", err) - } - if _, err = bufio.NewReader(tconn).ReadString('\n'); err != nil { - t.Errorf("unexpected error: %v", err) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/tmpdir.go b/Godeps/_workspace/src/github.com/docker/docker/utils/tmpdir.go deleted file mode 100644 index e200f340db..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/tmpdir.go +++ /dev/null @@ -1,16 +0,0 @@ -package utils - -import ( - "os" - "path/filepath" -) - -// TempDir returns the 
default directory to use for temporary files. -func TempDir(rootDir string) (string, error) { - var tmpDir string - if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { - tmpDir = filepath.Join(rootDir, "tmp") - } - err := os.MkdirAll(tmpDir, 0700) - return tmpDir, err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/utils.go b/Godeps/_workspace/src/github.com/docker/docker/utils/utils.go deleted file mode 100644 index 2447d11f11..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/utils.go +++ /dev/null @@ -1,547 +0,0 @@ -package utils - -import ( - "bufio" - "bytes" - "crypto/rand" - "crypto/sha1" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "os/exec" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/ioutils" -) - -type KeyValuePair struct { - Key string - Value string -} - -var ( - validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) -) - -// Request a given URL and return an io.Reader -func Download(url string) (resp *http.Response, err error) { - if resp, err = http.Get(url); err != nil { - return nil, err - } - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) - } - return resp, nil -} - -func Trunc(s string, maxlen int) string { - if len(s) <= maxlen { - return s - } - return s[:maxlen] -} - -// Figure out the absolute path of our own binary (if it's still around). -func SelfPath() string { - path, err := exec.LookPath(os.Args[0]) - if err != nil { - if os.IsNotExist(err) { - return "" - } - if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) { - return "" - } - panic(err) - } - path, err = filepath.Abs(path) - if err != nil { - if os.IsNotExist(err) { - return "" - } - panic(err) - } - return path -} - -func dockerInitSha1(target string) string { - f, err := os.Open(target) - if err != nil { - return "" - } - defer f.Close() - h := sha1.New() - _, err = io.Copy(h, f) - if err != nil { - return "" - } - return hex.EncodeToString(h.Sum(nil)) -} - -func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this) - if target == "" { - return false - } - if dockerversion.IAMSTATIC == "true" { - if selfPath == "" { - return false - } - if target == selfPath { - return true - } - targetFileInfo, err := os.Lstat(target) - if err != nil { - return false - } - selfPathFileInfo, err := os.Lstat(selfPath) - if err != nil { - return false - } - return os.SameFile(targetFileInfo, selfPathFileInfo) - } - return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1 -} - -// Figure out the path of our dockerinit (which may be SelfPath()) -func DockerInitPath(localCopy string) string { - selfPath := SelfPath() - if isValidDockerInitPath(selfPath, selfPath) { - // if we're valid, don't bother checking anything else - return selfPath - } - var possibleInits = []string{ - localCopy, - dockerversion.INITPATH, - filepath.Join(filepath.Dir(selfPath), "dockerinit"), - - // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec." 
- // http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec - "/usr/libexec/docker/dockerinit", - "/usr/local/libexec/docker/dockerinit", - - // FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts." - // http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA - "/usr/lib/docker/dockerinit", - "/usr/local/lib/docker/dockerinit", - } - for _, dockerInit := range possibleInits { - if dockerInit == "" { - continue - } - path, err := exec.LookPath(dockerInit) - if err == nil { - path, err = filepath.Abs(path) - if err != nil { - // LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail? - panic(err) - } - if isValidDockerInitPath(path, selfPath) { - return path - } - } - } - return "" -} - -func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - log.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) - } else { - return len(fds) - } - return -1 -} - -// TruncateID returns a shorthand version of a string identifier for convenience. -// A collision with other shorthands is very unlikely, but possible. -// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller -// will need to use a longer prefix, or the full-length Id. -func TruncateID(id string) string { - shortLen := 12 - if len(id) < shortLen { - shortLen = len(id) - } - return id[:shortLen] -} - -// GenerateRandomID returns a unique id -func GenerateRandomID() string { - for { - id := make([]byte, 32) - if _, err := io.ReadFull(rand.Reader, id); err != nil { - panic(err) // This shouldn't happen - } - value := hex.EncodeToString(id) - // if we try to parse the truncated form as an int and we don't have - // an error then the value is all numeric and causes issues when - // used as a hostname. ref #3869 - if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil { - continue - } - return value - } -} - -func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - err := fmt.Errorf("image ID '%s' is invalid", id) - return err - } - return nil -} - -// Code copied from io.Copy(), modified to handle the escape sequence -func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) { - buf := make([]byte, 32*1024) - for { - nr, er := src.Read(buf) - if nr > 0 { - // ---- Docker addition - // char 16 is C-p - if nr == 1 && buf[0] == 16 { - nr, er = src.Read(buf) - // char 17 is C-q - if nr == 1 && buf[0] == 17 { - if err := src.Close(); err != nil { - return 0, err - } - return 0, nil - } - } - // ---- End of docker - nw, ew := dst.Write(buf[0:nr]) - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er == io.EOF { - break - } - if er != nil { - err = er - break - } - } - return written, err -} - -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} -
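The strconv check in GenerateRandomID above is the subtle part: a truncated ID that happens to be all digits would later misbehave when used as a hostname (the ref #3869 comment). A standalone sketch of just that rejection test, assuming the same 12-character truncation; the sample IDs are invented:

package main

import (
    "fmt"
    "strconv"
)

// wouldRegenerate mirrors the loop condition in GenerateRandomID: if the
// truncated ID parses as a base-10 integer, the ID is discarded and a new
// one is drawn.
func wouldRegenerate(truncated string) bool {
    _, err := strconv.ParseInt(truncated, 10, 64)
    return err == nil
}

func main() {
    fmt.Println(wouldRegenerate("123456789012")) // true: all-numeric, rejected
    fmt.Println(wouldRegenerate("a23456789012")) // false: kept
}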
-type WriteFlusher struct { - sync.Mutex - w io.Writer - flusher http.Flusher -} - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - wf.Lock() - defer wf.Unlock() - n, err = wf.w.Write(b) - wf.flusher.Flush() - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - wf.Lock() - defer wf.Unlock() - wf.flusher.Flush() -} - -func NewWriteFlusher(w io.Writer) *WriteFlusher { - var flusher http.Flusher - if f, ok := w.(http.Flusher); ok { - flusher = f - } else { - flusher = &ioutils.NopFlusher{} - } - return &WriteFlusher{w: w, flusher: flusher} -} - -func NewHTTPRequestError(msg string, res *http.Response) error { - return &JSONError{ - Message: msg, - Code: res.StatusCode, - } -} - -// A StatusError reports an unsuccessful exit by a command. -type StatusError struct { - Status string - StatusCode int -} - -func (e *StatusError) Error() string { - return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) -} - -func quote(word string, buf *bytes.Buffer) { - // Bail out early for "simple" strings - if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") { - buf.WriteString(word) - return - } - - buf.WriteString("'") - - for i := 0; i < len(word); i++ { - b := word[i] - if b == '\'' { - // Replace literal ' with a close ', a \', and an open ' - buf.WriteString("'\\''") - } else { - buf.WriteByte(b) - } - } - - buf.WriteString("'") -} - -// Take a list of strings and escape them so they will be handled right - // when passed as arguments to a program via a shell -func ShellQuoteArguments(args []string) string { - var buf bytes.Buffer - for i, arg := range args { - if i != 0 { - buf.WriteByte(' ') - } - quote(arg, &buf) - } - return buf.String() -} - -var globalTestID string - -// TestDirectory creates a new temporary directory and returns its path. -// The contents of directory at path `templateDir` are copied into the -// new directory. -func TestDirectory(templateDir string) (dir string, err error) { - if globalTestID == "" { - globalTestID = RandomString()[:4] - } - prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) - if prefix == "" { - prefix = "docker-test-" - } - dir, err = ioutil.TempDir("", prefix) - if err = os.Remove(dir); err != nil { - return - } - if templateDir != "" { - if err = archive.CopyWithTar(templateDir, dir); err != nil { - return - } - } - return -} - -// GetCallerName introspects the call stack and returns the name of the -// function `depth` levels down in the stack. -func GetCallerName(depth int) string { - // Use the caller function name as a prefix. - // This helps trace temp directories back to their test. - pc, _, _, _ := runtime.Caller(depth + 1) - callerLongName := runtime.FuncForPC(pc).Name() - parts := strings.Split(callerLongName, ".") - callerShortName := parts[len(parts)-1] - return callerShortName -} - -func CopyFile(src, dst string) (int64, error) { - if src == dst { - return 0, nil - } - sf, err := os.Open(src) - if err != nil { - return 0, err - } - defer sf.Close() - if err := os.Remove(dst); err != nil && !os.IsNotExist(err) { - return 0, err - } - df, err := os.Create(dst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - -// ReplaceOrAppendEnvValues returns the defaults with the overrides either -// replaced by env key or appended to the list -func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { - cache := make(map[string]int, len(defaults)) - for i, e := range defaults { - parts := strings.SplitN(e, "=", 2) - cache[parts[0]] = i - } - - for _, value := range overrides { - // Values w/o = mean they want this env to be removed/unset. - if !strings.Contains(value, "=") { - if i, exists := cache[value]; exists { - defaults[i] = "" // Used to indicate it should be removed - } - continue - } - - // Just do a normal set/update - parts := strings.SplitN(value, "=", 2) - if i, exists := cache[parts[0]]; exists { - defaults[i] = value - } else { - defaults = append(defaults, value) - } - } - - // Now remove all entries that we want to "unset" - for i := 0; i < len(defaults); i++ { - if defaults[i] == "" { - defaults = append(defaults[:i], defaults[i+1:]...) - i-- - } - } - - return defaults -} -
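A usage sketch for ReplaceOrAppendEnvValues as just defined, written as a Go example test that would sit alongside the deleted file; the inputs are invented and the output follows from the replace/append/unset rules above:

package utils

import "fmt"

func ExampleReplaceOrAppendEnvValues() {
    defaults := []string{"HOME=/", "TERM=xterm", "DEBUG=1"}
    overrides := []string{"HOME=/root", "LANG=C", "DEBUG"}
    // "HOME" is replaced, "LANG" is appended, and the bare "DEBUG"
    // (no "=") unsets the existing entry.
    fmt.Println(ReplaceOrAppendEnvValues(defaults, overrides))
    // Output: [HOME=/root TERM=xterm LANG=C]
}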
-func DoesEnvExist(name string) bool { - for _, entry := range os.Environ() { - parts := strings.SplitN(entry, "=", 2) - if parts[0] == name { - return true - } - } - return false -} - -// ReadSymlinkedDirectory returns the target directory of a symlink. -// The target of the symbolic link must not be a file. -func ReadSymlinkedDirectory(path string) (string, error) { - var realPath string - var err error - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) - } - realPathInfo, err := os.Stat(realPath) - if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) - } - if !realPathInfo.Mode().IsDir() { - return "", fmt.Errorf("canonical path points to a file '%s'", realPath) - } - return realPath, nil -} - -// ValidateContextDirectory checks if all the contents of the directory -// can be read and returns an error if some files can't be read; -// symlinks which point to non-existing files don't trigger an error -func ValidateContextDirectory(srcPath string, excludes []string) error { - return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { - // skip this directory/file if it's not in the path; it won't get added to the context - if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil { - return err - } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { - return err - } else if skip { - if f.IsDir() { - return filepath.SkipDir - } - return nil - } - - if err != nil { - if os.IsPermission(err) { - return fmt.Errorf("can't stat '%s'", filePath) - } - if os.IsNotExist(err) { - return nil - } - return err - } - - // skip checking if symlinks point to non-existing files, such symlinks can be useful - // also skip named pipes, because they hang on open - if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { - return nil - } - - if !f.IsDir() { - currentFile, err := os.Open(filePath) - if err != nil && os.IsPermission(err) { - return fmt.Errorf("no permission to read from '%s'", filePath) - } - currentFile.Close() - } - return nil - }) -} - -func StringsContainsNoCase(slice []string, s string) bool { - for _, ss := range slice { - if strings.ToLower(s) == strings.ToLower(ss) { - return true - } - } - return false -} - -// Reads a .dockerignore file and returns the list of file patterns -// to ignore. Note this will trim whitespace from each line as well -// as use Go's "clean" func to get the shortest/cleanest path for each.
-func ReadDockerIgnore(path string) ([]string, error) { - // Note that a missing .dockerignore file isn't treated as an error - reader, err := os.Open(path) - if err != nil { - if !os.IsNotExist(err) { - return nil, fmt.Errorf("Error reading '%s': %v", path, err) - } - return nil, nil - } - defer reader.Close() - - scanner := bufio.NewScanner(reader) - var excludes []string - - for scanner.Scan() { - pattern := strings.TrimSpace(scanner.Text()) - if pattern == "" { - continue - } - pattern = filepath.Clean(pattern) - excludes = append(excludes, pattern) - } - if err = scanner.Err(); err != nil { - return nil, fmt.Errorf("Error reading '%s': %v", path, err) - } - return excludes, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/utils_daemon.go b/Godeps/_workspace/src/github.com/docker/docker/utils/utils_daemon.go deleted file mode 100644 index 9989f05e31..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/utils_daemon.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build daemon - -package utils - -import ( - "os" - "path/filepath" - "syscall" -) - -// TreeSize walks a directory tree and returns its total size in bytes. -func TreeSize(dir string) (size int64, err error) { - data := make(map[uint64]struct{}) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { - // Ignore directory sizes - if fileInfo == nil { - return nil - } - - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil - } - - // Check inode to handle hard links correctly - inode := fileInfo.Sys().(*syscall.Stat_t).Ino - // inode is not a uint64 on all platforms. Cast it to avoid issues. - if _, exists := data[uint64(inode)]; exists { - return nil - } - // inode is not a uint64 on all platforms. Cast it to avoid issues. - data[uint64(inode)] = struct{}{} - - size += s - - return nil - }) - return -} - -// IsFileOwner checks whether the current user is the owner of the given file. 
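The inode map in TreeSize above is what keeps hard links from being double-counted. A Unix-only standalone sketch of that idea, using the era-appropriate ioutil helpers; the temp-dir setup and sizes are illustrative:

package main

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "syscall"
)

func main() {
    dir, _ := ioutil.TempDir("", "treesize")
    defer os.RemoveAll(dir)
    f := filepath.Join(dir, "data")
    ioutil.WriteFile(f, make([]byte, 1024), 0600)
    os.Link(f, filepath.Join(dir, "hardlink"))

    // Same walk-and-dedup logic as TreeSize: a hard link shares its
    // inode with the original, so its bytes are counted only once.
    seen := map[uint64]struct{}{}
    var size int64
    filepath.Walk(dir, func(_ string, fi os.FileInfo, _ error) error {
        if fi == nil || fi.IsDir() || fi.Size() == 0 {
            return nil
        }
        ino := uint64(fi.Sys().(*syscall.Stat_t).Ino)
        if _, ok := seen[ino]; ok {
            return nil
        }
        seen[ino] = struct{}{}
        size += fi.Size()
        return nil
    })
    fmt.Println(size) // 1024, not 2048
}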
-func IsFileOwner(f string) bool { - if fileInfo, err := os.Stat(f); err == nil && fileInfo != nil { - if stat, ok := fileInfo.Sys().(*syscall.Stat_t); ok && int(stat.Uid) == os.Getuid() { - return true - } - } - return false -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go b/Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go deleted file mode 100644 index ce304482b8..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package utils - -import ( - "os" - "testing" -) - -func TestReplaceAndAppendEnvVars(t *testing.T) { - var ( - d = []string{"HOME=/"} - o = []string{"HOME=/root", "TERM=xterm"} - ) - - env := ReplaceOrAppendEnvValues(d, o) - if len(env) != 2 { - t.Fatalf("expected len of 2 got %d", len(env)) - } - if env[0] != "HOME=/root" { - t.Fatalf("expected HOME=/root got '%s'", env[0]) - } - if env[1] != "TERM=xterm" { - t.Fatalf("expected TERM=xterm got '%s'", env[1]) - } -} - -// Reading a symlink to a directory must return the directory -func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { - var err error - if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { - t.Errorf("failed to create directory: %s", err) - } - - if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { - t.Fatalf("failed to read symlink to directory: %s", err) - } - - if path != "/tmp/testReadSymlinkToExistingDirectory" { - t.Fatalf("symlink returned unexpected directory: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { - t.Errorf("failed to remove temporary directory: %s", err) - } - - if err = os.Remove("/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -// Reading a non-existing symlink must fail -func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { - var path string - var err error - if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { - t.Fatalf("error expected for non-existing symlink") - } - - if path != "" { - t.Fatalf("expected empty path, but '%s' was returned", path) - } -} - -// Reading a symlink to a file must fail -func TestReadSymlinkedDirectoryToFile(t *testing.T) { - var err error - var file *os.File - - if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { - t.Fatalf("failed to create file: %s", err) - } - - file.Close() - - if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { - t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") - } - - if path != "" { - t.Fatalf("path should've been empty: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { - t.Errorf("failed to remove file: %s", err) - } - - if err = os.Remove("/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go deleted file mode 100644 index e363aa793e..0000000000 --- 
a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tar implements access to tar archives. -// It aims to cover most of the variations, including those produced -// by GNU and BSD tars. -// -// References: -// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5 -// http://www.gnu.org/software/tar/manual/html_node/Standard.html -// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html -package tar - -import ( - "bytes" - "errors" - "fmt" - "os" - "path" - "time" -) - -const ( - blockSize = 512 - - // Types - TypeReg = '0' // regular file - TypeRegA = '\x00' // regular file - TypeLink = '1' // hard link - TypeSymlink = '2' // symbolic link - TypeChar = '3' // character device node - TypeBlock = '4' // block device node - TypeDir = '5' // directory - TypeFifo = '6' // fifo node - TypeCont = '7' // reserved - TypeXHeader = 'x' // extended header - TypeXGlobalHeader = 'g' // global extended header - TypeGNULongName = 'L' // Next file has a long name - TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name - TypeGNUSparse = 'S' // sparse file -) - -// A Header represents a single header in a tar archive. -// Some fields may not be populated. -type Header struct { - Name string // name of header file entry - Mode int64 // permission and mode bits - Uid int // user id of owner - Gid int // group id of owner - Size int64 // length in bytes - ModTime time.Time // modified time - Typeflag byte // type of header entry - Linkname string // target name of link - Uname string // user name of owner - Gname string // group name of owner - Devmajor int64 // major number of character or block device - Devminor int64 // minor number of character or block device - AccessTime time.Time // access time - ChangeTime time.Time // status change time - Xattrs map[string]string -} - -// File name constants from the tar spec. -const ( - fileNameSize = 100 // Maximum number of bytes in a standard tar name. - fileNamePrefixSize = 155 // Maximum number of ustar extension bytes. -) - -// FileInfo returns an os.FileInfo for the Header. -func (h *Header) FileInfo() os.FileInfo { - return headerFileInfo{h} -} - -// headerFileInfo implements os.FileInfo. -type headerFileInfo struct { - h *Header -} - -func (fi headerFileInfo) Size() int64 { return fi.h.Size } -func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } -func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } -func (fi headerFileInfo) Sys() interface{} { return fi.h } - -// Name returns the base name of the file. -func (fi headerFileInfo) Name() string { - if fi.IsDir() { - return path.Base(path.Clean(fi.h.Name)) - } - return path.Base(fi.h.Name) -} - -// Mode returns the permission and mode bits for the headerFileInfo. -func (fi headerFileInfo) Mode() (mode os.FileMode) { - // Set file permission bits. - mode = os.FileMode(fi.h.Mode).Perm() - - // Set setuid, setgid and sticky bits. - if fi.h.Mode&c_ISUID != 0 { - // setuid - mode |= os.ModeSetuid - } - if fi.h.Mode&c_ISGID != 0 { - // setgid - mode |= os.ModeSetgid - } - if fi.h.Mode&c_ISVTX != 0 { - // sticky - mode |= os.ModeSticky - } - - // Set file mode bits. - // clear perm, setuid, setgid and sticky bits. 
- m := os.FileMode(fi.h.Mode) &^ 07777 - if m == c_ISDIR { - // directory - mode |= os.ModeDir - } - if m == c_ISFIFO { - // named pipe (FIFO) - mode |= os.ModeNamedPipe - } - if m == c_ISLNK { - // symbolic link - mode |= os.ModeSymlink - } - if m == c_ISBLK { - // device file - mode |= os.ModeDevice - } - if m == c_ISCHR { - // Unix character device - mode |= os.ModeDevice - mode |= os.ModeCharDevice - } - if m == c_ISSOCK { - // Unix domain socket - mode |= os.ModeSocket - } - - switch fi.h.Typeflag { - case TypeLink, TypeSymlink: - // hard link, symbolic link - mode |= os.ModeSymlink - case TypeChar: - // character device node - mode |= os.ModeDevice - mode |= os.ModeCharDevice - case TypeBlock: - // block device node - mode |= os.ModeDevice - case TypeDir: - // directory - mode |= os.ModeDir - case TypeFifo: - // fifo node - mode |= os.ModeNamedPipe - } - - return mode -} - -// sysStat, if non-nil, populates h from system-dependent fields of fi. -var sysStat func(fi os.FileInfo, h *Header) error - -// Mode constants from the tar spec. -const ( - c_ISUID = 04000 // Set uid - c_ISGID = 02000 // Set gid - c_ISVTX = 01000 // Save text (sticky bit) - c_ISDIR = 040000 // Directory - c_ISFIFO = 010000 // FIFO - c_ISREG = 0100000 // Regular file - c_ISLNK = 0120000 // Symbolic link - c_ISBLK = 060000 // Block special file - c_ISCHR = 020000 // Character special file - c_ISSOCK = 0140000 // Socket -) - -// Keywords for the PAX Extended Header -const ( - paxAtime = "atime" - paxCharset = "charset" - paxComment = "comment" - paxCtime = "ctime" // please note that ctime is not a valid pax header. - paxGid = "gid" - paxGname = "gname" - paxLinkpath = "linkpath" - paxMtime = "mtime" - paxPath = "path" - paxSize = "size" - paxUid = "uid" - paxUname = "uname" - paxXattr = "SCHILY.xattr." - paxNone = "" -) - -// FileInfoHeader creates a partially-populated Header from fi. -// If fi describes a symlink, FileInfoHeader records link as the link target. -// If fi describes a directory, a slash is appended to the name. -// Because os.FileInfo's Name method returns only the base name of -// the file it describes, it may be necessary to modify the Name field -// of the returned header to provide the full path name of the file. -func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { - if fi == nil { - return nil, errors.New("tar: FileInfo is nil") - } - fm := fi.Mode() - h := &Header{ - Name: fi.Name(), - ModTime: fi.ModTime(), - Mode: int64(fm.Perm()), // or'd with c_IS* constants later - } - switch { - case fm.IsRegular(): - h.Mode |= c_ISREG - h.Typeflag = TypeReg - h.Size = fi.Size() - case fi.IsDir(): - h.Typeflag = TypeDir - h.Mode |= c_ISDIR - h.Name += "/" - case fm&os.ModeSymlink != 0: - h.Typeflag = TypeSymlink - h.Mode |= c_ISLNK - h.Linkname = link - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - h.Mode |= c_ISCHR - h.Typeflag = TypeChar - } else { - h.Mode |= c_ISBLK - h.Typeflag = TypeBlock - } - case fm&os.ModeNamedPipe != 0: - h.Typeflag = TypeFifo - h.Mode |= c_ISFIFO - case fm&os.ModeSocket != 0: - h.Mode |= c_ISSOCK - default: - return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) - } - if fm&os.ModeSetuid != 0 { - h.Mode |= c_ISUID - } - if fm&os.ModeSetgid != 0 { - h.Mode |= c_ISGID - } - if fm&os.ModeSticky != 0 { - h.Mode |= c_ISVTX - } - if sysStat != nil { - return h, sysStat(fi, h) - } - return h, nil -} - -var zeroBlock = make([]byte, blockSize) - -// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values. 
-// We compute and return both. -func checksum(header []byte) (unsigned int64, signed int64) { - for i := 0; i < len(header); i++ { - if i == 148 { - // The chksum field (header[148:156]) is special: it should be treated as space bytes. - unsigned += ' ' * 8 - signed += ' ' * 8 - i += 7 - continue - } - unsigned += int64(header[i]) - signed += int64(int8(header[i])) - } - return -} - -type slicer []byte - -func (sp *slicer) next(n int) (b []byte) { - s := *sp - b, *sp = s[0:n], s[n:] - return -} - -func isASCII(s string) bool { - for _, c := range s { - if c >= 0x80 { - return false - } - } - return true -} - -func toASCII(s string) string { - if isASCII(s) { - return s - } - var buf bytes.Buffer - for _, c := range s { - if c < 0x80 { - buf.WriteByte(byte(c)) - } - } - return buf.String() -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go deleted file mode 100644 index 351eaa0e6c..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar_test - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "log" - "os" -) - -func Example() { - // Create a buffer to write our archive to. - buf := new(bytes.Buffer) - - // Create a new tar archive. - tw := tar.NewWriter(buf) - - // Add some files to the archive. - var files = []struct { - Name, Body string - }{ - {"readme.txt", "This archive contains some text files."}, - {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"}, - {"todo.txt", "Get animal handling licence."}, - } - for _, file := range files { - hdr := &tar.Header{ - Name: file.Name, - Size: int64(len(file.Body)), - } - if err := tw.WriteHeader(hdr); err != nil { - log.Fatalln(err) - } - if _, err := tw.Write([]byte(file.Body)); err != nil { - log.Fatalln(err) - } - } - // Make sure to check the error on Close. - if err := tw.Close(); err != nil { - log.Fatalln(err) - } - - // Open the tar archive for reading. - r := bytes.NewReader(buf.Bytes()) - tr := tar.NewReader(r) - - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - log.Fatalln(err) - } - fmt.Printf("Contents of %s:\n", hdr.Name) - if _, err := io.Copy(os.Stdout, tr); err != nil { - log.Fatalln(err) - } - fmt.Println() - } - - // Output: - // Contents of readme.txt: - // This archive contains some text files. - // Contents of gopher.txt: - // Gopher names: - // George - // Geoffrey - // Gonzo - // Contents of todo.txt: - // Get animal handling licence. -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go deleted file mode 100644 index a27559d0f0..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go +++ /dev/null @@ -1,820 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
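The checksum helper near the top of this chunk computes both sums in one pass because the two conventions disagree only on byte sign. A worked standalone example of that difference; the header contents are invented:

package main

import "fmt"

// checksum mirrors the deleted function above: sum all 512 header bytes,
// treating the 8-byte chksum field (bytes 148..155) as ASCII spaces.
func checksum(header []byte) (unsigned int64, signed int64) {
    for i := 0; i < len(header); i++ {
        if i == 148 {
            unsigned += ' ' * 8
            signed += ' ' * 8
            i += 7
            continue
        }
        unsigned += int64(header[i])
        signed += int64(int8(header[i]))
    }
    return
}

func main() {
    hdr := make([]byte, 512)
    hdr[0] = 0xFF // counts as 255 in the POSIX sum, -1 in the Sun tar sum
    u, s := checksum(hdr)
    fmt.Println(u, s) // 511 255 (256 for the space-filled chksum field, +/- the 0xFF byte)
}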
- -package tar - -// TODO(dsymonds): -// - pax extensions - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "os" - "strconv" - "strings" - "time" -) - -var ( - ErrHeader = errors.New("archive/tar: invalid tar header") -) - -const maxNanoSecondIntSize = 9 - -// A Reader provides sequential access to the contents of a tar archive. -// A tar archive consists of a sequence of files. -// The Next method advances to the next file in the archive (including the first), -// and then it can be treated as an io.Reader to access the file's data. -type Reader struct { - r io.Reader - err error - pad int64 // amount of padding (ignored) after current file entry - curr numBytesReader // reader for current file entry - hdrBuff [blockSize]byte // buffer to use in readHeader -} - -// A numBytesReader is an io.Reader with a numBytes method, returning the number -// of bytes remaining in the underlying encoded data. -type numBytesReader interface { - io.Reader - numBytes() int64 -} - -// A regFileReader is a numBytesReader for reading file data from a tar archive. -type regFileReader struct { - r io.Reader // underlying reader - nb int64 // number of unread bytes for current file entry -} - -// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive. -type sparseFileReader struct { - rfr *regFileReader // reads the sparse-encoded file data - sp []sparseEntry // the sparse map for the file - pos int64 // keeps track of file position - tot int64 // total size of the file -} - -// Keywords for GNU sparse files in a PAX extended header -const ( - paxGNUSparseNumBlocks = "GNU.sparse.numblocks" - paxGNUSparseOffset = "GNU.sparse.offset" - paxGNUSparseNumBytes = "GNU.sparse.numbytes" - paxGNUSparseMap = "GNU.sparse.map" - paxGNUSparseName = "GNU.sparse.name" - paxGNUSparseMajor = "GNU.sparse.major" - paxGNUSparseMinor = "GNU.sparse.minor" - paxGNUSparseSize = "GNU.sparse.size" - paxGNUSparseRealSize = "GNU.sparse.realsize" -) - -// Keywords for old GNU sparse headers -const ( - oldGNUSparseMainHeaderOffset = 386 - oldGNUSparseMainHeaderIsExtendedOffset = 482 - oldGNUSparseMainHeaderNumEntries = 4 - oldGNUSparseExtendedHeaderIsExtendedOffset = 504 - oldGNUSparseExtendedHeaderNumEntries = 21 - oldGNUSparseOffsetSize = 12 - oldGNUSparseNumBytesSize = 12 -) - -// NewReader creates a new Reader reading from r. -func NewReader(r io.Reader) *Reader { return &Reader{r: r} } - -// Next advances to the next entry in the tar archive. -func (tr *Reader) Next() (*Header, error) { - var hdr *Header - if tr.err == nil { - tr.skipUnread() - } - if tr.err != nil { - return hdr, tr.err - } - hdr = tr.readHeader() - if hdr == nil { - return hdr, tr.err - } - // Check for PAX/GNU header. - switch hdr.Typeflag { - case TypeXHeader: - // PAX extended header - headers, err := parsePAX(tr) - if err != nil { - return nil, err - } - // We actually read the whole file, - // but this skips alignment padding - tr.skipUnread() - hdr = tr.readHeader() - mergePAX(hdr, headers) - - // Check for a PAX format sparse file - sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers) - if err != nil { - tr.err = err - return nil, err - } - if sp != nil { - // Current file is a PAX format GNU sparse file. - // Set the current file reader to a sparse file reader. - tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size} - } - return hdr, nil - case TypeGNULongName: - // We have a GNU long name header. Its contents are the real file name. 
- realname, err := ioutil.ReadAll(tr) - if err != nil { - return nil, err - } - hdr, err := tr.Next() - hdr.Name = cString(realname) - return hdr, err - case TypeGNULongLink: - // We have a GNU long link header. - realname, err := ioutil.ReadAll(tr) - if err != nil { - return nil, err - } - hdr, err := tr.Next() - hdr.Linkname = cString(realname) - return hdr, err - } - return hdr, tr.err -} - -// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then -// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to -// be treated as a regular file. -func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { - var sparseFormat string - - // Check for sparse format indicators - major, majorOk := headers[paxGNUSparseMajor] - minor, minorOk := headers[paxGNUSparseMinor] - sparseName, sparseNameOk := headers[paxGNUSparseName] - _, sparseMapOk := headers[paxGNUSparseMap] - sparseSize, sparseSizeOk := headers[paxGNUSparseSize] - sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] - - // Identify which, if any, sparse format applies from which PAX headers are set - if majorOk && minorOk { - sparseFormat = major + "." + minor - } else if sparseNameOk && sparseMapOk { - sparseFormat = "0.1" - } else if sparseSizeOk { - sparseFormat = "0.0" - } else { - // Not a PAX format GNU sparse file. - return nil, nil - } - - // Check for unknown sparse format - if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { - return nil, nil - } - - // Update hdr from GNU sparse PAX headers - if sparseNameOk { - hdr.Name = sparseName - } - if sparseSizeOk { - realSize, err := strconv.ParseInt(sparseSize, 10, 0) - if err != nil { - return nil, ErrHeader - } - hdr.Size = realSize - } else if sparseRealSizeOk { - realSize, err := strconv.ParseInt(sparseRealSize, 10, 0) - if err != nil { - return nil, ErrHeader - } - hdr.Size = realSize - } - - // Set up the sparse map, according to the particular sparse format in use - var sp []sparseEntry - var err error - switch sparseFormat { - case "0.0", "0.1": - sp, err = readGNUSparseMap0x1(headers) - case "1.0": - sp, err = readGNUSparseMap1x0(tr.curr) - } - return sp, err -} - -// mergePAX merges well known headers according to PAX standard. -// In general headers with the same name as those found -// in the header struct overwrite those found in the header -// struct with higher precision or longer values. Esp. useful -// for name and linkname fields. 
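Aside (annotation, not part of the diff): the format detection in checkForGNUSparsePAXHeaders above reduces to a small precedence table over the PAX keys. A standalone sketch with hypothetical header maps, using the literal key strings behind the pax constants and omitting the error paths:

```go
package main

import "fmt"

// detect mirrors the precedence above: an explicit major/minor pair wins,
// then name+map implies 0.1, then a bare size implies 0.0.
func detect(h map[string]string) string {
	major, majorOK := h["GNU.sparse.major"]
	minor, minorOK := h["GNU.sparse.minor"]
	_, nameOK := h["GNU.sparse.name"]
	_, mapOK := h["GNU.sparse.map"]
	_, sizeOK := h["GNU.sparse.size"]
	switch {
	case majorOK && minorOK:
		return major + "." + minor
	case nameOK && mapOK:
		return "0.1"
	case sizeOK:
		return "0.0"
	}
	return "" // not a PAX format GNU sparse file
}

func main() {
	fmt.Println(detect(map[string]string{"GNU.sparse.major": "1", "GNU.sparse.minor": "0"})) // 1.0
	fmt.Println(detect(map[string]string{"GNU.sparse.name": "f", "GNU.sparse.map": "0,5"}))  // 0.1
	fmt.Println(detect(map[string]string{"GNU.sparse.size": "200"}))                         // 0.0
}
```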
-func mergePAX(hdr *Header, headers map[string]string) error { - for k, v := range headers { - switch k { - case paxPath: - hdr.Name = v - case paxLinkpath: - hdr.Linkname = v - case paxGname: - hdr.Gname = v - case paxUname: - hdr.Uname = v - case paxUid: - uid, err := strconv.ParseInt(v, 10, 0) - if err != nil { - return err - } - hdr.Uid = int(uid) - case paxGid: - gid, err := strconv.ParseInt(v, 10, 0) - if err != nil { - return err - } - hdr.Gid = int(gid) - case paxAtime: - t, err := parsePAXTime(v) - if err != nil { - return err - } - hdr.AccessTime = t - case paxMtime: - t, err := parsePAXTime(v) - if err != nil { - return err - } - hdr.ModTime = t - case paxCtime: - t, err := parsePAXTime(v) - if err != nil { - return err - } - hdr.ChangeTime = t - case paxSize: - size, err := strconv.ParseInt(v, 10, 0) - if err != nil { - return err - } - hdr.Size = int64(size) - default: - if strings.HasPrefix(k, paxXattr) { - if hdr.Xattrs == nil { - hdr.Xattrs = make(map[string]string) - } - hdr.Xattrs[k[len(paxXattr):]] = v - } - } - } - return nil -} - -// parsePAXTime takes a string of the form %d.%d as described in -// the PAX specification. -func parsePAXTime(t string) (time.Time, error) { - buf := []byte(t) - pos := bytes.IndexByte(buf, '.') - var seconds, nanoseconds int64 - var err error - if pos == -1 { - seconds, err = strconv.ParseInt(t, 10, 0) - if err != nil { - return time.Time{}, err - } - } else { - seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0) - if err != nil { - return time.Time{}, err - } - nano_buf := string(buf[pos+1:]) - // Pad as needed before converting to a decimal. - // For example .030 -> .030000000 -> 30000000 nanoseconds - if len(nano_buf) < maxNanoSecondIntSize { - // Right pad - nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf)) - } else if len(nano_buf) > maxNanoSecondIntSize { - // Right truncate - nano_buf = nano_buf[:maxNanoSecondIntSize] - } - nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0) - if err != nil { - return time.Time{}, err - } - } - ts := time.Unix(seconds, nanoseconds) - return ts, nil -} - -// parsePAX parses PAX headers. -// If an extended header (type 'x') is invalid, ErrHeader is returned -func parsePAX(r io.Reader) (map[string]string, error) { - buf, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - // For GNU PAX sparse format 0.0 support. - // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers. - var sparseMap bytes.Buffer - - headers := make(map[string]string) - // Each record is constructed as - // "%d %s=%s\n", length, keyword, value - for len(buf) > 0 { - // or the header was empty to start with. - var sp int - // The size field ends at the first space. - sp = bytes.IndexByte(buf, ' ') - if sp == -1 { - return nil, ErrHeader - } - // Parse the first token as a decimal integer. - n, err := strconv.ParseInt(string(buf[:sp]), 10, 0) - if err != nil { - return nil, ErrHeader - } - // Extract everything between the decimal and the n -1 on the - // beginning to eat the ' ', -1 on the end to skip the newline. - var record []byte - record, buf = buf[sp+1:n-1], buf[n:] - // The first equals is guaranteed to mark the end of the key. - // Everything else is value. - eq := bytes.IndexByte(record, '=') - if eq == -1 { - return nil, ErrHeader - } - key, value := record[:eq], record[eq+1:] - - keyStr := string(key) - if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes { - // GNU sparse format 0.0 special key. 
Write to sparseMap instead of using the headers map. - sparseMap.Write(value) - sparseMap.Write([]byte{','}) - } else { - // Normal key. Set the value in the headers map. - headers[keyStr] = string(value) - } - } - if sparseMap.Len() != 0 { - // Add sparse info to headers, chopping off the extra comma - sparseMap.Truncate(sparseMap.Len() - 1) - headers[paxGNUSparseMap] = sparseMap.String() - } - return headers, nil -} - -// cString parses bytes as a NUL-terminated C-style string. -// If a NUL byte is not found then the whole slice is returned as a string. -func cString(b []byte) string { - n := 0 - for n < len(b) && b[n] != 0 { - n++ - } - return string(b[0:n]) -} - -func (tr *Reader) octal(b []byte) int64 { - // Check for binary format first. - if len(b) > 0 && b[0]&0x80 != 0 { - var x int64 - for i, c := range b { - if i == 0 { - c &= 0x7f // ignore signal bit in first byte - } - x = x<<8 | int64(c) - } - return x - } - - // Because unused fields are filled with NULs, we need - // to skip leading NULs. Fields may also be padded with - // spaces or NULs. - // So we remove leading and trailing NULs and spaces to - // be sure. - b = bytes.Trim(b, " \x00") - - if len(b) == 0 { - return 0 - } - x, err := strconv.ParseUint(cString(b), 8, 64) - if err != nil { - tr.err = err - } - return int64(x) -} - -// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding. -func (tr *Reader) skipUnread() { - nr := tr.numBytes() + tr.pad // number of bytes to skip - tr.curr, tr.pad = nil, 0 - if sr, ok := tr.r.(io.Seeker); ok { - if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil { - return - } - } - _, tr.err = io.CopyN(ioutil.Discard, tr.r, nr) -} - -func (tr *Reader) verifyChecksum(header []byte) bool { - if tr.err != nil { - return false - } - - given := tr.octal(header[148:156]) - unsigned, signed := checksum(header) - return given == unsigned || given == signed -} - -func (tr *Reader) readHeader() *Header { - header := tr.hdrBuff[:] - copy(header, zeroBlock) - - if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { - return nil - } - - // Two blocks of zero bytes marks the end of the archive. - if bytes.Equal(header, zeroBlock[0:blockSize]) { - if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { - return nil - } - if bytes.Equal(header, zeroBlock[0:blockSize]) { - tr.err = io.EOF - } else { - tr.err = ErrHeader // zero block and then non-zero block - } - return nil - } - - if !tr.verifyChecksum(header) { - tr.err = ErrHeader - return nil - } - - // Unpack - hdr := new(Header) - s := slicer(header) - - hdr.Name = cString(s.next(100)) - hdr.Mode = tr.octal(s.next(8)) - hdr.Uid = int(tr.octal(s.next(8))) - hdr.Gid = int(tr.octal(s.next(8))) - hdr.Size = tr.octal(s.next(12)) - hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0) - s.next(8) // chksum - hdr.Typeflag = s.next(1)[0] - hdr.Linkname = cString(s.next(100)) - - // The remainder of the header depends on the value of magic. - // The original (v7) version of tar had no explicit magic field, - // so its magic bytes, like the rest of the block, are NULs. - magic := string(s.next(8)) // contains version field as well. 
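Aside (annotation, not part of the diff): two details above are easy to misread. A PAX record's leading decimal length counts every byte of the record, its own digits included, and parsePAXTime pads or truncates the fractional seconds to exactly nine digits before reading them as nanoseconds. A self-contained sketch of both, using the same sample record as the deleted tests:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

func main() {
	// "%d %s=%s\n": the length prefix covers the whole record, itself included.
	record := "30 mtime=1350244992.023960108\n"
	sp := strings.IndexByte(record, ' ')
	n, _ := strconv.Atoi(record[:sp])
	fmt.Println(n == len(record)) // true

	body := record[sp+1 : n-1] // eat the space, drop the newline
	eq := strings.IndexByte(body, '=')
	key, value := body[:eq], body[eq+1:]

	// Fractional seconds are normalized to 9 digits before parsing.
	dot := strings.IndexByte(value, '.')
	sec, _ := strconv.ParseInt(value[:dot], 10, 64)
	frac := value[dot+1:]
	if len(frac) < 9 {
		frac += strings.Repeat("0", 9-len(frac)) // right pad
	} else {
		frac = frac[:9] // extra digits are dropped, not rounded
	}
	nsec, _ := strconv.ParseInt(frac, 10, 64)
	fmt.Println(key, time.Unix(sec, nsec).Equal(time.Unix(1350244992, 23960108))) // mtime true
}
```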
- var format string - switch { - case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988) - if string(header[508:512]) == "tar\x00" { - format = "star" - } else { - format = "posix" - } - case magic == "ustar \x00": // old GNU tar - format = "gnu" - } - - switch format { - case "posix", "gnu", "star": - hdr.Uname = cString(s.next(32)) - hdr.Gname = cString(s.next(32)) - devmajor := s.next(8) - devminor := s.next(8) - if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { - hdr.Devmajor = tr.octal(devmajor) - hdr.Devminor = tr.octal(devminor) - } - var prefix string - switch format { - case "posix", "gnu": - prefix = cString(s.next(155)) - case "star": - prefix = cString(s.next(131)) - hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0) - hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0) - } - if len(prefix) > 0 { - hdr.Name = prefix + "/" + hdr.Name - } - } - - if tr.err != nil { - tr.err = ErrHeader - return nil - } - - // Maximum value of hdr.Size is 64 GB (12 octal digits), - // so there's no risk of int64 overflowing. - nb := int64(hdr.Size) - tr.pad = -nb & (blockSize - 1) // blockSize is a power of two - - // Set the current file reader. - tr.curr = &regFileReader{r: tr.r, nb: nb} - - // Check for old GNU sparse format entry. - if hdr.Typeflag == TypeGNUSparse { - // Get the real size of the file. - hdr.Size = tr.octal(header[483:495]) - - // Read the sparse map. - sp := tr.readOldGNUSparseMap(header) - if tr.err != nil { - return nil - } - // Current file is a GNU sparse file. Update the current file reader. - tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size} - } - - return hdr -} - -// A sparseEntry holds a single entry in a sparse file's sparse map. -// A sparse entry indicates the offset and size in a sparse file of a -// block of data. -type sparseEntry struct { - offset int64 - numBytes int64 -} - -// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format. -// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries, -// then one or more extension headers are used to store the rest of the sparse map. -func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry { - isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0 - spCap := oldGNUSparseMainHeaderNumEntries - if isExtended { - spCap += oldGNUSparseExtendedHeaderNumEntries - } - sp := make([]sparseEntry, 0, spCap) - s := slicer(header[oldGNUSparseMainHeaderOffset:]) - - // Read the four entries from the main tar header - for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ { - offset := tr.octal(s.next(oldGNUSparseOffsetSize)) - numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize)) - if tr.err != nil { - tr.err = ErrHeader - return nil - } - if offset == 0 && numBytes == 0 { - break - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) - } - - for isExtended { - // There are more entries. Read an extension header and parse its entries.
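Aside (annotation, not part of the diff): one line above deserves a note. `tr.pad = -nb & (blockSize - 1)` computes the distance from nb up to the next multiple of blockSize, which only works because blockSize is a power of two. A quick standalone check:

```go
package main

import "fmt"

func main() {
	const blockSize = 512
	for _, nb := range []int64{0, 1, 511, 512, 513} {
		pad := -nb & (blockSize - 1)
		fmt.Println(nb, pad, (nb+pad)%blockSize) // the sum always lands on a block boundary
	}
	// Prints pads 0, 511, 1, 0, 511; the last column is always 0.
}
```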
- sparseHeader := make([]byte, blockSize) - if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil { - return nil - } - isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0 - s = slicer(sparseHeader) - for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ { - offset := tr.octal(s.next(oldGNUSparseOffsetSize)) - numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize)) - if tr.err != nil { - tr.err = ErrHeader - return nil - } - if offset == 0 && numBytes == 0 { - break - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) - } - } - return sp -} - -// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0. -// The sparse map is stored just before the file data and padded out to the nearest block boundary. -func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { - buf := make([]byte, 2*blockSize) - sparseHeader := buf[:blockSize] - - // readDecimal is a helper function to read a decimal integer from the sparse map - // while making sure to read from the file in blocks of size blockSize - readDecimal := func() (int64, error) { - // Look for newline - nl := bytes.IndexByte(sparseHeader, '\n') - if nl == -1 { - if len(sparseHeader) >= blockSize { - // This is an error - return 0, ErrHeader - } - oldLen := len(sparseHeader) - newLen := oldLen + blockSize - if cap(sparseHeader) < newLen { - // There's more header, but we need to make room for the next block - copy(buf, sparseHeader) - sparseHeader = buf[:newLen] - } else { - // There's more header, and we can just reslice - sparseHeader = sparseHeader[:newLen] - } - - // Now that sparseHeader is large enough, read next block - if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil { - return 0, err - } - - // Look for a newline in the new data - nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n') - if nl == -1 { - // This is an error - return 0, ErrHeader - } - nl += oldLen // We want the position from the beginning - } - // Now that we've found a newline, read a number - n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0) - if err != nil { - return 0, ErrHeader - } - - // Update sparseHeader to consume this number - sparseHeader = sparseHeader[nl+1:] - return n, nil - } - - // Read the first block - if _, err := io.ReadFull(r, sparseHeader); err != nil { - return nil, err - } - - // The first line contains the number of entries - numEntries, err := readDecimal() - if err != nil { - return nil, err - } - - // Read all the entries - sp := make([]sparseEntry, 0, numEntries) - for i := int64(0); i < numEntries; i++ { - // Read the offset - offset, err := readDecimal() - if err != nil { - return nil, err - } - // Read numBytes - numBytes, err := readDecimal() - if err != nil { - return nil, err - } - - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) - } - - return sp, nil -} - -// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1. -// The sparse map is stored in the PAX headers. 
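Aside (annotation, not part of the diff): the on-disk shape readGNUSparseMap1x0 consumes is worth seeing concretely: newline-separated decimals (entry count first, then offset/size pairs), zero-padded out to whole 512-byte blocks ahead of the file data. A sketch constructing a hypothetical two-entry map:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	const blockSize = 512
	var m bytes.Buffer
	fmt.Fprintf(&m, "%d\n", 2)         // number of entries
	fmt.Fprintf(&m, "%d\n%d\n", 0, 5)  // entry 1: offset 0, 5 bytes of data
	fmt.Fprintf(&m, "%d\n%d\n", 10, 5) // entry 2: offset 10, 5 bytes of data

	block := make([]byte, blockSize) // the map occupies whole blocks
	copy(block, m.Bytes())
	fmt.Printf("%q\n", block[:m.Len()]) // "2\n0\n5\n10\n5\n"
}
```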
-func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) { - // Get number of entries - numEntriesStr, ok := headers[paxGNUSparseNumBlocks] - if !ok { - return nil, ErrHeader - } - numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) - if err != nil { - return nil, ErrHeader - } - - sparseMap := strings.Split(headers[paxGNUSparseMap], ",") - - // There should be two numbers in sparseMap for each entry - if int64(len(sparseMap)) != 2*numEntries { - return nil, ErrHeader - } - - // Loop through the entries in the sparse map - sp := make([]sparseEntry, 0, numEntries) - for i := int64(0); i < numEntries; i++ { - offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0) - if err != nil { - return nil, ErrHeader - } - numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0) - if err != nil { - return nil, ErrHeader - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) - } - - return sp, nil -} - -// numBytes returns the number of bytes left to read in the current file's entry -// in the tar archive, or 0 if there is no current file. -func (tr *Reader) numBytes() int64 { - if tr.curr == nil { - // No current file, so no bytes - return 0 - } - return tr.curr.numBytes() -} - -// Read reads from the current entry in the tar archive. -// It returns 0, io.EOF when it reaches the end of that entry, -// until Next is called to advance to the next entry. -func (tr *Reader) Read(b []byte) (n int, err error) { - if tr.curr == nil { - return 0, io.EOF - } - n, err = tr.curr.Read(b) - if err != nil && err != io.EOF { - tr.err = err - } - return -} - -func (rfr *regFileReader) Read(b []byte) (n int, err error) { - if rfr.nb == 0 { - // file consumed - return 0, io.EOF - } - if int64(len(b)) > rfr.nb { - b = b[0:rfr.nb] - } - n, err = rfr.r.Read(b) - rfr.nb -= int64(n) - - if err == io.EOF && rfr.nb > 0 { - err = io.ErrUnexpectedEOF - } - return -} - -// numBytes returns the number of bytes left to read in the file's data in the tar archive. -func (rfr *regFileReader) numBytes() int64 { - return rfr.nb -} - -// readHole reads a sparse file hole ending at offset toOffset -func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int { - n64 := toOffset - sfr.pos - if n64 > int64(len(b)) { - n64 = int64(len(b)) - } - n := int(n64) - for i := 0; i < n; i++ { - b[i] = 0 - } - sfr.pos += n64 - return n -} - -// Read reads the sparse file data in expanded form. -func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { - if len(sfr.sp) == 0 { - // No more data fragments to read from. - if sfr.pos < sfr.tot { - // We're in the last hole - n = sfr.readHole(b, sfr.tot) - return - } - // Otherwise, we're at the end of the file - return 0, io.EOF - } - if sfr.pos < sfr.sp[0].offset { - // We're in a hole - n = sfr.readHole(b, sfr.sp[0].offset) - return - } - - // We're not in a hole, so we'll read from the next data fragment - posInFragment := sfr.pos - sfr.sp[0].offset - bytesLeft := sfr.sp[0].numBytes - posInFragment - if int64(len(b)) > bytesLeft { - b = b[0:bytesLeft] - } - - n, err = sfr.rfr.Read(b) - sfr.pos += int64(n) - - if int64(n) == bytesLeft { - // We're done with this fragment - sfr.sp = sfr.sp[1:] - } - - if err == io.EOF && sfr.pos < sfr.tot { - // We reached the end of the last fragment's data, but there's a final hole - err = nil - } - return -} - -// numBytes returns the number of bytes left to read in the sparse file's -// sparse-encoded data in the tar archive. 
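Aside (annotation, not part of the diff): the hole arithmetic in sparseFileReader.Read above is easier to follow in batch form. Bytes before each fragment's offset read as zeros, and anything between the last fragment and the real size is one final hole. A standalone sketch reproducing one case from the sparseFileReadTests table deleted later in this diff:

```go
package main

import "fmt"

type entry struct{ offset, numBytes int64 }

// expand writes each data fragment at its offset; untouched bytes stay
// zero, which is exactly how the holes read back.
func expand(data []byte, sp []entry, realSize int64) []byte {
	out := make([]byte, realSize)
	pos := int64(0)
	for _, e := range sp {
		copy(out[e.offset:e.offset+e.numBytes], data[pos:pos+e.numBytes])
		pos += e.numBytes
	}
	return out
}

func main() {
	out := expand([]byte("abcde"), []entry{{0, 2}, {5, 3}}, 10)
	fmt.Printf("%q\n", out) // "ab\x00\x00\x00cde\x00\x00"
}
```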
-func (sfr *sparseFileReader) numBytes() int64 { - return sfr.rfr.nb -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go deleted file mode 100644 index 9601ffe459..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go +++ /dev/null @@ -1,743 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -import ( - "bytes" - "crypto/md5" - "fmt" - "io" - "io/ioutil" - "os" - "reflect" - "strings" - "testing" - "time" -) - -type untarTest struct { - file string - headers []*Header - cksums []string -} - -var gnuTarTest = &untarTest{ - file: "testdata/gnu.tar", - headers: []*Header{ - { - Name: "small.txt", - Mode: 0640, - Uid: 73025, - Gid: 5000, - Size: 5, - ModTime: time.Unix(1244428340, 0), - Typeflag: '0', - Uname: "dsymonds", - Gname: "eng", - }, - { - Name: "small2.txt", - Mode: 0640, - Uid: 73025, - Gid: 5000, - Size: 11, - ModTime: time.Unix(1244436044, 0), - Typeflag: '0', - Uname: "dsymonds", - Gname: "eng", - }, - }, - cksums: []string{ - "e38b27eaccb4391bdec553a7f3ae6b2f", - "c65bd2e50a56a2138bf1716f2fd56fe9", - }, -} - -var sparseTarTest = &untarTest{ - file: "testdata/sparse-formats.tar", - headers: []*Header{ - { - Name: "sparse-gnu", - Mode: 420, - Uid: 1000, - Gid: 1000, - Size: 200, - ModTime: time.Unix(1392395740, 0), - Typeflag: 0x53, - Linkname: "", - Uname: "david", - Gname: "david", - Devmajor: 0, - Devminor: 0, - }, - { - Name: "sparse-posix-0.0", - Mode: 420, - Uid: 1000, - Gid: 1000, - Size: 200, - ModTime: time.Unix(1392342187, 0), - Typeflag: 0x30, - Linkname: "", - Uname: "david", - Gname: "david", - Devmajor: 0, - Devminor: 0, - }, - { - Name: "sparse-posix-0.1", - Mode: 420, - Uid: 1000, - Gid: 1000, - Size: 200, - ModTime: time.Unix(1392340456, 0), - Typeflag: 0x30, - Linkname: "", - Uname: "david", - Gname: "david", - Devmajor: 0, - Devminor: 0, - }, - { - Name: "sparse-posix-1.0", - Mode: 420, - Uid: 1000, - Gid: 1000, - Size: 200, - ModTime: time.Unix(1392337404, 0), - Typeflag: 0x30, - Linkname: "", - Uname: "david", - Gname: "david", - Devmajor: 0, - Devminor: 0, - }, - { - Name: "end", - Mode: 420, - Uid: 1000, - Gid: 1000, - Size: 4, - ModTime: time.Unix(1392398319, 0), - Typeflag: 0x30, - Linkname: "", - Uname: "david", - Gname: "david", - Devmajor: 0, - Devminor: 0, - }, - }, - cksums: []string{ - "6f53234398c2449fe67c1812d993012f", - "6f53234398c2449fe67c1812d993012f", - "6f53234398c2449fe67c1812d993012f", - "6f53234398c2449fe67c1812d993012f", - "b0061974914468de549a2af8ced10316", - }, -} - -var untarTests = []*untarTest{ - gnuTarTest, - sparseTarTest, - { - file: "testdata/star.tar", - headers: []*Header{ - { - Name: "small.txt", - Mode: 0640, - Uid: 73025, - Gid: 5000, - Size: 5, - ModTime: time.Unix(1244592783, 0), - Typeflag: '0', - Uname: "dsymonds", - Gname: "eng", - AccessTime: time.Unix(1244592783, 0), - ChangeTime: time.Unix(1244592783, 0), - }, - { - Name: "small2.txt", - Mode: 0640, - Uid: 73025, - Gid: 5000, - Size: 11, - ModTime: time.Unix(1244592783, 0), - Typeflag: '0', - Uname: "dsymonds", - Gname: "eng", - AccessTime: time.Unix(1244592783, 0), - ChangeTime: time.Unix(1244592783, 0), - }, - }, - }, - { - file: "testdata/v7.tar", - headers: 
[]*Header{ - { - Name: "small.txt", - Mode: 0444, - Uid: 73025, - Gid: 5000, - Size: 5, - ModTime: time.Unix(1244593104, 0), - Typeflag: '\x00', - }, - { - Name: "small2.txt", - Mode: 0444, - Uid: 73025, - Gid: 5000, - Size: 11, - ModTime: time.Unix(1244593104, 0), - Typeflag: '\x00', - }, - }, - }, - { - file: "testdata/pax.tar", - headers: []*Header{ - { - Name: "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", - Mode: 0664, - Uid: 1000, - Gid: 1000, - Uname: "shane", - Gname: "shane", - Size: 7, - ModTime: time.Unix(1350244992, 23960108), - ChangeTime: time.Unix(1350244992, 23960108), - AccessTime: time.Unix(1350244992, 23960108), - Typeflag: TypeReg, - }, - { - Name: "a/b", - Mode: 0777, - Uid: 1000, - Gid: 1000, - Uname: "shane", - Gname: "shane", - Size: 0, - ModTime: time.Unix(1350266320, 910238425), - ChangeTime: time.Unix(1350266320, 910238425), - AccessTime: time.Unix(1350266320, 910238425), - Typeflag: TypeSymlink, - Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", - }, - }, - }, - { - file: "testdata/nil-uid.tar", // golang.org/issue/5290 - headers: []*Header{ - { - Name: "P1050238.JPG.log", - Mode: 0664, - Uid: 0, - Gid: 0, - Size: 14, - ModTime: time.Unix(1365454838, 0), - Typeflag: TypeReg, - Linkname: "", - Uname: "eyefi", - Gname: "eyefi", - Devmajor: 0, - Devminor: 0, - }, - }, - }, - { - file: "testdata/xattrs.tar", - headers: []*Header{ - { - Name: "small.txt", - Mode: 0644, - Uid: 1000, - Gid: 10, - Size: 5, - ModTime: time.Unix(1386065770, 448252320), - Typeflag: '0', - Uname: "alex", - Gname: "wheel", - AccessTime: time.Unix(1389782991, 419875220), - ChangeTime: time.Unix(1389782956, 794414986), - Xattrs: map[string]string{ - "user.key": "value", - "user.key2": "value2", - // Interestingly, selinux encodes the terminating null inside the xattr - "security.selinux": "unconfined_u:object_r:default_t:s0\x00", - }, - }, - { - Name: "small2.txt", - Mode: 0644, - Uid: 1000, - Gid: 10, - Size: 11, - ModTime: time.Unix(1386065770, 449252304), - Typeflag: '0', - Uname: "alex", - Gname: "wheel", - AccessTime: time.Unix(1389782991, 419875220), - ChangeTime: time.Unix(1386065770, 449252304), - Xattrs: map[string]string{ - "security.selinux": "unconfined_u:object_r:default_t:s0\x00", - }, - }, - }, - }, -} - -func TestReader(t *testing.T) { -testLoop: - for i, test := range untarTests { - f, err := os.Open(test.file) - if err != nil { - t.Errorf("test %d: Unexpected error: %v", i, err) - continue - } - defer f.Close() - tr := NewReader(f) - for j, header := range test.headers { - hdr, err := tr.Next() - if err != nil || hdr == nil { - t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err) - f.Close() - continue testLoop - } - if !reflect.DeepEqual(*hdr, *header) { - t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v", - i, j, *hdr, *header) - } - } - hdr, err := tr.Next() - if err == io.EOF { - continue testLoop - } - if hdr != nil || err != nil { - t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err) - } - } -} - -func TestPartialRead(t *testing.T) { - f, err := os.Open("testdata/gnu.tar") - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - defer f.Close() - - tr := NewReader(f) - - // Read the first four bytes; Next() 
should skip the last byte. - hdr, err := tr.Next() - if err != nil || hdr == nil { - t.Fatalf("Didn't get first file: %v", err) - } - buf := make([]byte, 4) - if _, err := io.ReadFull(tr, buf); err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if expected := []byte("Kilt"); !bytes.Equal(buf, expected) { - t.Errorf("Contents = %v, want %v", buf, expected) - } - - // Second file - hdr, err = tr.Next() - if err != nil || hdr == nil { - t.Fatalf("Didn't get second file: %v", err) - } - buf = make([]byte, 6) - if _, err := io.ReadFull(tr, buf); err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if expected := []byte("Google"); !bytes.Equal(buf, expected) { - t.Errorf("Contents = %v, want %v", buf, expected) - } -} - -func TestIncrementalRead(t *testing.T) { - test := gnuTarTest - f, err := os.Open(test.file) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - defer f.Close() - - tr := NewReader(f) - - headers := test.headers - cksums := test.cksums - nread := 0 - - // loop over all files - for ; ; nread++ { - hdr, err := tr.Next() - if hdr == nil || err == io.EOF { - break - } - - // check the header - if !reflect.DeepEqual(*hdr, *headers[nread]) { - t.Errorf("Incorrect header:\nhave %+v\nwant %+v", - *hdr, headers[nread]) - } - - // read file contents in little chunks EOF, - // checksumming all the way - h := md5.New() - rdbuf := make([]uint8, 8) - for { - nr, err := tr.Read(rdbuf) - if err == io.EOF { - break - } - if err != nil { - t.Errorf("Read: unexpected error %v\n", err) - break - } - h.Write(rdbuf[0:nr]) - } - // verify checksum - have := fmt.Sprintf("%x", h.Sum(nil)) - want := cksums[nread] - if want != have { - t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want) - } - } - if nread != len(headers) { - t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread) - } -} - -func TestNonSeekable(t *testing.T) { - test := gnuTarTest - f, err := os.Open(test.file) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - defer f.Close() - - type readerOnly struct { - io.Reader - } - tr := NewReader(readerOnly{f}) - nread := 0 - - for ; ; nread++ { - _, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - } - - if nread != len(test.headers) { - t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread) - } -} - -func TestParsePAXHeader(t *testing.T) { - paxTests := [][3]string{ - {"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths - {"a", "a=name", "9 a=name\n"}, // Test case involving multiple acceptable length - {"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}} - for _, test := range paxTests { - key, expected, raw := test[0], test[1], test[2] - reader := bytes.NewReader([]byte(raw)) - headers, err := parsePAX(reader) - if err != nil { - t.Errorf("Couldn't parse correctly formatted headers: %v", err) - continue - } - if strings.EqualFold(headers[key], expected) { - t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected) - continue - } - trailer := make([]byte, 100) - n, err := reader.Read(trailer) - if err != io.EOF || n != 0 { - t.Error("Buffer wasn't consumed") - } - } - badHeader := bytes.NewReader([]byte("3 somelongkey=")) - if _, err := parsePAX(badHeader); err != ErrHeader { - t.Fatal("Unexpected success when parsing bad header") - } -} - -func TestParsePAXTime(t *testing.T) { - // Some valid PAX time values - 
timestamps := map[string]time.Time{ - "1350244992.023960108": time.Unix(1350244992, 23960108), // The common case - "1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value - "1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value - "1350244992": time.Unix(1350244992, 0), // Low precision value - } - for input, expected := range timestamps { - ts, err := parsePAXTime(input) - if err != nil { - t.Fatal(err) - } - if !ts.Equal(expected) { - t.Fatalf("Time parsing failure %s %s", ts, expected) - } - } -} - -func TestMergePAX(t *testing.T) { - hdr := new(Header) - // Test a string, integer, and time based value. - headers := map[string]string{ - "path": "a/b/c", - "uid": "1000", - "mtime": "1350244992.023960108", - } - err := mergePAX(hdr, headers) - if err != nil { - t.Fatal(err) - } - want := &Header{ - Name: "a/b/c", - Uid: 1000, - ModTime: time.Unix(1350244992, 23960108), - } - if !reflect.DeepEqual(hdr, want) { - t.Errorf("incorrect merge: got %+v, want %+v", hdr, want) - } -} - -func TestSparseEndToEnd(t *testing.T) { - test := sparseTarTest - f, err := os.Open(test.file) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - defer f.Close() - - tr := NewReader(f) - - headers := test.headers - cksums := test.cksums - nread := 0 - - // loop over all files - for ; ; nread++ { - hdr, err := tr.Next() - if hdr == nil || err == io.EOF { - break - } - - // check the header - if !reflect.DeepEqual(*hdr, *headers[nread]) { - t.Errorf("Incorrect header:\nhave %+v\nwant %+v", - *hdr, headers[nread]) - } - - // read and checksum the file data - h := md5.New() - _, err = io.Copy(h, tr) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - // verify checksum - have := fmt.Sprintf("%x", h.Sum(nil)) - want := cksums[nread] - if want != have { - t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want) - } - } - if nread != len(headers) { - t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread) - } -} - -type sparseFileReadTest struct { - sparseData []byte - sparseMap []sparseEntry - realSize int64 - expected []byte -} - -var sparseFileReadTests = []sparseFileReadTest{ - { - sparseData: []byte("abcde"), - sparseMap: []sparseEntry{ - {offset: 0, numBytes: 2}, - {offset: 5, numBytes: 3}, - }, - realSize: 8, - expected: []byte("ab\x00\x00\x00cde"), - }, - { - sparseData: []byte("abcde"), - sparseMap: []sparseEntry{ - {offset: 0, numBytes: 2}, - {offset: 5, numBytes: 3}, - }, - realSize: 10, - expected: []byte("ab\x00\x00\x00cde\x00\x00"), - }, - { - sparseData: []byte("abcde"), - sparseMap: []sparseEntry{ - {offset: 1, numBytes: 3}, - {offset: 6, numBytes: 2}, - }, - realSize: 8, - expected: []byte("\x00abc\x00\x00de"), - }, - { - sparseData: []byte("abcde"), - sparseMap: []sparseEntry{ - {offset: 1, numBytes: 3}, - {offset: 6, numBytes: 2}, - }, - realSize: 10, - expected: []byte("\x00abc\x00\x00de\x00\x00"), - }, - { - sparseData: []byte(""), - sparseMap: nil, - realSize: 2, - expected: []byte("\x00\x00"), - }, -} - -func TestSparseFileReader(t *testing.T) { - for i, test := range sparseFileReadTests { - r := bytes.NewReader(test.sparseData) - nb := int64(r.Len()) - sfr := &sparseFileReader{ - rfr: &regFileReader{r: r, nb: nb}, - sp: test.sparseMap, - pos: 0, - tot: test.realSize, - } - if sfr.numBytes() != nb { - t.Errorf("test %d: Before reading, sfr.numBytes() = %d, want %d", i, sfr.numBytes(), nb) - } - buf, err := ioutil.ReadAll(sfr) - if err != nil { - t.Errorf("test %d: 
Unexpected error: %v", i, err) - } - if e := test.expected; !bytes.Equal(buf, e) { - t.Errorf("test %d: Contents = %v, want %v", i, buf, e) - } - if sfr.numBytes() != 0 { - t.Errorf("test %d: After draining the reader, numBytes() was nonzero", i) - } - } -} - -func TestSparseIncrementalRead(t *testing.T) { - sparseMap := []sparseEntry{{10, 2}} - sparseData := []byte("Go") - expected := "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Go\x00\x00\x00\x00\x00\x00\x00\x00" - - r := bytes.NewReader(sparseData) - nb := int64(r.Len()) - sfr := &sparseFileReader{ - rfr: &regFileReader{r: r, nb: nb}, - sp: sparseMap, - pos: 0, - tot: int64(len(expected)), - } - - // We'll read the data 6 bytes at a time, with a hole of size 10 at - // the beginning and one of size 8 at the end. - var outputBuf bytes.Buffer - buf := make([]byte, 6) - for { - n, err := sfr.Read(buf) - if err == io.EOF { - break - } - if err != nil { - t.Errorf("Read: unexpected error %v\n", err) - } - if n > 0 { - _, err := outputBuf.Write(buf[:n]) - if err != nil { - t.Errorf("Write: unexpected error %v\n", err) - } - } - } - got := outputBuf.String() - if got != expected { - t.Errorf("Contents = %v, want %v", got, expected) - } -} - -func TestReadGNUSparseMap0x1(t *testing.T) { - headers := map[string]string{ - paxGNUSparseNumBlocks: "4", - paxGNUSparseMap: "0,5,10,5,20,5,30,5", - } - expected := []sparseEntry{ - {offset: 0, numBytes: 5}, - {offset: 10, numBytes: 5}, - {offset: 20, numBytes: 5}, - {offset: 30, numBytes: 5}, - } - - sp, err := readGNUSparseMap0x1(headers) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if !reflect.DeepEqual(sp, expected) { - t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected) - } -} - -func TestReadGNUSparseMap1x0(t *testing.T) { - // This test uses lots of holes so the sparse header takes up more than two blocks - numEntries := 100 - expected := make([]sparseEntry, 0, numEntries) - sparseMap := new(bytes.Buffer) - - fmt.Fprintf(sparseMap, "%d\n", numEntries) - for i := 0; i < numEntries; i++ { - offset := int64(2048 * i) - numBytes := int64(1024) - expected = append(expected, sparseEntry{offset: offset, numBytes: numBytes}) - fmt.Fprintf(sparseMap, "%d\n%d\n", offset, numBytes) - } - - // Make the header the smallest multiple of blockSize that fits the sparseMap - headerBlocks := (sparseMap.Len() + blockSize - 1) / blockSize - bufLen := blockSize * headerBlocks - buf := make([]byte, bufLen) - copy(buf, sparseMap.Bytes()) - - // Get a reader to read the sparse map - r := bytes.NewReader(buf) - - // Read the sparse map - sp, err := readGNUSparseMap1x0(r) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if !reflect.DeepEqual(sp, expected) { - t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected) - } -} - -func TestUninitializedRead(t *testing.T) { - test := gnuTarTest - f, err := os.Open(test.file) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - defer f.Close() - - tr := NewReader(f) - _, err = tr.Read([]byte{}) - if err == nil || err != io.EOF { - t.Errorf("Unexpected error: %v, wanted %v", err, io.EOF) - } - -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go deleted file mode 100644 index cf9cc79c59..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go +++ /dev/null @@ -1,20
+0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux dragonfly openbsd solaris - -package tar - -import ( - "syscall" - "time" -) - -func statAtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Atim.Unix()) -} - -func statCtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Ctim.Unix()) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go deleted file mode 100644 index 6f17dbe307..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin freebsd netbsd - -package tar - -import ( - "syscall" - "time" -) - -func statAtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Atimespec.Unix()) -} - -func statCtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Ctimespec.Unix()) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go deleted file mode 100644 index cb843db4cf..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin dragonfly freebsd openbsd netbsd solaris - -package tar - -import ( - "os" - "syscall" -) - -func init() { - sysStat = statUnix -} - -func statUnix(fi os.FileInfo, h *Header) error { - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return nil - } - h.Uid = int(sys.Uid) - h.Gid = int(sys.Gid) - // TODO(bradfitz): populate username & group. os/user - // doesn't cache LookupId lookups, and lacks group - // lookup functions. - h.AccessTime = statAtime(sys) - h.ChangeTime = statCtime(sys) - // TODO(bradfitz): major/minor device numbers? - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go deleted file mode 100644 index ed333f3ea4..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
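Aside (annotation, not part of the diff): the three stat shims deleted above differ only in the field name each platform's syscall.Stat_t uses (Atim/Ctim versus Atimespec/Ctimespec); in both cases the field's Unix() method returns a (sec, nsec) pair that time.Unix accepts directly. A platform-neutral sketch of that conversion with hypothetical values:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// What syscall.Timespec.Unix() would hand back for an access time.
	var sec, nsec int64 = 1244428340, 500
	fmt.Println(time.Unix(sec, nsec).UTC()) // 2009-06-08 02:32:20.0000005 +0000 UTC
}
```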
- -package tar - -import ( - "bytes" - "io/ioutil" - "os" - "path" - "reflect" - "strings" - "testing" - "time" -) - -func TestFileInfoHeader(t *testing.T) { - fi, err := os.Stat("testdata/small.txt") - if err != nil { - t.Fatal(err) - } - h, err := FileInfoHeader(fi, "") - if err != nil { - t.Fatalf("FileInfoHeader: %v", err) - } - if g, e := h.Name, "small.txt"; g != e { - t.Errorf("Name = %q; want %q", g, e) - } - if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e { - t.Errorf("Mode = %#o; want %#o", g, e) - } - if g, e := h.Size, int64(5); g != e { - t.Errorf("Size = %v; want %v", g, e) - } - if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) { - t.Errorf("ModTime = %v; want %v", g, e) - } - // FileInfoHeader should error when passing nil FileInfo - if _, err := FileInfoHeader(nil, ""); err == nil { - t.Fatalf("Expected error when passing nil to FileInfoHeader") - } -} - -func TestFileInfoHeaderDir(t *testing.T) { - fi, err := os.Stat("testdata") - if err != nil { - t.Fatal(err) - } - h, err := FileInfoHeader(fi, "") - if err != nil { - t.Fatalf("FileInfoHeader: %v", err) - } - if g, e := h.Name, "testdata/"; g != e { - t.Errorf("Name = %q; want %q", g, e) - } - // Ignoring c_ISGID for golang.org/issue/4867 - if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e { - t.Errorf("Mode = %#o; want %#o", g, e) - } - if g, e := h.Size, int64(0); g != e { - t.Errorf("Size = %v; want %v", g, e) - } - if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) { - t.Errorf("ModTime = %v; want %v", g, e) - } -} - -func TestFileInfoHeaderSymlink(t *testing.T) { - h, err := FileInfoHeader(symlink{}, "some-target") - if err != nil { - t.Fatal(err) - } - if g, e := h.Name, "some-symlink"; g != e { - t.Errorf("Name = %q; want %q", g, e) - } - if g, e := h.Linkname, "some-target"; g != e { - t.Errorf("Linkname = %q; want %q", g, e) - } -} - -type symlink struct{} - -func (symlink) Name() string { return "some-symlink" } -func (symlink) Size() int64 { return 0 } -func (symlink) Mode() os.FileMode { return os.ModeSymlink } -func (symlink) ModTime() time.Time { return time.Time{} } -func (symlink) IsDir() bool { return false } -func (symlink) Sys() interface{} { return nil } - -func TestRoundTrip(t *testing.T) { - data := []byte("some file contents") - - var b bytes.Buffer - tw := NewWriter(&b) - hdr := &Header{ - Name: "file.txt", - Uid: 1 << 21, // too big for 8 octal digits - Size: int64(len(data)), - ModTime: time.Now(), - } - // tar only supports second precision. - hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond) - if err := tw.WriteHeader(hdr); err != nil { - t.Fatalf("tw.WriteHeader: %v", err) - } - if _, err := tw.Write(data); err != nil { - t.Fatalf("tw.Write: %v", err) - } - if err := tw.Close(); err != nil { - t.Fatalf("tw.Close: %v", err) - } - - // Read it back. - tr := NewReader(&b) - rHdr, err := tr.Next() - if err != nil { - t.Fatalf("tr.Next: %v", err) - } - if !reflect.DeepEqual(rHdr, hdr) { - t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr) - } - rData, err := ioutil.ReadAll(tr) - if err != nil { - t.Fatalf("Read: %v", err) - } - if !bytes.Equal(rData, data) { - t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data) - } -} - -type headerRoundTripTest struct { - h *Header - fm os.FileMode -} - -func TestHeaderRoundTrip(t *testing.T) { - golden := []headerRoundTripTest{ - // regular file. 
- { - h: &Header{ - Name: "test.txt", - Mode: 0644 | c_ISREG, - Size: 12, - ModTime: time.Unix(1360600916, 0), - Typeflag: TypeReg, - }, - fm: 0644, - }, - // hard link. - { - h: &Header{ - Name: "hard.txt", - Mode: 0644 | c_ISLNK, - Size: 0, - ModTime: time.Unix(1360600916, 0), - Typeflag: TypeLink, - }, - fm: 0644 | os.ModeSymlink, - }, - // symbolic link. - { - h: &Header{ - Name: "link.txt", - Mode: 0777 | c_ISLNK, - Size: 0, - ModTime: time.Unix(1360600852, 0), - Typeflag: TypeSymlink, - }, - fm: 0777 | os.ModeSymlink, - }, - // character device node. - { - h: &Header{ - Name: "dev/null", - Mode: 0666 | c_ISCHR, - Size: 0, - ModTime: time.Unix(1360578951, 0), - Typeflag: TypeChar, - }, - fm: 0666 | os.ModeDevice | os.ModeCharDevice, - }, - // block device node. - { - h: &Header{ - Name: "dev/sda", - Mode: 0660 | c_ISBLK, - Size: 0, - ModTime: time.Unix(1360578954, 0), - Typeflag: TypeBlock, - }, - fm: 0660 | os.ModeDevice, - }, - // directory. - { - h: &Header{ - Name: "dir/", - Mode: 0755 | c_ISDIR, - Size: 0, - ModTime: time.Unix(1360601116, 0), - Typeflag: TypeDir, - }, - fm: 0755 | os.ModeDir, - }, - // fifo node. - { - h: &Header{ - Name: "dev/initctl", - Mode: 0600 | c_ISFIFO, - Size: 0, - ModTime: time.Unix(1360578949, 0), - Typeflag: TypeFifo, - }, - fm: 0600 | os.ModeNamedPipe, - }, - // setuid. - { - h: &Header{ - Name: "bin/su", - Mode: 0755 | c_ISREG | c_ISUID, - Size: 23232, - ModTime: time.Unix(1355405093, 0), - Typeflag: TypeReg, - }, - fm: 0755 | os.ModeSetuid, - }, - // setguid. - { - h: &Header{ - Name: "group.txt", - Mode: 0750 | c_ISREG | c_ISGID, - Size: 0, - ModTime: time.Unix(1360602346, 0), - Typeflag: TypeReg, - }, - fm: 0750 | os.ModeSetgid, - }, - // sticky. - { - h: &Header{ - Name: "sticky.txt", - Mode: 0600 | c_ISREG | c_ISVTX, - Size: 7, - ModTime: time.Unix(1360602540, 0), - Typeflag: TypeReg, - }, - fm: 0600 | os.ModeSticky, - }, - } - - for i, g := range golden { - fi := g.h.FileInfo() - h2, err := FileInfoHeader(fi, "") - if err != nil { - t.Error(err) - continue - } - if strings.Contains(fi.Name(), "/") { - t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name()) - } - name := path.Base(g.h.Name) - if fi.IsDir() { - name += "/" - } - if got, want := h2.Name, name; got != want { - t.Errorf("i=%d: Name: got %v, want %v", i, got, want) - } - if got, want := h2.Size, g.h.Size; got != want { - t.Errorf("i=%d: Size: got %v, want %v", i, got, want) - } - if got, want := h2.Mode, g.h.Mode; got != want { - t.Errorf("i=%d: Mode: got %o, want %o", i, got, want) - } - if got, want := fi.Mode(), g.fm; got != want { - t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want) - } - if got, want := h2.ModTime, g.h.ModTime; got != want { - t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want) - } - if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h { - t.Errorf("i=%d: Sys didn't return original *Header", i) - } - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar deleted file mode 100644 index fc899dc8dc..0000000000 Binary files a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar 
b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar deleted file mode 100644 index cc9cfaa33c..0000000000 Binary files a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar deleted file mode 100644 index 9bc24b6587..0000000000 Binary files a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt deleted file mode 100644 index b249bfc518..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt +++ /dev/null @@ -1 +0,0 @@ -Kilts \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt deleted file mode 100644 index 394ee3ecd0..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt +++ /dev/null @@ -1 +0,0 @@ -Google.com diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar deleted file mode 100644 index 8bd4e74d50..0000000000 Binary files a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar deleted file mode 100644 index 59e2d4e604..0000000000 Binary files a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar deleted file mode 100644 index 29679d9a30..0000000000 Binary files a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar deleted file mode 100644 index eb65fc9410..0000000000 Binary files 
a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar deleted file mode 100644 index 5960ee8247..0000000000 Binary files a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar deleted file mode 100644 index 753e883ceb..0000000000 Binary files a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar deleted file mode 100644 index e6d816ad07..0000000000 Binary files a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar deleted file mode 100644 index 9701950edd..0000000000 Binary files a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go deleted file mode 100644 index dafb2cabf3..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -// TODO(dsymonds): -// - catch more errors (no first header, etc.) - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "path" - "strconv" - "strings" - "time" -) - -var ( - ErrWriteTooLong = errors.New("archive/tar: write too long") - ErrFieldTooLong = errors.New("archive/tar: header field too long") - ErrWriteAfterClose = errors.New("archive/tar: write after close") - errNameTooLong = errors.New("archive/tar: name too long") - errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") -) - -// A Writer provides sequential writing of a tar archive in POSIX.1 format. -// A tar archive consists of a sequence of files. -// Call WriteHeader to begin a new file, and then call Write to supply that file's data, -// writing at most hdr.Size bytes in total. 
-type Writer struct { - w io.Writer - err error - nb int64 // number of unwritten bytes for current file entry - pad int64 // amount of padding to write after current file entry - closed bool - usedBinary bool // whether the binary numeric field extension was used - preferPax bool // use pax header instead of binary numeric header - hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header - paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header -} - -// NewWriter creates a new Writer writing to w. -func NewWriter(w io.Writer) *Writer { return &Writer{w: w} } - -// Flush finishes writing the current file (optional). -func (tw *Writer) Flush() error { - if tw.nb > 0 { - tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb) - return tw.err - } - - n := tw.nb + tw.pad - for n > 0 && tw.err == nil { - nr := n - if nr > blockSize { - nr = blockSize - } - var nw int - nw, tw.err = tw.w.Write(zeroBlock[0:nr]) - n -= int64(nw) - } - tw.nb = 0 - tw.pad = 0 - return tw.err -} - -// Write s into b, terminating it with a NUL if there is room. -// If the value is too long for the field and allowPax is true add a paxheader record instead -func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) { - needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s) - if needsPaxHeader { - paxHeaders[paxKeyword] = s - return - } - if len(s) > len(b) { - if tw.err == nil { - tw.err = ErrFieldTooLong - } - return - } - ascii := toASCII(s) - copy(b, ascii) - if len(ascii) < len(b) { - b[len(ascii)] = 0 - } -} - -// Encode x as an octal ASCII string and write it into b with leading zeros. -func (tw *Writer) octal(b []byte, x int64) { - s := strconv.FormatInt(x, 8) - // leading zeros, but leave room for a NUL. - for len(s)+1 < len(b) { - s = "0" + s - } - tw.cString(b, s, false, paxNone, nil) -} - -// Write x into b, either as octal or as binary (GNUtar/star extension). -// If the value is too long for the field and writingPax is enabled both for the field and the add a paxheader record instead -func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) { - // Try octal first. - s := strconv.FormatInt(x, 8) - if len(s) < len(b) { - tw.octal(b, x) - return - } - - // If it is too long for octal, and pax is preferred, use a pax header - if allowPax && tw.preferPax { - tw.octal(b, 0) - s := strconv.FormatInt(x, 10) - paxHeaders[paxKeyword] = s - return - } - - // Too big: use binary (big-endian). - tw.usedBinary = true - for i := len(b) - 1; x > 0 && i >= 0; i-- { - b[i] = byte(x) - x >>= 8 - } - b[0] |= 0x80 // highest bit indicates binary format -} - -var ( - minTime = time.Unix(0, 0) - // There is room for 11 octal digits (33 bits) of mtime. - maxTime = minTime.Add((1<<33 - 1) * time.Second) -) - -// WriteHeader writes hdr and prepares to accept the file's contents. -// WriteHeader calls Flush if it is not the first header. -// Calling after a Close will return ErrWriteAfterClose. -func (tw *Writer) WriteHeader(hdr *Header) error { - return tw.writeHeader(hdr, true) -} - -// WriteHeader writes hdr and prepares to accept the file's contents. -// WriteHeader calls Flush if it is not the first header. -// Calling after a Close will return ErrWriteAfterClose. -// As this method is called internally by writePax header to allow it to -// suppress writing the pax header. 
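Aside (annotation, not part of the diff): the octal/binary split in tw.numeric above is the crux of the GNU/star extension. Values that fit are written as zero-padded octal with a trailing NUL; anything larger falls back to big-endian base-256 with the top bit of the first byte set. A standalone sketch that mirrors the deleted code rather than importing it:

```go
package main

import (
	"fmt"
	"strconv"
)

// numeric mirrors the deleted tw.numeric: zero-padded octal when the value
// fits the field, otherwise big-endian base-256 flagged by the high bit.
func numeric(b []byte, x int64) {
	if s := strconv.FormatInt(x, 8); len(s) < len(b) {
		for len(s)+1 < len(b) { // leading zeros, leaving room for a NUL
			s = "0" + s
		}
		copy(b, s)
		return
	}
	for i := len(b) - 1; x > 0 && i >= 0; i-- {
		b[i] = byte(x)
		x >>= 8
	}
	b[0] |= 0x80 // highest bit marks the binary encoding
}

func main() {
	small := make([]byte, 8)
	numeric(small, 0755)
	fmt.Printf("%q\n", small) // "0000755\x00"

	big := make([]byte, 8)
	numeric(big, 1<<21)     // 8 octal digits: too wide for the field
	fmt.Printf("%x\n", big) // 8000000000200000
}
```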
-func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error { - if tw.closed { - return ErrWriteAfterClose - } - if tw.err == nil { - tw.Flush() - } - if tw.err != nil { - return tw.err - } - - // a map to hold pax header records, if any are needed - paxHeaders := make(map[string]string) - - // TODO(shanemhansen): we might want to use PAX headers for - // subsecond time resolution, but for now let's just capture - // too-long fields or non-ASCII characters - - var header []byte - - // We need to select which scratch buffer to use carefully, - // since this method is called recursively to write PAX headers. - // If allowPax is true, this is the non-recursive call, and we will use hdrBuff. - // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is - // already being used by the non-recursive call, so we must use paxHdrBuff. - header = tw.hdrBuff[:] - if !allowPax { - header = tw.paxHdrBuff[:] - } - copy(header, zeroBlock) - s := slicer(header) - - // keep a reference to the filename so it can be overwritten later if we detect that we can use ustar longnames instead of pax - pathHeaderBytes := s.next(fileNameSize) - - tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders) - - // Handle out of range ModTime carefully. - var modTime int64 - if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) { - modTime = hdr.ModTime.Unix() - } - - tw.octal(s.next(8), hdr.Mode) // 100:108 - tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116 - tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124 - tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136 - tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity - s.next(8) // chksum (148:156) - s.next(1)[0] = hdr.Typeflag // 156:157 - - tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders) - - copy(s.next(8), []byte("ustar\x0000")) // 257:265 - tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297 - tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329 - tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337 - tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345 - - // keep a reference to the prefix so it can be overwritten later if we detect that we can use ustar longnames instead of pax - prefixHeaderBytes := s.next(155) - tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix - - // Use the GNU magic instead of POSIX magic if we used any GNU extensions. - if tw.usedBinary { - copy(header[257:265], []byte("ustar \x00")) - } - - _, paxPathUsed := paxHeaders[paxPath] - // try to use a ustar header when only the name is too long - if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { - suffix := hdr.Name - prefix := "" - if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) { - var err error - prefix, suffix, err = tw.splitUSTARLongName(hdr.Name) - if err == nil { - // ok, we can use a ustar long name instead of pax; now correct the fields - - // remove the path field from the pax header. this will suppress the pax header - delete(paxHeaders, paxPath) - - // update the path fields - tw.cString(pathHeaderBytes, suffix, false, paxNone, nil) - tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil) - - // Use the ustar magic if we used ustar long names.
- if len(prefix) > 0 && !tw.usedBinary { - copy(header[257:265], []byte("ustar\x00")) - } - } - } - } - - // The chksum field is terminated by a NUL and a space. - // This is different from the other octal fields. - chksum, _ := checksum(header) - tw.octal(header[148:155], chksum) - header[155] = ' ' - - if tw.err != nil { - // problem with header; probably integer too big for a field. - return tw.err - } - - if allowPax { - for k, v := range hdr.Xattrs { - paxHeaders[paxXattr+k] = v - } - } - - if len(paxHeaders) > 0 { - if !allowPax { - return errInvalidHeader - } - if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { - return err - } - } - tw.nb = int64(hdr.Size) - tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize - - _, tw.err = tw.w.Write(header) - return tw.err -} - -// splitUSTARLongName splits a USTAR long name hdr.Name. -// name must be < 256 characters. errNameTooLong is returned -// if hdr.Name can't be split. The splitting heuristic -// is compatible with gnu tar. -func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) { - length := len(name) - if length > fileNamePrefixSize+1 { - length = fileNamePrefixSize + 1 - } else if name[length-1] == '/' { - length-- - } - i := strings.LastIndex(name[:length], "/") - // nlen contains the resulting length in the name field. - // plen contains the resulting length in the prefix field. - nlen := len(name) - i - 1 - plen := i - if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize { - err = errNameTooLong - return - } - prefix, suffix = name[:i], name[i+1:] - return -} - -// writePAXHeader writes an extended pax header to the -// archive. -func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { - // Prepare extended header - ext := new(Header) - ext.Typeflag = TypeXHeader - // Setting ModTime is required for reader parsing to - // succeed, and seems harmless enough. - ext.ModTime = hdr.ModTime - // The spec asks that we namespace our pseudo files - // with the current pid. - pid := os.Getpid() - dir, file := path.Split(hdr.Name) - fullName := path.Join(dir, - fmt.Sprintf("PaxHeaders.%d", pid), file) - - ascii := toASCII(fullName) - if len(ascii) > 100 { - ascii = ascii[:100] - } - ext.Name = ascii - // Construct the body - var buf bytes.Buffer - - for k, v := range paxHeaders { - fmt.Fprint(&buf, paxHeader(k+"="+v)) - } - - ext.Size = int64(len(buf.Bytes())) - if err := tw.writeHeader(ext, false); err != nil { - return err - } - if _, err := tw.Write(buf.Bytes()); err != nil { - return err - } - if err := tw.Flush(); err != nil { - return err - } - return nil -} - -// paxHeader formats a single pax record, prefixing it with the appropriate length -func paxHeader(msg string) string { - const padding = 2 // Extra padding for space and newline - size := len(msg) + padding - size += len(strconv.Itoa(size)) - record := fmt.Sprintf("%d %s\n", size, msg) - if len(record) != size { - // Final adjustment if adding size increased - // the number of digits in size - size = len(record) - record = fmt.Sprintf("%d %s\n", size, msg) - } - return record -} - -// Write writes to the current entry in the tar archive. -// Write returns the error ErrWriteTooLong if more than -// hdr.Size bytes are written after WriteHeader.
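	// The self-sizing arithmetic in paxHeader above can be checked by hand
	// against a vector from TestPAXHeader further down (illustrative only):
	// msg = "path=/etc/hosts" is 15 bytes; size = 15 + 2 (space and newline)
	// = 17, then size += len("17") = 2, giving 19. The resulting record
	// "19 path=/etc/hosts\n" is itself exactly 19 bytes long, so the second
	// adjustment pass is not needed in this case.
	record := fmt.Sprintf("%d %s\n", 19, "path=/etc/hosts")
	fmt.Println(len(record)) // 19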
-func (tw *Writer) Write(b []byte) (n int, err error) { - if tw.closed { - err = ErrWriteTooLong - return - } - overwrite := false - if int64(len(b)) > tw.nb { - b = b[0:tw.nb] - overwrite = true - } - n, err = tw.w.Write(b) - tw.nb -= int64(n) - if err == nil && overwrite { - err = ErrWriteTooLong - return - } - tw.err = err - return -} - -// Close closes the tar archive, flushing any unwritten -// data to the underlying writer. -func (tw *Writer) Close() error { - if tw.err != nil || tw.closed { - return tw.err - } - tw.Flush() - tw.closed = true - if tw.err != nil { - return tw.err - } - - // trailer: two zero blocks - for i := 0; i < 2; i++ { - _, tw.err = tw.w.Write(zeroBlock) - if tw.err != nil { - break - } - } - return tw.err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go deleted file mode 100644 index 5e42e322f9..0000000000 --- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go +++ /dev/null @@ -1,491 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "reflect" - "strings" - "testing" - "testing/iotest" - "time" -) - -type writerTestEntry struct { - header *Header - contents string -} - -type writerTest struct { - file string // filename of expected output - entries []*writerTestEntry -} - -var writerTests = []*writerTest{ - // The writer test file was produced with this command: - // tar (GNU tar) 1.26 - // ln -s small.txt link.txt - // tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt - { - file: "testdata/writer.tar", - entries: []*writerTestEntry{ - { - header: &Header{ - Name: "small.txt", - Mode: 0640, - Uid: 73025, - Gid: 5000, - Size: 5, - ModTime: time.Unix(1246508266, 0), - Typeflag: '0', - Uname: "dsymonds", - Gname: "eng", - }, - contents: "Kilts", - }, - { - header: &Header{ - Name: "small2.txt", - Mode: 0640, - Uid: 73025, - Gid: 5000, - Size: 11, - ModTime: time.Unix(1245217492, 0), - Typeflag: '0', - Uname: "dsymonds", - Gname: "eng", - }, - contents: "Google.com\n", - }, - { - header: &Header{ - Name: "link.txt", - Mode: 0777, - Uid: 1000, - Gid: 1000, - Size: 0, - ModTime: time.Unix(1314603082, 0), - Typeflag: '2', - Linkname: "small.txt", - Uname: "strings", - Gname: "strings", - }, - // no contents - }, - }, - }, - // The truncated test file was produced using these commands: - // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt - // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar - { - file: "testdata/writer-big.tar", - entries: []*writerTestEntry{ - { - header: &Header{ - Name: "tmp/16gig.txt", - Mode: 0640, - Uid: 73025, - Gid: 5000, - Size: 16 << 30, - ModTime: time.Unix(1254699560, 0), - Typeflag: '0', - Uname: "dsymonds", - Gname: "eng", - }, - // fake contents - contents: strings.Repeat("\x00", 4<<10), - }, - }, - }, - // The truncated test file was produced using these commands: - // dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt - // tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar - { - file: "testdata/writer-big-long.tar", - entries: []*writerTestEntry{ - { - header: &Header{ - Name: 
strings.Repeat("longname/", 15) + "16gig.txt", - Mode: 0644, - Uid: 1000, - Gid: 1000, - Size: 16 << 30, - ModTime: time.Unix(1399583047, 0), - Typeflag: '0', - Uname: "guillaume", - Gname: "guillaume", - }, - // fake contents - contents: strings.Repeat("\x00", 4<<10), - }, - }, - }, - // This file was produced using gnu tar 1.17 - // gnutar -b 4 --format=ustar (longname/)*15 + file.txt - { - file: "testdata/ustar.tar", - entries: []*writerTestEntry{ - { - header: &Header{ - Name: strings.Repeat("longname/", 15) + "file.txt", - Mode: 0644, - Uid: 0765, - Gid: 024, - Size: 06, - ModTime: time.Unix(1360135598, 0), - Typeflag: '0', - Uname: "shane", - Gname: "staff", - }, - contents: "hello\n", - }, - }, - }, -} - -// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection. -func bytestr(offset int, b []byte) string { - const rowLen = 32 - s := fmt.Sprintf("%04x ", offset) - for _, ch := range b { - switch { - case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z': - s += fmt.Sprintf(" %c", ch) - default: - s += fmt.Sprintf(" %02x", ch) - } - } - return s -} - -// Render a pseudo-diff between two blocks of bytes. -func bytediff(a []byte, b []byte) string { - const rowLen = 32 - s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b)) - for offset := 0; len(a)+len(b) > 0; offset += rowLen { - na, nb := rowLen, rowLen - if na > len(a) { - na = len(a) - } - if nb > len(b) { - nb = len(b) - } - sa := bytestr(offset, a[0:na]) - sb := bytestr(offset, b[0:nb]) - if sa != sb { - s += fmt.Sprintf("-%v\n+%v\n", sa, sb) - } - a = a[na:] - b = b[nb:] - } - return s -} - -func TestWriter(t *testing.T) { -testLoop: - for i, test := range writerTests { - expected, err := ioutil.ReadFile(test.file) - if err != nil { - t.Errorf("test %d: Unexpected error: %v", i, err) - continue - } - - buf := new(bytes.Buffer) - tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB - big := false - for j, entry := range test.entries { - big = big || entry.header.Size > 1<<10 - if err := tw.WriteHeader(entry.header); err != nil { - t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err) - continue testLoop - } - if _, err := io.WriteString(tw, entry.contents); err != nil { - t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err) - continue testLoop - } - } - // Only interested in Close failures for the small tests. - if err := tw.Close(); err != nil && !big { - t.Errorf("test %d: Failed closing archive: %v", i, err) - continue testLoop - } - - actual := buf.Bytes() - if !bytes.Equal(expected, actual) { - t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v", - i, bytediff(expected, actual)) - } - if testing.Short() { // The second test is expensive. 
- break - } - } -} - -func TestPax(t *testing.T) { - // Create an archive with a large name - fileinfo, err := os.Stat("testdata/small.txt") - if err != nil { - t.Fatal(err) - } - hdr, err := FileInfoHeader(fileinfo, "") - if err != nil { - t.Fatalf("os.Stat: %v", err) - } - // Force a PAX long name to be written - longName := strings.Repeat("ab", 100) - contents := strings.Repeat(" ", int(hdr.Size)) - hdr.Name = longName - var buf bytes.Buffer - writer := NewWriter(&buf) - if err := writer.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if _, err = writer.Write([]byte(contents)); err != nil { - t.Fatal(err) - } - if err := writer.Close(); err != nil { - t.Fatal(err) - } - // Simple test to make sure PAX extensions are in effect - if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) { - t.Fatal("Expected at least one PAX header to be written.") - } - // Test that we can get a long name back out of the archive. - reader := NewReader(&buf) - hdr, err = reader.Next() - if err != nil { - t.Fatal(err) - } - if hdr.Name != longName { - t.Fatal("Couldn't recover long file name") - } -} - -func TestPaxSymlink(t *testing.T) { - // Create an archive with a large linkname - fileinfo, err := os.Stat("testdata/small.txt") - if err != nil { - t.Fatal(err) - } - hdr, err := FileInfoHeader(fileinfo, "") - hdr.Typeflag = TypeSymlink - if err != nil { - t.Fatalf("os.Stat:1 %v", err) - } - // Force a PAX long linkname to be written - longLinkname := strings.Repeat("1234567890/1234567890", 10) - hdr.Linkname = longLinkname - - hdr.Size = 0 - var buf bytes.Buffer - writer := NewWriter(&buf) - if err := writer.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if err := writer.Close(); err != nil { - t.Fatal(err) - } - // Simple test to make sure PAX extensions are in effect - if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) { - t.Fatal("Expected at least one PAX header to be written.") - } - // Test that we can get a long name back out of the archive. - reader := NewReader(&buf) - hdr, err = reader.Next() - if err != nil { - t.Fatal(err) - } - if hdr.Linkname != longLinkname { - t.Fatal("Couldn't recover long link name") - } -} - -func TestPaxNonAscii(t *testing.T) { - // Create an archive with non-ASCII names. These should trigger a pax header - // because pax headers have a defined utf-8 encoding. - fileinfo, err := os.Stat("testdata/small.txt") - if err != nil { - t.Fatal(err) - } - - hdr, err := FileInfoHeader(fileinfo, "") - if err != nil { - t.Fatalf("os.Stat:1 %v", err) - } - - // some sample data - chineseFilename := "文件名" - chineseGroupname := "組" - chineseUsername := "用戶名" - - hdr.Name = chineseFilename - hdr.Gname = chineseGroupname - hdr.Uname = chineseUsername - - contents := strings.Repeat(" ", int(hdr.Size)) - - var buf bytes.Buffer - writer := NewWriter(&buf) - if err := writer.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if _, err = writer.Write([]byte(contents)); err != nil { - t.Fatal(err) - } - if err := writer.Close(); err != nil { - t.Fatal(err) - } - // Simple test to make sure PAX extensions are in effect - if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) { - t.Fatal("Expected at least one PAX header to be written.") - } - // Test that we can get a long name back out of the archive.
- reader := NewReader(&buf) - hdr, err = reader.Next() - if err != nil { - t.Fatal(err) - } - if hdr.Name != chineseFilename { - t.Fatal("Couldn't recover unicode name") - } - if hdr.Gname != chineseGroupname { - t.Fatal("Couldn't recover unicode group") - } - if hdr.Uname != chineseUsername { - t.Fatal("Couldn't recover unicode user") - } -} - -func TestPaxXattrs(t *testing.T) { - xattrs := map[string]string{ - "user.key": "value", - } - - // Create an archive with an xattr - fileinfo, err := os.Stat("testdata/small.txt") - if err != nil { - t.Fatal(err) - } - hdr, err := FileInfoHeader(fileinfo, "") - if err != nil { - t.Fatalf("os.Stat: %v", err) - } - contents := "Kilts" - hdr.Xattrs = xattrs - var buf bytes.Buffer - writer := NewWriter(&buf) - if err := writer.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if _, err = writer.Write([]byte(contents)); err != nil { - t.Fatal(err) - } - if err := writer.Close(); err != nil { - t.Fatal(err) - } - // Test that we can get the xattrs back out of the archive. - reader := NewReader(&buf) - hdr, err = reader.Next() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(hdr.Xattrs, xattrs) { - t.Fatalf("xattrs did not survive round trip: got %+v, want %+v", - hdr.Xattrs, xattrs) - } -} - -func TestPAXHeader(t *testing.T) { - medName := strings.Repeat("CD", 50) - longName := strings.Repeat("AB", 100) - paxTests := [][2]string{ - {paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"}, - {"a=b", "6 a=b\n"}, // Single digit length - {"a=names", "11 a=names\n"}, // Test case involving carries - {paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)}, - {paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}} - - for _, test := range paxTests { - key, expected := test[0], test[1] - if result := paxHeader(key); result != expected { - t.Fatalf("paxHeader: got %s, expected %s", result, expected) - } - } -} - -func TestUSTARLongName(t *testing.T) { - // Create an archive with a path that failed to split with USTAR extension in previous versions. - fileinfo, err := os.Stat("testdata/small.txt") - if err != nil { - t.Fatal(err) - } - hdr, err := FileInfoHeader(fileinfo, "") - hdr.Typeflag = TypeDir - if err != nil { - t.Fatalf("os.Stat:1 %v", err) - } - // Force a PAX long name to be written. The name was taken from a practical example - // that failed, with every character replaced by a digit to anonymize the sample. - longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/" - hdr.Name = longName - - hdr.Size = 0 - var buf bytes.Buffer - writer := NewWriter(&buf) - if err := writer.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if err := writer.Close(); err != nil { - t.Fatal(err) - } - // Test that we can get a long name back out of the archive.
- reader := NewReader(&buf) - hdr, err = reader.Next() - if err != nil { - t.Fatal(err) - } - if hdr.Name != longName { - t.Fatal("Couldn't recover long name") - } -} - -func TestValidTypeflagWithPAXHeader(t *testing.T) { - var buffer bytes.Buffer - tw := NewWriter(&buffer) - - fileName := strings.Repeat("ab", 100) - - hdr := &Header{ - Name: fileName, - Size: 4, - Typeflag: 0, - } - if err := tw.WriteHeader(hdr); err != nil { - t.Fatalf("Failed to write header: %s", err) - } - if _, err := tw.Write([]byte("fooo")); err != nil { - t.Fatalf("Failed to write the file's data: %s", err) - } - tw.Close() - - tr := NewReader(&buffer) - - for { - header, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatalf("Failed to read header: %s", err) - } - if header.Typeflag != 0 { - t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag) - } - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md deleted file mode 100644 index 05be0f8ab3..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md +++ /dev/null @@ -1,13 +0,0 @@ -# Contributing to libtrust - -Want to hack on libtrust? Awesome! Here are instructions to get you -started. - -libtrust is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read -[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). - -Happy hacking! diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE b/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE deleted file mode 100644 index 27448585ad..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS deleted file mode 100644 index 9768175feb..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS +++ /dev/null @@ -1,3 +0,0 @@ -Solomon Hykes -Josh Hawn (github: jlhawn) -Derek McGowan (github: dmcgowan) diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/README.md b/Godeps/_workspace/src/github.com/docker/libtrust/README.md deleted file mode 100644 index 8e7db38186..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# libtrust - -Libtrust is a library for managing authentication and authorization using public key cryptography. - -Authentication is handled using the identity attached to the public key. -Libtrust provides multiple methods to prove possession of the private key associated with an identity. - - TLS x509 certificates - - Signature verification - - Key Challenge - -Authorization and access control are managed through a distributed trust graph. -Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. - -## Copyright and license - -Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license. -Docs released under Creative Commons.
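A minimal sketch of the key-centric model the README describes, using the libtrust APIs defined in the files that follow (error handling trimmed to log.Fatal):

	package main

	import (
		"log"
		"net"

		"github.com/docker/libtrust"
	)

	func main() {
		// Generate an EC P-256 identity key; its KeyID doubles as the
		// certificate CommonName in the certificate helpers below.
		key, err := libtrust.GenerateECP256PrivateKey()
		if err != nil {
			log.Fatal(err)
		}
		cert, err := libtrust.GenerateSelfSignedServerCert(
			key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")},
		)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("issued certificate for %s", cert.Subject.CommonName)
	}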
- diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go b/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go deleted file mode 100644 index 3dcca33cb1..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io/ioutil" - "math/big" - "net" - "time" ) - -type certTemplateInfo struct { - commonName string - domains []string - ipAddresses []net.IP - isCA bool - clientAuth bool - serverAuth bool -} - -func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { - // Generate a certificate template which is valid from the past week to - // 10 years from now. The usage of the certificate depends on the - // specified fields in the given certTemplateInfo object. - var ( - keyUsage x509.KeyUsage - extKeyUsage []x509.ExtKeyUsage - ) - - if info.isCA { - keyUsage = x509.KeyUsageCertSign - } - - if info.clientAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) - } - - if info.serverAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) - } - - return &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: info.commonName, - }, - NotBefore: time.Now().Add(-time.Hour * 24 * 7), - NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), - DNSNames: info.domains, - IPAddresses: info.ipAddresses, - IsCA: info.isCA, - KeyUsage: keyUsage, - ExtKeyUsage: extKeyUsage, - BasicConstraintsValid: info.isCA, - } -} - -func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { - pubCertTemplate := generateCertTemplate(subInfo) - privCertTemplate := generateCertTemplate(issInfo) - - certDER, err := x509.CreateCertificate( - rand.Reader, pubCertTemplate, privCertTemplate, - pub.CryptoPublicKey(), priv.CryptoPrivateKey(), - ) - if err != nil { - return nil, fmt.Errorf("failed to create certificate: %s", err) - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, fmt.Errorf("failed to parse certificate: %s", err) - } - - return -} - -// GenerateSelfSignedServerCert creates a self-signed certificate for the -// given key which is to be used for TLS servers with the given domains and -// IP addresses. -func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - domains: domains, - ipAddresses: ipAddresses, - serverAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateSelfSignedClientCert creates a self-signed certificate for the -// given key which is to be used for TLS clients. -func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - clientAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateCACert creates a certificate which can be used as a trusted -// certificate authority. -func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { - subjectInfo := &certTemplateInfo{ - commonName: trustedKey.KeyID(), - isCA: true, - } - issuerInfo := &certTemplateInfo{ - commonName: signer.KeyID(), - } - - return generateCert(trustedKey, signer, subjectInfo, issuerInfo) -} - -// GenerateCACertPool creates a certificate authority pool to be used for a -// TLS configuration.
Any self-signed certificates issued by the specified -// trusted keys will be verified during a TLS handshake -func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { - certPool := x509.NewCertPool() - - for _, trustedKey := range trustedKeys { - cert, err := GenerateCACert(signer, trustedKey) - if err != nil { - return nil, fmt.Errorf("failed to generate CA certificate: %s", err) - } - - certPool.AddCert(cert) - } - - return certPool, nil -} - -// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". -func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - certificates := []*x509.Certificate{} - var block *pem.Block - block, b = pem.Decode(b) - for ; block != nil; block, b = pem.Decode(b) { - if block.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } else { - return nil, fmt.Errorf("invalid pem block type: %s", block.Type) - } - } - - return certificates, nil -} - -// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". -func LoadCertificatePool(filename string) (*x509.CertPool, error) { - certs, err := LoadCertificateBundle(filename) - if err != nil { - return nil, err - } - pool := x509.NewCertPool() - for _, cert := range certs { - pool.AddCert(cert) - } - return pool, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go deleted file mode 100644 index c111f3531a..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package libtrust - -import ( - "encoding/pem" - "io/ioutil" - "net" - "os" - "path" - "testing" -) - -func TestGenerateCertificates(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - _, err = GenerateSelfSignedServerCert(key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")}) - if err != nil { - t.Fatal(err) - } - - _, err = GenerateSelfSignedClientCert(key) - if err != nil { - t.Fatal(err) - } -} - -func TestGenerateCACertPool(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - caKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - caKey2, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - _, err = GenerateCACertPool(key, []PublicKey{caKey1.PublicKey(), caKey2.PublicKey()}) - if err != nil { - t.Fatal(err) - } -} - -func TestLoadCertificates(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - caKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - caKey2, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - cert1, err := GenerateCACert(caKey1, key) - if err != nil { - t.Fatal(err) - } - cert2, err := GenerateCACert(caKey2, key) - if err != nil { - t.Fatal(err) - } - - d, err := ioutil.TempDir("/tmp", "cert-test") - if err != nil { - t.Fatal(err) - } - caFile := path.Join(d, "ca.pem") - f, err := os.OpenFile(caFile, os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - t.Fatal(err) - } - 
- err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert1.Raw}) - if err != nil { - t.Fatal(err) - } - err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert2.Raw}) - if err != nil { - t.Fatal(err) - } - f.Close() - - certs, err := LoadCertificateBundle(caFile) - if err != nil { - t.Fatal(err) - } - if len(certs) != 2 { - t.Fatalf("Wrong number of certs received, expected: %d, received %d", 2, len(certs)) - } - - pool, err := LoadCertificatePool(caFile) - if err != nil { - t.Fatal(err) - } - - if len(pool.Subjects()) != 2 { - t.Fatalf("Invalid certificate pool") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/doc.go b/Godeps/_workspace/src/github.com/docker/libtrust/doc.go deleted file mode 100644 index ec5d2159c1..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package libtrust provides an interface for managing authentication and -authorization using public key cryptography. Authentication is handled -using the identity attached to the public key and verified through TLS -x509 certificates, a key challenge, or signature. Authorization and -access control is managed through a trust graph distributed between -both remote trust servers and locally cached and managed data. -*/ -package libtrust diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go deleted file mode 100644 index 00bbe4b3ca..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go +++ /dev/null @@ -1,428 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" -) - -/* - * EC DSA PUBLIC KEY - */ - -// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital -// signature algorithms. -type ecPublicKey struct { - *ecdsa.PublicKey - curveName string - signatureAlgorithm *signatureAlgorithm - extended map[string]interface{} -} - -func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { - curve := cryptoPublicKey.Curve - - switch { - case curve == elliptic.P256(): - return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil - case curve == elliptic.P384(): - return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil - case curve == elliptic.P521(): - return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil - default: - return nil, errors.New("unsupported elliptic curve") - } -} - -// KeyType returns the key type for elliptic curve keys, i.e., "EC". -func (k *ecPublicKey) KeyType() string { - return "EC" -} - -// CurveName returns the elliptic curve identifier. -// Possible values are "P-256", "P-384", and "P-521". -func (k *ecPublicKey) CurveName() string { - return k.curveName -} - -// KeyID returns a distinct identifier which is unique to this Public Key. -func (k *ecPublicKey) KeyID() string { - return keyIDFromCryptoKey(k) -} - -func (k *ecPublicKey) String() string { - return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) -} - -// Verify verifies the signature of the data in the io.Reader using this -// PublicKey. The alg parameter should identify the digital signature -// algorithm which was used to produce the signature and should be supported -// by this public key. Returns a nil error if the signature is valid.
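	// The signature format checked by the Verify implementation below is the
	// raw concatenation r||s, each half zero-padded to the curve's octet
	// length; for example (assuming the standard crypto/elliptic package):
	// for P-256, BitSize is 256, so each of r and s occupies
	// (256+7)>>3 = 32 bytes and a valid ES256 signature is exactly 64 bytes.
	sigLen := 2 * ((elliptic.P256().Params().BitSize + 7) >> 3) // 64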
-func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { - // For EC keys there is only one supported signature algorithm depending - // on the curve parameters. - if k.signatureAlgorithm.HeaderParam() != alg { - return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) - } - - // signature is the concatenation of (r, s), base64Url encoded. - sigLength := len(signature) - expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) - if sigLength != expectedOctetLength { - return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) - } - - rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] - r := new(big.Int).SetBytes(rBytes) - s := new(big.Int).SetBytes(sBytes) - - hasher := k.signatureAlgorithm.HashID().New() - _, err := io.Copy(hasher, data) - if err != nil { - return fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - if !ecdsa.Verify(k.PublicKey, hash, r, s) { - return errors.New("invalid signature") - } - - return nil -} - -// CryptoPublicKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { - return k.PublicKey -} - -func (k *ecPublicKey) toMap() map[string]interface{} { - jwk := make(map[string]interface{}) - for k, v := range k.extended { - jwk[k] = v - } - jwk["kty"] = k.KeyType() - jwk["kid"] = k.KeyID() - jwk["crv"] = k.CurveName() - - xBytes := k.X.Bytes() - yBytes := k.Y.Bytes() - octetLength := (k.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output so that x, y are each - // *octetLength* bytes long. - xBuf := make([]byte, octetLength-len(xBytes), octetLength) - yBuf := make([]byte, octetLength-len(yBytes), octetLength) - xBuf = append(xBuf, xBytes...) - yBuf = append(yBuf, yBytes...) - - jwk["x"] = joseBase64UrlEncode(xBuf) - jwk["y"] = joseBase64UrlEncode(yBuf) - - return jwk -} - -// MarshalJSON serializes this Public Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Public Key to DER-encoded PKIX format. -func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) - } - k.extended["kid"] = k.KeyID() // For display purposes. - return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *ecPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { - // JWK key type (kty) has already been determined to be "EC". - // Need to extract 'crv', 'x', 'y', and 'kid' and check for - // consistency. - - // Get the curve identifier value. 
- crv, err := stringFromMap(jwk, "crv") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) - } - - var ( - curve elliptic.Curve - sigAlg *signatureAlgorithm - ) - - switch { - case crv == "P-256": - curve = elliptic.P256() - sigAlg = es256 - case crv == "P-384": - curve = elliptic.P384() - sigAlg = es384 - case crv == "P-521": - curve = elliptic.P521() - sigAlg = es512 - default: - return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) - } - - // Get the X and Y coordinates for the public key point. - xB64Url, err := stringFromMap(jwk, "x") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - x, err := parseECCoordinate(xB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - - yB64Url, err := stringFromMap(jwk, "y") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - y, err := parseECCoordinate(yB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - - key := &ecPublicKey{ - PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, - curveName: crv, signatureAlgorithm: sigAlg, - } - - // Key ID is optional too, but if it exists, it should match the key. - _, ok := jwk["kid"] - if ok { - kid, err := stringFromMap(jwk, "kid") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) - } - if kid != key.KeyID() { - return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) - } - } - - key.extended = jwk - - return key, nil -} - -/* - * EC DSA PRIVATE KEY - */ - -// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature -// algorithms. -type ecPrivateKey struct { - ecPublicKey - *ecdsa.PrivateKey -} - -func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { - publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) - if err != nil { - return nil, err - } - - return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil -} - -// PublicKey returns the Public Key data associated with this Private Key. -func (k *ecPrivateKey) PublicKey() PublicKey { - return &k.ecPublicKey -} - -func (k *ecPrivateKey) String() string { - return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) -} - -// Sign signs the data read from the io.Reader using a signature algorithm supported -// by the elliptic curve private key. If the specified hashing algorithm is -// supported by this key, that hash function is used to generate the signature; -// otherwise the default hashing algorithm for this key is used. Returns -// the signature and the name of the JWK signature algorithm used, e.g., -// "ES256", "ES384", "ES512". -func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { - // Generate a signature of the data using the internal alg. - // The given hashID is only a suggestion: since EC keys support only - // one signature/hash algorithm given the curve name, we disregard it for - // the elliptic curve JWK signature implementation.
- hasher := k.signatureAlgorithm.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return nil, "", fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash) - if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) - } - rBytes, sBytes := r.Bytes(), s.Bytes() - octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output - rBuf := make([]byte, octetLength-len(rBytes), octetLength) - sBuf := make([]byte, octetLength-len(sBytes), octetLength) - - rBuf = append(rBuf, rBytes...) - sBuf = append(sBuf, sBytes...) - - signature = append(rBuf, sBuf...) - alg = k.signatureAlgorithm.HeaderParam() - - return -} - -// CryptoPrivateKey returns the internal object which can be used as a -// crypto.PrivateKey for use with other standard library operations. The type -// is either *rsa.PrivateKey or *ecdsa.PrivateKey. -func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey { - return k.PrivateKey -} - -func (k *ecPrivateKey) toMap() map[string]interface{} { - jwk := k.ecPublicKey.toMap() - - dBytes := k.D.Bytes() - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. - n := k.ecPublicKey.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - // Create a buffer with the necessary zero-padding. - dBuf := make([]byte, octetLength-len(dBytes), octetLength) - dBuf = append(dBuf, dBytes...) - - jwk["d"] = joseBase64UrlEncode(dBuf) - - return jwk -} - -// MarshalJSON serializes this Private Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Private Key to DER-encoded PKIX format. -func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err) - } - k.extended["keyID"] = k.KeyID() // For display purposes. - return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) -} - -func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { - dB64Url, err := stringFromMap(jwk, "d") - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key: %s", err) - } - - // JWK key type (kty) has already been determined to be "EC". - // Need to extract the public key information, then extract the private - // key value 'd'. - publicKey, err := ecPublicKeyFromMap(jwk) - if err != nil { - return nil, err - } - - d, err := parseECPrivateParam(dB64Url, publicKey.Curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) - } - - key := &ecPrivateKey{ - ecPublicKey: *publicKey, - PrivateKey: &ecdsa.PrivateKey{ - PublicKey: *publicKey.PublicKey, - D: d, - }, - } - - return key, nil -} - -/* - * Key Generation Functions.
- */ - -func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { - k = new(ecPrivateKey) - k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) - if err != nil { - return nil, err - } - - k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey - k.extended = make(map[string]interface{}) - - return -} - -// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. -func GenerateECP256PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-256 key: %s", err) - } - - k.curveName = "P-256" - k.signatureAlgorithm = es256 - - return k, nil -} - -// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384. -func GenerateECP384PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P384()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-384 key: %s", err) - } - - k.curveName = "P-384" - k.signatureAlgorithm = es384 - - return k, nil -} - -// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521. -func GenerateECP521PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P521()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-521 key: %s", err) - } - - k.curveName = "P-521" - k.signatureAlgorithm = es512 - - return k, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go deleted file mode 100644 index 26ac381497..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package libtrust - -import ( - "bytes" - "encoding/json" - "testing" -) - -func generateECTestKeys(t *testing.T) []PrivateKey { - p256Key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - p384Key, err := GenerateECP384PrivateKey() - if err != nil { - t.Fatal(err) - } - - p521Key, err := GenerateECP521PrivateKey() - if err != nil { - t.Fatal(err) - } - - return []PrivateKey{p256Key, p384Key, p521Key} -} - -func TestECKeys(t *testing.T) { - ecKeys := generateECTestKeys(t) - - for _, ecKey := range ecKeys { - if ecKey.KeyType() != "EC" { - t.Fatalf("key type must be %q, instead got %q", "EC", ecKey.KeyType()) - } - } -} - -func TestECSignVerify(t *testing.T) { - ecKeys := generateECTestKeys(t) - - message := "Hello, World!" - data := bytes.NewReader([]byte(message)) - - sigAlgs := []*signatureAlgorithm{es256, es384, es512} - - for i, ecKey := range ecKeys { - sigAlg := sigAlgs[i] - - t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, ecKey.KeyID()) - - data.Seek(0, 0) // Reset the byte reader - - // Sign - sig, alg, err := ecKey.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - - // Verify - err = ecKey.Verify(data, alg, sig) - if err != nil { - t.Fatal(err) - } - } -} - -func TestMarshalUnmarshalECKeys(t *testing.T) { - ecKeys := generateECTestKeys(t) - data := bytes.NewReader([]byte("This is a test.
I repeat: this is only a test.")) - sigAlgs := []*signatureAlgorithm{es256, es384, es512} - - for i, ecKey := range ecKeys { - sigAlg := sigAlgs[i] - privateJWKJSON, err := json.MarshalIndent(ecKey, "", " ") - if err != nil { - t.Fatal(err) - } - - publicJWKJSON, err := json.MarshalIndent(ecKey.PublicKey(), "", " ") - if err != nil { - t.Fatal(err) - } - - t.Logf("JWK Private Key: %s", string(privateJWKJSON)) - t.Logf("JWK Public Key: %s", string(publicJWKJSON)) - - privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) - if err != nil { - t.Fatal(err) - } - - pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) - if err != nil { - t.Fatal(err) - } - - // Ensure we can sign/verify a message with the unmarshalled keys. - data.Seek(0, 0) // Reset the byte reader - signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - err = pubKey2.Verify(data, alg, signature) - if err != nil { - t.Fatal(err) - } - } -} - -func TestFromCryptoECKeys(t *testing.T) { - ecKeys := generateECTestKeys(t) - - for _, ecKey := range ecKeys { - cryptoPrivateKey := ecKey.CryptoPrivateKey() - cryptoPublicKey := ecKey.CryptoPublicKey() - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - t.Fatal(err) - } - - if pubKey.KeyID() != ecKey.KeyID() { - t.Fatal("public key key ID mismatch") - } - - privKey, err := FromCryptoPrivateKey(cryptoPrivateKey) - if err != nil { - t.Fatal(err) - } - - if privKey.KeyID() != ecKey.KeyID() { - t.Fatal("public key key ID mismatch") - } - } -} - -func TestExtendedFields(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - key.AddExtendedField("test", "foobar") - val := key.GetExtendedField("test") - - gotVal, ok := val.(string) - if !ok { - t.Fatalf("value is not a string") - } else if gotVal != val { - t.Fatalf("value %q is not equal to %q", gotVal, val) - } - -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/filter.go b/Godeps/_workspace/src/github.com/docker/libtrust/filter.go deleted file mode 100644 index 5b2b4fca6f..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/filter.go +++ /dev/null @@ -1,50 +0,0 @@ -package libtrust - -import ( - "path/filepath" -) - -// FilterByHosts filters the list of PublicKeys to only those which contain a -// 'hosts' pattern which matches the given host. If *includeEmpty* is true, -// then keys which do not specify any hosts are also returned. 
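	// A hedged usage sketch of FilterByHosts, mirroring TestFilter further
	// below: keys advertise host patterns via the "hosts" extended field
	// (matched with filepath.Match semantics), and includeEmpty controls
	// whether keys that declare no hosts at all are kept.
	key, err := GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	key.AddExtendedField("hosts", []string{"*.example.com"})
	matched, err := FilterByHosts([]PublicKey{key}, "foo.example.com", false)
	if err != nil {
		log.Fatal(err)
	}
	_ = matched // contains key, since "*.example.com" matches "foo.example.com"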
-func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { - filtered := make([]PublicKey, 0, len(keys)) - - for _, pubKey := range keys { - var hosts []string - switch v := pubKey.GetExtendedField("hosts").(type) { - case []string: - hosts = v - case []interface{}: - for _, value := range v { - h, ok := value.(string) - if !ok { - continue - } - hosts = append(hosts, h) - } - } - - if len(hosts) == 0 { - if includeEmpty { - filtered = append(filtered, pubKey) - } - continue - } - - // Check if any hosts match pattern; stop at the first match so a key is added only once - for _, hostPattern := range hosts { - match, err := filepath.Match(hostPattern, host) - if err != nil { - return nil, err - } - - if match { - filtered = append(filtered, pubKey) - break - } - } - } - - return filtered, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go deleted file mode 100644 index 997e554c04..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package libtrust - -import ( - "testing" -) - -func compareKeySlices(t *testing.T, sliceA, sliceB []PublicKey) { - if len(sliceA) != len(sliceB) { - t.Fatalf("slice size %d, expected %d", len(sliceA), len(sliceB)) - } - - for i, itemA := range sliceA { - itemB := sliceB[i] - if itemA != itemB { - t.Fatalf("slice index %d not equal: %#v != %#v", i, itemA, itemB) - } - } -} - -func TestFilter(t *testing.T) { - keys := make([]PublicKey, 0, 8) - - // Create 8 keys and add host entries. - for i := 0; i < cap(keys); i++ { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - // we use both []interface{} and []string here because jwt uses - // []interface{} format, while PEM uses []string - switch { - case i == 0: - // Don't add entries for this key, key 0. - break - case i%2 == 0: - // Should catch keys 2, 4, and 6. - key.AddExtendedField("hosts", []interface{}{"*.even.example.com"}) - case i == 7: - // Should catch only the last key, and make it match any hostname. - key.AddExtendedField("hosts", []string{"*"}) - default: - // Should catch keys 1, 3, 5. - key.AddExtendedField("hosts", []string{"*.example.com"}) - } - - keys = append(keys, key) - } - - // Should match 2 keys, the empty one, and the one that matches all hosts. - matchedKeys, err := FilterByHosts(keys, "foo.bar.com", true) - if err != nil { - t.Fatal(err) - } - expectedMatch := []PublicKey{keys[0], keys[7]} - compareKeySlices(t, expectedMatch, matchedKeys) - - // Should match 1 key, the one that matches any host. - matchedKeys, err = FilterByHosts(keys, "foo.bar.com", false) - if err != nil { - t.Fatal(err) - } - expectedMatch = []PublicKey{keys[7]} - compareKeySlices(t, expectedMatch, matchedKeys) - - // Should match keys that end in "example.com", and the key that matches anything. - matchedKeys, err = FilterByHosts(keys, "foo.example.com", false) - if err != nil { - t.Fatal(err) - } - expectedMatch = []PublicKey{keys[1], keys[3], keys[5], keys[7]} - compareKeySlices(t, expectedMatch, matchedKeys) - - // Should match all of the keys except the empty key. 
- matchedKeys, err = FilterByHosts(keys, "foo.even.example.com", false) - if err != nil { - t.Fatal(err) - } - expectedMatch = keys[1:] - compareKeySlices(t, expectedMatch, matchedKeys) -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/hash.go b/Godeps/_workspace/src/github.com/docker/libtrust/hash.go deleted file mode 100644 index a2df787dd9..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/hash.go +++ /dev/null @@ -1,56 +0,0 @@ -package libtrust - -import ( - "crypto" - _ "crypto/sha256" // Registrer SHA224 and SHA256 - _ "crypto/sha512" // Registrer SHA384 and SHA512 - "fmt" -) - -type signatureAlgorithm struct { - algHeaderParam string - hashID crypto.Hash -} - -func (h *signatureAlgorithm) HeaderParam() string { - return h.algHeaderParam -} - -func (h *signatureAlgorithm) HashID() crypto.Hash { - return h.hashID -} - -var ( - rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} - rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} - rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} - es256 = &signatureAlgorithm{"ES256", crypto.SHA256} - es384 = &signatureAlgorithm{"ES384", crypto.SHA384} - es512 = &signatureAlgorithm{"ES512", crypto.SHA512} -) - -func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { - switch { - case alg == "RS256": - return rs256, nil - case alg == "RS384": - return rs384, nil - case alg == "RS512": - return rs512, nil - default: - return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) - } -} - -func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { - switch { - case hashID == crypto.SHA512: - return rs512 - case hashID == crypto.SHA384: - return rs384 - case hashID == crypto.SHA256: - fallthrough - default: - return rs256 - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go deleted file mode 100644 index cb2ca9a769..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go +++ /dev/null @@ -1,657 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "sort" - "time" - "unicode" -) - -var ( - // ErrInvalidSignContent is used when the content to be signed is invalid. - ErrInvalidSignContent = errors.New("invalid sign content") - - // ErrInvalidJSONContent is used when invalid json is encountered. - ErrInvalidJSONContent = errors.New("invalid json content") - - // ErrMissingSignatureKey is used when the specified signature key - // does not exist in the JSON content. 
- ErrMissingSignatureKey = errors.New("missing signature key") -) - -type jsHeader struct { - JWK PublicKey `json:"jwk,omitempty"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c,omitempty"` -} - -type jsSignature struct { - Header jsHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected,omitempty"` -} - -type jsSignaturesSorted []jsSignature - -func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } -func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } - -func (jsbkid jsSignaturesSorted) Less(i, j int) bool { - ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() - si, sj := jsbkid[i].Signature, jsbkid[j].Signature - - if ki == kj { - return si < sj - } - - return ki < kj -} - -type signKey struct { - PrivateKey - Chain []*x509.Certificate -} - -// JSONSignature represents a signature of a json object. -type JSONSignature struct { - payload string - signatures []jsSignature - indent string - formatLength int - formatTail []byte -} - -func newJSONSignature() *JSONSignature { - return &JSONSignature{ - signatures: make([]jsSignature, 0, 1), - } -} - -// Payload returns the encoded payload of the signature. This -// payload should not be signed directly -func (js *JSONSignature) Payload() ([]byte, error) { - return joseBase64UrlDecode(js.payload) -} - -func (js *JSONSignature) protectedHeader() (string, error) { - protected := map[string]interface{}{ - "formatLength": js.formatLength, - "formatTail": joseBase64UrlEncode(js.formatTail), - "time": time.Now().UTC().Format(time.RFC3339), - } - protectedBytes, err := json.Marshal(protected) - if err != nil { - return "", err - } - - return joseBase64UrlEncode(protectedBytes), nil -} - -func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { - buf := make([]byte, len(js.payload)+len(protectedHeader)+1) - copy(buf, protectedHeader) - buf[len(protectedHeader)] = '.' - copy(buf[len(protectedHeader)+1:], js.payload) - return buf, nil -} - -// Sign adds a signature using the given private key. -func (js *JSONSignature) Sign(key PrivateKey) error { - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - js.signatures = append(js.signatures, jsSignature{ - Header: jsHeader{ - JWK: key.PublicKey(), - Algorithm: algorithm, - }, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// SignWithChain adds a signature using the given private key -// and setting the x509 chain. The public key of the first element -// in the chain must be the public key corresponding with the sign key. 
-func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { - // Ensure key.Chain[0] is public key for key - //key.Chain.PublicKey - //key.PublicKey().CryptoPublicKey() - - // Verify chain - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - header := jsHeader{ - Chain: make([]string, len(chain)), - Algorithm: algorithm, - } - - for i, cert := range chain { - header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) - } - - js.signatures = append(js.signatures, jsSignature{ - Header: header, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// Verify verifies all the signatures and returns the list of -// public keys used to sign. Any x509 chains are not checked. -func (js *JSONSignature) Verify() ([]PublicKey, error) { - keys := make([]PublicKey, len(js.signatures)) - for i, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - } else if signature.Header.JWK != nil { - publicKey = signature.Header.JWK - } else { - return nil, errors.New("missing public key") - } - - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - - keys[i] = publicKey - } - return keys, nil -} - -// VerifyChains verifies all the signatures and the chains associated -// with each signature and returns the list of verified chains. -// Signatures without an x509 chain are not checked. -func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { - chains := make([][]*x509.Certificate, 0, len(js.signatures)) - for _, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - intermediates := x509.NewCertPool() - if len(signature.Header.Chain) > 1 { - intermediateChain := signature.Header.Chain[1:] - for i := range intermediateChain { - certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) - if err != nil { - return nil, err - } - intermediate, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - intermediates.AddCert(intermediate) - } - } - - verifyOptions := x509.VerifyOptions{ - Intermediates: intermediates, - Roots: ca, - } - - verifiedChains, err := cert.Verify(verifyOptions) - if err != nil { - return nil, err - } - chains = append(chains, verifiedChains...) 
- - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - } - - } - return chains, nil -} - -// JWS returns JSON serialized JWS according to -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 -func (js *JSONSignature) JWS() ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("missing signature") - } - - sort.Sort(jsSignaturesSorted(js.signatures)) - - jsonMap := map[string]interface{}{ - "payload": js.payload, - "signatures": js.signatures, - } - - return json.MarshalIndent(jsonMap, "", " ") -} - -func notSpace(r rune) bool { - return !unicode.IsSpace(r) -} - -func detectJSONIndent(jsonContent []byte) (indent string) { - if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { - quoteIndex := bytes.IndexRune(jsonContent[1:], '"') - if quoteIndex > 0 { - indent = string(jsonContent[2 : quoteIndex+1]) - } - } - return -} - -type jsParsedHeader struct { - JWK json.RawMessage `json:"jwk"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c"` -} - -type jsParsedSignature struct { - Header jsParsedHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected"` -} - -// ParseJWS parses a JWS serialized JSON object into a JSONSignature. -func ParseJWS(content []byte) (*JSONSignature, error) { - type jsParsed struct { - Payload string `json:"payload"` - Signatures []jsParsedSignature `json:"signatures"` - } - parsed := &jsParsed{} - err := json.Unmarshal(content, parsed) - if err != nil { - return nil, err - } - if len(parsed.Signatures) == 0 { - return nil, errors.New("missing signatures") - } - payload, err := joseBase64UrlDecode(parsed.Payload) - if err != nil { - return nil, err - } - - js, err := NewJSONSignature(payload) - if err != nil { - return nil, err - } - js.signatures = make([]jsSignature, len(parsed.Signatures)) - for i, signature := range parsed.Signatures { - header := jsHeader{ - Algorithm: signature.Header.Algorithm, - } - if signature.Header.Chain != nil { - header.Chain = signature.Header.Chain - } - if signature.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) - if err != nil { - return nil, err - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signature.Signature, - Protected: signature.Protected, - } - } - - return js, nil -} - -// NewJSONSignature returns a new unsigned JWS from a JSON byte array. -// JSONSignature will need to be signed before serializing or storing. -// Optionally, one or more signatures can be provided as byte buffers, -// containing serialized JWS signatures, to assemble a fully signed JWS -// package. It is the caller's responsibility to ensure uniqueness of the -// provided signatures. 
-func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { - var dataMap map[string]interface{} - err := json.Unmarshal(content, &dataMap) - if err != nil { - return nil, err - } - - js := newJSONSignature() - js.indent = detectJSONIndent(content) - - js.payload = joseBase64UrlEncode(content) - - // Find trailing } and whitespace, put in protected header - closeIndex := bytes.LastIndexFunc(content, notSpace) - if content[closeIndex] != '}' { - return nil, ErrInvalidJSONContent - } - lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) - if content[lastRuneIndex] == ',' { - return nil, ErrInvalidJSONContent - } - js.formatLength = lastRuneIndex + 1 - js.formatTail = content[js.formatLength:] - - if len(signatures) > 0 { - for _, signature := range signatures { - var parsedJSig jsParsedSignature - - if err := json.Unmarshal(signature, &parsedJSig); err != nil { - return nil, err - } - - // TODO(stevvooe): A lot of the code below is repeated in - // ParseJWS. It will require more refactoring to fix that. - jsig := jsSignature{ - Header: jsHeader{ - Algorithm: parsedJSig.Header.Algorithm, - }, - Signature: parsedJSig.Signature, - Protected: parsedJSig.Protected, - } - - if parsedJSig.Header.Chain != nil { - jsig.Header.Chain = parsedJSig.Header.Chain - } - - if parsedJSig.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) - if err != nil { - return nil, err - } - jsig.Header.JWK = publicKey - } - - js.signatures = append(js.signatures, jsig) - } - } - - return js, nil -} - -// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or -// struct. JWS will need to be signed before serializing or storing. -func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { - switch content.(type) { - case map[string]interface{}: - case struct{}: - default: - return nil, errors.New("invalid data type") - } - - js := newJSONSignature() - js.indent = " " - - payload, err := json.MarshalIndent(content, "", js.indent) - if err != nil { - return nil, err - } - js.payload = joseBase64UrlEncode(payload) - - // Remove '\n}' from formatted section, put in protected header - js.formatLength = len(payload) - 2 - js.formatTail = payload[js.formatLength:] - - return js, nil -} - -func readIntFromMap(key string, m map[string]interface{}) (int, bool) { - value, ok := m[key] - if !ok { - return 0, false - } - switch v := value.(type) { - case int: - return v, true - case float64: - return int(v), true - default: - return 0, false - } -} - -func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { - value, ok := m[key] - if !ok { - return "", false - } - v, ok = value.(string) - return -} - -// ParsePrettySignature parses a formatted signature into a -// JSON signature. If the signatures are missing the format information -// an error is returned. The formatted signature must have been created by -// the PrettySignature method. 
-func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { - var contentMap map[string]json.RawMessage - err := json.Unmarshal(content, &contentMap) - if err != nil { - return nil, fmt.Errorf("error unmarshalling content: %s", err) - } - sigMessage, ok := contentMap[signatureKey] - if !ok { - return nil, ErrMissingSignatureKey - } - - var signatureBlocks []jsParsedSignature - err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) - if err != nil { - return nil, fmt.Errorf("error unmarshalling signatures: %s", err) - } - - js := newJSONSignature() - js.signatures = make([]jsSignature, len(signatureBlocks)) - - for i, signatureBlock := range signatureBlocks { - protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) - if err != nil { - return nil, fmt.Errorf("base64 decode error: %s", err) - } - var protectedHeader map[string]interface{} - err = json.Unmarshal(protectedBytes, &protectedHeader) - if err != nil { - return nil, fmt.Errorf("error unmarshalling protected header: %s", err) - } - - formatLength, ok := readIntFromMap("formatLength", protectedHeader) - if !ok { - return nil, errors.New("missing formatted length") - } - encodedTail, ok := readStringFromMap("formatTail", protectedHeader) - if !ok { - return nil, errors.New("missing formatted tail") - } - formatTail, err := joseBase64UrlDecode(encodedTail) - if err != nil { - return nil, fmt.Errorf("base64 decode error on tail: %s", err) - } - if js.formatLength == 0 { - js.formatLength = formatLength - } else if js.formatLength != formatLength { - return nil, errors.New("conflicting format length") - } - if len(js.formatTail) == 0 { - js.formatTail = formatTail - } else if !bytes.Equal(js.formatTail, formatTail) { - return nil, errors.New("conflicting format tail") - } - - header := jsHeader{ - Algorithm: signatureBlock.Header.Algorithm, - Chain: signatureBlock.Header.Chain, - } - if signatureBlock.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) - if err != nil { - return nil, fmt.Errorf("error unmarshalling public key: %s", err) - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signatureBlock.Signature, - Protected: signatureBlock.Protected, - } - } - if js.formatLength > len(content) { - return nil, errors.New("invalid format length") - } - formatted := make([]byte, js.formatLength+len(js.formatTail)) - copy(formatted, content[:js.formatLength]) - copy(formatted[js.formatLength:], js.formatTail) - js.indent = detectJSONIndent(formatted) - js.payload = joseBase64UrlEncode(formatted) - - return js, nil -} - -// PrettySignature formats a JSON signature into an easy-to-read -// single JSON serialized object. 
-func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("no signatures") - } - payload, err := joseBase64UrlDecode(js.payload) - if err != nil { - return nil, err - } - payload = payload[:js.formatLength] - - sort.Sort(jsSignaturesSorted(js.signatures)) - - var marshalled []byte - var marshallErr error - if js.indent != "" { - marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) - } else { - marshalled, marshallErr = json.Marshal(js.signatures) - } - if marshallErr != nil { - return nil, marshallErr - } - - buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) - buf.Write(payload) - buf.WriteByte(',') - if js.indent != "" { - buf.WriteByte('\n') - buf.WriteString(js.indent) - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\": ") - buf.Write(marshalled) - buf.WriteByte('\n') - } else { - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\":") - buf.Write(marshalled) - } - buf.WriteByte('}') - - return buf.Bytes(), nil -} - -// Signatures provides the signatures on this JWS as opaque blobs, sorted by -// keyID. These blobs can be stored and reassembled with payloads. Internally, -// they are simply marshaled json web signatures but implementations should -// not rely on this. -func (js *JSONSignature) Signatures() ([][]byte, error) { - sort.Sort(jsSignaturesSorted(js.signatures)) - - var sb [][]byte - for _, jsig := range js.signatures { - p, err := json.Marshal(jsig) - if err != nil { - return nil, err - } - - sb = append(sb, p) - } - - return sb, nil -} - -// Merge combines the signatures from one or more other signatures into the -// method receiver. If the payloads differ for any argument, an error will be -// returned and the receiver will not be modified. -func (js *JSONSignature) Merge(others ...*JSONSignature) error { - merged := js.signatures - for _, other := range others { - if js.payload != other.payload { - return fmt.Errorf("payloads differ from merge target") - } - merged = append(merged, other.signatures...) 
- } - - js.signatures = merged - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go deleted file mode 100644 index b4f2697984..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go +++ /dev/null @@ -1,380 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto/rand" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "testing" - - "github.com/docker/libtrust/testutil" -) - -func createTestJSON(sigKey string, indent string) (map[string]interface{}, []byte) { - testMap := map[string]interface{}{ - "name": "dmcgowan/mycontainer", - "config": map[string]interface{}{ - "ports": []int{9101, 9102}, - "run": "/bin/echo \"Hello\"", - }, - "layers": []string{ - "2893c080-27f5-11e4-8c21-0800200c9a66", - "c54bc25b-fbb2-497b-a899-a8bc1b5b9d55", - "4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4", - "0b6da891-7f7f-4abf-9c97-7887549e696c", - "1d960389-ae4f-4011-85fd-18d0f96a67ad", - }, - } - formattedSection := `{"config":{"ports":[9101,9102],"run":"/bin/echo \"Hello\""},"layers":["2893c080-27f5-11e4-8c21-0800200c9a66","c54bc25b-fbb2-497b-a899-a8bc1b5b9d55","4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4","0b6da891-7f7f-4abf-9c97-7887549e696c","1d960389-ae4f-4011-85fd-18d0f96a67ad"],"name":"dmcgowan/mycontainer","%s":[{"header":{` - formattedSection = fmt.Sprintf(formattedSection, sigKey) - if indent != "" { - buf := bytes.NewBuffer(nil) - json.Indent(buf, []byte(formattedSection), "", indent) - return testMap, buf.Bytes() - } - return testMap, []byte(formattedSection) - -} - -func TestSignJSON(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, _ := createTestJSON("buildSignatures", " ") - indented, err := json.MarshalIndent(testMap, "", " ") - if err != nil { - t.Fatalf("Marshal error: %s", err) - } - - js, err := NewJSONSignature(indented) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing content: %s", err) - } - - keys, err := js.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } - -} - -func TestSignMap(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, _ := createTestJSON("buildSignatures", " ") - js, err := NewJSONSignatureFromMap(testMap) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing JSON signature: %s", err) - } - - keys, err := js.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } -} - -func TestFormattedJson(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, firstSection := createTestJSON("buildSignatures", " ") - indented, err := json.MarshalIndent(testMap, "", " ") - if err != nil { - t.Fatalf("Marshal error: %s", err) - } - - js, err := NewJSONSignature(indented) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) 
- if err != nil { - t.Fatalf("Error signing content: %s", err) - } - - b, err := js.PrettySignature("buildSignatures") - if err != nil { - t.Fatalf("Error signing map: %s", err) - } - - if !bytes.Equal(b[:len(firstSection)], firstSection) { - t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)]) - } - - parsed, err := ParsePrettySignature(b, "buildSignatures") - if err != nil { - t.Fatalf("Error parsing formatted signature: %s", err) - } - - keys, err := parsed.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } - - var unmarshalled map[string]interface{} - err = json.Unmarshal(b, &unmarshalled) - if err != nil { - t.Fatalf("Could not unmarshal after parse: %s", err) - } - -} - -func TestFormattedFlatJson(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, firstSection := createTestJSON("buildSignatures", "") - unindented, err := json.Marshal(testMap) - if err != nil { - t.Fatalf("Marshal error: %s", err) - } - - js, err := NewJSONSignature(unindented) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing JSON signature: %s", err) - } - - b, err := js.PrettySignature("buildSignatures") - if err != nil { - t.Fatalf("Error signing map: %s", err) - } - - if !bytes.Equal(b[:len(firstSection)], firstSection) { - t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)]) - } - - parsed, err := ParsePrettySignature(b, "buildSignatures") - if err != nil { - t.Fatalf("Error parsing formatted signature: %s", err) - } - - keys, err := parsed.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } -} - -func generateTrustChain(t *testing.T, key PrivateKey, ca *x509.Certificate) (PrivateKey, []*x509.Certificate) { - parent := ca - parentKey := key - chain := make([]*x509.Certificate, 6) - for i := 5; i > 0; i-- { - intermediatekey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) - if err != nil { - t.Fatalf("Error generating intermediate certificate: %s", err) - } - parent = chain[i] - parentKey = intermediatekey - } - trustKey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) - if err != nil { - t.Fatalf("Error generating trust cert: %s", err) - } - - return trustKey, chain -} - -func TestChainVerify(t *testing.T) { - caKey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) - if err != nil { - t.Fatalf("Error generating CA: %s", err) - } - trustKey, chain := generateTrustChain(t, caKey, ca) - - testMap, _ := createTestJSON("verifySignatures", " ") - js, err := NewJSONSignatureFromMap(testMap) - if err != nil { 
t.Fatalf("Error creating JSONSignature from map: %s", err) - } - - err = js.SignWithChain(trustKey, chain) - if err != nil { - t.Fatalf("Error signing with chain: %s", err) - } - - pool := x509.NewCertPool() - pool.AddCert(ca) - chains, err := js.VerifyChains(pool) - if err != nil { - t.Fatalf("Error verifying content: %s", err) - } - if len(chains) != 1 { - t.Fatalf("Unexpected chains length: %d", len(chains)) - } - if len(chains[0]) != 7 { - t.Fatalf("Unexpected chain length: %d", len(chains[0])) - } -} - -func TestInvalidChain(t *testing.T) { - caKey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) - if err != nil { - t.Fatalf("Error generating ca: %s", err) - } - trustKey, chain := generateTrustChain(t, caKey, ca) - - testMap, _ := createTestJSON("verifySignatures", " ") - js, err := NewJSONSignatureFromMap(testMap) - if err != nil { - t.Fatalf("Error creating JSONSignature from map: %s", err) - } - - err = js.SignWithChain(trustKey, chain[:5]) - if err != nil { - t.Fatalf("Error signing with chain: %s", err) - } - - pool := x509.NewCertPool() - pool.AddCert(ca) - chains, err := js.VerifyChains(pool) - if err == nil { - t.Fatalf("Expected error verifying with bad chain") - } - if len(chains) != 0 { - t.Fatalf("Unexpected chains returned from invalid verify") - } -} - -func TestMergeSignatures(t *testing.T) { - pk1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key 1: %v", err) - } - - pk2, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key 2: %v", err) - } - - payload := make([]byte, 1<<10) - if _, err = io.ReadFull(rand.Reader, payload); err != nil { - t.Fatalf("error generating payload: %v", err) - } - - payload, _ = json.Marshal(map[string]interface{}{"data": payload}) - - sig1, err := NewJSONSignature(payload) - if err != nil { - t.Fatalf("unexpected error creating signature 1: %v", err) - } - - if err := sig1.Sign(pk1); err != nil { - t.Fatalf("unexpected error signing with pk1: %v", err) - } - - sig2, err := NewJSONSignature(payload) - if err != nil { - t.Fatalf("unexpected error creating signature 2: %v", err) - } - - if err := sig2.Sign(pk2); err != nil { - t.Fatalf("unexpected error signing with pk2: %v", err) - } - - // Now, we actually merge into sig1 - if err := sig1.Merge(sig2); err != nil { - t.Fatalf("unexpected error merging: %v", err) - } - - // Verify the new signature package - pubkeys, err := sig1.Verify() - if err != nil { - t.Fatalf("unexpected error during verify: %v", err) - } - - // Make sure the pubkeys match the two private keys from before - privkeys := map[string]PrivateKey{ - pk1.KeyID(): pk1, - pk2.KeyID(): pk2, - } - - found := map[string]struct{}{} - - for _, pubkey := range pubkeys { - if _, ok := privkeys[pubkey.KeyID()]; !ok { - t.Fatalf("unexpected public key found during verification: %v", pubkey) - } - - found[pubkey.KeyID()] = struct{}{} - } - - // Make sure we've found all the private keys from verification - for keyid, _ := range privkeys { - if _, ok := found[keyid]; !ok { - t.Fatalf("public key %v not found during verification", keyid) - } - } - - // Create another signature, with a different payload, and ensure we get an error. 
- sig3, err := NewJSONSignature([]byte("{}")) - if err != nil { - t.Fatalf("unexpected error making signature for sig3: %v", err) - } - - if err := sig1.Merge(sig3); err == nil { - t.Fatalf("error expected during invalid merge with different payload") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key.go b/Godeps/_workspace/src/github.com/docker/libtrust/key.go deleted file mode 100644 index 73642db2a8..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/key.go +++ /dev/null @@ -1,253 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" -) - -// PublicKey is a generic interface for a Public Key. -type PublicKey interface { - // KeyType returns the key type for this key. For elliptic curve keys, - // this value should be "EC". For RSA keys, this value should be "RSA". - KeyType() string - // KeyID returns a distinct identifier which is unique to this Public Key. - // The format generated by this library is a base32 encoding of a 240 bit - // hash of the public key data divided into 12 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - KeyID() string - // Verify verifyies the signature of the data in the io.Reader using this - // Public Key. The alg parameter should identify the digital signature - // algorithm which was used to produce the signature and should be - // supported by this public key. Returns a nil error if the signature - // is valid. - Verify(data io.Reader, alg string, signature []byte) error - // CryptoPublicKey returns the internal object which can be used as a - // crypto.PublicKey for use with other standard library operations. The type - // is either *rsa.PublicKey or *ecdsa.PublicKey - CryptoPublicKey() crypto.PublicKey - // These public keys can be serialized to the standard JSON encoding for - // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web - // Algorithms. - MarshalJSON() ([]byte, error) - // These keys can also be serialized to the standard PEM encoding. - PEMBlock() (*pem.Block, error) - // The string representation of a key is its key type and ID. - String() string - AddExtendedField(string, interface{}) - GetExtendedField(string) interface{} -} - -// PrivateKey is a generic interface for a Private Key. -type PrivateKey interface { - // A PrivateKey contains all fields and methods of a PublicKey of the - // same type. The MarshalJSON method also outputs the private key as a - // JSON Web Key, and the PEMBlock method outputs the private key as a - // PEM block. - PublicKey - // PublicKey returns the PublicKey associated with this PrivateKey. - PublicKey() PublicKey - // Sign signs the data read from the io.Reader using a signature algorithm - // supported by the private key. If the specified hashing algorithm is - // supported by this key, that hash function is used to generate the - // signature otherwise the the default hashing algorithm for this key is - // used. Returns the signature and identifier of the algorithm used. - Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) - // CryptoPrivateKey returns the internal object which can be used as a - // crypto.PublicKey for use with other standard library operations. The - // type is either *rsa.PublicKey or *ecdsa.PublicKey - CryptoPrivateKey() crypto.PrivateKey -} - -// FromCryptoPublicKey returns a libtrust PublicKey representation of the given -// *ecdsa.PublicKey or *rsa.PublicKey. 
Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) { - switch cryptoPublicKey := cryptoPublicKey.(type) { - case *ecdsa.PublicKey: - return fromECPublicKey(cryptoPublicKey) - case *rsa.PublicKey: - return fromRSAPublicKey(cryptoPublicKey), nil - default: - return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey) - } -} - -// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given -// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) { - switch cryptoPrivateKey := cryptoPrivateKey.(type) { - case *ecdsa.PrivateKey: - return fromECPrivateKey(cryptoPrivateKey) - case *rsa.PrivateKey: - return fromRSAPrivateKey(cryptoPrivateKey), nil - default: - return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey) - } -} - -// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust -// PublicKey or an error if there is a problem with the encoding. -func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - return pubKeyFromPEMBlock(pemBlock) -} - -// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of -// PEM blocks appended one after the other and returns a slice of PublicKey -// objects that it finds. -func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { - pubKeys := []PublicKey{} - - for { - var pemBlock *pem.Block - pemBlock, data = pem.Decode(data) - if pemBlock == nil { - break - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - pubKey, err := pubKeyFromPEMBlock(pemBlock) - if err != nil { - return nil, err - } - - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust -// PrivateKey or an error if there is a problem with the encoding. -func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } - - var key PrivateKey - - switch { - case pemBlock.Type == "RSA PRIVATE KEY": - rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) - } - key = fromRSAPrivateKey(rsaPrivateKey) - case pemBlock.Type == "EC PRIVATE KEY": - ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) - } - key, err = fromECPrivateKey(ecPrivateKey) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) - } - - addPEMHeadersToKey(pemBlock, key.PublicKey()) - - return key, nil -} - -// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic -// Public Key to be used with libtrust. 
-func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Public Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Public Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC public key. - return ecPublicKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA public key. - return rsaPublicKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Public Key type not supported: %q\n", kty, - ) - } -} - -// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set -// and returns a slice of Public Key objects. -func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { - rawKeys, err := loadJSONKeySetRaw(data) - if err != nil { - return nil, err - } - - pubKeys := make([]PublicKey, 0, len(rawKeys)) - - for _, rawKey := range rawKeys { - pubKey, err := UnmarshalPublicKeyJWK(rawKey) - if err != nil { - return nil, err - } - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic -// Private Key to be used with libtrust. -func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Private Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Private Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC private key. - return ecPrivateKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA private key. - return rsaPrivateKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Private Key type not supported: %q\n", kty, - ) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go deleted file mode 100644 index c526de5455..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go +++ /dev/null @@ -1,255 +0,0 @@ -package libtrust - -import ( - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "os" - "strings" -) - -var ( - // ErrKeyFileDoesNotExist indicates that the private key file does not exist. - ErrKeyFileDoesNotExist = errors.New("key file does not exist") -) - -func readKeyFileBytes(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if os.IsNotExist(err) { - err = ErrKeyFileDoesNotExist - } else { - err = fmt.Errorf("unable to read key file %s: %s", filename, err) - } - - return nil, err - } - - return data, nil -} - -/* - Loading and Saving of Public and Private Keys in either PEM or JWK format. -*/ - -// LoadKeyFile opens the given filename and attempts to read a Private Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). 
-func LoadKeyFile(filename string) (PrivateKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PrivateKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPrivateKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key JWK: %s", err) - } - } else { - key, err = UnmarshalPrivateKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key PEM: %s", err) - } - } - - return key, nil -} - -// LoadPublicKeyFile opens the given filename and attempts to read a Public Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). -func LoadPublicKeyFile(filename string) (PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PublicKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPublicKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key JWK: %s", err) - } - } else { - key, err = UnmarshalPublicKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key PEM: %s", err) - } - } - - return key, nil -} - -// SaveKey saves the given key to a file using the provided filename. -// This process will overwrite any existing file at the provided location. -func SaveKey(filename string, key PrivateKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode private key JWK: %s", err) - } - } else { - // Encode in PEM format. - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode private key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) - if err != nil { - return fmt.Errorf("unable to write private key file %s: %s", filename, err) - } - - return nil -} - -// SavePublicKey saves the given public key to the file. -func SavePublicKey(filename string, key PublicKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode public key JWK: %s", err) - } - } else { - // Encode in PEM format. - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode public key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write public key file %s: %s", filename, err) - } - - return nil -} - -// Public Key Set files - -type jwkSet struct { - Keys []json.RawMessage `json:"keys"` -} - -// LoadKeySetFile loads a key set -func LoadKeySetFile(filename string) ([]PublicKey, error) { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return loadJSONKeySetFile(filename) - } - - // Must be a PEM format file - return loadPEMKeySetFile(filename) -} - -func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { - if len(data) == 0 { - // This is okay, just return an empty slice. 
- return []json.RawMessage{}, nil - } - - keySet := jwkSet{} - - err := json.Unmarshal(data, &keySet) - if err != nil { - return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) - } - - return keySet.Keys, nil -} - -func loadJSONKeySetFile(filename string) ([]PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyJWKSet(contents) -} - -func loadPEMKeySetFile(filename string) ([]PublicKey, error) { - data, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyPEMBundle(data) -} - -// AddKeySetFile adds a key to a key set -func AddKeySetFile(filename string, key PublicKey) error { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return addKeySetJSONFile(filename, key) - } - - // Must be a PEM format file - return addKeySetPEMFile(filename, key) -} - -func addKeySetJSONFile(filename string, key PublicKey) error { - encodedKey, err := json.Marshal(key) - if err != nil { - return fmt.Errorf("unable to encode trusted client key: %s", err) - } - - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return err - } - - rawEntries, err := loadJSONKeySetRaw(contents) - if err != nil { - return err - } - - rawEntries = append(rawEntries, json.RawMessage(encodedKey)) - entriesWrapper := jwkSet{Keys: rawEntries} - - encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ") - if err != nil { - return fmt.Errorf("unable to encode trusted client keys: %s", err) - } - - err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) - } - - return nil -} - -func addKeySetPEMFile(filename string, key PublicKey) error { - // Encode to PEM, open file for appending, write PEM. 
- file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err) - } - defer file.Close() - - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode trusted key: %s", err) - } - - _, err = file.Write(pem.EncodeToMemory(pemBlock)) - if err != nil { - return fmt.Errorf("unable to write trusted keys file: %s", err) - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go deleted file mode 100644 index 57e691f2ed..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package libtrust - -import ( - "errors" - "io/ioutil" - "os" - "testing" -) - -func makeTempFile(t *testing.T, prefix string) (filename string) { - file, err := ioutil.TempFile("", prefix) - if err != nil { - t.Fatal(err) - } - - filename = file.Name() - file.Close() - - return -} - -func TestKeyFiles(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - testKeyFiles(t, key) - - key, err = GenerateRSA2048PrivateKey() - if err != nil { - t.Fatal(err) - } - - testKeyFiles(t, key) -} - -func testKeyFiles(t *testing.T, key PrivateKey) { - var err error - - privateKeyFilename := makeTempFile(t, "private_key") - privateKeyFilenamePEM := privateKeyFilename + ".pem" - privateKeyFilenameJWK := privateKeyFilename + ".jwk" - - publicKeyFilename := makeTempFile(t, "public_key") - publicKeyFilenamePEM := publicKeyFilename + ".pem" - publicKeyFilenameJWK := publicKeyFilename + ".jwk" - - if err = SaveKey(privateKeyFilenamePEM, key); err != nil { - t.Fatal(err) - } - - if err = SaveKey(privateKeyFilenameJWK, key); err != nil { - t.Fatal(err) - } - - if err = SavePublicKey(publicKeyFilenamePEM, key.PublicKey()); err != nil { - t.Fatal(err) - } - - if err = SavePublicKey(publicKeyFilenameJWK, key.PublicKey()); err != nil { - t.Fatal(err) - } - - loadedPEMKey, err := LoadKeyFile(privateKeyFilenamePEM) - if err != nil { - t.Fatal(err) - } - - loadedJWKKey, err := LoadKeyFile(privateKeyFilenameJWK) - if err != nil { - t.Fatal(err) - } - - loadedPEMPublicKey, err := LoadPublicKeyFile(publicKeyFilenamePEM) - if err != nil { - t.Fatal(err) - } - - loadedJWKPublicKey, err := LoadPublicKeyFile(publicKeyFilenameJWK) - if err != nil { - t.Fatal(err) - } - - if key.KeyID() != loadedPEMKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - if key.KeyID() != loadedJWKKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - if key.KeyID() != loadedPEMPublicKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - if key.KeyID() != loadedJWKPublicKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - os.Remove(privateKeyFilename) - os.Remove(privateKeyFilenamePEM) - os.Remove(privateKeyFilenameJWK) - os.Remove(publicKeyFilename) - os.Remove(publicKeyFilenamePEM) - os.Remove(publicKeyFilenameJWK) -} - -func TestTrustedHostKeysFile(t *testing.T) { - trustedHostKeysFilename := makeTempFile(t, "trusted_host_keys") - trustedHostKeysFilenamePEM := trustedHostKeysFilename + ".pem" - trustedHostKeysFilenameJWK := trustedHostKeysFilename + ".json" - - testTrustedHostKeysFile(t, trustedHostKeysFilenamePEM) - testTrustedHostKeysFile(t, trustedHostKeysFilenameJWK) - - os.Remove(trustedHostKeysFilename) - os.Remove(trustedHostKeysFilenamePEM) - 
os.Remove(trustedHostKeysFilenameJWK) -} - -func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) { - hostAddress1 := "docker.example.com:2376" - hostKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - hostKey1.AddExtendedField("hosts", []string{hostAddress1}) - err = AddKeySetFile(trustedHostKeysFilename, hostKey1.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedHostKeysMapping, err := LoadKeySetFile(trustedHostKeysFilename) - if err != nil { - t.Fatal(err) - } - - for i, hostKey := range trustedHostKeysMapping { - t.Logf("Host Key Index: %d\n", i) - t.Logf("Host Key: %s\n\n", hostKey) - } - - hostAddress2 := "192.168.59.103:2376" - hostKey2, err := GenerateRSA2048PrivateKey() - if err != nil { - t.Fatal(err) - } - - hostKey2.AddExtendedField("hosts", hostAddress2) - err = AddKeySetFile(trustedHostKeysFilename, hostKey2.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedHostKeysMapping, err = LoadKeySetFile(trustedHostKeysFilename) - if err != nil { - t.Fatal(err) - } - - for i, hostKey := range trustedHostKeysMapping { - t.Logf("Host Key Index: %d\n", i) - t.Logf("Host Key: %s\n\n", hostKey) - } - -} - -func TestTrustedClientKeysFile(t *testing.T) { - trustedClientKeysFilename := makeTempFile(t, "trusted_client_keys") - trustedClientKeysFilenamePEM := trustedClientKeysFilename + ".pem" - trustedClientKeysFilenameJWK := trustedClientKeysFilename + ".json" - - testTrustedClientKeysFile(t, trustedClientKeysFilenamePEM) - testTrustedClientKeysFile(t, trustedClientKeysFilenameJWK) - - os.Remove(trustedClientKeysFilename) - os.Remove(trustedClientKeysFilenamePEM) - os.Remove(trustedClientKeysFilenameJWK) -} - -func testTrustedClientKeysFile(t *testing.T, trustedClientKeysFilename string) { - clientKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - err = AddKeySetFile(trustedClientKeysFilename, clientKey1.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedClientKeys, err := LoadKeySetFile(trustedClientKeysFilename) - if err != nil { - t.Fatal(err) - } - - for _, clientKey := range trustedClientKeys { - t.Logf("Client Key: %s\n", clientKey) - } - - clientKey2, err := GenerateRSA2048PrivateKey() - if err != nil { - t.Fatal(err) - } - - err = AddKeySetFile(trustedClientKeysFilename, clientKey2.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedClientKeys, err = LoadKeySetFile(trustedClientKeysFilename) - if err != nil { - t.Fatal(err) - } - - for _, clientKey := range trustedClientKeys { - t.Logf("Client Key: %s\n", clientKey) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go deleted file mode 100644 index 9a98ae3574..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "os" - "path" - "sync" -) - -// ClientKeyManager manages client keys on the filesystem -type ClientKeyManager struct { - key PrivateKey - clientFile string - clientDir string - - clientLock sync.RWMutex - clients []PublicKey - - configLock sync.Mutex - configs []*tls.Config -} - -// NewClientKeyManager loads a new manager from a set of key files, -// managed by the given private key. 
-func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { - m := &ClientKeyManager{ - key: trustKey, - clientFile: clientFile, - clientDir: clientDir, - } - if err := m.loadKeys(); err != nil { - return nil, err - } - // TODO Start watching file and directory - - return m, nil -} - -func (c *ClientKeyManager) loadKeys() (err error) { - // Load authorized keys file - var clients []PublicKey - if c.clientFile != "" { - clients, err = LoadKeySetFile(c.clientFile) - if err != nil { - return fmt.Errorf("unable to load authorized keys: %s", err) - } - } - - // Add clients from authorized keys directory - files, err := ioutil.ReadDir(c.clientDir) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("unable to open authorized keys directory: %s", err) - } - for _, f := range files { - if !f.IsDir() { - publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) - if err != nil { - return fmt.Errorf("unable to load authorized key file: %s", err) - } - clients = append(clients, publicKey) - } - } - - c.clientLock.Lock() - c.clients = clients - c.clientLock.Unlock() - - return nil -} - -// RegisterTLSConfig registers a tls configuration with the manager -// such that any changes to the keys may be reflected in -// the tls client CA pool -func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { - c.clientLock.RLock() - certPool, err := GenerateCACertPool(c.key, c.clients) - if err != nil { - return fmt.Errorf("CA pool generation error: %s", err) - } - c.clientLock.RUnlock() - - tlsConfig.ClientCAs = certPool - - c.configLock.Lock() - c.configs = append(c.configs, tlsConfig) - c.configLock.Unlock() - - return nil -} - -// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for -// libtrust identity authentication for the domain specified -func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - if err := clients.RegisterTLSConfig(tlsConfig); err != nil { - return nil, err - } - - // Generate cert - ips, domains, err := parseAddr(addr) - if err != nil { - return nil, err - } - // add domain that it expects clients to use - domains = append(domains, domain) - x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - return tlsConfig, nil -} - -// NewCertAuthTLSConfig creates a tls.Config for the server to use for -// certificate authentication -func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - cert, err := tls.LoadX509KeyPair(certPath, keyPath) - if err != nil { - return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - - // Verify client certificates against a CA? 
- if caPath != "" { - certPool := x509.NewCertPool() - file, err := ioutil.ReadFile(caPath) - if err != nil { - return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) - } - certPool.AppendCertsFromPEM(file) - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = certPool - } - - return tlsConfig, nil -} - -func newTLSConfig() *tls.Config { - return &tls.Config{ - NextProtos: []string{"http/1.1"}, - // Avoid fallback on insecure SSL protocols - MinVersion: tls.VersionTLS10, - } -} - -// parseAddr parses an address into an array of IPs and domains -func parseAddr(addr string) ([]net.IP, []string, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, nil, err - } - var domains []string - var ips []net.IP - ip := net.ParseIP(host) - if ip != nil { - ips = []net.IP{ip} - } else { - domains = []string{host} - } - return ips, domains, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go deleted file mode 100644 index f6c59cc42b..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package libtrust - -import ( - "testing" -) - -type generateFunc func() (PrivateKey, error) - -func runGenerateBench(b *testing.B, f generateFunc, name string) { - for i := 0; i < b.N; i++ { - _, err := f() - if err != nil { - b.Fatalf("Error generating %s: %s", name, err) - } - } -} - -func runFingerprintBench(b *testing.B, f generateFunc, name string) { - b.StopTimer() - // Don't count this relatively slow generation call. - key, err := f() - if err != nil { - b.Fatalf("Error generating %s: %s", name, err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - if key.KeyID() == "" { - b.Fatalf("Error generating key ID for %s", name) - } - } -} - -func BenchmarkECP256Generate(b *testing.B) { - runGenerateBench(b, GenerateECP256PrivateKey, "P256") -} - -func BenchmarkECP384Generate(b *testing.B) { - runGenerateBench(b, GenerateECP384PrivateKey, "P384") -} - -func BenchmarkECP521Generate(b *testing.B) { - runGenerateBench(b, GenerateECP521PrivateKey, "P521") -} - -func BenchmarkRSA2048Generate(b *testing.B) { - runGenerateBench(b, GenerateRSA2048PrivateKey, "RSA2048") -} - -func BenchmarkRSA3072Generate(b *testing.B) { - runGenerateBench(b, GenerateRSA3072PrivateKey, "RSA3072") -} - -func BenchmarkRSA4096Generate(b *testing.B) { - runGenerateBench(b, GenerateRSA4096PrivateKey, "RSA4096") -} - -func BenchmarkECP256Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateECP256PrivateKey, "P256") -} - -func BenchmarkECP384Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateECP384PrivateKey, "P384") -} - -func BenchmarkECP521Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateECP521PrivateKey, "P521") -} - -func BenchmarkRSA2048Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateRSA2048PrivateKey, "RSA2048") -} - -func BenchmarkRSA3072Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateRSA3072PrivateKey, "RSA3072") -} - -func BenchmarkRSA4096Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateRSA4096PrivateKey, "RSA4096") -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go deleted file mode 100644 index dac4cacf20..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go +++ /dev/null @@ -1,427 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/rand" - 
"crypto/rsa" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" -) - -/* - * RSA DSA PUBLIC KEY - */ - -// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. -type rsaPublicKey struct { - *rsa.PublicKey - extended map[string]interface{} -} - -func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { - return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} -} - -// KeyType returns the JWK key type for RSA keys, i.e., "RSA". -func (k *rsaPublicKey) KeyType() string { - return "RSA" -} - -// KeyID returns a distinct identifier which is unique to this Public Key. -func (k *rsaPublicKey) KeyID() string { - return keyIDFromCryptoKey(k) -} - -func (k *rsaPublicKey) String() string { - return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) -} - -// Verify verifyies the signature of the data in the io.Reader using this Public Key. -// The alg parameter should be the name of the JWA digital signature algorithm -// which was used to produce the signature and should be supported by this -// public key. Returns a nil error if the signature is valid. -func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { - // Verify the signature of the given date, return non-nil error if valid. - sigAlg, err := rsaSignatureAlgorithmByName(alg) - if err != nil { - return fmt.Errorf("unable to verify Signature: %s", err) - } - - hasher := sigAlg.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) - if err != nil { - return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) - } - - return nil -} - -// CryptoPublicKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return k.PublicKey -} - -func (k *rsaPublicKey) toMap() map[string]interface{} { - jwk := make(map[string]interface{}) - for k, v := range k.extended { - jwk[k] = v - } - jwk["kty"] = k.KeyType() - jwk["kid"] = k.KeyID() - jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) - jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) - - return jwk -} - -// MarshalJSON serializes this Public Key using the JWK JSON serialization format for -// RSA keys. -func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Public Key to DER-encoded PKIX format. -func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) - } - k.extended["kid"] = k.KeyID() // For display purposes. - return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *rsaPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { - // JWK key type (kty) has already been determined to be "RSA". - // Need to extract 'n', 'e', and 'kid' and check for - // consistency. - - // Get the modulus parameter N. 
-	nB64Url, err := stringFromMap(jwk, "n")
-	if err != nil {
-		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
-	}
-
-	n, err := parseRSAModulusParam(nB64Url)
-	if err != nil {
-		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
-	}
-
-	// Get the public exponent E.
-	eB64Url, err := stringFromMap(jwk, "e")
-	if err != nil {
-		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
-	}
-
-	e, err := parseRSAPublicExponentParam(eB64Url)
-	if err != nil {
-		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
-	}
-
-	key := &rsaPublicKey{
-		PublicKey: &rsa.PublicKey{N: n, E: e},
-	}
-
-	// Key ID is optional, but if it exists, it should match the key.
-	_, ok := jwk["kid"]
-	if ok {
-		kid, err := stringFromMap(jwk, "kid")
-		if err != nil {
-			return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
-		}
-		if kid != key.KeyID() {
-			return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
-		}
-	}
-
-	if _, ok := jwk["d"]; ok {
-		return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
-	}
-
-	key.extended = jwk
-
-	return key, nil
-}
-
-/*
- * RSA PRIVATE KEY
- */
-
-// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
-type rsaPrivateKey struct {
-	rsaPublicKey
-	*rsa.PrivateKey
-}
-
-func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
-	return &rsaPrivateKey{
-		*fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
-		cryptoPrivateKey,
-	}
-}
-
-// PublicKey returns the Public Key data associated with this Private Key.
-func (k *rsaPrivateKey) PublicKey() PublicKey {
-	return &k.rsaPublicKey
-}
-
-func (k *rsaPrivateKey) String() string {
-	return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
-}
-
-// Sign signs the data read from the io.Reader using a signature algorithm supported
-// by the RSA private key. If the specified hashing algorithm is supported by
-// this key, that hash function is used to generate the signature; otherwise,
-// the default hashing algorithm for this key is used. Returns the signature
-// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
-// "RS512".
-func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
-	// Generate a signature of the data using the internal alg.
-	sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
-	hasher := sigAlg.HashID().New()
-
-	_, err = io.Copy(hasher, data)
-	if err != nil {
-		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
-	}
-	hash := hasher.Sum(nil)
-
-	signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
-	if err != nil {
-		return nil, "", fmt.Errorf("error producing signature: %s", err)
-	}
-
-	alg = sigAlg.HeaderParam()
-
-	return
-}
-
-// CryptoPrivateKey returns the internal object which can be used as a
-// crypto.PrivateKey for use with other standard library operations. The type
-// is either *rsa.PrivateKey or *ecdsa.PrivateKey
-func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
-	return k.PrivateKey
-}
-
-func (k *rsaPrivateKey) toMap() map[string]interface{} {
-	k.Precompute() // Make sure the precomputed values are stored.
-	jwk := k.rsaPublicKey.toMap()
-
-	jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
-	jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
-	jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
-	jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
-	jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
-	jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
-
-	otherPrimes := k.Primes[2:]
-
-	if len(otherPrimes) > 0 {
-		otherPrimesInfo := make([]interface{}, len(otherPrimes))
-		for i, r := range otherPrimes {
-			otherPrimeInfo := make(map[string]string, 3)
-			otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
-			crtVal := k.Precomputed.CRTValues[i]
-			otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
-			otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
-			otherPrimesInfo[i] = otherPrimeInfo
-		}
-		jwk["oth"] = otherPrimesInfo
-	}
-
-	return jwk
-}
-
-// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
-// RSA keys.
-func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
-	return json.Marshal(k.toMap())
-}
-
-// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format.
-func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
-	derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
-	k.extended["keyID"] = k.KeyID() // For display purposes.
-	return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
-}
-
-func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
-	// The JWA spec for RSA Private Keys (draft RFC, section 5.3.2) states that
-	// only the private key exponent 'd' is REQUIRED, the others are just for
-	// signature/decryption optimizations and SHOULD be included when the JWK
-	// is produced. We MAY choose to accept a JWK which only includes 'd', but
-	// we choose not to accept it without the extra fields. Only the 'oth'
-	// field will be optional (for multi-prime keys).
-	privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
-	if err != nil {
-		return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
-	}
-	firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
-	if err != nil {
-		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
-	}
-	secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
-	if err != nil {
-		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
-	}
-	firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
-	if err != nil {
-		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
-	}
-	secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
-	if err != nil {
-		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
-	}
-	crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
-	if err != nil {
-		return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
-	}
-
-	var oth interface{}
-	if _, ok := jwk["oth"]; ok {
-		oth = jwk["oth"]
-		delete(jwk, "oth")
-	}
-
-	// JWK key type (kty) has already been determined to be "RSA".
-	// Need to extract the public key information, then extract the private
-	// key values.
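-	// (rsa.PrivateKey embeds rsa.PublicKey, so the public members are
-	// parsed first and reused when assembling the private key below.)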
-	publicKey, err := rsaPublicKeyFromMap(jwk)
-	if err != nil {
-		return nil, err
-	}
-
-	privateKey := &rsa.PrivateKey{
-		PublicKey: *publicKey.PublicKey,
-		D:         privateExponent,
-		Primes:    []*big.Int{firstPrimeFactor, secondPrimeFactor},
-		Precomputed: rsa.PrecomputedValues{
-			Dp:   firstFactorCRT,
-			Dq:   secondFactorCRT,
-			Qinv: crtCoeff,
-		},
-	}
-
-	if oth != nil {
-		// Should be an array of more JSON objects.
-		otherPrimesInfo, ok := oth.([]interface{})
-		if !ok {
-			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
-		}
-		numOtherPrimeFactors := len(otherPrimesInfo)
-		if numOtherPrimeFactors == 0 {
-			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty")
-		}
-		otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
-		productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
-		crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
-
-		for i, val := range otherPrimesInfo {
-			otherPrimeinfo, ok := val.(map[string]interface{})
-			if !ok {
-				return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
-			}
-
-			otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
-			if err != nil {
-				return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
-			}
-			otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
-			if err != nil {
-				return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
-			}
-			otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
-			if err != nil {
-				return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
-			}
-
-			// Take a pointer into the slice so the values are actually
-			// stored (a plain copy would be discarded).
-			crtValue := &crtValues[i]
-			crtValue.Exp = otherFactorCRT
-			crtValue.Coeff = otherCrtCoeff
-			crtValue.R = productOfPrimes
-			otherPrimeFactors[i] = otherPrimeFactor
-			productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
-		}
-
-		privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
-		privateKey.Precomputed.CRTValues = crtValues
-	}
-
-	key := &rsaPrivateKey{
-		rsaPublicKey: *publicKey,
-		PrivateKey:   privateKey,
-	}
-
-	return key, nil
-}
-
-/*
- * Key Generation Functions.
- */
-
-func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
-	k = new(rsaPrivateKey)
-	k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
-	if err != nil {
-		return nil, err
-	}
-
-	k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
-	k.extended = make(map[string]interface{})
-
-	return
-}
-
-// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
-func GenerateRSA2048PrivateKey() (PrivateKey, error) {
-	k, err := generateRSAPrivateKey(2048)
-	if err != nil {
-		return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
-	}
-
-	return k, nil
-}
-
-// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
-func GenerateRSA3072PrivateKey() (PrivateKey, error) {
-	k, err := generateRSAPrivateKey(3072)
-	if err != nil {
-		return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
-	}
-
-	return k, nil
-}
-
-// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
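-// (Generation cost grows steeply with key size; the benchmarks in
-// key_test.go measure both generation and fingerprinting for each size.)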
-func GenerateRSA4096PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(4096) - if err != nil { - return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) - } - - return k, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go deleted file mode 100644 index 5ec7707aa6..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package libtrust - -import ( - "bytes" - "encoding/json" - "log" - "testing" -) - -var rsaKeys []PrivateKey - -func init() { - var err error - rsaKeys, err = generateRSATestKeys() - if err != nil { - log.Fatal(err) - } -} - -func generateRSATestKeys() (keys []PrivateKey, err error) { - log.Println("Generating RSA 2048-bit Test Key") - rsa2048Key, err := GenerateRSA2048PrivateKey() - if err != nil { - return - } - - log.Println("Generating RSA 3072-bit Test Key") - rsa3072Key, err := GenerateRSA3072PrivateKey() - if err != nil { - return - } - - log.Println("Generating RSA 4096-bit Test Key") - rsa4096Key, err := GenerateRSA4096PrivateKey() - if err != nil { - return - } - - log.Println("Done generating RSA Test Keys!") - keys = []PrivateKey{rsa2048Key, rsa3072Key, rsa4096Key} - - return -} - -func TestRSAKeys(t *testing.T) { - for _, rsaKey := range rsaKeys { - if rsaKey.KeyType() != "RSA" { - t.Fatalf("key type must be %q, instead got %q", "RSA", rsaKey.KeyType()) - } - } -} - -func TestRSASignVerify(t *testing.T) { - message := "Hello, World!" - data := bytes.NewReader([]byte(message)) - - sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} - - for i, rsaKey := range rsaKeys { - sigAlg := sigAlgs[i] - - t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, rsaKey.KeyID()) - - data.Seek(0, 0) // Reset the byte reader - - // Sign - sig, alg, err := rsaKey.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - - // Verify - err = rsaKey.Verify(data, alg, sig) - if err != nil { - t.Fatal(err) - } - } -} - -func TestMarshalUnmarshalRSAKeys(t *testing.T) { - data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test.")) - sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} - - for i, rsaKey := range rsaKeys { - sigAlg := sigAlgs[i] - privateJWKJSON, err := json.MarshalIndent(rsaKey, "", " ") - if err != nil { - t.Fatal(err) - } - - publicJWKJSON, err := json.MarshalIndent(rsaKey.PublicKey(), "", " ") - if err != nil { - t.Fatal(err) - } - - t.Logf("JWK Private Key: %s", string(privateJWKJSON)) - t.Logf("JWK Public Key: %s", string(publicJWKJSON)) - - privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) - if err != nil { - t.Fatal(err) - } - - pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) - if err != nil { - t.Fatal(err) - } - - // Ensure we can sign/verify a message with the unmarshalled keys. - data.Seek(0, 0) // Reset the byte reader - signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - err = pubKey2.Verify(data, alg, signature) - if err != nil { - t.Fatal(err) - } - - // It's a good idea to validate the Private Key to make sure our - // (un)marshal process didn't corrupt the extra parameters. 
-		k := privKey2.(*rsaPrivateKey)
-		err = k.PrivateKey.Validate()
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-}
-
-func TestFromCryptoRSAKeys(t *testing.T) {
-	for _, rsaKey := range rsaKeys {
-		cryptoPrivateKey := rsaKey.CryptoPrivateKey()
-		cryptoPublicKey := rsaKey.CryptoPublicKey()
-
-		pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if pubKey.KeyID() != rsaKey.KeyID() {
-			t.Fatal("public key key ID mismatch")
-		}
-
-		privKey, err := FromCryptoPrivateKey(cryptoPrivateKey)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if privKey.KeyID() != rsaKey.KeyID() {
-			t.Fatal("private key key ID mismatch")
-		}
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go b/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go
deleted file mode 100644
index 89debf6b64..0000000000
--- a/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package testutil
-
-import (
-	"crypto"
-	"crypto/rand"
-	"crypto/x509"
-	"crypto/x509/pkix"
-	"math/big"
-	"time"
-)
-
-// GenerateTrustCA generates a new certificate authority for testing.
-func GenerateTrustCA(pub crypto.PublicKey, priv crypto.PrivateKey) (*x509.Certificate, error) {
-	cert := &x509.Certificate{
-		SerialNumber: big.NewInt(0),
-		Subject: pkix.Name{
-			CommonName: "CA Root",
-		},
-		NotBefore:             time.Now().Add(-time.Second),
-		NotAfter:              time.Now().Add(time.Hour),
-		IsCA:                  true,
-		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
-		BasicConstraintsValid: true,
-	}
-
-	certDER, err := x509.CreateCertificate(rand.Reader, cert, cert, pub, priv)
-	if err != nil {
-		return nil, err
-	}
-
-	cert, err = x509.ParseCertificate(certDER)
-	if err != nil {
-		return nil, err
-	}
-
-	return cert, nil
-}
-
-// GenerateIntermediate generates an intermediate certificate for testing using
-// the parent certificate (likely a CA) and the provided keys.
-func GenerateIntermediate(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) {
-	cert := &x509.Certificate{
-		SerialNumber: big.NewInt(0),
-		Subject: pkix.Name{
-			CommonName: "Intermediate",
-		},
-		NotBefore:             time.Now().Add(-time.Second),
-		NotAfter:              time.Now().Add(time.Hour),
-		IsCA:                  true,
-		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
-		BasicConstraintsValid: true,
-	}
-
-	certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey)
-	if err != nil {
-		return nil, err
-	}
-
-	cert, err = x509.ParseCertificate(certDER)
-	if err != nil {
-		return nil, err
-	}
-
-	return cert, nil
-}
-
-// GenerateTrustCert generates a new trust certificate for testing. Unlike the
-// intermediate certificates, this certificate should be used for signature
-// only, not creating certificates.
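-// (Hence the KeyUsage below is x509.KeyUsageDigitalSignature, rather than
-// the KeyUsageCertSign | KeyUsageCRLSign used for the CA and intermediates.)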
-func GenerateTrustCert(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) {
-	cert := &x509.Certificate{
-		SerialNumber: big.NewInt(0),
-		Subject: pkix.Name{
-			CommonName: "Trust Cert",
-		},
-		NotBefore:             time.Now().Add(-time.Second),
-		NotAfter:              time.Now().Add(time.Hour),
-		IsCA:                  true,
-		KeyUsage:              x509.KeyUsageDigitalSignature,
-		BasicConstraintsValid: true,
-	}
-
-	certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey)
-	if err != nil {
-		return nil, err
-	}
-
-	cert, err = x509.ParseCertificate(certDER)
-	if err != nil {
-		return nil, err
-	}
-
-	return cert, nil
-}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md
deleted file mode 100644
index 24124db216..0000000000
--- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-## Libtrust TLS Config Demo
-
-This program generates key pairs and trust files for a TLS client and server.
-
-To generate the keys, run:
-
-```
-$ go run genkeys.go
-```
-
-The generated files are:
-
-```
-$ ls -l client_data/ server_data/
-client_data/:
-total 24
--rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json
--rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json
--rw-r--r-- 1 jlhawn staff 275 Aug 8 16:21 trusted_hosts.json
-
-server_data/:
-total 24
--rw-r--r-- 1 jlhawn staff 348 Aug 8 16:21 trusted_clients.json
--rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json
--rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json
-```
-
-The private key and public key for the client and server are stored in `private_key.json` and `public_key.json` in their respective directories. They are represented as JSON Web Keys: JSON objects which represent either an ECDSA or RSA private key. The host keys trusted by the client are stored in `trusted_hosts.json` and contain a mapping of an internet address, `<host>:<port>`, to a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted server. The client keys trusted by the server are stored in `trusted_clients.json`, an array of JSON objects each containing a comment field which can be used to describe the key and a JSON Web Key representing either an ECDSA or RSA public key of the trusted client.
-
-To start the server, run:
-
-```
-$ go run server.go
-```
-
-This starts an HTTPS server which listens on `localhost:8888`. The server configures itself with a certificate which is valid for both `localhost` and `127.0.0.1` and uses the key from `server_data/private_key.json`. It accepts connections from clients which present a certificate for a key that it is configured to trust from the `trusted_clients.json` file and returns a simple 'hello' message.
-
-To make a request using the client, run:
-
-```
-$ go run client.go
-```
-
-This command creates an HTTPS client which makes a GET request to `https://localhost:8888`. The client configures itself with a certificate using the key from `client_data/private_key.json`. It only connects to a server which presents a certificate signed by the key specified for the `localhost:8888` address from `client_data/trusted_hosts.json` and made to be used for the `localhost` hostname. If the connection succeeds, it prints the response from the server.
-
-The file `gencert.go` can be used to generate PEM-encoded versions of the client key and certificate.
If you save them to `key.pem` and `cert.pem` respectively, you can use them with `curl` to test out the server (if it is still running). - -``` -curl --cert cert.pem --key key.pem -k https://localhost:8888 -``` diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go deleted file mode 100644 index 0a699a0ee2..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go +++ /dev/null @@ -1,89 +0,0 @@ -package main - -import ( - "crypto/tls" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - - "github.com/docker/libtrust" -) - -var ( - serverAddress = "localhost:8888" - privateKeyFilename = "client_data/private_key.pem" - trustedHostsFilename = "client_data/trusted_hosts.pem" -) - -func main() { - // Load Client Key. - clientKey, err := libtrust.LoadKeyFile(privateKeyFilename) - if err != nil { - log.Fatal(err) - } - - // Generate Client Certificate. - selfSignedClientCert, err := libtrust.GenerateSelfSignedClientCert(clientKey) - if err != nil { - log.Fatal(err) - } - - // Load trusted host keys. - hostKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) - if err != nil { - log.Fatal(err) - } - - // Ensure the host we want to connect to is trusted! - host, _, err := net.SplitHostPort(serverAddress) - if err != nil { - log.Fatal(err) - } - serverKeys, err := libtrust.FilterByHosts(hostKeys, host, false) - if err != nil { - log.Fatalf("%q is not a known and trusted host", host) - } - - // Generate a CA pool with the trusted host's key. - caPool, err := libtrust.GenerateCACertPool(clientKey, serverKeys) - if err != nil { - log.Fatal(err) - } - - // Create HTTP Client. - client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - Certificates: []tls.Certificate{ - tls.Certificate{ - Certificate: [][]byte{selfSignedClientCert.Raw}, - PrivateKey: clientKey.CryptoPrivateKey(), - Leaf: selfSignedClientCert, - }, - }, - RootCAs: caPool, - }, - }, - } - - var makeRequest = func(url string) { - resp, err := client.Get(url) - if err != nil { - log.Fatal(err) - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Fatal(err) - } - - log.Println(resp.Status) - log.Println(string(body)) - } - - // Make the request to the trusted server! 
- makeRequest(fmt.Sprintf("https://%s", serverAddress)) -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go deleted file mode 100644 index c65f3b6b44..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go +++ /dev/null @@ -1,62 +0,0 @@ -package main - -import ( - "encoding/pem" - "fmt" - "log" - "net" - - "github.com/docker/libtrust" -) - -var ( - serverAddress = "localhost:8888" - clientPrivateKeyFilename = "client_data/private_key.pem" - trustedHostsFilename = "client_data/trusted_hosts.pem" -) - -func main() { - key, err := libtrust.LoadKeyFile(clientPrivateKeyFilename) - if err != nil { - log.Fatal(err) - } - - keyPEMBlock, err := key.PEMBlock() - if err != nil { - log.Fatal(err) - } - - encodedPrivKey := pem.EncodeToMemory(keyPEMBlock) - fmt.Printf("Client Key:\n\n%s\n", string(encodedPrivKey)) - - cert, err := libtrust.GenerateSelfSignedClientCert(key) - if err != nil { - log.Fatal(err) - } - - encodedCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) - fmt.Printf("Client Cert:\n\n%s\n", string(encodedCert)) - - trustedServerKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) - if err != nil { - log.Fatal(err) - } - - hostname, _, err := net.SplitHostPort(serverAddress) - if err != nil { - log.Fatal(err) - } - - trustedServerKeys, err = libtrust.FilterByHosts(trustedServerKeys, hostname, false) - if err != nil { - log.Fatal(err) - } - - caCert, err := libtrust.GenerateCACert(key, trustedServerKeys[0]) - if err != nil { - log.Fatal(err) - } - - encodedCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw}) - fmt.Printf("CA Cert:\n\n%s\n", string(encodedCert)) -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go deleted file mode 100644 index 9dc8842ad9..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go +++ /dev/null @@ -1,61 +0,0 @@ -package main - -import ( - "log" - - "github.com/docker/libtrust" -) - -func main() { - // Generate client key. - clientKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - log.Fatal(err) - } - - // Add a comment for the client key. - clientKey.AddExtendedField("comment", "TLS Demo Client") - - // Save the client key, public and private versions. - err = libtrust.SaveKey("client_data/private_key.pem", clientKey) - if err != nil { - log.Fatal(err) - } - - err = libtrust.SavePublicKey("client_data/public_key.pem", clientKey.PublicKey()) - if err != nil { - log.Fatal(err) - } - - // Generate server key. - serverKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - log.Fatal(err) - } - - // Set the list of addresses to use for the server. - serverKey.AddExtendedField("hosts", []string{"localhost", "docker.example.com"}) - - // Save the server key, public and private versions. - err = libtrust.SaveKey("server_data/private_key.pem", serverKey) - if err != nil { - log.Fatal(err) - } - - err = libtrust.SavePublicKey("server_data/public_key.pem", serverKey.PublicKey()) - if err != nil { - log.Fatal(err) - } - - // Generate Authorized Keys file for server. - err = libtrust.AddKeySetFile("server_data/trusted_clients.pem", clientKey.PublicKey()) - if err != nil { - log.Fatal(err) - } - - // Generate Known Host Keys file for client. 
-	err = libtrust.AddKeySetFile("client_data/trusted_hosts.pem", serverKey.PublicKey())
-	if err != nil {
-		log.Fatal(err)
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go
deleted file mode 100644
index d3cb2ea91f..0000000000
--- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package main
-
-import (
-	"crypto/tls"
-	"fmt"
-	"html"
-	"log"
-	"net"
-	"net/http"
-
-	"github.com/docker/libtrust"
-)
-
-var (
-	serverAddress             = "localhost:8888"
-	privateKeyFilename        = "server_data/private_key.pem"
-	authorizedClientsFilename = "server_data/trusted_clients.pem"
-)
-
-func requestHandler(w http.ResponseWriter, r *http.Request) {
-	clientCert := r.TLS.PeerCertificates[0]
-	keyID := clientCert.Subject.CommonName
-	log.Printf("Request from keyID: %s\n", keyID)
-	fmt.Fprintf(w, "Hello, client! I'm a server! And you are %T: %s.\n", clientCert.PublicKey, html.EscapeString(keyID))
-}
-
-func main() {
-	// Load server key.
-	serverKey, err := libtrust.LoadKeyFile(privateKeyFilename)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Generate server certificate.
-	selfSignedServerCert, err := libtrust.GenerateSelfSignedServerCert(
-		serverKey, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")},
-	)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Load authorized client keys.
-	authorizedClients, err := libtrust.LoadKeySetFile(authorizedClientsFilename)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Create CA pool using trusted client keys.
-	caPool, err := libtrust.GenerateCACertPool(serverKey, authorizedClients)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Create TLS config, requiring client certificates.
-	tlsConfig := &tls.Config{
-		Certificates: []tls.Certificate{
-			tls.Certificate{
-				Certificate: [][]byte{selfSignedServerCert.Raw},
-				PrivateKey:  serverKey.CryptoPrivateKey(),
-				Leaf:        selfSignedServerCert,
-			},
-		},
-		ClientAuth: tls.RequireAndVerifyClientCert,
-		ClientCAs:  caPool,
-	}
-
-	// Create HTTP server with simple request handler.
-	server := &http.Server{
-		Addr:    serverAddress,
-		Handler: http.HandlerFunc(requestHandler),
-	}
-
-	// Listen and serve HTTPS using the libtrust TLS config.
-	listener, err := net.Listen("tcp", server.Addr)
-	if err != nil {
-		log.Fatal(err)
-	}
-	tlsListener := tls.NewListener(listener, tlsConfig)
-	server.Serve(tlsListener)
-}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go
deleted file mode 100644
index 72b0fc3664..0000000000
--- a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package trustgraph
-
-import "github.com/docker/libtrust"
-
-// TrustGraph represents a graph of authorization mapping
-// public keys to nodes and grants between nodes.
-type TrustGraph interface {
-	// Verify verifies that the given public key is allowed to perform
-	// the given action on the given node according to the trust graph.
-	Verify(libtrust.PublicKey, string, uint16) (bool, error)
-
-	// GetGrants returns an array of all grant chains which are used to
-	// allow the requested permission.
-	GetGrants(libtrust.PublicKey, string, uint16) ([][]*Grant, error)
-}
-
-// Grant represents a transfer of permission from one part of the
-// trust graph to another. This is the only way to delegate
-// permission between two different subtrees in the graph.
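-// (A grant names its Subject namespace and covers everything beneath it;
-// the permission bit map documented after the struct describes the levels.)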
-type Grant struct {
-	// Subject is the namespace being granted
-	Subject string
-
-	// Permission is a bit map of permissions
-	Permission uint16
-
-	// Grantee represents the node being granted
-	// a permission scope. The grantee can be
-	// either a namespace item or a key id, where namespace
-	// items will always start with a '/'.
-	Grantee string
-
-	// statement represents the statement used to create
-	// this object.
-	statement *Statement
-}
-
-// Permissions
-// Read node     0x01 (can read node, no sub nodes)
-// Write node    0x02 (can write to node object, cannot create subnodes)
-// Read subtree  0x04 (delegates read to each sub node)
-// Write subtree 0x08 (delegates write to each sub node, including create on the subject)
-//
-// Permission shortcuts
-// ReadItem    = 0x01
-// WriteItem   = 0x03
-// ReadAccess  = 0x07
-// WriteAccess = 0x0F
-// Delegate    = 0x0F
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go
deleted file mode 100644
index 247bfa7aa6..0000000000
--- a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package trustgraph
-
-import (
-	"strings"
-
-	"github.com/docker/libtrust"
-)
-
-type grantNode struct {
-	grants   []*Grant
-	children map[string]*grantNode
-}
-
-type memoryGraph struct {
-	roots map[string]*grantNode
-}
-
-func newGrantNode() *grantNode {
-	return &grantNode{
-		grants:   []*Grant{},
-		children: map[string]*grantNode{},
-	}
-}
-
-// NewMemoryGraph returns a new in-memory trust graph created from
-// a static list of grants. This graph is immutable after creation
-// and any alterations should create a new instance.
-func NewMemoryGraph(grants []*Grant) TrustGraph {
-	roots := map[string]*grantNode{}
-	for _, grant := range grants {
-		parts := strings.Split(grant.Grantee, "/")
-		nodes := roots
-		var node *grantNode
-		var nodeOk bool
-		for _, part := range parts {
-			node, nodeOk = nodes[part]
-			if !nodeOk {
-				node = newGrantNode()
-				nodes[part] = node
-			}
-			if part != "" {
-				node.grants = append(node.grants, grant)
-			}
-			nodes = node.children
-		}
-	}
-	return &memoryGraph{roots}
-}
-
-func (g *memoryGraph) getGrants(name string) []*Grant {
-	nameParts := strings.Split(name, "/")
-	nodes := g.roots
-	var node *grantNode
-	var nodeOk bool
-	for _, part := range nameParts {
-		node, nodeOk = nodes[part]
-		if !nodeOk {
-			return nil
-		}
-		nodes = node.children
-	}
-	return node.grants
-}
-
-func isSubName(name, sub string) bool {
-	if strings.HasPrefix(name, sub) {
-		if len(name) == len(sub) || name[len(sub)] == '/' {
-			return true
-		}
-	}
-	return false
-}
-
-type walkFunc func(*Grant, []*Grant) bool
-
-func foundWalkFunc(*Grant, []*Grant) bool {
-	return true
-}
-
-func (g *memoryGraph) walkGrants(start, target string, permission uint16, f walkFunc, chain []*Grant, visited map[*Grant]bool, collect bool) bool {
-	if visited == nil {
-		visited = map[*Grant]bool{}
-	}
-	grants := g.getGrants(start)
-	subGrants := make([]*Grant, 0, len(grants))
-	for _, grant := range grants {
-		if visited[grant] {
-			continue
-		}
-		visited[grant] = true
-		if grant.Permission&permission == permission {
-			if isSubName(target, grant.Subject) {
-				if f(grant, chain) {
-					return true
-				}
-			} else {
-				subGrants = append(subGrants, grant)
-			}
-		}
-	}
-	for _, grant := range subGrants {
-		var chainCopy []*Grant
-		if collect {
-			chainCopy = make([]*Grant, len(chain)+1)
-			copy(chainCopy, chain)
-
chainCopy[len(chainCopy)-1] = grant - } else { - chainCopy = nil - } - - if g.walkGrants(grant.Subject, target, permission, f, chainCopy, visited, collect) { - return true - } - } - return false -} - -func (g *memoryGraph) Verify(key libtrust.PublicKey, node string, permission uint16) (bool, error) { - return g.walkGrants(key.KeyID(), node, permission, foundWalkFunc, nil, nil, false), nil -} - -func (g *memoryGraph) GetGrants(key libtrust.PublicKey, node string, permission uint16) ([][]*Grant, error) { - grants := [][]*Grant{} - collect := func(grant *Grant, chain []*Grant) bool { - grantChain := make([]*Grant, len(chain)+1) - copy(grantChain, chain) - grantChain[len(grantChain)-1] = grant - grants = append(grants, grantChain) - return false - } - g.walkGrants(key.KeyID(), node, permission, collect, nil, nil, true) - return grants, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go deleted file mode 100644 index 49fd0f3b54..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package trustgraph - -import ( - "fmt" - "testing" - - "github.com/docker/libtrust" -) - -func createTestKeysAndGrants(count int) ([]*Grant, []libtrust.PrivateKey) { - grants := make([]*Grant, count) - keys := make([]libtrust.PrivateKey, count) - for i := 0; i < count; i++ { - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - panic(err) - } - grant := &Grant{ - Subject: fmt.Sprintf("/user-%d", i+1), - Permission: 0x0f, - Grantee: pk.KeyID(), - } - keys[i] = pk - grants[i] = grant - } - return grants, keys -} - -func testVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { - if ok, err := g.Verify(k, target, permission); err != nil { - t.Fatalf("Unexpected error during verification: %s", err) - } else if !ok { - t.Errorf("key failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) - } -} - -func testNotVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { - if ok, err := g.Verify(k, target, permission); err != nil { - t.Fatalf("Unexpected error during verification: %s", err) - } else if ok { - t.Errorf("key should have failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) - } -} - -func TestVerify(t *testing.T) { - grants, keys := createTestKeysAndGrants(4) - extraGrants := make([]*Grant, 3) - extraGrants[0] = &Grant{ - Subject: "/user-3", - Permission: 0x0f, - Grantee: "/user-2", - } - extraGrants[1] = &Grant{ - Subject: "/user-3/sub-project", - Permission: 0x0f, - Grantee: "/user-4", - } - extraGrants[2] = &Grant{ - Subject: "/user-4", - Permission: 0x07, - Grantee: "/user-1", - } - grants = append(grants, extraGrants...) 
- - g := NewMemoryGraph(grants) - - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1/some-project/sub-value", 0x0f) - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x07) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2/", 0x0f) - testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3/sub-value", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-value", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project/app", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f) - - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3/sub-value", 0x0f) - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x0f) - testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1/", 0x0f) - testNotVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-2", 0x0f) - testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-4", 0x0f) - testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f) -} - -func TestCircularWalk(t *testing.T) { - grants, keys := createTestKeysAndGrants(3) - user1Grant := &Grant{ - Subject: "/user-2", - Permission: 0x0f, - Grantee: "/user-1", - } - user2Grant := &Grant{ - Subject: "/user-1", - Permission: 0x0f, - Grantee: "/user-2", - } - grants = append(grants, user1Grant, user2Grant) - - g := NewMemoryGraph(grants) - - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1", 0x0f) - testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f) - - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3", 0x0f) - testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) -} - -func assertGrantSame(t *testing.T, actual, expected *Grant) { - if actual != expected { - t.Fatalf("Unexpected grant retrieved\n\tExpected: %v\n\tActual: %v", expected, actual) - } -} - -func TestGetGrants(t *testing.T) { - grants, keys := createTestKeysAndGrants(5) - extraGrants := make([]*Grant, 4) - extraGrants[0] = &Grant{ - Subject: "/user-3/friend-project", - Permission: 0x0f, - Grantee: "/user-2/friends", - } - extraGrants[1] = &Grant{ - Subject: "/user-3/sub-project", - Permission: 0x0f, - Grantee: "/user-4", - } - extraGrants[2] = &Grant{ - Subject: "/user-2/friends", - Permission: 0x0f, - Grantee: "/user-5/fun-project", - } - extraGrants[3] = &Grant{ - Subject: "/user-5/fun-project", - Permission: 0x0f, - Grantee: "/user-1", - } - grants = append(grants, extraGrants...) 
- - g := NewMemoryGraph(grants) - - grantChains, err := g.GetGrants(keys[3], "/user-3/sub-project/specific-app", 0x0f) - if err != nil { - t.Fatalf("Error getting grants: %s", err) - } - if len(grantChains) != 1 { - t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains)) - } - if len(grantChains[0]) != 2 { - t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0])) - } - assertGrantSame(t, grantChains[0][0], grants[3]) - assertGrantSame(t, grantChains[0][1], extraGrants[1]) - - grantChains, err = g.GetGrants(keys[0], "/user-3/friend-project/fun-app", 0x0f) - if err != nil { - t.Fatalf("Error getting grants: %s", err) - } - if len(grantChains) != 1 { - t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains)) - } - if len(grantChains[0]) != 4 { - t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0])) - } - assertGrantSame(t, grantChains[0][0], grants[0]) - assertGrantSame(t, grantChains[0][1], extraGrants[3]) - assertGrantSame(t, grantChains[0][2], extraGrants[2]) - assertGrantSame(t, grantChains[0][3], extraGrants[0]) -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go deleted file mode 100644 index 7a74b553cd..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go +++ /dev/null @@ -1,227 +0,0 @@ -package trustgraph - -import ( - "crypto/x509" - "encoding/json" - "io" - "io/ioutil" - "sort" - "strings" - "time" - - "github.com/docker/libtrust" -) - -type jsonGrant struct { - Subject string `json:"subject"` - Permission uint16 `json:"permission"` - Grantee string `json:"grantee"` -} - -type jsonRevocation struct { - Subject string `json:"subject"` - Revocation uint16 `json:"revocation"` - Grantee string `json:"grantee"` -} - -type jsonStatement struct { - Revocations []*jsonRevocation `json:"revocations"` - Grants []*jsonGrant `json:"grants"` - Expiration time.Time `json:"expiration"` - IssuedAt time.Time `json:"issuedAt"` -} - -func (g *jsonGrant) Grant(statement *Statement) *Grant { - return &Grant{ - Subject: g.Subject, - Permission: g.Permission, - Grantee: g.Grantee, - statement: statement, - } -} - -// Statement represents a set of grants made from a verifiable -// authority. A statement has an expiration associated with it -// set by the authority. -type Statement struct { - jsonStatement - - signature *libtrust.JSONSignature -} - -// IsExpired returns whether the statement has expired -func (s *Statement) IsExpired() bool { - return s.Expiration.Before(time.Now().Add(-10 * time.Second)) -} - -// Bytes returns an indented json representation of the statement -// in a byte array. This value can be written to a file or stream -// without alteration. -func (s *Statement) Bytes() ([]byte, error) { - return s.signature.PrettySignature("signatures") -} - -// LoadStatement loads and verifies a statement from an input stream. 
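-// (A nil authority pool skips chain-of-trust verification: the signature is
-// then checked with js.Verify rather than js.VerifyChains, as seen below.)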
-func LoadStatement(r io.Reader, authority *x509.CertPool) (*Statement, error) {
-	b, err := ioutil.ReadAll(r)
-	if err != nil {
-		return nil, err
-	}
-	js, err := libtrust.ParsePrettySignature(b, "signatures")
-	if err != nil {
-		return nil, err
-	}
-	payload, err := js.Payload()
-	if err != nil {
-		return nil, err
-	}
-	var statement Statement
-	err = json.Unmarshal(payload, &statement.jsonStatement)
-	if err != nil {
-		return nil, err
-	}
-
-	if authority == nil {
-		_, err = js.Verify()
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		_, err = js.VerifyChains(authority)
-		if err != nil {
-			return nil, err
-		}
-	}
-	statement.signature = js
-
-	return &statement, nil
-}
-
-// CreateStatement creates and signs a statement from streams of grants
-// and revocations, each provided as a JSON array.
-func CreateStatement(grants, revocations io.Reader, expiration time.Duration, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) {
-	var statement Statement
-	err := json.NewDecoder(grants).Decode(&statement.jsonStatement.Grants)
-	if err != nil {
-		return nil, err
-	}
-	err = json.NewDecoder(revocations).Decode(&statement.jsonStatement.Revocations)
-	if err != nil {
-		return nil, err
-	}
-	statement.jsonStatement.Expiration = time.Now().UTC().Add(expiration)
-	statement.jsonStatement.IssuedAt = time.Now().UTC()
-
-	b, err := json.MarshalIndent(&statement.jsonStatement, "", " ")
-	if err != nil {
-		return nil, err
-	}
-
-	statement.signature, err = libtrust.NewJSONSignature(b)
-	if err != nil {
-		return nil, err
-	}
-	err = statement.signature.SignWithChain(key, chain)
-	if err != nil {
-		return nil, err
-	}
-
-	return &statement, nil
-}
-
-type statementList []*Statement
-
-func (s statementList) Len() int {
-	return len(s)
-}
-
-func (s statementList) Less(i, j int) bool {
-	return s[i].IssuedAt.Before(s[j].IssuedAt)
-}
-
-func (s statementList) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-// CollapseStatements returns a single list of the grants that remain valid
-// across the given statements, as well as the time when the next grant
-// will expire.
-func CollapseStatements(statements []*Statement, useExpired bool) ([]*Grant, time.Time, error) {
-	sorted := make(statementList, 0, len(statements))
-	for _, statement := range statements {
-		if useExpired || !statement.IsExpired() {
-			sorted = append(sorted, statement)
-		}
-	}
-	sort.Sort(sorted)
-
-	var minExpired time.Time
-	var grantCount int
-	roots := map[string]*grantNode{}
-	for i, statement := range sorted {
-		if statement.Expiration.Before(minExpired) || i == 0 {
-			minExpired = statement.Expiration
-		}
-		for _, grant := range statement.Grants {
-			parts := strings.Split(grant.Grantee, "/")
-			nodes := roots
-			g := grant.Grant(statement)
-			grantCount = grantCount + 1
-
-			for _, part := range parts {
-				node, nodeOk := nodes[part]
-				if !nodeOk {
-					node = newGrantNode()
-					nodes[part] = node
-				}
-				node.grants = append(node.grants, g)
-				nodes = node.children
-			}
-		}
-
-		for _, revocation := range statement.Revocations {
-			parts := strings.Split(revocation.Grantee, "/")
-			nodes := roots
-
-			var node *grantNode
-			var nodeOk bool
-			for _, part := range parts {
-				node, nodeOk = nodes[part]
-				if !nodeOk {
-					break
-				}
-				nodes = node.children
-			}
-			if node != nil {
-				for _, grant := range node.grants {
-					if isSubName(grant.Subject, revocation.Subject) {
-						grant.Permission = grant.Permission &^ revocation.Revocation
-					}
-				}
-			}
-		}
-	}
-
-	retGrants := make([]*Grant, 0, grantCount)
-	for _, rootNodes := range roots {
-		retGrants = append(retGrants, rootNodes.grants...)
-	}
-
-	return retGrants, minExpired, nil
-}
-
-// FilterStatements filters the statements down to those which produced the
-// given grants.
-func FilterStatements(grants []*Grant) ([]*Statement, error) {
-	statements := map[*Statement]bool{}
-	for _, grant := range grants {
-		if grant.statement != nil {
-			statements[grant.statement] = true
-		}
-	}
-	retStatements := make([]*Statement, len(statements))
-	var i int
-	for statement := range statements {
-		retStatements[i] = statement
-		i++
-	}
-	return retStatements, nil
-}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go
deleted file mode 100644
index e509468659..0000000000
--- a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go
+++ /dev/null
@@ -1,417 +0,0 @@
-package trustgraph
-
-import (
-	"bytes"
-	"crypto/x509"
-	"encoding/json"
-	"testing"
-	"time"
-
-	"github.com/docker/libtrust"
-	"github.com/docker/libtrust/testutil"
-)
-
-const testStatementExpiration = time.Hour * 5
-
-func generateStatement(grants []*Grant, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) {
-	var statement Statement
-
-	statement.Grants = make([]*jsonGrant, len(grants))
-	for i, grant := range grants {
-		statement.Grants[i] = &jsonGrant{
-			Subject:    grant.Subject,
-			Permission: grant.Permission,
-			Grantee:    grant.Grantee,
-		}
-	}
-	statement.IssuedAt = time.Now()
-	statement.Expiration = time.Now().Add(testStatementExpiration)
-	statement.Revocations = make([]*jsonRevocation, 0)
-
-	marshalled, err := json.MarshalIndent(statement.jsonStatement, "", " ")
-	if err != nil {
-		return nil, err
-	}
-
-	sig, err := libtrust.NewJSONSignature(marshalled)
-	if err != nil {
-		return nil, err
-	}
-	err = sig.SignWithChain(key, chain)
-	if err != nil {
-		return nil, err
-	}
-	statement.signature = sig
-
-	return &statement, nil
-}
-
-func generateTrustChain(t *testing.T, chainLen int) (libtrust.PrivateKey, *x509.CertPool, []*x509.Certificate) {
-	caKey, err := libtrust.GenerateECP256PrivateKey()
-	if err != nil {
-		t.Fatalf("Error generating key: %s", err)
-	}
-	ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
-	if err != nil {
-		t.Fatalf("Error generating ca: %s", err)
-	}
-
-	parent := ca
-	parentKey := caKey
-	chain := make([]*x509.Certificate, chainLen)
-	for i := chainLen - 1; i > 0; i-- {
-		intermediatekey, err := libtrust.GenerateECP256PrivateKey()
-		if err != nil {
-			t.Fatalf("Error generating key: %s", err)
-		}
-		chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
-		if err != nil {
-			t.Fatalf("Error generating intermediate certificate: %s", err)
-		}
-		parent = chain[i]
-		parentKey = intermediatekey
-	}
-	trustKey, err := libtrust.GenerateECP256PrivateKey()
-	if err != nil {
-		t.Fatalf("Error generating key: %s", err)
-	}
-	chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
-	if err != nil {
-		t.Fatalf("Error generating trust cert: %s", err)
-	}
-
-	caPool := x509.NewCertPool()
-	caPool.AddCert(ca)
-
-	return trustKey, caPool, chain
-}
-
-func TestLoadStatement(t *testing.T) {
-	grantCount := 4
-	grants, _ := createTestKeysAndGrants(grantCount)
-
-	trustKey, caPool, chain := generateTrustChain(t, 6)
-
-	statement, err := generateStatement(grants, trustKey, chain)
-	if err != nil {
-		t.Fatalf("Error generating statement: %s", err)
-	}
-
-	statementBytes,
err := statement.Bytes() - if err != nil { - t.Fatalf("Error getting statement bytes: %s", err) - } - - s2, err := LoadStatement(bytes.NewReader(statementBytes), caPool) - if err != nil { - t.Fatalf("Error loading statement: %s", err) - } - if len(s2.Grants) != grantCount { - t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants)) - } - - pool := x509.NewCertPool() - _, err = LoadStatement(bytes.NewReader(statementBytes), pool) - if err == nil { - t.Fatalf("No error thrown verifying without an authority") - } else if _, ok := err.(x509.UnknownAuthorityError); !ok { - t.Fatalf("Unexpected error verifying without authority: %s", err) - } - - s2, err = LoadStatement(bytes.NewReader(statementBytes), nil) - if err != nil { - t.Fatalf("Error loading statement: %s", err) - } - if len(s2.Grants) != grantCount { - t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants)) - } - - badData := make([]byte, len(statementBytes)) - copy(badData, statementBytes) - badData[0] = '[' - _, err = LoadStatement(bytes.NewReader(badData), nil) - if err == nil { - t.Fatalf("No error thrown parsing bad json") - } - - alteredData := make([]byte, len(statementBytes)) - copy(alteredData, statementBytes) - alteredData[30] = '0' - _, err = LoadStatement(bytes.NewReader(alteredData), nil) - if err == nil { - t.Fatalf("No error thrown from bad data") - } -} - -func TestCollapseGrants(t *testing.T) { - grantCount := 8 - grants, keys := createTestKeysAndGrants(grantCount) - linkGrants := make([]*Grant, 4) - linkGrants[0] = &Grant{ - Subject: "/user-3", - Permission: 0x0f, - Grantee: "/user-2", - } - linkGrants[1] = &Grant{ - Subject: "/user-3/sub-project", - Permission: 0x0f, - Grantee: "/user-4", - } - linkGrants[2] = &Grant{ - Subject: "/user-6", - Permission: 0x0f, - Grantee: "/user-7", - } - linkGrants[3] = &Grant{ - Subject: "/user-6/sub-project/specific-app", - Permission: 0x0f, - Grantee: "/user-5", - } - trustKey, pool, chain := generateTrustChain(t, 3) - - statements := make([]*Statement, 3) - var err error - statements[0], err = generateStatement(grants[0:4], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[1], err = generateStatement(grants[4:], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[2], err = generateStatement(linkGrants, trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - - statementsCopy := make([]*Statement, len(statements)) - for i, statement := range statements { - b, err := statement.Bytes() - if err != nil { - t.Fatalf("Error getting statement bytes: %s", err) - } - verifiedStatement, err := LoadStatement(bytes.NewReader(b), pool) - if err != nil { - t.Fatalf("Error loading statement: %s", err) - } - // Force sort by reversing order - statementsCopy[len(statementsCopy)-i-1] = verifiedStatement - } - statements = statementsCopy - - collapsedGrants, expiration, err := CollapseStatements(statements, false) - if len(collapsedGrants) != 12 { - t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) - } - if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { - t.Fatalf("Unexpected expiration time: %s", expiration.String()) - } - g := NewMemoryGraph(collapsedGrants) - - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) - 
testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f) - testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-5", 0x0f) - testVerified(t, g, keys[5].PublicKey(), "user-key-6", "/user-6", 0x0f) - testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-7", 0x0f) - testVerified(t, g, keys[7].PublicKey(), "user-key-8", "/user-8", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-project/specific-app", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f) - testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6", 0x0f) - testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f) - testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project/specific-app", 0x0f) - - testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f) - testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-6/sub-project", 0x0f) - testNotVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project", 0x0f) - - // Add revocation grant - statements = append(statements, &Statement{ - jsonStatement{ - IssuedAt: time.Now(), - Expiration: time.Now().Add(testStatementExpiration), - Grants: []*jsonGrant{}, - Revocations: []*jsonRevocation{ - &jsonRevocation{ - Subject: "/user-1", - Revocation: 0x0f, - Grantee: keys[0].KeyID(), - }, - &jsonRevocation{ - Subject: "/user-2", - Revocation: 0x08, - Grantee: keys[1].KeyID(), - }, - &jsonRevocation{ - Subject: "/user-6", - Revocation: 0x0f, - Grantee: "/user-7", - }, - &jsonRevocation{ - Subject: "/user-9", - Revocation: 0x0f, - Grantee: "/user-10", - }, - }, - }, - nil, - }) - - collapsedGrants, expiration, err = CollapseStatements(statements, false) - if len(collapsedGrants) != 12 { - t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) - } - if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { - t.Fatalf("Unexpected expiration time: %s", expiration.String()) - } - g = NewMemoryGraph(collapsedGrants) - - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) - testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) - testNotVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f) - - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x07) -} - -func TestFilterStatements(t *testing.T) { - grantCount := 8 - grants, keys := createTestKeysAndGrants(grantCount) - linkGrants := make([]*Grant, 3) - linkGrants[0] = &Grant{ - Subject: "/user-3", - Permission: 0x0f, - Grantee: "/user-2", - } - linkGrants[1] = &Grant{ - Subject: "/user-5", - Permission: 0x0f, - Grantee: "/user-4", - } - linkGrants[2] = &Grant{ - Subject: "/user-7", - Permission: 0x0f, - Grantee: "/user-6", - } - - trustKey, _, chain := generateTrustChain(t, 3) - - statements := make([]*Statement, 5) - var err error - statements[0], err = generateStatement(grants[0:2], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[1], err = generateStatement(grants[2:4], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[2], err = generateStatement(grants[4:6], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - 
statements[3], err = generateStatement(grants[6:], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[4], err = generateStatement(linkGrants, trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - collapsed, _, err := CollapseStatements(statements, false) - if err != nil { - t.Fatalf("Error collapsing grants: %s", err) - } - - // Filter 1, all 5 statements - filter1, err := FilterStatements(collapsed) - if err != nil { - t.Fatalf("Error filtering statements: %s", err) - } - if len(filter1) != 5 { - t.Fatalf("Wrong number of statements, expected %d, received %d", 5, len(filter1)) - } - - // Filter 2, one statement - filter2, err := FilterStatements([]*Grant{collapsed[0]}) - if err != nil { - t.Fatalf("Error filtering statements: %s", err) - } - if len(filter2) != 1 { - t.Fatalf("Wrong number of statements, expected %d, received %d", 1, len(filter2)) - } - - // Filter 3, 2 statements, from graph lookup - g := NewMemoryGraph(collapsed) - lookupGrants, err := g.GetGrants(keys[1], "/user-3", 0x0f) - if err != nil { - t.Fatalf("Error looking up grants: %s", err) - } - if len(lookupGrants) != 1 { - t.Fatalf("Wrong number of grant chains returned from lookup, expected %d, received %d", 1, len(lookupGrants)) - } - if len(lookupGrants[0]) != 2 { - t.Fatalf("Wrong number of grants looked up, expected %d, received %d", 2, len(lookupGrants[0])) - } - filter3, err := FilterStatements(lookupGrants[0]) - if err != nil { - t.Fatalf("Error filtering statements: %s", err) - } - if len(filter3) != 2 { - t.Fatalf("Wrong number of statements, expected %d, received %d", 2, len(filter3)) - } - -} - -func TestCreateStatement(t *testing.T) { - grantJSON := bytes.NewReader([]byte(`[ - { - "subject": "/user-2", - "permission": 15, - "grantee": "/user-1" - }, - { - "subject": "/user-7", - "permission": 1, - "grantee": "/user-9" - }, - { - "subject": "/user-3", - "permission": 15, - "grantee": "/user-2" - } -]`)) - revocationJSON := bytes.NewReader([]byte(`[ - { - "subject": "user-8", - "revocation": 12, - "grantee": "user-9" - } -]`)) - - trustKey, pool, chain := generateTrustChain(t, 3) - - statement, err := CreateStatement(grantJSON, revocationJSON, testStatementExpiration, trustKey, chain) - if err != nil { - t.Fatalf("Error creating statement: %s", err) - } - - b, err := statement.Bytes() - if err != nil { - t.Fatalf("Error retrieving bytes: %s", err) - } - - verified, err := LoadStatement(bytes.NewReader(b), pool) - if err != nil { - t.Fatalf("Error loading statement: %s", err) - } - - if len(verified.Grants) != 3 { - t.Errorf("Unexpected number of grants, expected %d, received %d", 3, len(verified.Grants)) - } - - if len(verified.Revocations) != 1 { - t.Errorf("Unexpected number of revocations, expected %d, received %d", 1, len(verified.Revocations)) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/util.go b/Godeps/_workspace/src/github.com/docker/libtrust/util.go deleted file mode 100644 index 45dc3e182d..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/util.go +++ /dev/null @@ -1,361 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/elliptic" - "crypto/tls" - "crypto/x509" - "encoding/base32" - "encoding/base64" - "encoding/binary" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net/url" - "os" - "path/filepath" - "strings" - "time" -) - -// LoadOrCreateTrustKey will load a PrivateKey from the specified path -func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey,
error) { - if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { - return nil, err - } - - trustKey, err := LoadKeyFile(trustKeyPath) - if err == ErrKeyFileDoesNotExist { - trustKey, err = GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("error generating key: %s", err) - } - - if err := SaveKey(trustKeyPath, trustKey); err != nil { - return nil, fmt.Errorf("error saving key file: %s", err) - } - - dir, file := filepath.Split(trustKeyPath) - if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { - return nil, fmt.Errorf("error saving public key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("error loading key file: %s", err) - } - return trustKey, nil -} - -// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity -// based authentication from the specified dockerUrl, the rootConfigPath and -// the server name to which it is connecting. -// If trustUnknownHosts is true it will automatically add the host to the -// known-hosts.json in rootConfigPath. -func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - trustKeyPath := filepath.Join(rootConfigPath, "key.json") - knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") - - u, err := url.Parse(dockerUrl) - if err != nil { - return nil, fmt.Errorf("unable to parse machine url") - } - - if u.Scheme == "unix" { - return nil, nil - } - - addr := u.Host - proto := "tcp" - - trustKey, err := LoadOrCreateTrustKey(trustKeyPath) - if err != nil { - return nil, fmt.Errorf("unable to load trust key: %s", err) - } - - knownHosts, err := LoadKeySetFile(knownHostsPath) - if err != nil { - return nil, fmt.Errorf("could not load trusted hosts file: %s", err) - } - - allowedHosts, err := FilterByHosts(knownHosts, addr, false) - if err != nil { - return nil, fmt.Errorf("error filtering hosts: %s", err) - } - - certPool, err := GenerateCACertPool(trustKey, allowedHosts) - if err != nil { - return nil, fmt.Errorf("Could not create CA pool: %s", err) - } - - tlsConfig.ServerName = serverName - tlsConfig.RootCAs = certPool - - x509Cert, err := GenerateSelfSignedClientCert(trustKey) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - tlsConfig.InsecureSkipVerify = true - - testConn, err := tls.Dial(proto, addr, tlsConfig) - if err != nil { - return nil, fmt.Errorf("tls Handshake error: %s", err) - } - - opts := x509.VerifyOptions{ - Roots: tlsConfig.RootCAs, - CurrentTime: time.Now(), - DNSName: tlsConfig.ServerName, - Intermediates: x509.NewCertPool(), - } - - certs := testConn.ConnectionState().PeerCertificates - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - - if _, err := certs[0].Verify(opts); err != nil { - if _, ok := err.(x509.UnknownAuthorityError); ok { - if trustUnknownHosts { - pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) - if err != nil { - return nil, fmt.Errorf("error extracting public key from cert: %s", err) - } - - pubKey.AddExtendedField("hosts", []string{addr}) - - if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { - return nil, fmt.Errorf("error adding machine to known hosts: %s", err) - } - } else { - return nil, 
fmt.Errorf("unable to connect. unknown host: %s", addr) - } - } - } - - testConn.Close() - tlsConfig.InsecureSkipVerify = false - - return tlsConfig, nil -} - -// joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters omitted in accordance -// with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlEncode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -} - -func keyIDEncode(b []byte) string { - s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") - var buf bytes.Buffer - var i int - for i = 0; i < len(s)/4-1; i++ { - start := i * 4 - end := start + 4 - buf.WriteString(s[start:end] + ":") - } - buf.WriteString(s[i*4:]) - return buf.String() -} - -func keyIDFromCryptoKey(pubKey PublicKey) string { - // Generate and return a 'libtrust' fingerprint of the public key. - // For an RSA key this should be: - // SHA256(DER encoded ASN1) - // Then truncated to 240 bits and encoded into 12 base32 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) - if err != nil { - return "" - } - hasher := crypto.SHA256.New() - hasher.Write(derBytes) - return keyIDEncode(hasher.Sum(nil)[:30]) -} - -func stringFromMap(m map[string]interface{}, key string) (string, error) { - val, ok := m[key] - if !ok { - return "", fmt.Errorf("%q value not specified", key) - } - - str, ok := val.(string) - if !ok { - return "", fmt.Errorf("%q value must be a string", key) - } - delete(m, key) - - return str, nil -} - -func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { - curveByteLen := (curve.Params().BitSize + 7) >> 3 - - cBytes, err := joseBase64UrlDecode(cB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - cByteLength := len(cBytes) - if cByteLength != curveByteLen { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) - } - return new(big.Int).SetBytes(cBytes), nil -} - -func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { - dBytes, err := joseBase64UrlDecode(dB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets.
- n := curve.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - dByteLength := len(dBytes) - - if dByteLength != octetLength { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) - } - - return new(big.Int).SetBytes(dBytes), nil -} - -func parseRSAModulusParam(nB64Url string) (*big.Int, error) { - nBytes, err := joseBase64UrlDecode(nB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(nBytes), nil -} - -func serializeRSAPublicExponentParam(e int) []byte { - // We MUST use the minimum number of octets to represent E. - // E is supposed to be 65537 for performance and security reasons - // and is what golang's rsa package generates, but it might be - // different if imported from some other generator. - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, uint32(e)) - var i int - for i = 0; i < 4; i++ { - if buf[i] != 0 { - break - } - } - return buf[i:] -} - -func parseRSAPublicExponentParam(eB64Url string) (int, error) { - eBytes, err := joseBase64UrlDecode(eB64Url) - if err != nil { - return 0, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - // Only the minimum number of bytes were used to represent E, but - // binary.BigEndian.Uint32 expects at least 4 bytes, so we need - // to add zero padding if necessary. - byteLen := len(eBytes) - buf := make([]byte, 4-byteLen, 4) - eBytes = append(buf, eBytes...) - - return int(binary.BigEndian.Uint32(eBytes)), nil -} - -func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) { - b64Url, err := stringFromMap(m, key) - if err != nil { - return nil, err - } - - paramBytes, err := joseBase64UrlDecode(b64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(paramBytes), nil -} - -func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) { - pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}} - for k, v := range headers { - switch val := v.(type) { - case string: - pemBlock.Headers[k] = val - case []string: - if k == "hosts" { - pemBlock.Headers[k] = strings.Join(val, ",") - } else { - // Only a "hosts" list has a defined header encoding. - return nil, fmt.Errorf("unable to encode PEM header %q: unsupported list value", k) - } - default: - // Reject any other non-encodable value type. - return nil, fmt.Errorf("unable to encode PEM header %q: unsupported type %T", k, v) - } - } - - return pemBlock, nil -} - -func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) { - cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err) - } - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - return nil, err - } - - addPEMHeadersToKey(pemBlock, pubKey) - - return pubKey, nil -} - -func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) { - for key, value := range pemBlock.Headers { - var safeVal interface{} - if key == "hosts" { - safeVal = strings.Split(value, ",") - } else { - safeVal = value - } - pubKey.AddExtendedField(key, safeVal) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go deleted file mode 100644 index ee54f5b8cc..0000000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package libtrust - -import ( - "encoding/pem" - "reflect" - "testing" -) - -func TestAddPEMHeadersToKey(t *testing.T) { - pk := &rsaPublicKey{nil,
map[string]interface{}{}} - blk := &pem.Block{Headers: map[string]string{"hosts": "localhost,127.0.0.1"}} - addPEMHeadersToKey(blk, pk) - - val := pk.GetExtendedField("hosts") - hosts, ok := val.([]string) - if !ok { - t.Fatalf("hosts type(%v), expected []string", reflect.TypeOf(val)) - } - expected := []string{"localhost", "127.0.0.1"} - if !reflect.DeepEqual(hosts, expected) { - t.Errorf("hosts(%v), expected %v", hosts, expected) - } -} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/errors.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/errors.go old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/results.go old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/urls_test.go old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate_test.go old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/doc.go old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests_test.go old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/doc.go old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/acceptancetest b/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/acceptancetest old mode 100755 new mode 100644 diff --git 
a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/bootstrap b/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/bootstrap old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/cibuild b/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/cibuild old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/test b/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/test old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/unittest b/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/unittest old mode 100755 new mode 100644 diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata/doc.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata/doc.go deleted file mode 100644 index 430248698b..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This package contains test data shared between the various subpackages of -// the code.google.com/p/go.crypto/ssh package. Under no circumstance should -// this data be used for production code. -package testdata diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata/keys.go b/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata/keys.go deleted file mode 100644 index 5ff1c0e035..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/ssh/testdata/keys.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package testdata - -var PEMBytes = map[string][]byte{ - "dsa": []byte(`-----BEGIN DSA PRIVATE KEY----- -MIIBuwIBAAKBgQD6PDSEyXiI9jfNs97WuM46MSDCYlOqWw80ajN16AohtBncs1YB -lHk//dQOvCYOsYaE+gNix2jtoRjwXhDsc25/IqQbU1ahb7mB8/rsaILRGIbA5WH3 -EgFtJmXFovDz3if6F6TzvhFpHgJRmLYVR8cqsezL3hEZOvvs2iH7MorkxwIVAJHD -nD82+lxh2fb4PMsIiaXudAsBAoGAQRf7Q/iaPRn43ZquUhd6WwvirqUj+tkIu6eV -2nZWYmXLlqFQKEy4Tejl7Wkyzr2OSYvbXLzo7TNxLKoWor6ips0phYPPMyXld14r -juhT24CrhOzuLMhDduMDi032wDIZG4Y+K7ElU8Oufn8Sj5Wge8r6ANmmVgmFfynr -FhdYCngCgYEA3ucGJ93/Mx4q4eKRDxcWD3QzWyqpbRVRRV1Vmih9Ha/qC994nJFz -DQIdjxDIT2Rk2AGzMqFEB68Zc3O+Wcsmz5eWWzEwFxaTwOGWTyDqsDRLm3fD+QYj -nOwuxb0Kce+gWI8voWcqC9cyRm09jGzu2Ab3Bhtpg8JJ8L7gS3MRZK4CFEx4UAfY -Fmsr0W6fHB9nhS4/UXM8 ------END DSA PRIVATE KEY----- -`), - "ecdsa": []byte(`-----BEGIN EC PRIVATE KEY----- -MHcCAQEEINGWx0zo6fhJ/0EAfrPzVFyFC9s18lBt3cRoEDhS3ARooAoGCCqGSM49 -AwEHoUQDQgAEi9Hdw6KvZcWxfg2IDhA7UkpDtzzt6ZqJXSsFdLd+Kx4S3Sx4cVO+ -6/ZOXRnPmNAlLUqjShUsUBBngG0u2fqEqA== ------END EC PRIVATE KEY----- -`), - "rsa": []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIBOwIBAAJBALdGZxkXDAjsYk10ihwU6Id2KeILz1TAJuoq4tOgDWxEEGeTrcld -r/ZwVaFzjWzxaf6zQIJbfaSEAhqD5yo72+sCAwEAAQJBAK8PEVU23Wj8mV0QjwcJ -tZ4GcTUYQL7cF4+ezTCE9a1NrGnCP2RuQkHEKxuTVrxXt+6OF15/1/fuXnxKjmJC -nxkCIQDaXvPPBi0c7vAxGwNY9726x01/dNbHCE0CBtcotobxpwIhANbbQbh3JHVW -2haQh4fAG5mhesZKAGcxTyv4mQ7uMSQdAiAj+4dzMpJWdSzQ+qGHlHMIBvVHLkqB -y2VdEyF7DPCZewIhAI7GOI/6LDIFOvtPo6Bj2nNmyQ1HU6k/LRtNIXi4c9NJAiAr -rrxx26itVhJmcvoUhOjwuzSlP2bE5VHAvkGB352YBg== ------END RSA PRIVATE KEY----- -`), - "user": []byte(`-----BEGIN EC PRIVATE KEY----- -MHcCAQEEILYCAeq8f7V4vSSypRw7pxy8yz3V5W4qg8kSC3zJhqpQoAoGCCqGSM49 -AwEHoUQDQgAEYcO2xNKiRUYOLEHM7VYAp57HNyKbOdYtHD83Z4hzNPVC4tM5mdGD -PLL8IEwvYu2wq+lpXfGQnNMbzYf9gspG0w== ------END EC PRIVATE KEY----- -`), -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go index 38be112604..a47e465595 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -11,7 +11,7 @@ // server. // // See http://tools.ietf.org/html/draft-ietf-oauth-v2-31#section-4.4 -package clientcredentials // import "golang.org/x/oauth2/clientcredentials" +package clientcredentials import ( "net/http" diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go b/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go index 14c801a2a1..51c1480bd5 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package facebook provides constants for using OAuth2 to access Facebook. -package facebook // import "golang.org/x/oauth2/facebook" +package facebook import ( "golang.org/x/oauth2" diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go b/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go index f2978015b0..26502a368d 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package github provides constants for using OAuth2 to access Github. 
-package github // import "golang.org/x/oauth2/github" +package github import ( "golang.org/x/oauth2" diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go index 9a3d5feb1b..0bed738668 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go @@ -12,7 +12,7 @@ // https://developers.google.com/accounts/docs/OAuth2 // and // https://developers.google.com/accounts/docs/application-default-credentials. -package google // import "golang.org/x/oauth2/google" +package google import ( "encoding/json" diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials deleted file mode 100644 index ff5eefbd0a..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials +++ /dev/null @@ -1,122 +0,0 @@ -{ - "data": [ - { - "credential": { - "_class": "OAuth2Credentials", - "_module": "oauth2client.client", - "access_token": "foo_access_token", - "client_id": "foo_client_id", - "client_secret": "foo_client_secret", - "id_token": { - "at_hash": "foo_at_hash", - "aud": "foo_aud", - "azp": "foo_azp", - "cid": "foo_cid", - "email": "foo@example.com", - "email_verified": true, - "exp": 1420573614, - "iat": 1420569714, - "id": "1337", - "iss": "accounts.google.com", - "sub": "1337", - "token_hash": "foo_token_hash", - "verified_email": true - }, - "invalid": false, - "refresh_token": "foo_refresh_token", - "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", - "token_expiry": "2015-01-09T00:51:51Z", - "token_response": { - "access_token": "foo_access_token", - "expires_in": 3600, - "id_token": "foo_id_token", - "token_type": "Bearer" - }, - "token_uri": "https://accounts.google.com/o/oauth2/token", - "user_agent": "Cloud SDK Command Line Tool" - }, - "key": { - "account": "foo@example.com", - "clientId": "foo_client_id", - "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", - "type": "google-cloud-sdk" - } - }, - { - "credential": { - "_class": "OAuth2Credentials", - "_module": "oauth2client.client", - "access_token": "bar_access_token", - "client_id": "bar_client_id", - "client_secret": "bar_client_secret", - "id_token": { - "at_hash": "bar_at_hash", - "aud": "bar_aud", - "azp": "bar_azp", - "cid": "bar_cid", - "email": "bar@example.com", - "email_verified": true, - "exp": 1420573614, - "iat": 1420569714, - "id": "1337", - "iss": "accounts.google.com", - "sub": "1337", - "token_hash": "bar_token_hash", - "verified_email": true - }, - "invalid": false, - "refresh_token": "bar_refresh_token", - "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", - "token_expiry": "2015-01-09T00:51:51Z", - "token_response": { - "access_token": "bar_access_token", - "expires_in": 3600, - "id_token": "bar_id_token", - "token_type": "Bearer" - }, - "token_uri": "https://accounts.google.com/o/oauth2/token", - "user_agent": "Cloud SDK Command Line Tool" - }, - "key": { - "account": "bar@example.com", - 
"clientId": "bar_client_id", - "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", - "type": "google-cloud-sdk" - } - }, - { - "credential": { - "_class": "ServiceAccountCredentials", - "_kwargs": {}, - "_module": "oauth2client.client", - "_private_key_id": "00000000000000000000000000000000", - "_private_key_pkcs8_text": "-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQCt3fpiynPSaUhWSIKMGV331zudwJ6GkGmvQtwsoK2S2LbvnSwU\nNxgj4fp08kIDR5p26wF4+t/HrKydMwzftXBfZ9UmLVJgRdSswmS5SmChCrfDS5OE\nvFFcN5+6w1w8/Nu657PF/dse8T0bV95YrqyoR0Osy8WHrUOMSIIbC3hRuwIDAQAB\nAoGAJrGE/KFjn0sQ7yrZ6sXmdLawrM3mObo/2uI9T60+k7SpGbBX0/Pi6nFrJMWZ\nTVONG7P3Mu5aCPzzuVRYJB0j8aldSfzABTY3HKoWCczqw1OztJiEseXGiYz4QOyr\nYU3qDyEpdhS6q6wcoLKGH+hqRmz6pcSEsc8XzOOu7s4xW8kCQQDkc75HjhbarCnd\nJJGMe3U76+6UGmdK67ltZj6k6xoB5WbTNChY9TAyI2JC+ppYV89zv3ssj4L+02u3\nHIHFGxsHAkEAwtU1qYb1tScpchPobnYUFiVKJ7KA8EZaHVaJJODW/cghTCV7BxcJ\nbgVvlmk4lFKn3lPKAgWw7PdQsBTVBUcCrQJATPwoIirizrv3u5soJUQxZIkENAqV\nxmybZx9uetrzP7JTrVbFRf0SScMcyN90hdLJiQL8+i4+gaszgFht7sNMnwJAAbfj\nq0UXcauQwALQ7/h2oONfTg5S+MuGC/AxcXPSMZbMRGGoPh3D5YaCv27aIuS/ukQ+\n6dmm/9AGlCb64fsIWQJAPaokbjIifo+LwC5gyK73Mc4t8nAOSZDenzd/2f6TCq76\nS1dcnKiPxaED7W/y6LJiuBT2rbZiQ2L93NJpFZD/UA==\n-----END RSA PRIVATE KEY-----\n", - "_revoke_uri": "https://accounts.google.com/o/oauth2/revoke", - "_scopes": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", - "_service_account_email": "baz@serviceaccount.example.com", - "_service_account_id": "baz.serviceaccount.example.com", - "_token_uri": "https://accounts.google.com/o/oauth2/token", - "_user_agent": "Cloud SDK Command Line Tool", - "access_token": null, - "assertion_type": null, - "client_id": null, - "client_secret": null, - "id_token": null, - "invalid": false, - "refresh_token": null, - "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", - "service_account_name": "baz@serviceaccount.example.com", - "token_expiry": null, - "token_response": null, - "user_agent": "Cloud SDK Command Line Tool" - }, - "key": { - "account": "baz@serviceaccount.example.com", - "clientId": "baz_client_id", - "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", - "type": "google-cloud-sdk" - } - } - ], - "file_version": 1 -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties 
b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties deleted file mode 100644 index 025de886cf..0000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties +++ /dev/null @@ -1,2 +0,0 @@ -[core] -account = bar@example.com \ No newline at end of file diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go b/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go index 6b86a3e131..797e2f3e1e 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go @@ -4,7 +4,7 @@ // Package jws provides encoding and decoding utilities for // signed JWS messages. -package jws // import "golang.org/x/oauth2/jws" +package jws import ( "bytes" diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go b/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go index b619f93d2b..30c212b101 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package linkedin provides constants for using OAuth2 to access LinkedIn. -package linkedin // import "golang.org/x/oauth2/linkedin" +package linkedin import ( "golang.org/x/oauth2" diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go index 97997f8c13..b349276e6c 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go @@ -5,7 +5,7 @@ // Package oauth2 provides support for making // OAuth2 authorized and authenticated HTTP requests. // It can additionally grant authorization with Bearer JWT. -package oauth2 // import "golang.org/x/oauth2" +package oauth2 import ( "bytes" diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go b/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go index c0d093ccc5..60741ce9da 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki. -package odnoklassniki // import "golang.org/x/oauth2/odnoklassniki" +package odnoklassniki import ( "golang.org/x/oauth2" diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go b/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go index 2e713c53c8..32308322f3 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package paypal provides constants for using OAuth2 to access PayPal. -package paypal // import "golang.org/x/oauth2/paypal" +package paypal import ( "golang.org/x/oauth2" diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go b/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go index bd8e159488..6463482cfe 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package vk provides constants for using OAuth2 to access VK.com. 
-package vk // import "golang.org/x/oauth2/vk" +package vk import ( "golang.org/x/oauth2" diff --git a/Godeps/_workspace/src/google.golang.org/cloud/.travis.yml b/Godeps/_workspace/src/google.golang.org/cloud/.travis.yml deleted file mode 100644 index 86e3423261..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -sudo: false -language: go -go: -- 1.4 -- tip -install: -- go get -v google.golang.org/cloud/... -script: -- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d -- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" - go test -v -tags=integration google.golang.org/cloud/... diff --git a/Godeps/_workspace/src/google.golang.org/cloud/AUTHORS b/Godeps/_workspace/src/google.golang.org/cloud/AUTHORS deleted file mode 100644 index 3da443dc9f..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/AUTHORS +++ /dev/null @@ -1,12 +0,0 @@ -# This is the official list of cloud authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as: -# Name <email address> or Organization -# The email address is not required for organizations. - -Google Inc. -Palm Stone Games, Inc. -Péter Szilágyi -Tyler Treat diff --git a/Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTING.md b/Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTING.md deleted file mode 100644 index 9a1cab2878..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTING.md +++ /dev/null @@ -1,114 +0,0 @@ -# Contributing - -1. Sign one of the contributor license agreements below. -1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. -1. Get the cloud package by running `go get -d google.golang.org/cloud`. - 1. If you have already checked out the source, make sure that the remote git - origin is https://code.googlesource.com/gocloud: - - git remote set-url origin https://code.googlesource.com/gocloud -1. Make changes and create a change by running `git codereview change <name>`, -provide a commit message, and use `git codereview mail` to create a Gerrit CL. -1. Keep amending to the change and mail as you receive feedback. - -## Integration Tests - -In addition to the unit tests, you may run the integration test suite. - -To run the integration tests, you must create and configure a project in the -Google Developers Console. Once you create a project, set the -following environment variables to be able to run the tests against the actual APIs. - -- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) -- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. - -Create a storage bucket with the same name as the project id set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**. -The storage integration test will create and delete some objects in this bucket.
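For orientation, here is a minimal sketch of how a test setup could consume those two variables and build an authenticated client with the vendored golang.org/x/oauth2 packages. It is not part of the original CONTRIBUTING.md, and the storage scope shown is only an illustrative assumption:

```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// Both variables are described above and must be set before running the suite.
	projectID := os.Getenv("GCLOUD_TESTS_GOLANG_PROJECT_ID")
	keyPath := os.Getenv("GCLOUD_TESTS_GOLANG_KEY")
	if projectID == "" || keyPath == "" {
		log.Fatal("GCLOUD_TESTS_GOLANG_PROJECT_ID and GCLOUD_TESTS_GOLANG_KEY must be set")
	}

	jsonKey, err := ioutil.ReadFile(keyPath)
	if err != nil {
		log.Fatalf("reading JSON key file: %v", err)
	}

	// The scope below is an assumption for illustration; real tests would
	// request whichever scopes the exercised APIs need.
	conf, err := google.JWTConfigFromJSON(jsonKey, "https://www.googleapis.com/auth/devstorage.full_control")
	if err != nil {
		log.Fatalf("parsing JSON key file: %v", err)
	}
	httpClient := conf.Client(oauth2.NoContext)
	_ = httpClient // hand this to the cloud packages under test

	log.Printf("integration tests configured for project %s", projectID)
}
```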
- -Install the [gcloud command-line tool][gcloudcli] to your machine and use it -to create the indexes used in the datastore integration tests, which are -found in `datastore/testdata/index.yaml`: - -From the project's root directory: - -``` sh -# Install the app component -$ gcloud components update app - -# Set the default project in your env -$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID - -# Authenticate the gcloud tool with your account -$ gcloud auth login - -# Create the indexes -$ gcloud preview datastore create-indexes datastore/testdata/index.yaml - -``` - -You can run the integration tests by running: - -``` sh -$ go test -v -tags=integration google.golang.org/cloud/... -``` - -## Contributor License Agreements - -Before we can accept your pull requests, you'll need to sign a Contributor -License Agreement (CLA): - -- **If you are an individual writing original source code** and **you own the -- intellectual property**, then you'll need to sign an [individual CLA][indvcla]. -- **If you work for a company that wants to allow you to contribute your work**, -then you'll need to sign a [corporate CLA][corpcla]. - -You can sign these electronically (just scroll to the bottom). After that, -we'll be able to accept your pull requests. - -## Contributor Code of Conduct - -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. - -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing others' private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers.
- -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) - -[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ -[indvcla]: https://developers.google.com/open-source/cla/individual -[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTORS b/Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTORS deleted file mode 100644 index 475ac6a667..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTORS +++ /dev/null @@ -1,24 +0,0 @@ -# People who have agreed to one of the CLAs and can contribute patches. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# https://developers.google.com/open-source/cla/individual -# https://developers.google.com/open-source/cla/corporate -# -# Names should be added to this file as: -# Name <email address> - -# Keep the list alphabetically sorted. - -Andrew Gerrand -Brad Fitzpatrick -Burcu Dogan -Dave Day -David Symonds -Glenn Lewis -Johan Euphrosine -Luna Duclos -Michael McGreevy -Péter Szilágyi -Tyler Treat diff --git a/Godeps/_workspace/src/google.golang.org/cloud/LICENSE b/Godeps/_workspace/src/google.golang.org/cloud/LICENSE deleted file mode 100644 index a4c5efd822..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below).
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2014 Google Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/google.golang.org/cloud/README.md b/Godeps/_workspace/src/google.golang.org/cloud/README.md deleted file mode 100644 index 10d3995d58..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/README.md +++ /dev/null @@ -1,135 +0,0 @@ -# Google Cloud for Go - -[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang) - -**NOTE:** These packages are experimental, and may occasionally make -backwards-incompatible changes. - -**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). - -Go packages for Google Cloud Platform services. 
Supported APIs include: - - * Google Cloud Datastore - * Google Cloud Storage - * Google Cloud Pub/Sub - * Google Cloud Container Engine - -``` go -import "google.golang.org/cloud" -``` - -Documentation and examples are available at -[https://godoc.org/google.golang.org/cloud](https://godoc.org/google.golang.org/cloud). - -## Authorization - -Authorization, throughout the package, is delegated to the golang.org/x/oauth2 package. -Refer to the [godoc documentation](https://godoc.org/golang.org/x/oauth2) -for examples of using oauth2 with the Cloud package. - -## Google Cloud Datastore - -[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully -managed, schemaless database for storing non-relational data. Cloud Datastore -automatically scales with your users and supports ACID transactions, high availability -of reads and writes, strong consistency for reads and ancestor queries, and eventual -consistency for all other queries. - -Follow the [activation instructions][cloud-datastore-activation] to use the Google -Cloud Datastore API with your project. - -[https://godoc.org/google.golang.org/cloud/datastore](https://godoc.org/google.golang.org/cloud/datastore) - - -```go -type Post struct { - Title string - Body string `datastore:",noindex"` - PublishedAt time.Time -} -keys := []*datastore.Key{ - datastore.NewKey(ctx, "Post", "post1", 0, nil), - datastore.NewKey(ctx, "Post", "post2", 0, nil), -} -posts := []*Post{ - {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, - {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, -} -if _, err := datastore.PutMulti(ctx, keys, posts); err != nil { - log.Println(err) -} -``` - -## Google Cloud Storage - -[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store -data on Google infrastructure with very high reliability, performance and availability, -and can be used to distribute large data objects to users via direct download. - -[https://godoc.org/google.golang.org/cloud/storage](https://godoc.org/google.golang.org/cloud/storage) - - -```go -// Read object1 from the bucket. -rc, err := storage.NewReader(ctx, "bucket", "object1") -if err != nil { - log.Fatal(err) -} -slurp, err := ioutil.ReadAll(rc) -rc.Close() -if err != nil { - log.Fatal(err) -} -``` - -## Google Cloud Pub/Sub (Alpha) - -> Google Cloud Pub/Sub is in **Alpha status**. As a result, it might change in -> backward-incompatible ways and is not recommended for production use. It is not -> subject to any SLA or deprecation policy. - -[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect -your services with reliable, many-to-many, asynchronous messaging hosted on Google's -infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation -for building your own robust, global services. - -[https://godoc.org/google.golang.org/cloud/pubsub](https://godoc.org/google.golang.org/cloud/pubsub) - - -```go -// Publish "hello world" on topic1. -msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{ - Data: []byte("hello world"), -}) -if err != nil { - log.Println(err) -} -// Pull messages via subscription1. -msgs, err := pubsub.Pull(ctx, "subscription1", 1) -if err != nil { - log.Println(err) -} -``` - -## Contributing - -Contributions are welcome. Please see the -[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md) -document for details. We're using Gerrit for our code reviews.
Please don't open pull -requests against this repo, new pull requests will be automatically closed. - -Please note that this project is released with a Contributor Code of Conduct. -By participating in this project you agree to abide by its terms. -See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md#contributor-code-of-conduct) -for more information. - -[cloud-datastore]: https://cloud.google.com/datastore/ -[cloud-datastore-docs]: https://cloud.google.com/datastore/docs -[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate - -[cloud-pubsub]: https://cloud.google.com/pubsub/ -[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs - -[cloud-storage]: https://cloud.google.com/storage/ -[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview -[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/bigquery.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/bigquery.go deleted file mode 100644 index bc23488d4a..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/bigquery.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -// TODO(mcgreevy): support dry-run mode when creating jobs. - -import ( - "fmt" - "net/http" - - "golang.org/x/net/context" - bq "google.golang.org/api/bigquery/v2" -) - -// A Source is a source of data for the Copy function. -type Source interface { - implementsSource() -} - -// A Destination is a destination of data for the Copy function. -type Destination interface { - implementsDestination() -} - -// An Option is an optional argument to Copy. -type Option interface { - implementsOption() -} - -// A ReadSource is a source of data for the Read function. -type ReadSource interface { - implementsReadSource() -} - -// A ReadOption is an optional argument to Read. -type ReadOption interface { - customizeRead(conf *pagingConf) -} - -const Scope = "https://www.googleapis.com/auth/bigquery" - -// Client may be used to perform BigQuery operations. -type Client struct { - service service - projectID string -} - -// Note: many of the methods on *Client appear in the various *_op.go source files. - -// NewClient constructs a new Client which can perform BigQuery operations. -// Operations performed via the client are billed to the specified GCP project. -// The supplied http.Client is used for making requests to the BigQuery server and must be capable of -// authenticating requests with Scope. 
-func NewClient(client *http.Client, projectID string) (*Client, error) { - bqService, err := newBigqueryService(client) - if err != nil { - return nil, fmt.Errorf("constructing bigquery client: %v", err) - } - - c := &Client{ - service: bqService, - projectID: projectID, - } - return c, nil -} - -// initJobProto creates and returns a bigquery Job proto. -// The proto is customized using any jobOptions in options. -// The list of Options is returned with the jobOptions removed. -func initJobProto(projectID string, options []Option) (*bq.Job, []Option) { - job := &bq.Job{} - - var other []Option - for _, opt := range options { - if o, ok := opt.(jobOption); ok { - o.customizeJob(job, projectID) - } else { - other = append(other, opt) - } - } - return job, other -} - -// Copy starts a BigQuery operation to copy data from a Source to a Destination. -func (c *Client) Copy(ctx context.Context, dst Destination, src Source, options ...Option) (*Job, error) { - switch dst := dst.(type) { - case *Table: - switch src := src.(type) { - case *GCSReference: - return c.load(ctx, dst, src, options) - case *Table: - return c.cp(ctx, dst, Tables{src}, options) - case Tables: - return c.cp(ctx, dst, src, options) - case *Query: - return c.query(ctx, dst, src, options) - } - case *GCSReference: - if src, ok := src.(*Table); ok { - return c.extract(ctx, dst, src, options) - } - } - return nil, fmt.Errorf("no Copy operation matches dst/src pair: dst: %T ; src: %T", dst, src) -} - -// Read fetches data from a ReadSource and returns the data via an Iterator. -func (c *Client) Read(ctx context.Context, src ReadSource, options ...ReadOption) (*Iterator, error) { - switch src := src.(type) { - case *Job: - return c.readQueryResults(src, options) - case *Query: - return c.executeQuery(ctx, src, options...) - case *Table: - return c.readTable(src, options) - } - return nil, fmt.Errorf("src (%T) does not support the Read operation", src) -} - -// executeQuery submits a query for execution and returns the results via an Iterator. -func (c *Client) executeQuery(ctx context.Context, q *Query, options ...ReadOption) (*Iterator, error) { - dest := &Table{} - job, err := c.Copy(ctx, dest, q, WriteTruncate) - if err != nil { - return nil, err - } - - return c.Read(ctx, job, options...) -} - -func (c *Client) Dataset(id string) *Dataset { - return &Dataset{ - id: id, - client: c, - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_op.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_op.go deleted file mode 100644 index ec0e45e392..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_op.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
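The `Copy` method in `bigquery.go` above is the single entry point for all four job kinds: it dispatches on the concrete types of `dst` and `src` (load for `*Table`/`*GCSReference`, table copy for `*Table`/`*Table` or `Tables`, query for `*Table`/`*Query`, extract for `*GCSReference`/`*Table`). As a reader aid, here is a minimal, hypothetical sketch of driving that dispatch to run a load job; the project, dataset, bucket, and table names are placeholders, and the authenticated `http.Client` setup is assumed rather than shown:

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/context"
	"google.golang.org/cloud/bigquery"
)

func main() {
	ctx := context.Background()

	// Assumption: httpClient already attaches OAuth2 credentials carrying
	// bigquery.Scope (e.g. built with golang.org/x/oauth2).
	// http.DefaultClient is only a stand-in and would fail real requests.
	httpClient := http.DefaultClient

	client, err := bigquery.NewClient(httpClient, "my-project-id")
	if err != nil {
		log.Fatal(err)
	}

	// A *Table dst with a *GCSReference src selects the load path in Copy.
	dst := &bigquery.Table{ProjectID: "my-project-id", DatasetID: "mydataset", TableID: "mytable"}
	src := client.NewGCSReference("gs://my-bucket/data.csv")

	job, err := client.Copy(ctx, dst, src, bigquery.WriteTruncate)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("started load job %s", job.ID())
}
```

`WriteTruncate` itself is defined in a file this diff does not touch; it is used here only because the tests below exercise it.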
- -package bigquery - -import ( - "fmt" - - "golang.org/x/net/context" - bq "google.golang.org/api/bigquery/v2" -) - -type copyOption interface { - customizeCopy(conf *bq.JobConfigurationTableCopy, projectID string) -} - -func (c *Client) cp(ctx context.Context, dst *Table, src Tables, options []Option) (*Job, error) { - job, options := initJobProto(c.projectID, options) - payload := &bq.JobConfigurationTableCopy{} - - dst.customizeCopyDst(payload, c.projectID) - src.customizeCopySrc(payload, c.projectID) - - for _, opt := range options { - o, ok := opt.(copyOption) - if !ok { - return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) - } - o.customizeCopy(payload, c.projectID) - } - - job.Configuration = &bq.JobConfiguration{ - Copy: payload, - } - return c.service.insertJob(ctx, job, c.projectID) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_test.go deleted file mode 100644 index 26e40ec3b1..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
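The `cp` function above shows the package's two-level option pattern: `initJobProto` first peels off job-level options (`jobOption`), then each operation type-asserts what remains against its own interface (`copyOption` here) and rejects anything left over. The dispositions exercised by the tests, such as `WriteTruncate`, live in a file not shown in this diff; a plausible reconstruction (an assumption, not the actual source) looks like:

```go
package bigquery

import bq "google.golang.org/api/bigquery/v2"

// Hypothetical reconstruction of a disposition option. A single value
// satisfies Option plus each per-operation interface it applies to, so
// cp, load, and query all accept it, while extract rejects it with the
// "option ... not applicable" error above.
type writeDisposition string

const WriteTruncate = writeDisposition("WRITE_TRUNCATE")

func (opt writeDisposition) implementsOption() {}

func (opt writeDisposition) customizeCopy(conf *bq.JobConfigurationTableCopy, projectID string) {
	conf.WriteDisposition = string(opt)
}

func (opt writeDisposition) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {
	conf.WriteDisposition = string(opt)
}

func (opt writeDisposition) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) {
	conf.WriteDisposition = string(opt)
}
```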
- -package bigquery - -import ( - "reflect" - "testing" - - "golang.org/x/net/context" - bq "google.golang.org/api/bigquery/v2" -) - -func defaultCopyJob() *bq.Job { - return &bq.Job{ - Configuration: &bq.JobConfiguration{ - Copy: &bq.JobConfigurationTableCopy{ - DestinationTable: &bq.TableReference{ - ProjectId: "d-project-id", - DatasetId: "d-dataset-id", - TableId: "d-table-id", - }, - SourceTables: []*bq.TableReference{ - { - ProjectId: "s-project-id", - DatasetId: "s-dataset-id", - TableId: "s-table-id", - }, - }, - }, - }, - } -} - -func TestCopy(t *testing.T) { - testCases := []struct { - dst *Table - src Tables - options []Option - want *bq.Job - }{ - { - dst: &Table{ - ProjectID: "d-project-id", - DatasetID: "d-dataset-id", - TableID: "d-table-id", - }, - src: Tables{ - { - ProjectID: "s-project-id", - DatasetID: "s-dataset-id", - TableID: "s-table-id", - }, - }, - want: defaultCopyJob(), - }, - { - dst: &Table{ - ProjectID: "d-project-id", - DatasetID: "d-dataset-id", - TableID: "d-table-id", - }, - src: Tables{ - { - ProjectID: "s-project-id", - DatasetID: "s-dataset-id", - TableID: "s-table-id", - }, - }, - options: []Option{CreateNever, WriteTruncate}, - want: func() *bq.Job { - j := defaultCopyJob() - j.Configuration.Copy.CreateDisposition = "CREATE_NEVER" - j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE" - return j - }(), - }, - } - - for _, tc := range testCases { - s := &testService{} - c := &Client{ - service: s, - } - if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil { - t.Errorf("err calling cp: %v", err) - continue - } - if !reflect.DeepEqual(s.Job, tc.want) { - t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want) - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/create_table_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/create_table_test.go deleted file mode 100644 index e9a498852f..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/create_table_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
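`TestCopy` above relies on a `testService` stub that records the `*bq.Job` handed to `insertJob`, so the assertion can compare it against `tc.want`; the stub's definition is not part of this diff. A plausible reconstruction, assuming only the `insertJob` signature visible in `cp`:

```go
package bigquery

import (
	"golang.org/x/net/context"

	bq "google.golang.org/api/bigquery/v2"
)

// testService records the job proto passed to insertJob. Embedding the
// service interface (as listTablesServiceStub does later in this diff)
// leaves every other method unimplemented, so unexpected calls fail loudly.
type testService struct {
	Job *bq.Job

	service
}

func (s *testService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) {
	s.Job = job
	return &Job{}, nil
}
```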
- -package bigquery - -import ( - "reflect" - "testing" - "time" - - "golang.org/x/net/context" -) - -type createTableRecorder struct { - conf *createTableConf - service -} - -func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error { - rec.conf = conf - return nil -} - -func TestCreateTableOptions(t *testing.T) { - s := &createTableRecorder{} - c := &Client{ - service: s, - } - exp := time.Now() - q := "query" - if _, err := c.CreateTable(context.Background(), "p", "d", "t", TableExpiration(exp), ViewQuery(q)); err != nil { - t.Fatalf("err calling CreateTable: %v", err) - } - want := createTableConf{ - projectID: "p", - datasetID: "d", - tableID: "t", - expiration: exp, - viewQuery: q, - } - if !reflect.DeepEqual(*s.conf, want) { - t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want) - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset.go deleted file mode 100644 index 3f85935a24..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import "golang.org/x/net/context" - -// Dataset is a reference to a BigQuery dataset. -type Dataset struct { - id string - client *Client -} - -// ListTables returns a list of all the tables contained in the Dataset. -func (d *Dataset) ListTables(ctx context.Context) ([]*Table, error) { - var tables []*Table - - err := getPages("", func(pageToken string) (string, error) { - ts, tok, err := d.client.service.listTables(ctx, d.client.projectID, d.id, pageToken) - if err == nil { - tables = append(tables, ts...) - } - return tok, err - }) - - if err != nil { - return nil, err - } - return tables, nil -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset_test.go deleted file mode 100644 index 79c9c4e04d..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset_test.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import ( - "errors" - "reflect" - "testing" - - "golang.org/x/net/context" -) - -// listTablesServiceStub services listTables requests by returning data from an in-memory list of tables.
-type listTablesServiceStub struct { - expectedProject, expectedDataset string - values [][]*Table // contains pages of tables. - pageTokens map[string]string // maps incoming page token to returned page token. - - service -} - -func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) { - if projectID != s.expectedProject { - return nil, "", errors.New("wrong project id") - } - if datasetID != s.expectedDataset { - return nil, "", errors.New("wrong dataset id") - } - - tables := s.values[0] - s.values = s.values[1:] - return tables, s.pageTokens[pageToken], nil -} - -func TestListTables(t *testing.T) { - t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"} - t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"} - t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"} - testCases := []struct { - data [][]*Table - pageTokens map[string]string - want []*Table - }{ - { - data: [][]*Table{{t1, t2}, {t3}}, - pageTokens: map[string]string{"": "a", "a": ""}, - want: []*Table{t1, t2, t3}, - }, - { - data: [][]*Table{{t1, t2}, {t3}}, - pageTokens: map[string]string{"": ""}, // no more pages after first one. - want: []*Table{t1, t2}, - }, - } - - for _, tc := range testCases { - c := &Client{ - service: &listTablesServiceStub{ - expectedProject: "x", - expectedDataset: "y", - values: tc.data, - pageTokens: tc.pageTokens, - }, - projectID: "x", - } - got, err := c.Dataset("y").ListTables(context.Background()) - if err != nil { - t.Errorf("err calling ListTables: %v", err) - continue - } - - if !reflect.DeepEqual(got, tc.want) { - t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want) - } - } -} - -func TestListTablesError(t *testing.T) { - c := &Client{ - service: &listTablesServiceStub{ - expectedProject: "x", - expectedDataset: "y", - }, - projectID: "x", - } - // Test that service read errors are propagated back to the caller. - // Passing "not y" as the dataset id will cause the service to return an error. - _, err := c.Dataset("not y").ListTables(context.Background()) - if err == nil { - // Unlike Iterator reads, ListTables returns the error directly. - t.Errorf("ListTables expected: non-nil err, got: nil") - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/doc.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/doc.go deleted file mode 100644 index 03f851a9db..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package bigquery provides a client for the BigQuery service. -// -// Note: This package is a work-in-progress. Backwards-incompatible changes should be expected.
-package bigquery // import "google.golang.org/cloud/bigquery" diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/error.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/error.go deleted file mode 100644 index b2e3e3f287..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/error.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import ( - "fmt" - - bq "google.golang.org/api/bigquery/v2" -) - -// An Error contains detailed information about an error encountered while processing a job. -type Error struct { - // Mirrors bq.ErrorProto, but drops DebugInfo - Location, Message, Reason string -} - -func (e Error) Error() string { - return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason) -} - -func errorFromErrorProto(ep *bq.ErrorProto) *Error { - if ep == nil { - return nil - } - return &Error{ - Location: ep.Location, - Message: ep.Message, - Reason: ep.Reason, - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_op.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_op.go deleted file mode 100644 index 63a6428f1d..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_op.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import ( - "fmt" - - "golang.org/x/net/context" - bq "google.golang.org/api/bigquery/v2" -) - -type extractOption interface { - customizeExtract(conf *bq.JobConfigurationExtract, projectID string) -} - -// DisableHeader returns an Option that disables the printing of a header row in exported data. 
-func DisableHeader() Option { return disableHeader{} } - -type disableHeader struct{} - -func (opt disableHeader) implementsOption() {} - -func (opt disableHeader) customizeExtract(conf *bq.JobConfigurationExtract, projectID string) { - f := false - conf.PrintHeader = &f -} - -func (c *Client) extract(ctx context.Context, dst *GCSReference, src *Table, options []Option) (*Job, error) { - job, options := initJobProto(c.projectID, options) - payload := &bq.JobConfigurationExtract{} - - dst.customizeExtractDst(payload, c.projectID) - src.customizeExtractSrc(payload, c.projectID) - - for _, opt := range options { - o, ok := opt.(extractOption) - if !ok { - return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) - } - o.customizeExtract(payload, c.projectID) - } - - job.Configuration = &bq.JobConfiguration{ - Extract: payload, - } - return c.service.insertJob(ctx, job, c.projectID) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_test.go deleted file mode 100644 index 4fa647491d..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
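Extraction goes through the same `Copy` front door with the operands reversed: a `*GCSReference` destination and a `*Table` source select the `extract` path above. A small hypothetical sketch (all names are placeholders; `client` is assumed to be built as in the earlier `NewClient` example):

```go
package bigqueryexample

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/cloud/bigquery"
)

// exportTable exports a table to Cloud Storage as gzipped CSV with no header row.
func exportTable(ctx context.Context, client *bigquery.Client) error {
	// A '*' wildcard lets BigQuery shard large exports across objects.
	dst := client.NewGCSReference("gs://my-bucket/export-*.csv.gz")
	dst.Compression = bigquery.Gzip

	src := &bigquery.Table{ProjectID: "my-project-id", DatasetID: "mydataset", TableID: "mytable"}

	// A *GCSReference dst with a *Table src selects the extract path in Copy.
	job, err := client.Copy(ctx, dst, src, bigquery.DisableHeader())
	if err != nil {
		return err
	}
	log.Printf("started extract job %s", job.ID())
	return nil
}
```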
- -package bigquery - -import ( - "reflect" - "testing" - - "golang.org/x/net/context" - - bq "google.golang.org/api/bigquery/v2" -) - -func defaultExtractJob() *bq.Job { - return &bq.Job{ - Configuration: &bq.JobConfiguration{ - Extract: &bq.JobConfigurationExtract{ - SourceTable: &bq.TableReference{ - ProjectId: "project-id", - DatasetId: "dataset-id", - TableId: "table-id", - }, - DestinationUris: []string{"uri"}, - }, - }, - } -} - -func TestExtract(t *testing.T) { - testCases := []struct { - dst *GCSReference - src *Table - options []Option - want *bq.Job - }{ - { - dst: defaultGCS, - src: defaultTable, - want: defaultExtractJob(), - }, - { - dst: defaultGCS, - src: defaultTable, - options: []Option{ - DisableHeader(), - }, - want: func() *bq.Job { - j := defaultExtractJob() - f := false - j.Configuration.Extract.PrintHeader = &f - return j - }(), - }, - { - dst: &GCSReference{ - uris: []string{"uri"}, - Compression: Gzip, - DestinationFormat: JSON, - FieldDelimiter: "\t", - }, - src: defaultTable, - want: func() *bq.Job { - j := defaultExtractJob() - j.Configuration.Extract.Compression = "GZIP" - j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON" - j.Configuration.Extract.FieldDelimiter = "\t" - return j - }(), - }, - } - - for _, tc := range testCases { - s := &testService{} - c := &Client{ - service: s, - } - if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil { - t.Errorf("err calling extract: %v", err) - continue - } - if !reflect.DeepEqual(s.Job, tc.want) { - t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want) - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/gcs.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/gcs.go deleted file mode 100644 index d48859bf06..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/gcs.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import bq "google.golang.org/api/bigquery/v2" - -// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute -// an input or output to a BigQuery operation. -type GCSReference struct { - uris []string - - // FieldDelimiter is the separator for fields in a CSV file, used when loading or exporting data. - // The default is ",". - FieldDelimiter string - - // The number of rows at the top of a CSV file that BigQuery will skip when loading the data. - SkipLeadingRows int64 - - // SourceFormat is the format of the GCS data to be loaded into BigQuery. - // Allowed values are: CSV, JSON, DatastoreBackup. The default is CSV. - SourceFormat DataFormat - // Only used when loading data. - Encoding Encoding - - // Quote is the value used to quote data sections in a CSV file. - // The default quotation character is the double quote ("), which is used if both Quote and ForceZeroQuote are unset. 
- // To specify that no character should be interpreted as a quotation character, set ForceZeroQuote to true. - // Only used when loading data. - Quote string - ForceZeroQuote bool - - // DestinationFormat is the format to use when writing exported files. - // Allowed values are: CSV, Avro, JSON. The default is CSV. - // CSV is not supported for tables with nested or repeated fields. - DestinationFormat DataFormat - // Only used when writing data. Default is None. - Compression Compression -} - -func (gcs *GCSReference) implementsSource() {} -func (gcs *GCSReference) implementsDestination() {} - -// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination. -// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object. -// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided. -// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name. -// For more information about the treatment of wildcards and multiple URIs, -// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple -func (c *Client) NewGCSReference(uri ...string) *GCSReference { - return &GCSReference{uris: uri} -} - -type DataFormat string - -const ( - CSV DataFormat = "CSV" - Avro DataFormat = "AVRO" - JSON DataFormat = "NEWLINE_DELIMITED_JSON" - DatastoreBackup DataFormat = "DATASTORE_BACKUP" -) - -// Encoding specifies the character encoding of data to be loaded into BigQuery. -// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding -// for more details about how this is used. -type Encoding string - -const ( - UTF_8 Encoding = "UTF-8" - ISO_8859_1 Encoding = "ISO-8859-1" -) - -// Compression is the type of compression to apply when writing data to Google Cloud Storage. -type Compression string - -const ( - None Compression = "NONE" - Gzip Compression = "GZIP" -) - -func (gcs *GCSReference) customizeLoadSrc(conf *bq.JobConfigurationLoad, projectID string) { - conf.SourceUris = gcs.uris - conf.SkipLeadingRows = gcs.SkipLeadingRows - conf.SourceFormat = string(gcs.SourceFormat) - conf.Encoding = string(gcs.Encoding) - conf.FieldDelimiter = gcs.FieldDelimiter - - if gcs.ForceZeroQuote { - quote := "" - conf.Quote = &quote - } else if gcs.Quote != "" { - conf.Quote = &gcs.Quote - } -} - -func (gcs *GCSReference) customizeExtractDst(conf *bq.JobConfigurationExtract, projectID string) { - conf.DestinationUris = gcs.uris - conf.Compression = string(gcs.Compression) - conf.DestinationFormat = string(gcs.DestinationFormat) - conf.FieldDelimiter = gcs.FieldDelimiter -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator.go deleted file mode 100644 index ff8fdc0275..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import ( - "errors" - "fmt" - - "golang.org/x/net/context" -) - -// A pageFetcher returns a page of rows, starting from the row specified by token. -type pageFetcher interface { - fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) -} - -// Iterator provides access to the result of a BigQuery lookup. -// Next must be called before the first call to Get. -type Iterator struct { - c *Client - - err error // contains any error encountered during calls to Next. - - // Once Next has been called at least once, rs contains the current - // page of data and nextToken contains the token for fetching the next - // page (empty if there is no more data to be fetched). - rs [][]Value - nextToken string - - // The remaining fields contain enough information to fetch the current - // page of data, and determine which row of data from this page is the - // current row. - - pf pageFetcher - pageToken string - - // The offset from the start of the current page to the current row. - // For a new iterator, this is -1. - offset int64 -} - -func newIterator(c *Client, pf pageFetcher) *Iterator { - return &Iterator{ - c: c, - pf: pf, - offset: -1, - } -} - -// fetchPage loads the current page of data from the server. -// The contents of rs and nextToken are replaced with the loaded data. -// If there is an error while fetching, the error is stored in it.err and false is returned. -func (it *Iterator) fetchPage(ctx context.Context) bool { - var res *readDataResult - var err error - for { - res, err = it.pf.fetch(ctx, it.c, it.pageToken) - if err != errIncompleteJob { - break - } - } - - if err != nil { - it.err = err - return false - } - - it.rs = res.rows - it.nextToken = res.pageToken - return true -} - -// getEnoughData loads new data into rs until offset no longer points beyond the end of rs. -func (it *Iterator) getEnoughData(ctx context.Context) bool { - if len(it.rs) == 0 { - // Either we have not yet fetched any pages, or we are iterating over an empty dataset. - // In the former case, we should fetch a page of data, so that we can depend on the resultant nextToken. - // In the latter case, it is harmless to fetch a page of data. - if !it.fetchPage(ctx) { - return false - } - } - - for it.offset >= int64(len(it.rs)) { - // If offset is still outside the bounds of the loaded data, - // but there are no more pages of data to fetch, then we have - // failed to satisfy the offset. - if it.nextToken == "" { - return false - } - - // offset cannot be satisfied with the currently loaded data, - // so we fetch the next page. We no longer need the existing - // cached rows, so we remove them and update the offset to be - // relative to the new page that we're about to fetch. - // NOTE: we can't just set offset to 0, because after - // marshalling/unmarshalling, it's possible for the offset to - // point arbitrarily far beyond the end of rs. - // This can happen if the server returns a different size - // results page before and after marshalling. - it.offset -= int64(len(it.rs)) - it.pageToken = it.nextToken - if !it.fetchPage(ctx) { - return false - } - } - return true -} - -// Next advances the Iterator to the next row, making that row available -// via the Get method. -// Next must be called before the first call to Get, and blocks until data is available. 
-// Next returns false when there are no more rows available, either because -// the end of the output was reached, or because there was an error (consult -// the Err method to determine which). -func (it *Iterator) Next(ctx context.Context) bool { - if it.err != nil { - return false - } - - // Advance offset to where we want it to be for the next call to Get. - it.offset++ - - // offset may now point beyond the end of rs, so we fetch data - // until offset is within its bounds again. If there are no more - // results available, offset will be left pointing beyond the bounds - // of rs. - // At the end of this method, rs will contain at least one element - // unless the dataset we are iterating over is empty. - return it.getEnoughData(ctx) -} - -// Err returns the last error encountered by Next, or nil for no error. -func (it *Iterator) Err() error { - return it.err -} - -// Get loads the current row into dst, which must implement ValueLoader. -func (it *Iterator) Get(dst interface{}) error { - if it.err != nil { - return fmt.Errorf("Get called on iterator in error state: %v", it.err) - } - - // If Next has been called, then offset should always index into a - // valid row in rs, as long as there is still data available. - if it.offset >= int64(len(it.rs)) || it.offset < 0 { - return errors.New("Get called without preceding successful call to Next") - } - - if dst, ok := dst.(ValueLoader); ok { - return dst.Load(it.rs[it.offset]) - } - return errors.New("Get called with unsupported argument type") -} - -// TODO(mcgreevy): Add a method to *Iterator that returns a schema which describes the data. diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator_test.go deleted file mode 100644 index b1f23c28a7..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator_test.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import ( - "errors" - "fmt" - "reflect" - "testing" - - "golang.org/x/net/context" -) - -type fetchResponse struct { - result *readDataResult // The result to return. - err error // The error to return. -} - -// pageFetcherStub services fetch requests by returning data from an in-memory list of values. -type pageFetcherStub struct { - fetchResponses map[string]fetchResponse - - err error -} - -func (pf *pageFetcherStub) fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) { - call, ok := pf.fetchResponses[token] - if !ok { - pf.err = fmt.Errorf("Unexpected page token: %q", token) - } - return call.result, call.err -} - -func TestIterator(t *testing.T) { - fetchFailure := errors.New("fetch failure") - - testCases := []struct { - desc string - alreadyConsumed int64 // amount to advance offset before commencing reading. 
- fetchResponses map[string]fetchResponse - want []ValueList - wantErr error - }{ - { - desc: "Iteration over single empty page", - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "", - rows: [][]Value{}, - }, - }, - }, - want: []ValueList{}, - }, - { - desc: "Iteration over single page", - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "", - rows: [][]Value{{1, 2}, {11, 12}}, - }, - }, - }, - want: []ValueList{{1, 2}, {11, 12}}, - }, - { - desc: "Iteration over two pages", - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "a", - rows: [][]Value{{1, 2}, {11, 12}}, - }, - }, - "a": { - result: &readDataResult{ - pageToken: "", - rows: [][]Value{{101, 102}, {111, 112}}, - }, - }, - }, - want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}}, - }, - { - desc: "Server response includes empty page", - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "a", - rows: [][]Value{{1, 2}, {11, 12}}, - }, - }, - "a": { - result: &readDataResult{ - pageToken: "b", - rows: [][]Value{}, - }, - }, - "b": { - result: &readDataResult{ - pageToken: "", - rows: [][]Value{{101, 102}, {111, 112}}, - }, - }, - }, - want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}}, - }, - { - desc: "Fetch error", - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "a", - rows: [][]Value{{1, 2}, {11, 12}}, - }, - }, - "a": { - // We return some data from this fetch, but also an error. - // So the end result should include only data from the previous fetch. - err: fetchFailure, - result: &readDataResult{ - pageToken: "b", - rows: [][]Value{{101, 102}, {111, 112}}, - }, - }, - }, - want: []ValueList{{1, 2}, {11, 12}}, - wantErr: fetchFailure, - }, - { - desc: "Skip over a single element", - alreadyConsumed: 1, - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "a", - rows: [][]Value{{1, 2}, {11, 12}}, - }, - }, - "a": { - result: &readDataResult{ - pageToken: "", - rows: [][]Value{{101, 102}, {111, 112}}, - }, - }, - }, - want: []ValueList{{11, 12}, {101, 102}, {111, 112}}, - }, - { - desc: "Skip over an entire page", - alreadyConsumed: 2, - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "a", - rows: [][]Value{{1, 2}, {11, 12}}, - }, - }, - "a": { - result: &readDataResult{ - pageToken: "", - rows: [][]Value{{101, 102}, {111, 112}}, - }, - }, - }, - want: []ValueList{{101, 102}, {111, 112}}, - }, - { - desc: "Skip beyond start of second page", - alreadyConsumed: 3, - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "a", - rows: [][]Value{{1, 2}, {11, 12}}, - }, - }, - "a": { - result: &readDataResult{ - pageToken: "", - rows: [][]Value{{101, 102}, {111, 112}}, - }, - }, - }, - want: []ValueList{{111, 112}}, - }, - { - desc: "Skip beyond all data", - alreadyConsumed: 4, - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "a", - rows: [][]Value{{1, 2}, {11, 12}}, - }, - }, - "a": { - result: &readDataResult{ - pageToken: "", - rows: [][]Value{{101, 102}, {111, 112}}, - }, - }, - }, - // In this test case, Next will return false on its first call, - // so we won't even attempt to call Get.
- want: []ValueList{}, - }, - } - - for _, tc := range testCases { - pf := &pageFetcherStub{ - fetchResponses: tc.fetchResponses, - } - it := newIterator(nil, pf) - it.offset += tc.alreadyConsumed - - values, err := consumeIterator(it) - if err != nil { - t.Fatalf("%s: %v", tc.desc, err) - } - - if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) { - t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want) - } - if it.Err() != tc.wantErr { - t.Errorf("%s: iterator.Err:\ngot: %v\nwant: %v", tc.desc, it.Err(), tc.wantErr) - } - } -} - -// consumeIterator reads all values from an iterator and returns them. -func consumeIterator(it *Iterator) ([]ValueList, error) { - var got []ValueList - for it.Next(context.Background()) { - var vals ValueList - if err := it.Get(&vals); err != nil { - return nil, fmt.Errorf("err calling Get: %v", err) - } else { - got = append(got, vals) - } - } - - return got, nil -} - -func TestGetBeforeNext(t *testing.T) { - // TODO: once marshalling/unmarshalling of iterators is implemented, do a similar test for unmarshalled iterators. - pf := &pageFetcherStub{ - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "", - rows: [][]Value{{1, 2}, {11, 12}}, - }, - }, - }, - } - it := newIterator(nil, pf) - var vals ValueList - if err := it.Get(&vals); err == nil { - t.Errorf("Expected error calling Get before Next") - } -} - -type delayedPageFetcher struct { - pageFetcherStub - delayCount int -} - -func (pf *delayedPageFetcher) fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) { - if pf.delayCount > 0 { - pf.delayCount-- - return nil, errIncompleteJob - } - return pf.pageFetcherStub.fetch(ctx, c, token) -} - -func TestIterateIncompleteJob(t *testing.T) { - want := []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}} - pf := pageFetcherStub{ - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "a", - rows: [][]Value{{1, 2}, {11, 12}}, - }, - }, - "a": { - result: &readDataResult{ - pageToken: "", - rows: [][]Value{{101, 102}, {111, 112}}, - }, - }, - }, - } - dpf := &delayedPageFetcher{ - pageFetcherStub: pf, - delayCount: 1, - } - it := newIterator(nil, dpf) - - values, err := consumeIterator(it) - if err != nil { - t.Fatal(err) - } - - if (len(values) != 0 || len(want) != 0) && !reflect.DeepEqual(values, want) { - t.Errorf("values: got:\n%v\nwant:\n%v", values, want) - } - if it.Err() != nil { - t.Fatalf("iterator.Err: got:\n%v", it.Err()) - } - if dpf.delayCount != 0 { - t.Errorf("delayCount: got: %v, want: 0", dpf.delayCount) - } -} - -func TestGetDuringErrorState(t *testing.T) { - pf := &pageFetcherStub{ - fetchResponses: map[string]fetchResponse{ - "": {err: errors.New("bang")}, - }, - } - it := newIterator(nil, pf) - var vals ValueList - it.Next(context.Background()) - if it.Err() == nil { - t.Errorf("Expected error after calling Next") - } - if err := it.Get(&vals); err == nil { - t.Errorf("Expected error calling Get when iterator has a non-nil error.") - } -} - -func TestGetAfterFinished(t *testing.T) { - testCases := []struct { - alreadyConsumed int64 // amount to advance offset before commencing reading.
- fetchResponses map[string]fetchResponse - want []ValueList - }{ - { - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "", - rows: [][]Value{{1, 2}, {11, 12}}, - }, - }, - }, - want: []ValueList{{1, 2}, {11, 12}}, - }, - { - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "", - rows: [][]Value{}, - }, - }, - }, - want: []ValueList{}, - }, - { - alreadyConsumed: 100, - fetchResponses: map[string]fetchResponse{ - "": { - result: &readDataResult{ - pageToken: "", - rows: [][]Value{{1, 2}, {11, 12}}, - }, - }, - }, - want: []ValueList{}, - }, - } - - for _, tc := range testCases { - pf := &pageFetcherStub{ - fetchResponses: tc.fetchResponses, - } - it := newIterator(nil, pf) - it.offset += tc.alreadyConsumed - - values, err := consumeIterator(it) - if err != nil { - t.Fatal(err) - } - - if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) { - t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want) - } - if it.Err() != nil { - t.Fatalf("iterator.Err: got:\n%v\nwant:\n:nil", it.Err()) - } - // Try calling Get again. - var vals ValueList - if err := it.Get(&vals); err == nil { - t.Errorf("Expected error calling Get when there are no more values") - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/job.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/job.go deleted file mode 100644 index 832f0e9cb2..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/job.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import ( - "errors" - - "golang.org/x/net/context" - bq "google.golang.org/api/bigquery/v2" -) - -// A Job represents an operation which has been submitted to BigQuery for processing. -type Job struct { - service service - projectID string - jobID string - - isQuery bool -} - -// JobFromID creates a Job which refers to an existing BigQuery job. The job -// need not have been created by this package. For example, the job may have -// been created in the BigQuery console. -func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) { - jobType, err := c.service.getJobType(ctx, c.projectID, id) - if err != nil { - return nil, err - } - - return &Job{ - service: c.service, - projectID: c.projectID, - jobID: id, - isQuery: jobType == queryJobType, - }, nil -} - -func (j *Job) ID() string { - return j.jobID -} - -// State is one of a sequence of states that a Job progresses through as it is processed. -type State int - -const ( - Pending State = iota - Running - Done -) - -// JobStatus contains the current State of a job, and errors encountered while processing that job. -type JobStatus struct { - State State - - err error - - // All errors encountered during the running of the job. - // Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful. 
- Errors []*Error -} - -// jobOption is an Option which modifies a bq.Job proto. -// This is used for configuring values that apply to all operations, such as setting a jobReference. -type jobOption interface { - customizeJob(job *bq.Job, projectID string) -} - -type jobID string - -// JobID returns an Option that sets the job ID of a BigQuery job. -// If this Option is not used, a job ID is generated automatically. -func JobID(ID string) Option { - return jobID(ID) -} - -func (opt jobID) implementsOption() {} - -func (opt jobID) customizeJob(job *bq.Job, projectID string) { - job.JobReference = &bq.JobReference{ - JobId: string(opt), - ProjectId: projectID, - } -} - -// Done reports whether the job has completed. -// After Done returns true, the Err method will return an error if the job completed unsuccessfully. -func (s *JobStatus) Done() bool { - return s.State == Done -} - -// Err returns the error that caused the job to complete unsuccessfully (if any). -func (s *JobStatus) Err() error { - return s.err -} - -// Status returns the current status of the job. It fails if the Status could not be determined. -func (j *Job) Status(ctx context.Context) (*JobStatus, error) { - return j.service.jobStatus(ctx, j.projectID, j.jobID) -} - -func (j *Job) implementsReadSource() {} - -func (j *Job) customizeReadQuery(cursor *readQueryConf) error { - // There are multiple kinds of jobs, but only a query job is suitable for reading. - if !j.isQuery { - return errors.New("Cannot read from a non-query job") - } - - cursor.projectID = j.projectID - cursor.jobID = j.jobID - return nil -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_op.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_op.go deleted file mode 100644 index 46ec88cdc2..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_op.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import ( - "fmt" - - "golang.org/x/net/context" - bq "google.golang.org/api/bigquery/v2" -) - -type loadOption interface { - customizeLoad(conf *bq.JobConfigurationLoad, projectID string) -} - -// DestinationSchema returns an Option that specifies the schema to use when loading data into a new table. -// A DestinationSchema Option must be supplied when loading data from Google Cloud Storage into a non-existent table. -// Caveat: DestinationSchema is not required if the data being loaded is a datastore backup. -// schema must not be nil. -func DestinationSchema(schema Schema) Option { return destSchema{Schema: schema} } - -type destSchema struct { - Schema -} - -func (opt destSchema) implementsOption() {} - -func (opt destSchema) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { - conf.Schema = opt.asTableSchema() -} - -// MaxBadRecords returns an Option that sets the maximum number of bad records that will be ignored.
-// If this maximum is exceeded, the operation will be unsuccessful. -func MaxBadRecords(n int64) Option { return maxBadRecords(n) } - -type maxBadRecords int64 - -func (opt maxBadRecords) implementsOption() {} - -func (opt maxBadRecords) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { - conf.MaxBadRecords = int64(opt) -} - -// AllowJaggedRows returns an Option that causes missing trailing optional columns to be tolerated in CSV data. Missing values are treated as nulls. -func AllowJaggedRows() Option { return allowJaggedRows{} } - -type allowJaggedRows struct{} - -func (opt allowJaggedRows) implementsOption() {} - -func (opt allowJaggedRows) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { - conf.AllowJaggedRows = true -} - -// AllowQuotedNewlines returns an Option that allows quoted data sections containing newlines in CSV data. -func AllowQuotedNewlines() Option { return allowQuotedNewlines{} } - -type allowQuotedNewlines struct{} - -func (opt allowQuotedNewlines) implementsOption() {} - -func (opt allowQuotedNewlines) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { - conf.AllowQuotedNewlines = true -} - -// IgnoreUnknownValues returns an Option that causes values not matching the schema to be tolerated. -// Unknown values are ignored. For CSV this ignores extra values at the end of a line. -// For JSON this ignores named values that do not match any column name. -// If this Option is not used, records containing unknown values are treated as bad records. -// The MaxBadRecords Option can be used to customize how bad records are handled. -func IgnoreUnknownValues() Option { return ignoreUnknownValues{} } - -type ignoreUnknownValues struct{} - -func (opt ignoreUnknownValues) implementsOption() {} - -func (opt ignoreUnknownValues) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { - conf.IgnoreUnknownValues = true -} - -func (c *Client) load(ctx context.Context, dst *Table, src *GCSReference, options []Option) (*Job, error) { - job, options := initJobProto(c.projectID, options) - payload := &bq.JobConfigurationLoad{} - - dst.customizeLoadDst(payload, c.projectID) - src.customizeLoadSrc(payload, c.projectID) - - for _, opt := range options { - o, ok := opt.(loadOption) - if !ok { - return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) - } - o.customizeLoad(payload, c.projectID) - } - - job.Configuration = &bq.JobConfiguration{ - Load: payload, - } - return c.service.insertJob(ctx, job, c.projectID) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_test.go deleted file mode 100644 index df6cec0cf6..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_test.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
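The load options above compose the same way on a load job. A hypothetical sketch that loads a CSV object into a table that may not exist yet (all names are placeholders; `client` setup is as in the first sketch):

```go
package bigqueryexample

import (
	"golang.org/x/net/context"
	"google.golang.org/cloud/bigquery"
)

// loadCSV loads a CSV object from Cloud Storage into a possibly new table.
func loadCSV(ctx context.Context, client *bigquery.Client) (*bigquery.Job, error) {
	src := client.NewGCSReference("gs://my-bucket/people.csv")
	src.SkipLeadingRows = 1 // the first row is a header, not data

	dst := &bigquery.Table{ProjectID: "my-project-id", DatasetID: "mydataset", TableID: "people"}

	return client.Copy(ctx, dst, src,
		// DestinationSchema is mandatory when the destination table does not
		// already exist (unless the source is a datastore backup).
		bigquery.DestinationSchema(bigquery.Schema{
			&bigquery.FieldSchema{Name: "name", Type: bigquery.StringFieldType},
		}),
		bigquery.MaxBadRecords(10),     // tolerate up to ten unparseable rows
		bigquery.IgnoreUnknownValues(), // drop extra trailing CSV values
	)
}
```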
- -package bigquery - -import ( - "reflect" - "testing" - - "golang.org/x/net/context" - - bq "google.golang.org/api/bigquery/v2" -) - -func defaultLoadJob() *bq.Job { - return &bq.Job{ - Configuration: &bq.JobConfiguration{ - Load: &bq.JobConfigurationLoad{ - DestinationTable: &bq.TableReference{ - ProjectId: "project-id", - DatasetId: "dataset-id", - TableId: "table-id", - }, - SourceUris: []string{"uri"}, - }, - }, - } -} - -func stringFieldSchema() *FieldSchema { - return &FieldSchema{Name: "fieldname", Type: StringFieldType} -} - -func nestedFieldSchema() *FieldSchema { - return &FieldSchema{ - Name: "nested", - Type: RecordFieldType, - Schema: Schema{stringFieldSchema()}, - } -} - -func bqStringFieldSchema() *bq.TableFieldSchema { - return &bq.TableFieldSchema{ - Name: "fieldname", - Type: "STRING", - } -} - -func bqNestedFieldSchema() *bq.TableFieldSchema { - return &bq.TableFieldSchema{ - Name: "nested", - Type: "RECORD", - Fields: []*bq.TableFieldSchema{bqStringFieldSchema()}, - } -} - -func TestLoad(t *testing.T) { - testCases := []struct { - dst *Table - src *GCSReference - options []Option - want *bq.Job - }{ - { - dst: defaultTable, - src: defaultGCS, - want: defaultLoadJob(), - }, - { - dst: defaultTable, - src: defaultGCS, - options: []Option{ - MaxBadRecords(1), - AllowJaggedRows(), - AllowQuotedNewlines(), - IgnoreUnknownValues(), - }, - want: func() *bq.Job { - j := defaultLoadJob() - j.Configuration.Load.MaxBadRecords = 1 - j.Configuration.Load.AllowJaggedRows = true - j.Configuration.Load.AllowQuotedNewlines = true - j.Configuration.Load.IgnoreUnknownValues = true - return j - }(), - }, - { - dst: &Table{ - ProjectID: "project-id", - DatasetID: "dataset-id", - TableID: "table-id", - }, - options: []Option{CreateNever, WriteTruncate}, - src: defaultGCS, - want: func() *bq.Job { - j := defaultLoadJob() - j.Configuration.Load.CreateDisposition = "CREATE_NEVER" - j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE" - return j - }(), - }, - { - dst: &Table{ - ProjectID: "project-id", - DatasetID: "dataset-id", - TableID: "table-id", - }, - src: defaultGCS, - options: []Option{ - DestinationSchema(Schema{ - stringFieldSchema(), - nestedFieldSchema(), - }), - }, - want: func() *bq.Job { - j := defaultLoadJob() - j.Configuration.Load.Schema = &bq.TableSchema{ - Fields: []*bq.TableFieldSchema{ - bqStringFieldSchema(), - bqNestedFieldSchema(), - }} - return j - }(), - }, - { - dst: defaultTable, - src: &GCSReference{ - uris: []string{"uri"}, - SkipLeadingRows: 1, - SourceFormat: JSON, - Encoding: UTF_8, - FieldDelimiter: "\t", - Quote: "-", - }, - want: func() *bq.Job { - j := defaultLoadJob() - j.Configuration.Load.SkipLeadingRows = 1 - j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" - j.Configuration.Load.Encoding = "UTF-8" - j.Configuration.Load.FieldDelimiter = "\t" - hyphen := "-" - j.Configuration.Load.Quote = &hyphen - return j - }(), - }, - { - dst: defaultTable, - src: &GCSReference{ - uris: []string{"uri"}, - Quote: "", - }, - want: func() *bq.Job { - j := defaultLoadJob() - j.Configuration.Load.Quote = nil - return j - }(), - }, - { - dst: defaultTable, - src: &GCSReference{ - uris: []string{"uri"}, - Quote: "", - ForceZeroQuote: true, - }, - want: func() *bq.Job { - j := defaultLoadJob() - empty := "" - j.Configuration.Load.Quote = &empty - return j - }(), - }, - } - - for _, tc := range testCases { - s := &testService{} - c := &Client{ - service: s, - } - if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil { - 
t.Errorf("err calling load: %v", err) - continue - } - if !reflect.DeepEqual(s.Job, tc.want) { - t.Errorf("loading: got:\n%v\nwant:\n%v", s.Job, tc.want) - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query.go deleted file mode 100644 index 99807a73ec..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import bq "google.golang.org/api/bigquery/v2" - -// Query represents a query to be executed. -type Query struct { - // The query to execute. See https://cloud.google.com/bigquery/query-reference for details. - Q string - - // DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query. - // If DefaultProjectID is set, DefaultDatasetID must also be set. - DefaultProjectID string - DefaultDatasetID string -} - -func (q *Query) implementsSource() {} - -func (q *Query) implementsReadSource() {} - -func (q *Query) customizeQuerySrc(conf *bq.JobConfigurationQuery, projectID string) { - conf.Query = q.Q - if q.DefaultProjectID != "" || q.DefaultDatasetID != "" { - conf.DefaultDataset = &bq.DatasetReference{ - DatasetId: q.DefaultDatasetID, - ProjectId: q.DefaultProjectID, - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_op.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_op.go deleted file mode 100644 index af62b5b3bf..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_op.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import ( - "fmt" - - "golang.org/x/net/context" - bq "google.golang.org/api/bigquery/v2" -) - -type queryOption interface { - customizeQuery(conf *bq.JobConfigurationQuery, projectID string) -} - -// DisableQueryCache returns an Option that prevents results being fetched from the query cache. -// If this Option is not used, results are fetched from the cache if they are available. -// The query cache is a best-effort cache that is flushed whenever tables in the query are modified. -// Cached results are only available when TableID is unspecified in the query's destination Table. 
-// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching -func DisableQueryCache() Option { return disableQueryCache{} } - -type disableQueryCache struct{} - -func (opt disableQueryCache) implementsOption() {} - -func (opt disableQueryCache) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) { - f := false - conf.UseQueryCache = &f -} - -// JobPriority returns an Option that causes a query to be scheduled with the specified priority. -// The default priority is InteractivePriority. -// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries -func JobPriority(priority string) Option { return jobPriority(priority) } - -type jobPriority string - -func (opt jobPriority) implementsOption() {} - -func (opt jobPriority) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) { - conf.Priority = string(opt) -} - -const ( - BatchPriority = "BATCH" - InteractivePriority = "INTERACTIVE" -) - -// TODO(mcgreevy): support large results. -// TODO(mcgreevy): support non-flattened results. - -func (c *Client) query(ctx context.Context, dst *Table, src *Query, options []Option) (*Job, error) { - job, options := initJobProto(c.projectID, options) - payload := &bq.JobConfigurationQuery{} - - dst.customizeQueryDst(payload, c.projectID) - src.customizeQuerySrc(payload, c.projectID) - - for _, opt := range options { - o, ok := opt.(queryOption) - if !ok { - return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src) - } - o.customizeQuery(payload, c.projectID) - } - - job.Configuration = &bq.JobConfiguration{ - Query: payload, - } - j, err := c.service.insertJob(ctx, job, c.projectID) - if err != nil { - return nil, err - } - j.isQuery = true - return j, nil -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_test.go deleted file mode 100644 index 119d01b2b0..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
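The query options above plumb through the same Copy entry point, as the test cases in query_test.go below confirm. A sketch of a cache-skipping batch query, assuming that API; runBatchQuery and all IDs are placeholders:

```go
package bigquery

import "golang.org/x/net/context"

// runBatchQuery sketches a query job scheduled at batch priority with the
// query cache disabled, written against the pre-deletion API.
func runBatchQuery(ctx context.Context, c *Client, dst *Table) (*Job, error) {
	q := &Query{
		Q:                "SELECT corpus FROM publicdata:samples.shakespeare",
		DefaultProjectID: "my-project",
		DefaultDatasetID: "my_dataset",
	}
	return c.Copy(ctx, dst, q,
		DisableQueryCache(),        // always recompute; never serve cached results
		JobPriority(BatchPriority), // batch rather than interactive scheduling
	)
}
```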
- -package bigquery - -import ( - "reflect" - "testing" - - "golang.org/x/net/context" - - bq "google.golang.org/api/bigquery/v2" -) - -func defaultQueryJob() *bq.Job { - return &bq.Job{ - Configuration: &bq.JobConfiguration{ - Query: &bq.JobConfigurationQuery{ - DestinationTable: &bq.TableReference{ - ProjectId: "project-id", - DatasetId: "dataset-id", - TableId: "table-id", - }, - Query: "query string", - DefaultDataset: &bq.DatasetReference{ - ProjectId: "def-project-id", - DatasetId: "def-dataset-id", - }, - }, - }, - } -} - -func TestQuery(t *testing.T) { - testCases := []struct { - dst *Table - src *Query - options []Option - want *bq.Job - }{ - { - dst: defaultTable, - src: defaultQuery, - want: defaultQueryJob(), - }, - { - dst: defaultTable, - src: &Query{ - Q: "query string", - }, - want: func() *bq.Job { - j := defaultQueryJob() - j.Configuration.Query.DefaultDataset = nil - return j - }(), - }, - { - dst: &Table{}, - src: defaultQuery, - want: func() *bq.Job { - j := defaultQueryJob() - j.Configuration.Query.DestinationTable = nil - return j - }(), - }, - { - dst: &Table{ - ProjectID: "project-id", - DatasetID: "dataset-id", - TableID: "table-id", - }, - src: defaultQuery, - options: []Option{CreateNever, WriteTruncate}, - want: func() *bq.Job { - j := defaultQueryJob() - j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE" - j.Configuration.Query.CreateDisposition = "CREATE_NEVER" - return j - }(), - }, - { - dst: defaultTable, - src: defaultQuery, - options: []Option{DisableQueryCache()}, - want: func() *bq.Job { - j := defaultQueryJob() - f := false - j.Configuration.Query.UseQueryCache = &f - return j - }(), - }, - } - - for _, tc := range testCases { - s := &testService{} - c := &Client{ - service: s, - } - if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil { - t.Errorf("err calling query: %v", err) - continue - } - if !reflect.DeepEqual(s.Job, tc.want) { - t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want) - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_op.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_op.go deleted file mode 100644 index 0159b12a22..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_op.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import "golang.org/x/net/context" - -// RecordsPerRequest returns a ReadOption that sets the number of records to fetch per request when streaming data from BigQuery. -func RecordsPerRequest(n int64) ReadOption { return recordsPerRequest(n) } - -type recordsPerRequest int64 - -func (opt recordsPerRequest) customizeRead(conf *pagingConf) { - conf.recordsPerRequest = int64(opt) - conf.setRecordsPerRequest = true -} - -// StartIndex returns a ReadOption that sets the zero-based index of the row to start reading from. 
-func StartIndex(i uint64) ReadOption { return startIndex(i) } - -type startIndex uint64 - -func (opt startIndex) customizeRead(conf *pagingConf) { - conf.startIndex = uint64(opt) -} - -func (conf *readTableConf) fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) { - return c.service.readTabledata(ctx, conf, token) -} - -func (c *Client) readTable(t *Table, options []ReadOption) (*Iterator, error) { - conf := &readTableConf{} - t.customizeReadSrc(conf) - - for _, o := range options { - o.customizeRead(&conf.paging) - } - - return newIterator(c, conf), nil -} - -func (conf *readQueryConf) fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) { - return c.service.readQuery(ctx, conf, token) -} - -func (c *Client) readQueryResults(job *Job, options []ReadOption) (*Iterator, error) { - conf := &readQueryConf{} - if err := job.customizeReadQuery(conf); err != nil { - return nil, err - } - - for _, o := range options { - o.customizeRead(&conf.paging) - } - - return newIterator(c, conf), nil -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_test.go deleted file mode 100644 index a03bd3f455..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_test.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import ( - "errors" - "reflect" - "testing" - - "golang.org/x/net/context" -) - -type readTabledataArgs struct { - conf *readTableConf - tok string -} - -type readQueryArgs struct { - conf *readQueryConf - tok string -} - -// readServiceStub services read requests by returning data from an in-memory list of values. -type readServiceStub struct { - // values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery. - values [][][]Value // contains pages / rows / columns. - pageTokens map[string]string // maps incoming page token to returned page token. - - // arguments are recorded for later inspection. 
- readTabledataCalls []readTabledataArgs - readQueryCalls []readQueryArgs - - service -} - -func (s *readServiceStub) readValues(tok string) *readDataResult { - result := &readDataResult{ - pageToken: s.pageTokens[tok], - rows: s.values[0], - } - s.values = s.values[1:] - - return result -} -func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) { - s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token}) - return s.readValues(token), nil -} - -func (s *readServiceStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) { - s.readQueryCalls = append(s.readQueryCalls, readQueryArgs{conf, token}) - return s.readValues(token), nil -} - -func TestRead(t *testing.T) { - // The data for the service stub to return is populated for each test case in the testCases for loop. - service := &readServiceStub{} - c := &Client{ - service: service, - } - - queryJob := &Job{ - projectID: "project-id", - jobID: "job-id", - service: service, - isQuery: true, - } - - for _, src := range []ReadSource{defaultTable, queryJob} { - testCases := []struct { - data [][][]Value - pageTokens map[string]string - want []ValueList - }{ - { - data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}}, - pageTokens: map[string]string{"": "a", "a": ""}, - want: []ValueList{{1, 2}, {11, 12}, {30, 40}, {31, 41}}, - }, - { - data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}}, - pageTokens: map[string]string{"": ""}, // no more pages after first one. - want: []ValueList{{1, 2}, {11, 12}}, - }, - } - - for _, tc := range testCases { - service.values = tc.data - service.pageTokens = tc.pageTokens - if got, ok := doRead(t, c, src); ok { - if !reflect.DeepEqual(got, tc.want) { - t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want) - } - } - } - } -} - -// doRead calls Read with a ReadSource. Get is repeatedly called on the Iterator returned by Read and the results are returned. -func doRead(t *testing.T, c *Client, src ReadSource) ([]ValueList, bool) { - it, err := c.Read(context.Background(), src) - if err != nil { - t.Errorf("err calling Read: %v", err) - return nil, false - } - var got []ValueList - for it.Next(context.Background()) { - var vals ValueList - if err := it.Get(&vals); err != nil { - t.Errorf("err calling Get: %v", err) - return nil, false - } else { - got = append(got, vals) - } - } - - return got, true -} - -func TestNoMoreValues(t *testing.T) { - c := &Client{ - service: &readServiceStub{ - values: [][][]Value{{{1, 2}, {11, 12}}}, - }, - } - it, err := c.Read(context.Background(), defaultTable) - if err != nil { - t.Fatalf("err calling Read: %v", err) - } - var vals ValueList - // We expect to retrieve two values and then fail on the next attempt. - if !it.Next(context.Background()) { - t.Fatalf("Next: got: false: want: true") - } - if !it.Next(context.Background()) { - t.Fatalf("Next: got: false: want: true") - } - if err := it.Get(&vals); err != nil { - t.Fatalf("Get: got: %v: want: nil", err) - } - if it.Next(context.Background()) { - t.Fatalf("Next: got: true: want: false") - } - if err := it.Get(&vals); err == nil { - t.Fatalf("Get: got: %v: want: non-nil", err) - } -} - -// delayedReadStub simulates reading results from a query that has not yet -// completed. Its readQuery method initially reports that the query job is not -// yet complete. Subsequently, it proxies the request through to another -// service stub. 
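The ReadOptions and Iterator plumbing deleted in read_op.go, and stubbed in the tests here, supported the pattern sketched below, assuming the pre-deletion Read/Iterator API; readAll is a placeholder name:

```go
package bigquery

import "golang.org/x/net/context"

// readAll sketches paging through a table with the deleted ReadOptions.
// Read accepts any ReadSource (a *Table or a query *Job).
func readAll(ctx context.Context, c *Client, t *Table) ([]ValueList, error) {
	it, err := c.Read(ctx, t, RecordsPerRequest(100), StartIndex(0))
	if err != nil {
		return nil, err
	}
	var rows []ValueList
	for it.Next(ctx) {
		var vals ValueList
		if err := it.Get(&vals); err != nil {
			return nil, err
		}
		rows = append(rows, vals)
	}
	return rows, it.Err() // a service error surfaces via Err, not Read
}
```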
-type delayedReadStub struct { - numDelays int - - readServiceStub -} - -func (s *delayedReadStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) { - if s.numDelays > 0 { - s.numDelays-- - return nil, errIncompleteJob - } - return s.readServiceStub.readQuery(ctx, conf, token) -} - -// TestIncompleteJob tests that an Iterator which reads from a query job will block until the job is complete. -func TestIncompleteJob(t *testing.T) { - service := &delayedReadStub{ - numDelays: 2, - readServiceStub: readServiceStub{ - values: [][][]Value{{{1, 2}}}, - }, - } - c := &Client{service: service} - queryJob := &Job{ - projectID: "project-id", - jobID: "job-id", - service: service, - isQuery: true, - } - it, err := c.Read(context.Background(), queryJob) - if err != nil { - t.Fatalf("err calling Read: %v", err) - } - var got ValueList - want := ValueList{1, 2} - if !it.Next(context.Background()) { - t.Fatalf("Next: got: false: want: true") - } - if err := it.Get(&got); err != nil { - t.Fatalf("Error calling Get: %v", err) - } - if service.numDelays != 0 { - t.Errorf("remaining numDelays : got: %v want:0", service.numDelays) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("reading: got:\n%v\nwant:\n%v", got, want) - } -} - -type errorReadService struct { - service -} - -func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) { - return nil, errors.New("bang!") -} - -func TestReadError(t *testing.T) { - // test that service read errors are propagated back to the caller. - c := &Client{service: &errorReadService{}} - it, err := c.Read(context.Background(), defaultTable) - if err != nil { - // Read should not return an error; only Err should. - t.Fatalf("err calling Read: %v", err) - } - if it.Next(context.Background()) { - t.Fatalf("Next: got: true: want: false") - } - if err := it.Err(); err.Error() != "bang!" { - t.Fatalf("Get: got: %v: want: bang!", err) - } -} - -func TestReadTabledataOptions(t *testing.T) { - // test that read options are propagated. - s := &readServiceStub{ - values: [][][]Value{{{1, 2}}}, - } - c := &Client{service: s} - it, err := c.Read(context.Background(), defaultTable, RecordsPerRequest(5)) - - if err != nil { - t.Fatalf("err calling Read: %v", err) - } - if !it.Next(context.Background()) { - t.Fatalf("Next: got: false: want: true") - } - - want := []readTabledataArgs{{ - conf: &readTableConf{ - projectID: "project-id", - datasetID: "dataset-id", - tableID: "table-id", - paging: pagingConf{ - recordsPerRequest: 5, - setRecordsPerRequest: true, - }, - }, - tok: "", - }} - - if !reflect.DeepEqual(s.readTabledataCalls, want) { - t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want) - } -} - -func TestReadQueryOptions(t *testing.T) { - // test that read options are propagated. 
- s := &readServiceStub{ - values: [][][]Value{{{1, 2}}}, - } - c := &Client{service: s} - - queryJob := &Job{ - projectID: "project-id", - jobID: "job-id", - service: s, - isQuery: true, - } - it, err := c.Read(context.Background(), queryJob, RecordsPerRequest(5)) - - if err != nil { - t.Fatalf("err calling Read: %v", err) - } - if !it.Next(context.Background()) { - t.Fatalf("Next: got: false: want: true") - } - - want := []readQueryArgs{{ - conf: &readQueryConf{ - projectID: "project-id", - jobID: "job-id", - paging: pagingConf{ - recordsPerRequest: 5, - setRecordsPerRequest: true, - }, - }, - tok: "", - }} - - if !reflect.DeepEqual(s.readQueryCalls, want) { - t.Errorf("reading: got:\n%v\nwant:\n%v", s.readQueryCalls, want) - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema.go deleted file mode 100644 index abc159a53c..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import bq "google.golang.org/api/bigquery/v2" - -// Schema describes the fields in a table or query result. -type Schema []*FieldSchema - -// TODO(mcgreevy): add a function to generate a schema from a struct. - -type FieldSchema struct { - // The field name. - // Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), - // and must start with a letter or underscore. - // The maximum length is 128 characters. - Name string - - // A description of the field. The maximum length is 16,384 characters. - Description string - - // Whether the field may contain multiple values. - Repeated bool - // Whether the field is required. Ignored if Repeated is true. - Required bool - - // The field data type. If Type is Record, then this field contains a nested schema, - // which is described by Schema. - Type FieldType - // Describes the nested schema if Type is set to Record. - Schema Schema -} - -func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema { - tfs := &bq.TableFieldSchema{ - Description: fs.Description, - Name: fs.Name, - Type: string(fs.Type), - } - - if fs.Repeated { - tfs.Mode = "REPEATED" - } else if fs.Required { - tfs.Mode = "REQUIRED" - } // else leave as default, which is interpreted as NULLABLE. 
- - for _, f := range fs.Schema { - tfs.Fields = append(tfs.Fields, f.asTableFieldSchema()) - } - - return tfs -} - -func (s Schema) asTableSchema() *bq.TableSchema { - var fields []*bq.TableFieldSchema - for _, f := range s { - fields = append(fields, f.asTableFieldSchema()) - } - return &bq.TableSchema{Fields: fields} -} - -func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema { - fs := &FieldSchema{ - Description: tfs.Description, - Name: tfs.Name, - Repeated: tfs.Mode == "REPEATED", - Required: tfs.Mode == "REQUIRED", - Type: FieldType(tfs.Type), - } - - for _, f := range tfs.Fields { - fs.Schema = append(fs.Schema, convertTableFieldSchema(f)) - } - return fs -} - -func convertTableSchema(ts *bq.TableSchema) Schema { - var s Schema - for _, f := range ts.Fields { - s = append(s, convertTableFieldSchema(f)) - } - return s -} - -type FieldType string - -const ( - StringFieldType FieldType = "STRING" - IntegerFieldType FieldType = "INTEGER" - FloatFieldType FieldType = "FLOAT" - BooleanFieldType FieldType = "BOOLEAN" - TimestampFieldType FieldType = "TIMESTAMP" - RecordFieldType FieldType = "RECORD" -) diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema_test.go deleted file mode 100644 index 4ff147fe0b..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema_test.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
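The Schema and FieldSchema types removed above supported nested and repeated fields; a sketch of a schema literal wired into a load via the DestinationSchema option exercised in load_test.go (field names are placeholders):

```go
package bigquery

// exampleSchema sketches a nested schema using the deleted types:
// a required string, a nested record, and a repeated string field.
var exampleSchema = Schema{
	&FieldSchema{Name: "name", Type: StringFieldType, Required: true},
	&FieldSchema{
		Name: "address",
		Type: RecordFieldType,
		Schema: Schema{
			&FieldSchema{Name: "city", Type: StringFieldType},
			&FieldSchema{Name: "zip", Type: StringFieldType},
		},
	},
	&FieldSchema{Name: "tags", Type: StringFieldType, Repeated: true},
}

// Usage (sketch): c.Copy(ctx, dst, gcsRef, DestinationSchema(exampleSchema))
```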
- -package bigquery - -import ( - "reflect" - "testing" - - bq "google.golang.org/api/bigquery/v2" -) - -func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema { - return &bq.TableFieldSchema{ - Description: desc, - Name: name, - Mode: mode, - Type: typ, - } -} - -func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema { - return &FieldSchema{ - Description: desc, - Name: name, - Repeated: repeated, - Required: required, - Type: FieldType(typ), - } -} - -func TestSchemaConversion(t *testing.T) { - testCases := []struct { - schema Schema - bqSchema *bq.TableSchema - }{ - { - // required - bqSchema: &bq.TableSchema{ - Fields: []*bq.TableFieldSchema{ - bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), - }, - }, - schema: Schema{ - fieldSchema("desc", "name", "STRING", false, true), - }, - }, - { - // repeated - bqSchema: &bq.TableSchema{ - Fields: []*bq.TableFieldSchema{ - bqTableFieldSchema("desc", "name", "STRING", "REPEATED"), - }, - }, - schema: Schema{ - fieldSchema("desc", "name", "STRING", true, false), - }, - }, - { - // nullable, string - bqSchema: &bq.TableSchema{ - Fields: []*bq.TableFieldSchema{ - bqTableFieldSchema("desc", "name", "STRING", ""), - }, - }, - schema: Schema{ - fieldSchema("desc", "name", "STRING", false, false), - }, - }, - { - // integer - bqSchema: &bq.TableSchema{ - Fields: []*bq.TableFieldSchema{ - bqTableFieldSchema("desc", "name", "INTEGER", ""), - }, - }, - schema: Schema{ - fieldSchema("desc", "name", "INTEGER", false, false), - }, - }, - { - // float - bqSchema: &bq.TableSchema{ - Fields: []*bq.TableFieldSchema{ - bqTableFieldSchema("desc", "name", "FLOAT", ""), - }, - }, - schema: Schema{ - fieldSchema("desc", "name", "FLOAT", false, false), - }, - }, - { - // boolean - bqSchema: &bq.TableSchema{ - Fields: []*bq.TableFieldSchema{ - bqTableFieldSchema("desc", "name", "BOOLEAN", ""), - }, - }, - schema: Schema{ - fieldSchema("desc", "name", "BOOLEAN", false, false), - }, - }, - { - // timestamp - bqSchema: &bq.TableSchema{ - Fields: []*bq.TableFieldSchema{ - bqTableFieldSchema("desc", "name", "TIMESTAMP", ""), - }, - }, - schema: Schema{ - fieldSchema("desc", "name", "TIMESTAMP", false, false), - }, - }, - { - // nested - bqSchema: &bq.TableSchema{ - Fields: []*bq.TableFieldSchema{ - &bq.TableFieldSchema{ - Description: "An outer schema wrapping a nested schema", - Name: "outer", - Mode: "REQUIRED", - Type: "RECORD", - Fields: []*bq.TableFieldSchema{ - bqTableFieldSchema("inner field", "inner", "STRING", ""), - }, - }, - }, - }, - schema: Schema{ - &FieldSchema{ - Description: "An outer schema wrapping a nested schema", - Name: "outer", - Required: true, - Type: "RECORD", - Schema: []*FieldSchema{ - &FieldSchema{ - Description: "inner field", - Name: "inner", - Type: "STRING", - }, - }, - }, - }, - }, - } - - for _, tc := range testCases { - bqSchema := tc.schema.asTableSchema() - if !reflect.DeepEqual(bqSchema, tc.bqSchema) { - t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v", bqSchema, tc.bqSchema) - } - schema := convertTableSchema(tc.bqSchema) - if !reflect.DeepEqual(schema, tc.schema) { - t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema) - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/service.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/service.go deleted file mode 100644 index b57f84e82d..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/service.go +++ /dev/null @@ -1,403 +0,0 @@ -// Copyright 2015 
Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import ( - "errors" - "fmt" - "net/http" - "sync" - "time" - - "golang.org/x/net/context" - bq "google.golang.org/api/bigquery/v2" -) - -// service provides an internal abstraction to isolate the generated -// BigQuery API; most of this package uses this interface instead. -// The single implementation, *bigqueryService, contains all the knowledge -// of the generated BigQuery API. -type service interface { - // Jobs - insertJob(ctx context.Context, job *bq.Job, projectId string) (*Job, error) - getJobType(ctx context.Context, projectId, jobID string) (jobType, error) - jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error) - - // Queries - - // readQuery reads data resulting from a query job. If the job is not - // yet complete, an errIncompleteJob is returned. readQuery may be - // called repeatedly to wait for results indefinitely. - readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) - - readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) - - // Tables - createTable(ctx context.Context, conf *createTableConf) error - getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) - deleteTable(ctx context.Context, projectID, datasetID, tableID string) error - listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) - patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) -} - -type bigqueryService struct { - s *bq.Service -} - -func newBigqueryService(client *http.Client) (*bigqueryService, error) { - s, err := bq.New(client) - if err != nil { - return nil, fmt.Errorf("constructing bigquery client: %v", err) - } - - return &bigqueryService{s: s}, nil -} - -// getPages calls the supplied getPage function repeatedly until there are no pages left to get. -// token is the token of the initial page to start from. Use an empty string to start from the beginning. -func getPages(token string, getPage func(token string) (nextToken string, err error)) error { - for { - var err error - token, err = getPage(token) - if err != nil { - return err - } - if token == "" { - return nil - } - } -} - -func (s *bigqueryService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) { - res, err := s.s.Jobs.Insert(projectID, job).Context(ctx).Do() - if err != nil { - return nil, err - } - return &Job{service: s, projectID: projectID, jobID: res.JobReference.JobId}, nil -} - -type pagingConf struct { - recordsPerRequest int64 - setRecordsPerRequest bool - - startIndex uint64 -} - -type readTableConf struct { - projectID, datasetID, tableID string - paging pagingConf - schema Schema // lazily initialized when the first page of data is fetched. 
-} - -type readDataResult struct { - pageToken string - rows [][]Value - totalRows uint64 - schema Schema -} - -type readQueryConf struct { - projectID, jobID string - paging pagingConf -} - -func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) { - // Prepare request to fetch one page of table data. - req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID) - - if pageToken != "" { - req.PageToken(pageToken) - } else { - req.StartIndex(conf.paging.startIndex) - } - - if conf.paging.setRecordsPerRequest { - req.MaxResults(conf.paging.recordsPerRequest) - } - - // Fetch the table schema in the background, if necessary. - var schemaErr error - var schemaFetch sync.WaitGroup - if conf.schema == nil { - schemaFetch.Add(1) - go func() { - defer schemaFetch.Done() - var t *bq.Table - t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID). - Fields("schema"). - Context(ctx). - Do() - if schemaErr == nil && t.Schema != nil { - conf.schema = convertTableSchema(t.Schema) - } - }() - } - - res, err := req.Context(ctx).Do() - if err != nil { - return nil, err - } - - schemaFetch.Wait() - if schemaErr != nil { - return nil, schemaErr - } - - result := &readDataResult{ - pageToken: res.PageToken, - totalRows: uint64(res.TotalRows), - schema: conf.schema, - } - result.rows, err = convertRows(res.Rows, conf.schema) - if err != nil { - return nil, err - } - return result, nil -} - -var errIncompleteJob = errors.New("internal error: query results not available because job is not complete") - -// getQueryResultsTimeout controls the maximum duration of a request to the -// BigQuery GetQueryResults endpoint. Setting a long timeout here does not -// cause increased overall latency, as results are returned as soon as they are -// available. -const getQueryResultsTimeout = time.Minute - -func (s *bigqueryService) readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) { - req := s.s.Jobs.GetQueryResults(conf.projectID, conf.jobID). - TimeoutMs(getQueryResultsTimeout.Nanoseconds() / 1e6) - - if pageToken != "" { - req.PageToken(pageToken) - } else { - req.StartIndex(conf.paging.startIndex) - } - - if conf.paging.setRecordsPerRequest { - req.MaxResults(conf.paging.recordsPerRequest) - } - - res, err := req.Context(ctx).Do() - if err != nil { - return nil, err - } - - if !res.JobComplete { - return nil, errIncompleteJob - } - schema := convertTableSchema(res.Schema) - result := &readDataResult{ - pageToken: res.PageToken, - totalRows: res.TotalRows, - schema: schema, - } - result.rows, err = convertRows(res.Rows, schema) - if err != nil { - return nil, err - } - return result, nil -} - -type jobType int - -const ( - copyJobType jobType = iota - extractJobType - loadJobType - queryJobType -) - -func (s *bigqueryService) getJobType(ctx context.Context, projectID, jobID string) (jobType, error) { - res, err := s.s.Jobs.Get(projectID, jobID). - Fields("configuration"). - Context(ctx). 
- Do() - - if err != nil { - return 0, err - } - - switch { - case res.Configuration.Copy != nil: - return copyJobType, nil - case res.Configuration.Extract != nil: - return extractJobType, nil - case res.Configuration.Load != nil: - return loadJobType, nil - case res.Configuration.Query != nil: - return queryJobType, nil - default: - return 0, errors.New("unknown job type") - } -} - -func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) { - res, err := s.s.Jobs.Get(projectID, jobID). - Fields("status"). // Only fetch what we need. - Context(ctx). - Do() - if err != nil { - return nil, err - } - return jobStatusFromProto(res.Status) -} - -var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done} - -func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) { - state, ok := stateMap[status.State] - if !ok { - return nil, fmt.Errorf("unexpected job state: %v", status.State) - } - - newStatus := &JobStatus{ - State: state, - err: nil, - } - if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil { - newStatus.err = err - } - - for _, ep := range status.Errors { - newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep)) - } - return newStatus, nil -} - -// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset. -func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) { - var tables []*Table - res, err := s.s.Tables.List(projectID, datasetID). - PageToken(pageToken). - Context(ctx). - Do() - if err != nil { - return nil, "", err - } - for _, t := range res.Tables { - tables = append(tables, convertListedTable(t)) - } - return tables, res.NextPageToken, nil -} - -type createTableConf struct { - projectID, datasetID, tableID string - expiration time.Time - viewQuery string -} - -// createTable creates a table in the BigQuery service. -// expiration is an optional time after which the table will be deleted and its storage reclaimed. -// If viewQuery is non-empty, the created table will be of type VIEW. -// Note: expiration can only be set during table creation. -// Note: after table creation, a view can be modified only if its table was initially created with a view. 
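The getPages helper deleted earlier in this file drove token-based pagination for calls like listTables. A sketch of that pattern, assuming the pre-deletion service interface; allTables is a placeholder name:

```go
package bigquery

import "golang.org/x/net/context"

// allTables sketches draining every page of a dataset's table listing by
// following page tokens until getPages sees an empty one.
func allTables(ctx context.Context, s service, projectID, datasetID string) ([]*Table, error) {
	var all []*Table
	err := getPages("", func(token string) (string, error) {
		tables, next, err := s.listTables(ctx, projectID, datasetID, token)
		if err != nil {
			return "", err
		}
		all = append(all, tables...)
		return next, nil // empty next token ends the loop
	})
	return all, err
}
```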
-func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error { - table := &bq.Table{ - TableReference: &bq.TableReference{ - ProjectId: conf.projectID, - DatasetId: conf.datasetID, - TableId: conf.tableID, - }, - } - if !conf.expiration.IsZero() { - table.ExpirationTime = conf.expiration.UnixNano() / 1000 - } - if conf.viewQuery != "" { - table.View = &bq.ViewDefinition{ - Query: conf.viewQuery, - } - } - - _, err := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx).Do() - return err -} - -func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) { - table, err := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx).Do() - if err != nil { - return nil, err - } - return bqTableToMetadata(table), nil -} - -func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error { - return s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx).Do() -} - -func bqTableToMetadata(t *bq.Table) *TableMetadata { - md := &TableMetadata{ - Description: t.Description, - Name: t.FriendlyName, - Type: TableType(t.Type), - ID: t.Id, - NumBytes: t.NumBytes, - NumRows: t.NumRows, - } - if t.ExpirationTime != 0 { - md.ExpirationTime = time.Unix(0, t.ExpirationTime*1e6) - } - if t.CreationTime != 0 { - md.CreationTime = time.Unix(0, t.CreationTime*1e6) - } - if t.LastModifiedTime != 0 { - md.LastModifiedTime = time.Unix(0, int64(t.LastModifiedTime*1e6)) - } - if t.Schema != nil { - md.Schema = convertTableSchema(t.Schema) - } - if t.View != nil { - md.View = t.View.Query - } - - return md -} - -func convertListedTable(t *bq.TableListTables) *Table { - return &Table{ - ProjectID: t.TableReference.ProjectId, - DatasetID: t.TableReference.DatasetId, - TableID: t.TableReference.TableId, - } -} - -// patchTableConf contains fields to be patched. -type patchTableConf struct { - // These fields are omitted from the patch operation if nil. - Description *string - Name *string -} - -func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) { - t := &bq.Table{} - forceSend := func(field string) { - t.ForceSendFields = append(t.ForceSendFields, field) - } - - if conf.Description != nil { - t.Description = *conf.Description - forceSend("Description") - } - if conf.Name != nil { - t.FriendlyName = *conf.Name - forceSend("FriendlyName") - } - table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t). - Context(ctx). - Do() - if err != nil { - return nil, err - } - return bqTableToMetadata(table), nil -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/table.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/table.go deleted file mode 100644 index 5ac54873fe..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/table.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import ( - "fmt" - "time" - - "golang.org/x/net/context" - - bq "google.golang.org/api/bigquery/v2" -) - -// A Table is a reference to a BigQuery table. -type Table struct { - // ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query. - // In this case the result will be stored in an ephemeral table. - ProjectID string - DatasetID string - // TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). - // The maximum length is 1,024 characters. - TableID string - - service service -} - -// TableMetadata contains information about a BigQuery table. -type TableMetadata struct { - Description string // The user-friendly description of this table. - Name string // The user-friendly name for this table. - Schema Schema - View string - - ID string // An opaque ID uniquely identifying the table. - Type TableType - - // The time when this table expires. If not set, the table will persist - // indefinitely. Expired tables will be deleted and their storage reclaimed. - ExpirationTime time.Time - - CreationTime time.Time - LastModifiedTime time.Time - - // The size of the table in bytes. - // This does not include data that is being buffered during a streaming insert. - NumBytes int64 - - // The number of rows of data in this table. - // This does not include data that is being buffered during a streaming insert. - NumRows uint64 -} - -// Tables is a group of tables. The tables may belong to differing projects or datasets. -type Tables []*Table - -// CreateDisposition specifies the circumstances under which destination table will be created. -// Default is CreateIfNeeded. -type TableCreateDisposition string - -const ( - // The table will be created if it does not already exist. Tables are created atomically on successful completion of a job. - CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED" - - // The table must already exist and will not be automatically created. - CreateNever TableCreateDisposition = "CREATE_NEVER" -) - -func CreateDisposition(disp TableCreateDisposition) Option { return disp } - -func (opt TableCreateDisposition) implementsOption() {} - -func (opt TableCreateDisposition) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { - conf.CreateDisposition = string(opt) -} - -func (opt TableCreateDisposition) customizeCopy(conf *bq.JobConfigurationTableCopy, projectID string) { - conf.CreateDisposition = string(opt) -} - -func (opt TableCreateDisposition) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) { - conf.CreateDisposition = string(opt) -} - -// TableWriteDisposition specifies how existing data in a destination table is treated. -// Default is WriteAppend. -type TableWriteDisposition string - -const ( - // Data will be appended to any existing data in the destination table. - // Data is appended atomically on successful completion of a job. - WriteAppend TableWriteDisposition = "WRITE_APPEND" - - // Existing data in the destination table will be overwritten. - // Data is overwritten atomically on successful completion of a job. - WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE" - - // Writes will fail if the destination table already contains data. 
- WriteEmpty TableWriteDisposition = "WRITE_EMPTY" -) - -func WriteDisposition(disp TableWriteDisposition) Option { return disp } - -func (opt TableWriteDisposition) implementsOption() {} - -func (opt TableWriteDisposition) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) { - conf.WriteDisposition = string(opt) -} - -func (opt TableWriteDisposition) customizeCopy(conf *bq.JobConfigurationTableCopy, projectID string) { - conf.WriteDisposition = string(opt) -} - -func (opt TableWriteDisposition) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) { - conf.WriteDisposition = string(opt) -} - -// TableType is the type of table. -type TableType string - -const ( - RegularTable TableType = "TABLE" - ViewTable TableType = "VIEW" -) - -func (t *Table) implementsSource() {} -func (t *Table) implementsReadSource() {} -func (t *Table) implementsDestination() {} -func (ts Tables) implementsSource() {} - -func (t *Table) tableRefProto() *bq.TableReference { - return &bq.TableReference{ - ProjectId: t.ProjectID, - DatasetId: t.DatasetID, - TableId: t.TableID, - } -} - -// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format. -func (t *Table) FullyQualifiedName() string { - return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID) -} - -// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID. -func (t *Table) implicitTable() bool { - return t.ProjectID == "" && t.DatasetID == "" && t.TableID == "" -} - -func (t *Table) customizeLoadDst(conf *bq.JobConfigurationLoad, projectID string) { - conf.DestinationTable = t.tableRefProto() -} - -func (t *Table) customizeExtractSrc(conf *bq.JobConfigurationExtract, projectID string) { - conf.SourceTable = t.tableRefProto() -} - -func (t *Table) customizeCopyDst(conf *bq.JobConfigurationTableCopy, projectID string) { - conf.DestinationTable = t.tableRefProto() -} - -func (ts Tables) customizeCopySrc(conf *bq.JobConfigurationTableCopy, projectID string) { - for _, t := range ts { - conf.SourceTables = append(conf.SourceTables, t.tableRefProto()) - } -} - -func (t *Table) customizeQueryDst(conf *bq.JobConfigurationQuery, projectID string) { - if !t.implicitTable() { - conf.DestinationTable = t.tableRefProto() - } -} - -func (t *Table) customizeReadSrc(cursor *readTableConf) { - cursor.projectID = t.ProjectID - cursor.datasetID = t.DatasetID - cursor.tableID = t.TableID -} - -// OpenTable creates a handle to an existing BigQuery table. If the table does not already exist, subsequent uses of the *Table will fail. -func (c *Client) OpenTable(projectID, datasetID, tableID string) *Table { - return &Table{ProjectID: projectID, DatasetID: datasetID, TableID: tableID, service: c.service} -} - -// CreateTable creates a table in the BigQuery service and returns a handle to it. -func (c *Client) CreateTable(ctx context.Context, projectID, datasetID, tableID string, options ...CreateTableOption) (*Table, error) { - conf := &createTableConf{ - projectID: projectID, - datasetID: datasetID, - tableID: tableID, - } - for _, o := range options { - o.customizeCreateTable(conf) - } - if err := c.service.createTable(ctx, conf); err != nil { - return nil, err - } - return &Table{ProjectID: projectID, DatasetID: datasetID, TableID: tableID, service: c.service}, nil -} - -// Metadata fetches the metadata for the table. 
-func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) { - return t.service.getTableMetadata(ctx, t.ProjectID, t.DatasetID, t.TableID) -} - -// Delete deletes the table. -func (t *Table) Delete(ctx context.Context) error { - return t.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID) -} - -// A CreateTableOption is an optional argument to CreateTable. -type CreateTableOption interface { - customizeCreateTable(*createTableConf) -} - -type tableExpiration time.Time - -// TableExpiration returns a CreateTableOption which will cause the created table to be deleted after the expiration time. -func TableExpiration(exp time.Time) CreateTableOption { return tableExpiration(exp) } - -func (opt tableExpiration) customizeCreateTable(conf *createTableConf) { - conf.expiration = time.Time(opt) -} - -type viewQuery string - -// ViewQuery returns a CreateTableOption that causes the created table to be a virtual table defined by the supplied query. -// For more information see: https://cloud.google.com/bigquery/querying-data#views -func ViewQuery(query string) CreateTableOption { return viewQuery(query) } - -func (opt viewQuery) customizeCreateTable(conf *createTableConf) { - conf.viewQuery = string(opt) -} - -// TableMetadataPatch represents a set of changes to a table's metadata. -type TableMetadataPatch struct { - s service - projectID, datasetID, tableID string - conf patchTableConf -} - -// Patch returns a *TableMetadataPatch, which can be used to modify specific Table metadata fields. -// In order to apply the changes, the TableMetadataPatch's Apply method must be called. -func (t *Table) Patch() *TableMetadataPatch { - return &TableMetadataPatch{ - s: t.service, - projectID: t.ProjectID, - datasetID: t.DatasetID, - tableID: t.TableID, - } -} - -// Description sets the table description. -func (p *TableMetadataPatch) Description(desc string) { - p.conf.Description = &desc -} - -// Name sets the table name. -func (p *TableMetadataPatch) Name(name string) { - p.conf.Name = &name -} - -// TODO(mcgreevy): support patching the schema. - -// Apply applies the patch operation. -func (p *TableMetadataPatch) Apply(ctx context.Context) (*TableMetadata, error) { - return p.s.patchTable(ctx, p.projectID, p.datasetID, p.tableID, &p.conf) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/utils_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/utils_test.go deleted file mode 100644 index 6d441cb94e..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/utils_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
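The table lifecycle removed above (CreateTable with options, then Patch/Apply for metadata) composed as in this sketch, assuming the pre-deletion API; makeView and all IDs are placeholders:

```go
package bigquery

import (
	"time"

	"golang.org/x/net/context"
)

// makeView sketches creating an expiring view and then patching its
// description, using the deleted CreateTableOption and patch types.
func makeView(ctx context.Context, c *Client) (*TableMetadata, error) {
	t, err := c.CreateTable(ctx, "my-project", "my_dataset", "events_view",
		TableExpiration(time.Now().Add(24*time.Hour)), // only settable at creation
		ViewQuery("SELECT name FROM my_dataset.events"),
	)
	if err != nil {
		return nil, err
	}
	patch := t.Patch()
	patch.Description("view of recent events") // nil fields are omitted from the patch
	return patch.Apply(ctx)
}
```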
- -package bigquery - -import ( - "golang.org/x/net/context" - bq "google.golang.org/api/bigquery/v2" -) - -var defaultTable = &Table{ - ProjectID: "project-id", - DatasetID: "dataset-id", - TableID: "table-id", -} - -var defaultGCS = &GCSReference{ - uris: []string{"uri"}, -} - -var defaultQuery = &Query{ - Q: "query string", - DefaultProjectID: "def-project-id", - DefaultDatasetID: "def-dataset-id", -} - -type testService struct { - *bq.Job - - service -} - -func (s *testService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) { - s.Job = job - return &Job{}, nil -} - -func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) { - return &JobStatus{State: Done}, nil -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/value.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/value.go deleted file mode 100644 index 369bcd46cd..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/value.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import ( - "errors" - "fmt" - "strconv" - "time" - - bq "google.golang.org/api/bigquery/v2" -) - -// Value stores the contents of a single cell from a BigQuery result. -type Value interface{} - -// ValueLoader stores a slice of Values representing a result row from a Read operation. -// See Iterator.Get for more information. -type ValueLoader interface { - Load(v []Value) error -} - -// ValueList converts a []Value to implement ValueLoader. -type ValueList []Value - -// Load stores a sequence of values in a ValueList. -func (vs *ValueList) Load(v []Value) error { - *vs = append(*vs, v...) - return nil -} - -// convertRows converts a series of TableRows into a series of Value slices. -// schema is used to interpret the data from rows; its length must match the -// length of each row. 
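Besides ValueList, the ValueLoader interface above let callers map a row into a typed struct via Iterator.Get. A sketch under the assumption that basic cells arrive already converted (STRING as string, INTEGER as int, per convertBasicType below); nameCount and its column layout are placeholders:

```go
package bigquery

// nameCount sketches a custom ValueLoader for a two-column
// (STRING, INTEGER) result row.
type nameCount struct {
	Name  string
	Count int
}

// Load satisfies ValueLoader; type assertions mirror the concrete types
// produced by the deleted convertBasicType.
func (r *nameCount) Load(v []Value) error {
	r.Name, _ = v[0].(string)
	r.Count, _ = v[1].(int)
	return nil
}
```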
-func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) { - var rs [][]Value - for _, r := range rows { - row, err := convertRow(r, schema) - if err != nil { - return nil, err - } - rs = append(rs, row) - } - return rs, nil -} - -func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) { - if len(schema) != len(r.F) { - return nil, errors.New("schema length does not match row length") - } - var values []Value - for i, cell := range r.F { - fs := schema[i] - v, err := convertValue(cell.V, fs.Type, fs.Schema) - if err != nil { - return nil, err - } - values = append(values, v) - } - return values, nil -} - -func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) { - switch val := val.(type) { - case nil: - return nil, nil - case []interface{}: - return convertRepeatedRecord(val, typ, schema) - case map[string]interface{}: - return convertNestedRecord(val, schema) - case string: - return convertBasicType(val, typ) - default: - return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ) - } -} - -func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) { - var values []Value - for _, cell := range vals { - // each cell contains a single entry, keyed by "v" - val := cell.(map[string]interface{})["v"] - v, err := convertValue(val, typ, schema) - if err != nil { - return nil, err - } - values = append(values, v) - } - return values, nil -} - -func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) { - // convertNestedRecord is similar to convertRow, as a record has the same structure as a row. - - // Nested records are wrapped in a map with a single key, "f". - record := val["f"].([]interface{}) - if len(record) != len(schema) { - return nil, errors.New("schema length does not match record length") - } - - var values []Value - for i, cell := range record { - // each cell contains a single entry, keyed by "v" - val := cell.(map[string]interface{})["v"] - - fs := schema[i] - v, err := convertValue(val, fs.Type, fs.Schema) - if err != nil { - return nil, err - } - values = append(values, v) - } - return values, nil -} - -// convertBasicType returns val as an interface with a concrete type specified by typ. -func convertBasicType(val string, typ FieldType) (Value, error) { - switch typ { - case StringFieldType: - return val, nil - case IntegerFieldType: - return strconv.Atoi(val) - case FloatFieldType: - return strconv.ParseFloat(val, 64) - case BooleanFieldType: - return strconv.ParseBool(val) - case TimestampFieldType: - f, err := strconv.ParseFloat(val, 64) - return Value(time.Unix(0, int64(f*1e9))), err - default: - return nil, errors.New("unrecognized type") - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/value_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigquery/value_test.go deleted file mode 100644 index fbd8089329..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigquery/value_test.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bigquery - -import ( - "fmt" - "reflect" - "testing" - "time" - - bq "google.golang.org/api/bigquery/v2" -) - -func TestConvertBasicValues(t *testing.T) { - schema := []*FieldSchema{ - {Type: StringFieldType}, - {Type: IntegerFieldType}, - {Type: FloatFieldType}, - {Type: BooleanFieldType}, - } - row := &bq.TableRow{ - F: []*bq.TableCell{ - {V: "a"}, - {V: "1"}, - {V: "1.2"}, - {V: "true"}, - }, - } - got, err := convertRow(row, schema) - if err != nil { - t.Fatalf("error converting: %v", err) - } - want := []Value{"a", 1, 1.2, true} - if !reflect.DeepEqual(got, want) { - t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want) - } -} - -func TestConvertTime(t *testing.T) { - schema := []*FieldSchema{ - {Type: TimestampFieldType}, - } - thyme := time.Date(1970, 1, 1, 10, 0, 0, 10, time.UTC) - row := &bq.TableRow{ - F: []*bq.TableCell{ - {V: fmt.Sprintf("%.10f", float64(thyme.UnixNano())/1e9)}, - }, - } - got, err := convertRow(row, schema) - if err != nil { - t.Fatalf("error converting: %v", err) - } - if !got[0].(time.Time).Equal(thyme) { - t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, thyme) - } -} - -func TestConvertNullValues(t *testing.T) { - schema := []*FieldSchema{ - {Type: StringFieldType}, - } - row := &bq.TableRow{ - F: []*bq.TableCell{ - {V: nil}, - }, - } - got, err := convertRow(row, schema) - if err != nil { - t.Fatalf("error converting: %v", err) - } - want := []Value{nil} - if !reflect.DeepEqual(got, want) { - t.Errorf("converting null values: got:\n%v\nwant:\n%v", got, want) - } -} - -func TestBasicRepetition(t *testing.T) { - schema := []*FieldSchema{ - {Type: IntegerFieldType, Repeated: true}, - } - row := &bq.TableRow{ - F: []*bq.TableCell{ - { - V: []interface{}{ - map[string]interface{}{ - "v": "1", - }, - map[string]interface{}{ - "v": "2", - }, - map[string]interface{}{ - "v": "3", - }, - }, - }, - }, - } - got, err := convertRow(row, schema) - if err != nil { - t.Fatalf("error converting: %v", err) - } - want := []Value{[]Value{1, 2, 3}} - if !reflect.DeepEqual(got, want) { - t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want) - } -} - -func TestNestedRecordContainingRepetition(t *testing.T) { - schema := []*FieldSchema{ - { - Type: RecordFieldType, - Schema: Schema{ - {Type: IntegerFieldType, Repeated: true}, - }, - }, - } - row := &bq.TableRow{ - F: []*bq.TableCell{ - { - V: map[string]interface{}{ - "f": []interface{}{ - map[string]interface{}{ - "v": []interface{}{ - map[string]interface{}{"v": "1"}, - map[string]interface{}{"v": "2"}, - map[string]interface{}{"v": "3"}, - }, - }, - }, - }, - }, - }, - } - - got, err := convertRow(row, schema) - if err != nil { - t.Fatalf("error converting: %v", err) - } - want := []Value{[]Value{[]Value{1, 2, 3}}} - if !reflect.DeepEqual(got, want) { - t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want) - } -} - -func TestRepeatedRecordContainingRepetition(t *testing.T) { - schema := []*FieldSchema{ - { - Type: RecordFieldType, - Repeated: true, - Schema: Schema{ - {Type: IntegerFieldType, Repeated: true}, - }, - }, - } - 
row := &bq.TableRow{F: []*bq.TableCell{ - { - V: []interface{}{ // repeated records. - map[string]interface{}{ // first record. - "v": map[string]interface{}{ // pointless single-key-map wrapper. - "f": []interface{}{ // list of record fields. - map[string]interface{}{ // only record (repeated ints) - "v": []interface{}{ // pointless wrapper. - map[string]interface{}{ - "v": "1", - }, - map[string]interface{}{ - "v": "2", - }, - map[string]interface{}{ - "v": "3", - }, - }, - }, - }, - }, - }, - map[string]interface{}{ // second record. - "v": map[string]interface{}{ - "f": []interface{}{ - map[string]interface{}{ - "v": []interface{}{ - map[string]interface{}{ - "v": "4", - }, - map[string]interface{}{ - "v": "5", - }, - map[string]interface{}{ - "v": "6", - }, - }, - }, - }, - }, - }, - }, - }, - }} - - got, err := convertRow(row, schema) - if err != nil { - t.Fatalf("error converting: %v", err) - } - want := []Value{ // the row is a list of length 1, containing an entry for the repeated record. - []Value{ // the repeated record is a list of length 2, containing an entry for each repetition. - []Value{ // the record is a list of length 1, containing an entry for the repeated integer field. - []Value{1, 2, 3}, // the repeated integer field is a list of length 3. - }, - []Value{ // second record - []Value{4, 5, 6}, - }, - }, - } - if !reflect.DeepEqual(got, want) { - t.Errorf("converting repeated records with repeated values: got:\n%v\nwant:\n%v", got, want) - } -} - -func TestRepeatedRecordContainingRecord(t *testing.T) { - schema := []*FieldSchema{ - { - Type: RecordFieldType, - Repeated: true, - Schema: Schema{ - { - Type: StringFieldType, - }, - { - Type: RecordFieldType, - Schema: Schema{ - {Type: IntegerFieldType}, - {Type: StringFieldType}, - }, - }, - }, - }, - } - row := &bq.TableRow{F: []*bq.TableCell{ - { - V: []interface{}{ // repeated records. - map[string]interface{}{ // first record. - "v": map[string]interface{}{ // pointless single-key-map wrapper. - "f": []interface{}{ // list of record fields. - map[string]interface{}{ // first record field (name) - "v": "first repeated record", - }, - map[string]interface{}{ // second record field (nested record). - "v": map[string]interface{}{ // pointless single-key-map wrapper. - "f": []interface{}{ // nested record fields - map[string]interface{}{ - "v": "1", - }, - map[string]interface{}{ - "v": "two", - }, - }, - }, - }, - }, - }, - }, - map[string]interface{}{ // second record. - "v": map[string]interface{}{ - "f": []interface{}{ - map[string]interface{}{ - "v": "second repeated record", - }, - map[string]interface{}{ - "v": map[string]interface{}{ - "f": []interface{}{ - map[string]interface{}{ - "v": "3", - }, - map[string]interface{}{ - "v": "four", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }} - - got, err := convertRow(row, schema) - if err != nil { - t.Fatalf("error converting: %v", err) - } - // TODO: test with flattenresults. - want := []Value{ // the row is a list of length 1, containing an entry for the repeated record. - []Value{ // the repeated record is a list of length 2, containing an entry for each repetition. - []Value{ // record contains a string followed by a nested record. - "first repeated record", - []Value{ - 1, - "two", - }, - }, - []Value{ // second record. 
- "second repeated record", - []Value{ - 3, - "four", - }, - }, - }, - } - if !reflect.DeepEqual(got, want) { - t.Errorf("converting repeated records containing record : got:\n%v\nwant:\n%v", got, want) - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin.go deleted file mode 100644 index cfda78d2f5..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin.go +++ /dev/null @@ -1,267 +0,0 @@ -/* -Copyright 2015 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package bigtable - -import ( - "fmt" - "regexp" - "strings" - - "golang.org/x/net/context" - "google.golang.org/cloud" - btcspb "google.golang.org/cloud/bigtable/internal/cluster_service_proto" - bttspb "google.golang.org/cloud/bigtable/internal/table_service_proto" - "google.golang.org/cloud/internal/transport" - "google.golang.org/grpc" -) - -const adminAddr = "bigtabletableadmin.googleapis.com:443" - -// AdminClient is a client type for performing admin operations within a specific cluster. -type AdminClient struct { - conn *grpc.ClientConn - tClient bttspb.BigtableTableServiceClient - - project, zone, cluster string -} - -// NewAdminClient creates a new AdminClient for a given project, zone and cluster. -func NewAdminClient(ctx context.Context, project, zone, cluster string, opts ...cloud.ClientOption) (*AdminClient, error) { - o := []cloud.ClientOption{ - cloud.WithEndpoint(adminAddr), - cloud.WithScopes(AdminScope), - cloud.WithUserAgent(clientUserAgent), - } - o = append(o, opts...) - conn, err := transport.DialGRPC(ctx, o...) - if err != nil { - return nil, fmt.Errorf("dialing: %v", err) - } - return &AdminClient{ - conn: conn, - tClient: bttspb.NewBigtableTableServiceClient(conn), - - project: project, - zone: zone, - cluster: cluster, - }, nil -} - -// Close closes the AdminClient. -func (ac *AdminClient) Close() { - ac.conn.Close() -} - -func (ac *AdminClient) clusterPrefix() string { - return fmt.Sprintf("projects/%s/zones/%s/clusters/%s", ac.project, ac.zone, ac.cluster) -} - -// Tables returns a list of the tables in the cluster. -func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) { - prefix := ac.clusterPrefix() - req := &bttspb.ListTablesRequest{ - Name: prefix, - } - res, err := ac.tClient.ListTables(ctx, req) - if err != nil { - return nil, err - } - names := make([]string, 0, len(res.Tables)) - for _, tbl := range res.Tables { - names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/")) - } - return names, nil -} - -// CreateTable creates a new table in the cluster. -// This method may return before the table's creation is complete. 
-func (ac *AdminClient) CreateTable(ctx context.Context, table string) error { - prefix := ac.clusterPrefix() - req := &bttspb.CreateTableRequest{ - Name: prefix, - TableId: table, - } - _, err := ac.tClient.CreateTable(ctx, req) - if err != nil { - return err - } - return nil -} - -// CreateColumnFamily creates a new column family in a table. -func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error { - // TODO(dsymonds): Permit specifying gcexpr and any other family settings. - prefix := ac.clusterPrefix() - req := &bttspb.CreateColumnFamilyRequest{ - Name: prefix + "/tables/" + table, - ColumnFamilyId: family, - } - _, err := ac.tClient.CreateColumnFamily(ctx, req) - return err -} - -// DeleteTable deletes a table and all of its data. -func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error { - prefix := ac.clusterPrefix() - req := &bttspb.DeleteTableRequest{ - Name: prefix + "/tables/" + table, - } - _, err := ac.tClient.DeleteTable(ctx, req) - return err -} - -// DeleteColumnFamily deletes a column family in a table and all of its data. -func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error { - prefix := ac.clusterPrefix() - req := &bttspb.DeleteColumnFamilyRequest{ - Name: prefix + "/tables/" + table + "/columnFamilies/" + family, - } - _, err := ac.tClient.DeleteColumnFamily(ctx, req) - return err -} - -// TableInfo represents information about a table. -type TableInfo struct { - Families []string -} - -// TableInfo retrieves information about a table. -func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) { - prefix := ac.clusterPrefix() - req := &bttspb.GetTableRequest{ - Name: prefix + "/tables/" + table, - } - res, err := ac.tClient.GetTable(ctx, req) - if err != nil { - return nil, err - } - ti := &TableInfo{} - for fam := range res.ColumnFamilies { - ti.Families = append(ti.Families, fam) - } - return ti, nil -} - -// SetGCPolicy specifies which cells in a column family should be garbage collected. -// GC executes opportunistically in the background; table reads may return data -// matching the GC policy. -func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error { - prefix := ac.clusterPrefix() - tbl, err := ac.tClient.GetTable(ctx, &bttspb.GetTableRequest{ - Name: prefix + "/tables/" + table, - }) - if err != nil { - return err - } - fam, ok := tbl.ColumnFamilies[family] - if !ok { - return fmt.Errorf("unknown column family %q", family) - } - fam.GcRule = policy.proto() - _, err = ac.tClient.UpdateColumnFamily(ctx, fam) - return err -} - -const clusterAdminAddr = "bigtableclusteradmin.googleapis.com:443" - -// ClusterAdminClient is a client type for performing admin operations on clusters. -// These operations can be substantially more dangerous than those provided by AdminClient. -type ClusterAdminClient struct { - conn *grpc.ClientConn - cClient btcspb.BigtableClusterServiceClient - - project string -} - -// NewClusterAdminClient creates a new ClusterAdminClient for a given project. -func NewClusterAdminClient(ctx context.Context, project string, opts ...cloud.ClientOption) (*ClusterAdminClient, error) { - o := []cloud.ClientOption{ - cloud.WithEndpoint(clusterAdminAddr), - cloud.WithScopes(ClusterAdminScope), - cloud.WithUserAgent(clientUserAgent), - } - o = append(o, opts...) - conn, err := transport.DialGRPC(ctx, o...) 
- if err != nil { - return nil, fmt.Errorf("dialing: %v", err) - } - return &ClusterAdminClient{ - conn: conn, - cClient: btcspb.NewBigtableClusterServiceClient(conn), - - project: project, - }, nil -} - -// Close closes the ClusterAdminClient. -func (cac *ClusterAdminClient) Close() { - cac.conn.Close() -} - -// ClusterInfo represents information about a cluster. -type ClusterInfo struct { - Name string // name of the cluster - Zone string // GCP zone of the cluster (e.g. "us-central1-a") - DisplayName string // display name for UIs - ServeNodes int // number of allocated serve nodes -} - -var clusterNameRegexp = regexp.MustCompile(`^projects/([^/]+)/zones/([^/]+)/clusters/([a-z][-a-z0-9]*)$`) - -// Clusters returns a list of clusters in the project. -func (cac *ClusterAdminClient) Clusters(ctx context.Context) ([]*ClusterInfo, error) { - req := &btcspb.ListClustersRequest{ - Name: "projects/" + cac.project, - } - res, err := cac.cClient.ListClusters(ctx, req) - if err != nil { - return nil, err - } - // TODO(dsymonds): Deal with failed_zones. - var cis []*ClusterInfo - for _, c := range res.Clusters { - m := clusterNameRegexp.FindStringSubmatch(c.Name) - if m == nil { - return nil, fmt.Errorf("malformed cluster name %q", c.Name) - } - cis = append(cis, &ClusterInfo{ - Name: m[3], - Zone: m[2], - DisplayName: c.DisplayName, - ServeNodes: int(c.ServeNodes), - }) - } - return cis, nil -} - -/* TODO(dsymonds): Re-enable when there's a ClusterAdmin API. - -// SetClusterSize sets the number of server nodes for this cluster. -func (ac *AdminClient) SetClusterSize(ctx context.Context, nodes int) error { - req := &btcspb.GetClusterRequest{ - Name: ac.clusterPrefix(), - } - clu, err := ac.cClient.GetCluster(ctx, req) - if err != nil { - return err - } - clu.ServeNodes = int32(nodes) - _, err = ac.cClient.UpdateCluster(ctx, clu) - return err -} - -*/ diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin_test.go deleted file mode 100644 index 191afc20c7..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package bigtable - -import ( - "reflect" - "sort" - "testing" - "time" - - "golang.org/x/net/context" - "google.golang.org/cloud" - "google.golang.org/cloud/bigtable/bttest" - "google.golang.org/grpc" -) - -func TestAdminIntegration(t *testing.T) { - srv, err := bttest.NewServer() - if err != nil { - t.Fatal(err) - } - defer srv.Close() - t.Logf("bttest.Server running on %s", srv.Addr) - - ctx, _ := context.WithTimeout(context.Background(), 2*time.Second) - - conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) - if err != nil { - t.Fatalf("grpc.Dial: %v", err) - } - - adminClient, err := NewAdminClient(ctx, "proj", "zone", "cluster", cloud.WithBaseGRPC(conn)) - if err != nil { - t.Fatalf("NewAdminClient: %v", err) - } - defer adminClient.Close() - - list := func() []string { - tbls, err := adminClient.Tables(ctx) - if err != nil { - t.Fatalf("Fetching list of tables: %v", err) - } - sort.Strings(tbls) - return tbls - } - if err := adminClient.CreateTable(ctx, "mytable"); err != nil { - t.Fatalf("Creating table: %v", err) - } - if err := adminClient.CreateTable(ctx, "myothertable"); err != nil { - t.Fatalf("Creating table: %v", err) - } - if got, want := list(), []string{"myothertable", "mytable"}; !reflect.DeepEqual(got, want) { - t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) - } - if err := adminClient.DeleteTable(ctx, 
"myothertable"); err != nil { - t.Fatalf("Deleting table: %v", err) - } - if got, want := list(), []string{"mytable"}; !reflect.DeepEqual(got, want) { - t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable.go deleted file mode 100644 index 08ebe7b801..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable.go +++ /dev/null @@ -1,529 +0,0 @@ -/* -Copyright 2015 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package bigtable // import "google.golang.org/cloud/bigtable" - -import ( - "fmt" - "io" - "strconv" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/cloud" - btdpb "google.golang.org/cloud/bigtable/internal/data_proto" - btspb "google.golang.org/cloud/bigtable/internal/service_proto" - "google.golang.org/cloud/internal/transport" - "google.golang.org/grpc" -) - -const prodAddr = "bigtable.googleapis.com:443" - -// Client is a client for reading and writing data to tables in a cluster. -type Client struct { - conn *grpc.ClientConn - client btspb.BigtableServiceClient - - project, zone, cluster string -} - -// NewClient creates a new Client for a given project, zone and cluster. -func NewClient(ctx context.Context, project, zone, cluster string, opts ...cloud.ClientOption) (*Client, error) { - o := []cloud.ClientOption{ - cloud.WithEndpoint(prodAddr), - cloud.WithScopes(Scope), - cloud.WithUserAgent(clientUserAgent), - } - o = append(o, opts...) - conn, err := transport.DialGRPC(ctx, o...) - if err != nil { - return nil, fmt.Errorf("dialing: %v", err) - } - return &Client{ - conn: conn, - client: btspb.NewBigtableServiceClient(conn), - - project: project, - zone: zone, - cluster: cluster, - }, nil -} - -// Close closes the Client. -func (c *Client) Close() { - c.conn.Close() -} - -func (c *Client) fullTableName(table string) string { - return fmt.Sprintf("projects/%s/zones/%s/clusters/%s/tables/%s", c.project, c.zone, c.cluster, table) -} - -// A Table refers to a table. -type Table struct { - c *Client - table string -} - -// Open opens a table. -func (c *Client) Open(table string) *Table { - return &Table{ - c: c, - table: table, - } -} - -// TODO(dsymonds): Read method that returns a sequence of ReadItems. - -// ReadRows reads rows from a table. f is called for each row. -// If f returns false, the stream is shut down and ReadRows returns. -// f owns its argument, and f is called serially. -// -// By default, the yielded rows will contain all values in all cells. -// Use RowFilter to limit the cells returned. 
-func (t *Table) ReadRows(ctx context.Context, arg RowRange, f func(Row) bool, opts ...ReadOption) error { - req := &btspb.ReadRowsRequest{ - TableName: t.c.fullTableName(t.table), - Target: &btspb.ReadRowsRequest_RowRange{arg.proto()}, - } - for _, opt := range opts { - opt.set(req) - } - ctx, cancel := context.WithCancel(ctx) // for aborting the stream - stream, err := t.c.client.ReadRows(ctx, req) - if err != nil { - return err - } - cr := new(chunkReader) - for { - res, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return err - } - if row := cr.process(res); row != nil { - if !f(row) { - // Cancel and drain stream. - cancel() - for { - if _, err := stream.Recv(); err != nil { - return nil - } - } - } - } - } - return nil -} - -// ReadRow is a convenience implementation of a single-row reader. -// A missing row will return a zero-length map and a nil error. -func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) { - var r Row - err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool { - r = rr - return true - }, opts...) - return r, err -} - -type chunkReader struct { - partial map[string]Row // incomplete rows -} - -// process handles a single btspb.ReadRowsResponse. -// If it completes a row, that row is returned. -func (cr *chunkReader) process(rrr *btspb.ReadRowsResponse) Row { - if cr.partial == nil { - cr.partial = make(map[string]Row) - } - row := string(rrr.RowKey) - r := cr.partial[row] - if r == nil { - r = make(Row) - cr.partial[row] = r - } - for _, chunk := range rrr.Chunks { - switch c := chunk.Chunk.(type) { - case *btspb.ReadRowsResponse_Chunk_ResetRow: - r = make(Row) - cr.partial[row] = r - continue - case *btspb.ReadRowsResponse_Chunk_CommitRow: - delete(cr.partial, row) - if len(r) == 0 { - // Treat zero-content commits as absent. - continue - } - return r // assume that this is the last chunk - case *btspb.ReadRowsResponse_Chunk_RowContents: - decodeFamilyProto(r, row, c.RowContents) - } - } - return nil -} - -// decodeFamilyProto adds the cell data from f to the given row. -func decodeFamilyProto(r Row, row string, f *btdpb.Family) { - fam := f.Name // does not have colon - for _, col := range f.Columns { - for _, cell := range col.Cells { - ri := ReadItem{ - Row: row, - Column: fmt.Sprintf("%s:%s", fam, col.Qualifier), - Timestamp: Timestamp(cell.TimestampMicros), - Value: cell.Value, - } - r[fam] = append(r[fam], ri) - } - } -} - -// A RowRange is used to describe the rows to be read. -// A RowRange is a half-open interval [Start, Limit) encompassing -// all the rows with keys at least as large as Start, and less than Limit. -// (Bigtable string comparison is the same as Go's.) -// A RowRange can be unbounded, encompassing all keys at least as large as Start. -type RowRange struct { - start string - limit string -} - -// NewRange returns the new RowRange [begin, end). -func NewRange(begin, end string) RowRange { - return RowRange{ - start: begin, - limit: end, - } -} - -// Unbounded tests whether a RowRange is unbounded. -func (r RowRange) Unbounded() bool { - return r.limit == "" -} - -// Contains says whether the RowRange contains the key. -func (r RowRange) Contains(row string) bool { - return r.start <= row && (r.limit == "" || r.limit > row) -} - -// String provides a printable description of a RowRange. 
-func (r RowRange) String() string { - a := strconv.Quote(r.start) - if r.Unbounded() { - return fmt.Sprintf("[%s,∞)", a) - } - return fmt.Sprintf("[%s,%q)", a, r.limit) -} - -func (r RowRange) proto() *btdpb.RowRange { - if r.Unbounded() { - return &btdpb.RowRange{StartKey: []byte(r.start)} - } - return &btdpb.RowRange{ - StartKey: []byte(r.start), - EndKey: []byte(r.limit), - } -} - -// SingleRow returns a RowRange for reading a single row. -func SingleRow(row string) RowRange { - return RowRange{ - start: row, - limit: row + "\x00", - } -} - -// PrefixRange returns a RowRange consisting of all keys starting with the prefix. -func PrefixRange(prefix string) RowRange { - return RowRange{ - start: prefix, - limit: prefixSuccessor(prefix), - } -} - -// InfiniteRange returns the RowRange consisting of all keys at least as -// large as start. -func InfiniteRange(start string) RowRange { - return RowRange{ - start: start, - limit: "", - } -} - -// prefixSuccessor returns the lexically smallest string greater than the -// prefix, if it exists, or "" otherwise. In either case, it is the string -// needed for the Limit of a RowRange. -func prefixSuccessor(prefix string) string { - if prefix == "" { - return "" // infinite range - } - n := len(prefix) - for n--; n >= 0 && prefix[n] == '\xff'; n-- { - } - if n == -1 { - return "" - } - ans := []byte(prefix[:n]) - ans = append(ans, prefix[n]+1) - return string(ans) -} - -// A ReadOption is an optional argument to ReadRows. -type ReadOption interface { - set(req *btspb.ReadRowsRequest) -} - -// RowFilter returns a ReadOption that applies f to the contents of read rows. -func RowFilter(f Filter) ReadOption { return rowFilter{f} } - -type rowFilter struct{ f Filter } - -func (rf rowFilter) set(req *btspb.ReadRowsRequest) { req.Filter = rf.f.proto() } - -// LimitRows returns a ReadOption that will limit the number of rows to be read. -func LimitRows(limit int64) ReadOption { return limitRows{limit} } - -type limitRows struct{ limit int64 } - -func (lr limitRows) set(req *btspb.ReadRowsRequest) { req.NumRowsLimit = lr.limit } - -// A Row is returned by ReadRow. The map is keyed by column family (the prefix -// of the column name before the colon). The values are the returned ReadItems -// for that column family in the order returned by Read. -type Row map[string][]ReadItem - -// Key returns the row's key, or "" if the row is empty. -func (r Row) Key() string { - for _, items := range r { - if len(items) > 0 { - return items[0].Row - } - } - return "" -} - -// A ReadItem is returned by Read. A ReadItem contains data from a specific row and column. -type ReadItem struct { - Row, Column string - Timestamp Timestamp - Value []byte -} - -// Apply applies a Mutation to a specific row. 
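prefixSuccessor is subtle enough to deserve a standalone check: trailing 0xff bytes cannot be incremented, so they are dropped before bumping the last byte. The function below is copied from the code above, and the cases mirror TestPrefix further down in this diff:

    package main

    import "fmt"

    // prefixSuccessor (copied from above): the smallest string greater than
    // every key starting with prefix, or "" when no upper bound exists.
    func prefixSuccessor(prefix string) string {
        if prefix == "" {
            return "" // infinite range
        }
        n := len(prefix)
        for n--; n >= 0 && prefix[n] == '\xff'; n-- {
        }
        if n == -1 {
            return ""
        }
        ans := []byte(prefix[:n])
        ans = append(ans, prefix[n]+1)
        return string(ans)
    }

    func main() {
        fmt.Printf("%q\n", prefixSuccessor("x\xff")) // "y": trailing 0xff is dropped, then 'x' is bumped
        fmt.Printf("%q\n", prefixSuccessor("\xfe"))  // "\xff"
        fmt.Printf("%q\n", prefixSuccessor("\xff"))  // "": no upper bound exists
    }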
-func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error { - after := func(res proto.Message) { - for _, o := range opts { - o.after(res) - } - } - - if m.cond == nil { - req := &btspb.MutateRowRequest{ - TableName: t.c.fullTableName(t.table), - RowKey: []byte(row), - Mutations: m.ops, - } - res, err := t.c.client.MutateRow(ctx, req) - if err == nil { - after(res) - } - return err - } - req := &btspb.CheckAndMutateRowRequest{ - TableName: t.c.fullTableName(t.table), - RowKey: []byte(row), - PredicateFilter: m.cond.proto(), - } - if m.mtrue != nil { - req.TrueMutations = m.mtrue.ops - } - if m.mfalse != nil { - req.FalseMutations = m.mfalse.ops - } - res, err := t.c.client.CheckAndMutateRow(ctx, req) - if err == nil { - after(res) - } - return err -} - -// An ApplyOption is an optional argument to Apply. -type ApplyOption interface { - after(res proto.Message) -} - -type applyAfterFunc func(res proto.Message) - -func (a applyAfterFunc) after(res proto.Message) { a(res) } - -// GetCondMutationResult returns an ApplyOption that reports whether the conditional -// mutation's condition matched. -func GetCondMutationResult(matched *bool) ApplyOption { - return applyAfterFunc(func(res proto.Message) { - if res, ok := res.(*btspb.CheckAndMutateRowResponse); ok { - *matched = res.PredicateMatched - } - }) -} - -// Mutation represents a set of changes for a single row of a table. -type Mutation struct { - ops []*btdpb.Mutation - - // for conditional mutations - cond Filter - mtrue, mfalse *Mutation -} - -// NewMutation returns a new mutation. -func NewMutation() *Mutation { - return new(Mutation) -} - -// NewCondMutation returns a conditional mutation. -// The given row filter determines which mutation is applied: -// If the filter matches any cell in the row, mtrue is applied; -// otherwise, mfalse is applied. -// Either given mutation may be nil. -func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation { - return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse} -} - -// Set sets a value in a specified column, with the given timestamp. -// The timestamp will be truncated to millisecond resolution. -// A timestamp of ServerTime means to use the server timestamp. -func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) { - if ts != ServerTime { - // Truncate to millisecond resolution, since that's the default table config. - // TODO(dsymonds): Provide a way to override this behaviour. - ts -= ts % 1000 - } - m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_SetCell_{&btdpb.Mutation_SetCell{ - FamilyName: family, - ColumnQualifier: []byte(column), - TimestampMicros: int64(ts), - Value: value, - }}}) -} - -// DeleteCellsInColumn will delete all the cells whose columns are family:column. -func (m *Mutation) DeleteCellsInColumn(family, column string) { - m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_DeleteFromColumn_{&btdpb.Mutation_DeleteFromColumn{ - FamilyName: family, - ColumnQualifier: []byte(column), - }}}) -} - -// DeleteTimestampRange deletes all cells whose columns are family:column -// and whose timestamps are in the half-open interval [start, end). -// If end is zero, it will be interpreted as infinity. 
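Tying Apply's conditional branch together: a sketch of a conditional mutation with a result probe, mirroring the integration test later in this diff. It assumes a dialed *bigtable.Table, and ColumnFilter's signature is taken from its usage in those tests:

    package example

    import (
        "golang.org/x/net/context"
        "google.golang.org/cloud/bigtable"
    )

    // condFollow applies mutTrue only when some cell in the row matches the
    // filter; matched reports which branch the server took.
    func condFollow(ctx context.Context, tbl *bigtable.Table) (bool, error) {
        mutTrue := bigtable.NewMutation()
        mutTrue.Set("follows", "wmckinley", 0, []byte("1"))
        cond := bigtable.NewCondMutation(bigtable.ColumnFilter("gwash.*"), mutTrue, nil)
        var matched bool
        err := tbl.Apply(ctx, "tjefferson", cond, bigtable.GetCondMutationResult(&matched))
        return matched, err
    }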
-func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) { - m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_DeleteFromColumn_{&btdpb.Mutation_DeleteFromColumn{ - FamilyName: family, - ColumnQualifier: []byte(column), - TimeRange: &btdpb.TimestampRange{ - StartTimestampMicros: int64(start), - EndTimestampMicros: int64(end), - }, - }}}) -} - -// DeleteCellsInFamily will delete all the cells whose columns are family:*. -func (m *Mutation) DeleteCellsInFamily(family string) { - m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_DeleteFromFamily_{&btdpb.Mutation_DeleteFromFamily{ - FamilyName: family, - }}}) -} - -// DeleteRow deletes the entire row. -func (m *Mutation) DeleteRow() { - m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_DeleteFromRow_{&btdpb.Mutation_DeleteFromRow{}}}) -} - -// Timestamp is in units of microseconds since 1 January 1970. -type Timestamp int64 - -// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set. -// It indicates that the server's timestamp should be used. -const ServerTime Timestamp = -1 - -// Time converts a time.Time into a Timestamp. -func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) } - -// Now returns the Timestamp representation of the current time on the client. -func Now() Timestamp { return Time(time.Now()) } - -// Time converts a Timestamp into a time.Time. -func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) } - -// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row. -// It returns the newly written cells. -func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) { - req := &btspb.ReadModifyWriteRowRequest{ - TableName: t.c.fullTableName(t.table), - RowKey: []byte(row), - Rules: m.ops, - } - res, err := t.c.client.ReadModifyWriteRow(ctx, req) - if err != nil { - return nil, err - } - r := make(Row) - for _, fam := range res.Families { // res is *btdpb.Row, fam is *btdpb.Family - decodeFamilyProto(r, row, fam) - } - return r, nil -} - -// ReadModifyWrite represents a set of operations on a single row of a table. -// It is like Mutation but for non-idempotent changes. -// When applied, these operations operate on the latest values of the row's cells, -// and result in a new value being written to the relevant cell with a timestamp -// that is max(existing timestamp, current server time). -// -// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will -// be executed serially by the server. -type ReadModifyWrite struct { - ops []*btdpb.ReadModifyWriteRule -} - -// NewReadModifyWrite returns a new ReadModifyWrite. -func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) } - -// AppendValue appends a value to a specific cell's value. -// If the cell is unset, it will be treated as an empty value. -func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) { - m.ops = append(m.ops, &btdpb.ReadModifyWriteRule{ - FamilyName: family, - ColumnQualifier: []byte(column), - Rule: &btdpb.ReadModifyWriteRule_AppendValue{v}, - }) -} - -// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer, -// and adds a value to it. If the cell is unset, it will be treated as zero. -// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite -// operation will fail. 
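The Timestamp arithmetic above is easy to get wrong by a factor of 1000. A runnable check of the microsecond unit (Time(t) = t.UnixNano()/1e3) and the millisecond truncation that Set applies to non-ServerTime writes:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t := time.Date(2015, 6, 1, 12, 0, 0, 123456789, time.UTC)
        ts := t.UnixNano() / 1e3 // microseconds since the epoch, as in Time()
        fmt.Println(ts % 1e6)    // 123456: µs within the current second
        ts -= ts % 1000          // the truncation Set applies (ts -= ts % 1000)
        fmt.Println(ts % 1e6)    // 123000: millisecond granularity, as the server requires
    }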
-func (m *ReadModifyWrite) Increment(family, column string, delta int64) { - m.ops = append(m.ops, &btdpb.ReadModifyWriteRule{ - FamilyName: family, - ColumnQualifier: []byte(column), - Rule: &btdpb.ReadModifyWriteRule_IncrementAmount{delta}, - }) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable_test.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable_test.go deleted file mode 100644 index 8bce893631..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable_test.go +++ /dev/null @@ -1,606 +0,0 @@ -/* -Copyright 2015 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package bigtable - -import ( - "flag" - "fmt" - "math/rand" - "reflect" - "sort" - "strings" - "sync" - "testing" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/cloud" - "google.golang.org/cloud/bigtable/bttest" - btspb "google.golang.org/cloud/bigtable/internal/service_proto" - "google.golang.org/grpc" -) - -func dataChunk(fam, col string, ts int64, data string) string { - return fmt.Sprintf("chunks:>>>", fam, col, ts, data) -} - -func commit() string { return "chunks:" } -func reset() string { return "chunks:" } - -var chunkTests = []struct { - desc string - chunks []string // sequence of ReadRowsResponse protos in text format - want map[string]Row -}{ - { - desc: "single row single chunk", - chunks: []string{ - `row_key: "row1" ` + dataChunk("fam", "col1", 1428382701000000, "data") + commit(), - }, - want: map[string]Row{ - "row1": Row{ - "fam": []ReadItem{{ - Row: "row1", - Column: "fam:col1", - Timestamp: 1428382701000000, - Value: []byte("data"), - }}, - }, - }, - }, - { - desc: "single row multiple chunks", - chunks: []string{ - `row_key: "row1" ` + dataChunk("fam", "col1", 1428382701000000, "data"), - `row_key: "row1" ` + dataChunk("fam", "col2", 1428382702000000, "more data"), - `row_key: "row1" ` + commit(), - }, - want: map[string]Row{ - "row1": Row{ - "fam": []ReadItem{ - { - Row: "row1", - Column: "fam:col1", - Timestamp: 1428382701000000, - Value: []byte("data"), - }, - { - Row: "row1", - Column: "fam:col2", - Timestamp: 1428382702000000, - Value: []byte("more data"), - }, - }, - }, - }, - }, - { - desc: "chunk, reset, chunk, commit", - chunks: []string{ - `row_key: "row1" ` + dataChunk("fam", "col1", 1428382701000000, "data"), - `row_key: "row1" ` + reset(), - `row_key: "row1" ` + dataChunk("fam", "col1", 1428382702000000, "data") + commit(), - }, - want: map[string]Row{ - "row1": Row{ - "fam": []ReadItem{{ - Row: "row1", - Column: "fam:col1", - Timestamp: 1428382702000000, - Value: []byte("data"), - }}, - }, - }, - }, - { - desc: "chunk, reset, commit", - chunks: []string{ - `row_key: "row1" ` + dataChunk("fam", "col1", 1428382701000000, "data"), - `row_key: "row1" ` + reset(), - `row_key: "row1" ` + commit(), - }, - want: map[string]Row{}, - }, - // TODO(dsymonds): More test cases, including - // - multiple rows -} - -func TestChunkReader(t *testing.T) { - for _, tc := range 
chunkTests { - cr := new(chunkReader) - got := make(map[string]Row) - for i, txt := range tc.chunks { - rrr := new(btspb.ReadRowsResponse) - if err := proto.UnmarshalText(txt, rrr); err != nil { - t.Fatalf("%s: internal error: bad #%d test text: %v", tc.desc, i, err) - } - if row := cr.process(rrr); row != nil { - got[row.Key()] = row - } - } - // TODO(dsymonds): check for partial rows? - if !reflect.DeepEqual(got, tc.want) { - t.Errorf("%s: processed response mismatch.\n got %+v\nwant %+v", tc.desc, got, tc.want) - } - } -} - -func TestPrefix(t *testing.T) { - tests := []struct { - prefix, succ string - }{ - {"", ""}, - {"\xff", ""}, // when used, "" means Infinity - {"x\xff", "y"}, - {"\xfe", "\xff"}, - } - for _, tc := range tests { - got := prefixSuccessor(tc.prefix) - if got != tc.succ { - t.Errorf("prefixSuccessor(%q) = %q, want %s", tc.prefix, got, tc.succ) - continue - } - r := PrefixRange(tc.prefix) - if tc.succ == "" && r.limit != "" { - t.Errorf("PrefixRange(%q) got limit %q", tc.prefix, r.limit) - } - if tc.succ != "" && r.limit != tc.succ { - t.Errorf("PrefixRange(%q) got limit %q, want %q", tc.prefix, r.limit, tc.succ) - } - } -} - -var useProd = flag.String("use_prod", "", `if set to "proj,zone,cluster,table", run integration test against production`) - -func TestClientIntegration(t *testing.T) { - start := time.Now() - lastCheckpoint := start - checkpoint := func(s string) { - n := time.Now() - t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint)) - lastCheckpoint = n - } - - proj, zone, cluster, table := "proj", "zone", "cluster", "mytable" - var clientOpts []cloud.ClientOption - timeout := 10 * time.Second - if *useProd == "" { - srv, err := bttest.NewServer() - if err != nil { - t.Fatal(err) - } - defer srv.Close() - t.Logf("bttest.Server running on %s", srv.Addr) - conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) - if err != nil { - t.Fatalf("grpc.Dial: %v", err) - } - clientOpts = []cloud.ClientOption{cloud.WithBaseGRPC(conn)} - } else { - t.Logf("Running test against production") - a := strings.Split(*useProd, ",") - proj, zone, cluster, table = a[0], a[1], a[2], a[3] - timeout = 5 * time.Minute - } - - ctx, _ := context.WithTimeout(context.Background(), timeout) - - client, err := NewClient(ctx, proj, zone, cluster, clientOpts...) - if err != nil { - t.Fatalf("NewClient: %v", err) - } - defer client.Close() - checkpoint("dialed Client") - - adminClient, err := NewAdminClient(ctx, proj, zone, cluster, clientOpts...) - if err != nil { - t.Fatalf("NewAdminClient: %v", err) - } - defer adminClient.Close() - checkpoint("dialed AdminClient") - - // Delete the table at the end of the test. - // Do this even before creating the table so that if this is running - // against production and CreateTable fails there's a chance of cleaning it up. - defer adminClient.DeleteTable(ctx, table) - - if err := adminClient.CreateTable(ctx, table); err != nil { - t.Fatalf("Creating table: %v", err) - } - checkpoint("created table") - if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil { - t.Fatalf("Creating column family: %v", err) - } - checkpoint(`created "follows" column family`) - - tbl := client.Open(table) - - // Insert some data. 
- initialData := map[string][]string{ - "wmckinley": []string{"tjefferson"}, - "gwashington": []string{"jadams"}, - "tjefferson": []string{"gwashington", "jadams"}, // wmckinley set conditionally below - "jadams": []string{"gwashington", "tjefferson"}, - } - for row, ss := range initialData { - mut := NewMutation() - for _, name := range ss { - mut.Set("follows", name, 0, []byte("1")) - } - if err := tbl.Apply(ctx, row, mut); err != nil { - t.Errorf("Mutating row %q: %v", row, err) - } - } - checkpoint("inserted initial data") - - // Do a conditional mutation with a complex filter. - mutTrue := NewMutation() - mutTrue.Set("follows", "wmckinley", 0, []byte("1")) - filter := ChainFilters(ColumnFilter("gwash[iz].*"), ValueFilter(".")) - mut := NewCondMutation(filter, mutTrue, nil) - if err := tbl.Apply(ctx, "tjefferson", mut); err != nil { - t.Errorf("Conditionally mutating row: %v", err) - } - // Do a second condition mutation with a filter that does not match, - // and thus no changes should be made. - mutTrue = NewMutation() - mutTrue.DeleteRow() - filter = ColumnFilter("snoop.dogg") - mut = NewCondMutation(filter, mutTrue, nil) - if err := tbl.Apply(ctx, "tjefferson", mut); err != nil { - t.Errorf("Conditionally mutating row: %v", err) - } - checkpoint("did two conditional mutations") - - // Fetch a row. - row, err := tbl.ReadRow(ctx, "jadams") - if err != nil { - t.Fatalf("Reading a row: %v", err) - } - wantRow := Row{ - "follows": []ReadItem{ - {Row: "jadams", Column: "follows:gwashington", Value: []byte("1")}, - {Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")}, - }, - } - for _, ris := range row { - sort.Sort(byColumn(ris)) - } - if !reflect.DeepEqual(row, wantRow) { - t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow) - } - checkpoint("tested ReadRow") - - // Do a bunch of reads with filters. - readTests := []struct { - desc string - rr RowRange - filter Filter // may be nil - - // We do the read, grab all the cells, turn them into "--", - // sort that list, and join with a comma. - want string - }{ - { - desc: "read all, unfiltered", - rr: RowRange{}, - want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", - }, - { - desc: "read with InfiniteRange, unfiltered", - rr: InfiniteRange("tjefferson"), - want: "tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", - }, - { - desc: "read with NewRange, unfiltered", - rr: NewRange("gargamel", "hubbard"), - want: "gwashington-jadams-1", - }, - { - desc: "read with PrefixRange, unfiltered", - rr: PrefixRange("jad"), - want: "jadams-gwashington-1,jadams-tjefferson-1", - }, - { - desc: "read with SingleRow, unfiltered", - rr: SingleRow("wmckinley"), - want: "wmckinley-tjefferson-1", - }, - { - desc: "read all, with ColumnFilter", - rr: RowRange{}, - filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson" - want: "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,wmckinley-tjefferson-1", - }, - } - for _, tc := range readTests { - var opts []ReadOption - if tc.filter != nil { - opts = append(opts, RowFilter(tc.filter)) - } - var elt []string - err := tbl.ReadRows(context.Background(), tc.rr, func(r Row) bool { - for _, ris := range r { - for _, ri := range ris { - // Use the column qualifier only to make the test data briefer. 
- col := ri.Column[strings.Index(ri.Column, ":")+1:] - x := fmt.Sprintf("%s-%s-%s", ri.Row, col, ri.Value) - elt = append(elt, x) - } - } - return true - }, opts...) - if err != nil { - t.Errorf("%s: %v", tc.desc, err) - continue - } - sort.Strings(elt) - if got := strings.Join(elt, ","); got != tc.want { - t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want) - } - } - checkpoint("tested ReadRows in a few ways") - - // Do a scan and stop part way through. - // Verify that the ReadRows callback doesn't keep running. - stopped := false - err = tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool { - if r.Key() < "h" { - return true - } - if !stopped { - stopped = true - return false - } - t.Errorf("ReadRows kept scanning to row %q after being told to stop", r.Key()) - return false - }) - if err != nil { - t.Errorf("Partial ReadRows: %v", err) - } - checkpoint("did partial ReadRows test") - - // Delete a row and check it goes away. - mut = NewMutation() - mut.DeleteRow() - if err := tbl.Apply(ctx, "wmckinley", mut); err != nil { - t.Errorf("Apply DeleteRow: %v", err) - } - row, err = tbl.ReadRow(ctx, "wmckinley") - if err != nil { - t.Fatalf("Reading a row after DeleteRow: %v", err) - } - if len(row) != 0 { - t.Fatalf("Read non-zero row after DeleteRow: %v", row) - } - checkpoint("exercised DeleteRow") - - // Check ReadModifyWrite. - - if err := adminClient.CreateColumnFamily(ctx, table, "counter"); err != nil { - t.Fatalf("Creating column family: %v", err) - } - - appendRMW := func(b []byte) *ReadModifyWrite { - rmw := NewReadModifyWrite() - rmw.AppendValue("counter", "likes", b) - return rmw - } - incRMW := func(n int64) *ReadModifyWrite { - rmw := NewReadModifyWrite() - rmw.Increment("counter", "likes", n) - return rmw - } - rmwSeq := []struct { - desc string - rmw *ReadModifyWrite - want []byte - }{ - { - desc: "append #1", - rmw: appendRMW([]byte{0, 0, 0}), - want: []byte{0, 0, 0}, - }, - { - desc: "append #2", - rmw: appendRMW([]byte{0, 0, 0, 0, 17}), // the remaining 40 bits to make a big-endian 17 - want: []byte{0, 0, 0, 0, 0, 0, 0, 17}, - }, - { - desc: "increment", - rmw: incRMW(8), - want: []byte{0, 0, 0, 0, 0, 0, 0, 25}, - }, - } - for _, step := range rmwSeq { - row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", step.rmw) - if err != nil { - t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err) - } - clearTimestamps(row) - wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}} - if !reflect.DeepEqual(row, wantRow) { - t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow) - } - } - checkpoint("tested ReadModifyWrite") - - // Test arbitrary timestamps more thoroughly. - if err := adminClient.CreateColumnFamily(ctx, table, "ts"); err != nil { - t.Fatalf("Creating column family: %v", err) - } - const numVersions = 4 - mut = NewMutation() - for i := 0; i < numVersions; i++ { - // Timestamps are used in thousands because the server - // only permits that granularity. - mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i))) - } - if err := tbl.Apply(ctx, "testrow", mut); err != nil { - t.Fatalf("Mutating row: %v", err) - } - r, err := tbl.ReadRow(ctx, "testrow") - if err != nil { - t.Fatalf("Reading row: %v", err) - } - wantRow = Row{"ts": []ReadItem{ - // These should be returned in descending timestamp order. 
- {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, - {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, - {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, - {Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")}, - }} - if !reflect.DeepEqual(r, wantRow) { - t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow) - } - // Do the same read, but filter to the latest two versions. - r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2))) - if err != nil { - t.Fatalf("Reading row: %v", err) - } - wantRow = Row{"ts": []ReadItem{ - {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, - {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, - }} - if !reflect.DeepEqual(r, wantRow) { - t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow) - } - // Delete the cell with timestamp 2000 and repeat the last read, - // checking that we get ts 3000 and ts 1000. - mut = NewMutation() - mut.DeleteTimestampRange("ts", "col", 2000, 3000) // half-open interval - if err := tbl.Apply(ctx, "testrow", mut); err != nil { - t.Fatalf("Mutating row: %v", err) - } - r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2))) - if err != nil { - t.Fatalf("Reading row: %v", err) - } - wantRow = Row{"ts": []ReadItem{ - {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, - {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, - }} - if !reflect.DeepEqual(r, wantRow) { - t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow) - } - checkpoint("tested multiple versions in a cell") - - // Do highly concurrent reads/writes. - // TODO(dsymonds): Raise this to 1000 when https://github.com/grpc/grpc-go/issues/205 is resolved. - const maxConcurrency = 100 - var wg sync.WaitGroup - for i := 0; i < maxConcurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - switch r := rand.Intn(100); { // r ∈ [0,100) - case 0 <= r && r < 30: - // Do a read. - _, err := tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(1))) - if err != nil { - t.Errorf("Concurrent read: %v", err) - } - case 30 <= r && r < 100: - // Do a write. - mut := NewMutation() - mut.Set("ts", "col", 0, []byte("data")) - if err := tbl.Apply(ctx, "testrow", mut); err != nil { - t.Errorf("Concurrent write: %v", err) - } - } - }() - } - wg.Wait() - checkpoint("tested high concurrency") - - // Large reads, writes and scans. - bigBytes := make([]byte, 15<<20) // 15 MB is large - nonsense := []byte("lorem ipsum dolor sit amet, ") - fill(bigBytes, nonsense) - mut = NewMutation() - mut.Set("ts", "col", 0, bigBytes) - if err := tbl.Apply(ctx, "bigrow", mut); err != nil { - t.Errorf("Big write: %v", err) - } - r, err = tbl.ReadRow(ctx, "bigrow") - if err != nil { - t.Errorf("Big read: %v", err) - } - wantRow = Row{"ts": []ReadItem{ - {Row: "bigrow", Column: "ts:col", Value: bigBytes}, - }} - if !reflect.DeepEqual(r, wantRow) { - t.Errorf("Big read returned incorrect bytes: %v", r) - } - // Now write 1000 rows, each with 82 KB values, then scan them all. - medBytes := make([]byte, 82<<10) - fill(medBytes, nonsense) - sem := make(chan int, 50) // do up to 50 mutations at a time. 
- for i := 0; i < 1000; i++ { - mut := NewMutation() - mut.Set("ts", "big-scan", 0, medBytes) - row := fmt.Sprintf("row-%d", i) - wg.Add(1) - go func() { - defer wg.Done() - defer func() { <-sem }() - sem <- 1 - if err := tbl.Apply(ctx, row, mut); err != nil { - t.Errorf("Preparing large scan: %v", err) - } - }() - } - wg.Wait() - n := 0 - err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool { - for _, ris := range r { - for _, ri := range ris { - n += len(ri.Value) - } - } - return true - }, RowFilter(ColumnFilter("big-scan"))) - if err != nil { - t.Errorf("Doing large scan: %v", err) - } - if want := 1000 * len(medBytes); n != want { - t.Errorf("Large scan returned %d bytes, want %d", n, want) - } - checkpoint("tested big read/write/scan") -} - -func fill(b, sub []byte) { - for len(b) > len(sub) { - n := copy(b, sub) - b = b[n:] - } -} - -type byColumn []ReadItem - -func (b byColumn) Len() int { return len(b) } -func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column } - -func clearTimestamps(r Row) { - for _, ris := range r { - for i := range ris { - ris[i].Timestamp = 0 - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bttest/inmem.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bttest/inmem.go deleted file mode 100644 index e771873895..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/bttest/inmem.go +++ /dev/null @@ -1,839 +0,0 @@ -/* -Copyright 2015 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package bttest contains test helpers for working with the bigtable package. - -To use a Server, create it, and then connect to it with no security: -(The project/zone/cluster values are ignored.) - srv, err := bttest.NewServer() - ... - conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) - ... - client, err := bigtable.NewClient(ctx, proj, zone, cluster, - bigtable.WithBaseGRPC(conn)) - ... -*/ -package bttest // import "google.golang.org/cloud/bigtable/bttest" - -import ( - "encoding/binary" - "fmt" - "log" - "math/rand" - "net" - "regexp" - "sort" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - btdpb "google.golang.org/cloud/bigtable/internal/data_proto" - emptypb "google.golang.org/cloud/bigtable/internal/empty" - btspb "google.golang.org/cloud/bigtable/internal/service_proto" - bttdpb "google.golang.org/cloud/bigtable/internal/table_data_proto" - bttspb "google.golang.org/cloud/bigtable/internal/table_service_proto" - "google.golang.org/grpc" -) - -// Server is an in-memory Cloud Bigtable fake. -// It is unauthenticated, and only a rough approximation. -type Server struct { - Addr string - - l net.Listener - srv *grpc.Server - s *server -} - -// server is the real implementation of the fake. -// It is a separate and unexported type so the API won't be cluttered with -// methods that are only relevant to the fake's implementation. 
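Wiring a client to the in-memory fake follows the package documentation above; a sketch using the vendored import paths from this diff (the fake ignores the project, zone, and cluster values):

    package main

    import (
        "log"

        "golang.org/x/net/context"
        "google.golang.org/cloud"
        "google.golang.org/cloud/bigtable"
        "google.golang.org/cloud/bigtable/bttest"
        "google.golang.org/grpc"
    )

    func main() {
        // Start the fake, then point a real client at it over an insecure
        // gRPC connection, exactly as the tests above do.
        srv, err := bttest.NewServer()
        if err != nil {
            log.Fatal(err)
        }
        defer srv.Close()

        conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
        if err != nil {
            log.Fatalf("grpc.Dial: %v", err)
        }
        client, err := bigtable.NewClient(context.Background(), "proj", "zone", "cluster", cloud.WithBaseGRPC(conn))
        if err != nil {
            log.Fatalf("NewClient: %v", err)
        }
        defer client.Close()
        _ = client.Open("mytable") // ready for reads and writes
    }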
-type server struct { - mu sync.Mutex - tables map[string]*table // keyed by fully qualified name - gcc chan int // set when gcloop starts, closed when server shuts down - - // Any unimplemented methods will cause a panic. - bttspb.BigtableTableServiceServer - btspb.BigtableServiceServer -} - -// NewServer creates a new Server. The Server will be listening for gRPC connections -// at the address named by the Addr field, without TLS. -func NewServer() (*Server, error) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return nil, err - } - - s := &Server{ - Addr: l.Addr().String(), - l: l, - srv: grpc.NewServer(), - s: &server{ - tables: make(map[string]*table), - }, - } - bttspb.RegisterBigtableTableServiceServer(s.srv, s.s) - btspb.RegisterBigtableServiceServer(s.srv, s.s) - - go s.srv.Serve(s.l) - - return s, nil -} - -// Close shuts down the server. -func (s *Server) Close() { - s.s.mu.Lock() - if s.s.gcc != nil { - close(s.s.gcc) - } - s.s.mu.Unlock() - - s.srv.Stop() - s.l.Close() -} - -func (s *server) CreateTable(ctx context.Context, req *bttspb.CreateTableRequest) (*bttdpb.Table, error) { - tbl := req.Name + "/tables/" + req.TableId - - s.mu.Lock() - if _, ok := s.tables[tbl]; ok { - s.mu.Unlock() - return nil, fmt.Errorf("table %q already exists", tbl) - } - s.tables[tbl] = newTable() - s.mu.Unlock() - - return &bttdpb.Table{Name: tbl}, nil -} - -func (s *server) ListTables(ctx context.Context, req *bttspb.ListTablesRequest) (*bttspb.ListTablesResponse, error) { - res := &bttspb.ListTablesResponse{} - prefix := req.Name + "/tables/" - - s.mu.Lock() - for tbl := range s.tables { - if strings.HasPrefix(tbl, prefix) { - res.Tables = append(res.Tables, &bttdpb.Table{Name: tbl}) - } - } - s.mu.Unlock() - - return res, nil -} - -func (s *server) GetTable(ctx context.Context, req *bttspb.GetTableRequest) (*bttdpb.Table, error) { - tbl := req.Name - - s.mu.Lock() - tblIns, ok := s.tables[tbl] - s.mu.Unlock() - if !ok { - return nil, fmt.Errorf("table %q not found", tbl) - } - - return &bttdpb.Table{ - Name: tbl, - ColumnFamilies: toColumnFamilies(tblIns.families), - }, nil -} - -func (s *server) DeleteTable(ctx context.Context, req *bttspb.DeleteTableRequest) (*emptypb.Empty, error) { - s.mu.Lock() - defer s.mu.Unlock() - if _, ok := s.tables[req.Name]; !ok { - return nil, fmt.Errorf("no such table %q", req.Name) - } - delete(s.tables, req.Name) - return &emptypb.Empty{}, nil -} - -func (s *server) CreateColumnFamily(ctx context.Context, req *bttspb.CreateColumnFamilyRequest) (*bttdpb.ColumnFamily, error) { - s.mu.Lock() - tbl, ok := s.tables[req.Name] - s.mu.Unlock() - if !ok { - return nil, fmt.Errorf("no such table %q", req.Name) - } - - // Check it is unique and record it. 
- fam := req.ColumnFamilyId - tbl.mu.Lock() - defer tbl.mu.Unlock() - if _, ok := tbl.families[fam]; ok { - return nil, fmt.Errorf("family %q already exists", fam) - } - newcf := &columnFamily{ - name: req.Name + "/columnFamilies/" + fam, - } - tbl.families[fam] = newcf - return newcf.proto(), nil -} - -func (s *server) UpdateColumnFamily(ctx context.Context, req *bttdpb.ColumnFamily) (*bttdpb.ColumnFamily, error) { - index := strings.Index(req.Name, "/columnFamilies/") - if index == -1 { - return nil, fmt.Errorf("bad family name %q", req.Name) - } - tblName := req.Name[:index] - fam := req.Name[index+len("/columnFamilies/"):] - - s.mu.Lock() - tbl, ok := s.tables[tblName] - s.mu.Unlock() - if !ok { - return nil, fmt.Errorf("no such table %q", req.Name) - } - - tbl.mu.Lock() - defer tbl.mu.Unlock() - - // Check it is unique and record it. - if _, ok := tbl.families[fam]; !ok { - return nil, fmt.Errorf("no such family %q", fam) - } - - newcf := &columnFamily{ - name: req.Name, - gcRule: req.GcRule, - } - // assume that we ALWAYS want to replace by the new setting - // we may need partial update through - tbl.families[fam] = newcf - s.needGC() - return newcf.proto(), nil -} - -func (s *server) ReadRows(req *btspb.ReadRowsRequest, stream btspb.BigtableService_ReadRowsServer) error { - s.mu.Lock() - tbl, ok := s.tables[req.TableName] - s.mu.Unlock() - if !ok { - return fmt.Errorf("no such table %q", req.TableName) - } - - var start, end string // half-open interval - switch targ := req.Target.(type) { - case *btspb.ReadRowsRequest_RowRange: - start, end = string(targ.RowRange.StartKey), string(targ.RowRange.EndKey) - case *btspb.ReadRowsRequest_RowKey: - // A single row read is simply an edge case. - start = string(targ.RowKey) - end = start + "\x00" - default: - return fmt.Errorf("unknown ReadRowsRequest.Target oneof %T", targ) - } - - // Get rows to stream back. - tbl.mu.RLock() - si, ei := 0, len(tbl.rows) // half-open interval - if start != "" { - si = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= start }) - } - if end != "" { - ei = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= end }) - } - if si >= ei { - tbl.mu.RUnlock() - return nil - } - rows := make([]*row, ei-si) - copy(rows, tbl.rows[si:ei]) - tbl.mu.RUnlock() - - for _, r := range rows { - if err := streamRow(stream, r, req.Filter); err != nil { - return err - } - } - - return nil -} - -func streamRow(stream btspb.BigtableService_ReadRowsServer, r *row, f *btdpb.RowFilter) error { - r.mu.Lock() - nr := r.copy() - r.mu.Unlock() - r = nr - - filterRow(f, r) - - rrr := &btspb.ReadRowsResponse{ - RowKey: []byte(r.key), - } - for col, cells := range r.cells { - i := strings.Index(col, ":") // guaranteed to exist - fam, col := col[:i], col[i+1:] - if len(cells) == 0 { - continue - } - // TODO(dsymonds): Apply transformers. - colm := &btdpb.Column{ - Qualifier: []byte(col), - // Cells is populated below. - } - for _, cell := range cells { - colm.Cells = append(colm.Cells, &btdpb.Cell{ - TimestampMicros: cell.ts, - Value: cell.value, - }) - } - rrr.Chunks = append(rrr.Chunks, &btspb.ReadRowsResponse_Chunk{ - Chunk: &btspb.ReadRowsResponse_Chunk_RowContents{&btdpb.Family{ - Name: fam, - Columns: []*btdpb.Column{colm}, - }}, - }) - } - rrr.Chunks = append(rrr.Chunks, &btspb.ReadRowsResponse_Chunk{Chunk: &btspb.ReadRowsResponse_Chunk_CommitRow{true}}) - return stream.Send(rrr) -} - -// filterRow modifies a row with the given filter. 
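The server's ReadRows above selects the half-open key interval [start, end) from the sorted row slice with two sort.Search calls. The technique standalone:

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        keys := []string{"a", "b", "c", "d", "e"} // sorted row keys
        start, end := "b", "d"                    // half-open interval [start, end)
        si := sort.Search(len(keys), func(i int) bool { return keys[i] >= start })
        ei := sort.Search(len(keys), func(i int) bool { return keys[i] >= end })
        fmt.Println(keys[si:ei]) // [b c]
    }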
-func filterRow(f *btdpb.RowFilter, r *row) { - if f == nil { - return - } - // Handle filters that apply beyond just including/excluding cells. - switch f := f.Filter.(type) { - case *btdpb.RowFilter_Chain_: - for _, sub := range f.Chain.Filters { - filterRow(sub, r) - } - return - case *btdpb.RowFilter_Interleave_: - srs := make([]*row, 0, len(f.Interleave.Filters)) - for _, sub := range f.Interleave.Filters { - sr := r.copy() - filterRow(sub, sr) - srs = append(srs, sr) - } - // merge - // TODO(dsymonds): is this correct? - r.cells = make(map[string][]cell) - for _, sr := range srs { - for col, cs := range sr.cells { - r.cells[col] = append(r.cells[col], cs...) - } - } - for _, cs := range r.cells { - sort.Sort(byDescTS(cs)) - } - return - case *btdpb.RowFilter_CellsPerColumnLimitFilter: - lim := int(f.CellsPerColumnLimitFilter) - for col, cs := range r.cells { - if len(cs) > lim { - r.cells[col] = cs[:lim] - } - } - return - } - - // Any other case, operate on a per-cell basis. - for key, cs := range r.cells { - i := strings.Index(key, ":") // guaranteed to exist - fam, col := key[:i], key[i+1:] - r.cells[key] = filterCells(f, fam, col, cs) - } -} - -func filterCells(f *btdpb.RowFilter, fam, col string, cs []cell) []cell { - var ret []cell - for _, cell := range cs { - if includeCell(f, fam, col, cell) { - ret = append(ret, cell) - } - } - return ret -} - -func includeCell(f *btdpb.RowFilter, fam, col string, cell cell) bool { - if f == nil { - return true - } - // TODO(dsymonds): Implement many more filters. - switch f := f.Filter.(type) { - default: - log.Printf("WARNING: don't know how to handle filter of type %T (ignoring it)", f) - return true - case *btdpb.RowFilter_ColumnQualifierRegexFilter: - pat := string(f.ColumnQualifierRegexFilter) - rx, err := regexp.Compile(pat) - if err != nil { - log.Printf("Bad column_qualifier_regex_filter pattern %q: %v", pat, err) - return false - } - return rx.MatchString(col) - case *btdpb.RowFilter_ValueRegexFilter: - pat := string(f.ValueRegexFilter) - rx, err := regexp.Compile(pat) - if err != nil { - log.Printf("Bad value_regex_filter pattern %q: %v", pat, err) - return false - } - return rx.Match(cell.value) - } -} - -func (s *server) MutateRow(ctx context.Context, req *btspb.MutateRowRequest) (*emptypb.Empty, error) { - s.mu.Lock() - tbl, ok := s.tables[req.TableName] - s.mu.Unlock() - if !ok { - return nil, fmt.Errorf("no such table %q", req.TableName) - } - - r := tbl.mutableRow(string(req.RowKey)) - r.mu.Lock() - defer r.mu.Unlock() - - if err := applyMutations(tbl, r, req.Mutations); err != nil { - return nil, err - } - return &emptypb.Empty{}, nil -} - -func (s *server) CheckAndMutateRow(ctx context.Context, req *btspb.CheckAndMutateRowRequest) (*btspb.CheckAndMutateRowResponse, error) { - s.mu.Lock() - tbl, ok := s.tables[req.TableName] - s.mu.Unlock() - if !ok { - return nil, fmt.Errorf("no such table %q", req.TableName) - } - - res := &btspb.CheckAndMutateRowResponse{} - - r := tbl.mutableRow(string(req.RowKey)) - r.mu.Lock() - defer r.mu.Unlock() - - // Figure out which mutation to apply. - whichMut := false - if req.PredicateFilter == nil { - // Use true_mutations iff row contains any cells. - whichMut = len(r.cells) > 0 - } else { - // Use true_mutations iff any cells in the row match the filter. - // TODO(dsymonds): This could be cheaper. 
- nr := r.copy() - filterRow(req.PredicateFilter, nr) - for _, cs := range nr.cells { - if len(cs) > 0 { - whichMut = true - break - } - } - // TODO(dsymonds): Figure out if this is supposed to be set - // even when there's no predicate filter. - res.PredicateMatched = whichMut - } - muts := req.FalseMutations - if whichMut { - muts = req.TrueMutations - } - - if err := applyMutations(tbl, r, muts); err != nil { - return nil, err - } - return res, nil -} - -// applyMutations applies a sequence of mutations to a row. -// It assumes r.mu is locked. -func applyMutations(tbl *table, r *row, muts []*btdpb.Mutation) error { - for _, mut := range muts { - switch mut := mut.Mutation.(type) { - default: - return fmt.Errorf("can't handle mutation type %T", mut) - case *btdpb.Mutation_SetCell_: - set := mut.SetCell - tbl.mu.RLock() - _, famOK := tbl.families[set.FamilyName] - tbl.mu.RUnlock() - if !famOK { - return fmt.Errorf("unknown family %q", set.FamilyName) - } - ts := set.TimestampMicros - if ts == -1 { // bigtable.ServerTime - ts = time.Now().UnixNano() / 1e3 - ts -= ts % 1000 // round to millisecond granularity - } - if !tbl.validTimestamp(ts) { - return fmt.Errorf("invalid timestamp %d", ts) - } - col := fmt.Sprintf("%s:%s", set.FamilyName, set.ColumnQualifier) - - cs := r.cells[col] - newCell := cell{ts: ts, value: set.Value} - replaced := false - for i, cell := range cs { - if cell.ts == newCell.ts { - cs[i] = newCell - replaced = true - break - } - } - if !replaced { - cs = append(cs, newCell) - } - sort.Sort(byDescTS(cs)) - r.cells[col] = cs - case *btdpb.Mutation_DeleteFromColumn_: - del := mut.DeleteFromColumn - col := fmt.Sprintf("%s:%s", del.FamilyName, del.ColumnQualifier) - - cs := r.cells[col] - if del.TimeRange != nil { - tsr := del.TimeRange - if !tbl.validTimestamp(tsr.StartTimestampMicros) { - return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros) - } - if !tbl.validTimestamp(tsr.EndTimestampMicros) { - return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros) - } - // Find half-open interval to remove. - // Cells are in descending timestamp order, - // so the predicates to sort.Search are inverted. - si, ei := 0, len(cs) - if tsr.StartTimestampMicros > 0 { - ei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros }) - } - if tsr.EndTimestampMicros > 0 { - si = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros }) - } - if si < ei { - copy(cs[si:], cs[ei:]) - cs = cs[:len(cs)-(ei-si)] - } - } else { - cs = nil - } - if len(cs) == 0 { - delete(r.cells, col) - } else { - r.cells[col] = cs - } - case *btdpb.Mutation_DeleteFromRow_: - r.cells = make(map[string][]cell) - } - } - return nil -} - -func (s *server) ReadModifyWriteRow(ctx context.Context, req *btspb.ReadModifyWriteRowRequest) (*btdpb.Row, error) { - s.mu.Lock() - tbl, ok := s.tables[req.TableName] - s.mu.Unlock() - if !ok { - return nil, fmt.Errorf("no such table %q", req.TableName) - } - - updates := make(map[string]cell) // copy of updated cells; keyed by full column name - - r := tbl.mutableRow(string(req.RowKey)) - r.mu.Lock() - defer r.mu.Unlock() - // Assume all mutations apply to the most recent version of the cell. - // TODO(dsymonds): Verify this assumption and document it in the proto. 
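// (Aside: a worked example, with hypothetical values, of the increment
// encoding enforced below. Counter cells hold exactly eight big-endian
// bytes:
//
//	var buf [8]byte
//	binary.BigEndian.PutUint64(buf[:], uint64(42)) // encode 42
//	v := int64(binary.BigEndian.Uint64(buf[:]))    // decode; v == 42
//
// A value of any other length fails the increment rule.)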
- for _, rule := range req.Rules { - tbl.mu.RLock() - _, famOK := tbl.families[rule.FamilyName] - tbl.mu.RUnlock() - if !famOK { - return nil, fmt.Errorf("unknown family %q", rule.FamilyName) - } - - key := fmt.Sprintf("%s:%s", rule.FamilyName, rule.ColumnQualifier) - - newCell := false - if len(r.cells[key]) == 0 { - r.cells[key] = []cell{{ - // TODO(dsymonds): should this set a timestamp? - }} - newCell = true - } - cell := &r.cells[key][0] - - switch rule := rule.Rule.(type) { - default: - return nil, fmt.Errorf("unknown RMW rule oneof %T", rule) - case *btdpb.ReadModifyWriteRule_AppendValue: - cell.value = append(cell.value, rule.AppendValue...) - case *btdpb.ReadModifyWriteRule_IncrementAmount: - var v int64 - if !newCell { - if len(cell.value) != 8 { - return nil, fmt.Errorf("increment on non-64-bit value") - } - v = int64(binary.BigEndian.Uint64(cell.value)) - } - v += rule.IncrementAmount - var val [8]byte - binary.BigEndian.PutUint64(val[:], uint64(v)) - cell.value = val[:] - } - updates[key] = *cell - } - - res := &btdpb.Row{ - Key: req.RowKey, - } - for col, cell := range updates { - i := strings.Index(col, ":") - fam, qual := col[:i], col[i+1:] - var f *btdpb.Family - for _, ff := range res.Families { - if ff.Name == fam { - f = ff - break - } - } - if f == nil { - f = &btdpb.Family{Name: fam} - res.Families = append(res.Families, f) - } - f.Columns = append(f.Columns, &btdpb.Column{ - Qualifier: []byte(qual), - Cells: []*btdpb.Cell{{ - Value: cell.value, - }}, - }) - } - return res, nil -} - -// needGC is invoked whenever the server needs gcloop running. -func (s *server) needGC() { - s.mu.Lock() - if s.gcc == nil { - s.gcc = make(chan int) - go s.gcloop(s.gcc) - } - s.mu.Unlock() -} - -func (s *server) gcloop(done <-chan int) { - const ( - minWait = 500 // ms - maxWait = 1500 // ms - ) - - for { - // Wait for a random time interval. - d := time.Duration(minWait+rand.Intn(maxWait-minWait)) * time.Millisecond - select { - case <-time.After(d): - case <-done: - return // server has been closed - } - - // Do a GC pass over all tables. - var tables []*table - s.mu.Lock() - for _, tbl := range s.tables { - tables = append(tables, tbl) - } - s.mu.Unlock() - for _, tbl := range tables { - tbl.gc() - } - } -} - -type table struct { - mu sync.RWMutex - families map[string]*columnFamily // keyed by plain family name - rows []*row // sorted by row key - rowIndex map[string]*row // indexed by row key -} - -func newTable() *table { - return &table{ - families: make(map[string]*columnFamily), - rowIndex: make(map[string]*row), - } -} - -func (t *table) validTimestamp(ts int64) bool { - // Assume millisecond granularity is required. - return ts%1000 == 0 -} - -func (t *table) mutableRow(row string) *row { - // Try fast path first. - t.mu.RLock() - r := t.rowIndex[row] - t.mu.RUnlock() - if r != nil { - return r - } - - // We probably need to create the row. - t.mu.Lock() - r = t.rowIndex[row] - if r == nil { - r = newRow(row) - t.rowIndex[row] = r - t.rows = append(t.rows, r) - sort.Sort(byRowKey(t.rows)) // yay, inefficient! - } - t.mu.Unlock() - return r -} - -func (t *table) gc() { - // This method doesn't add or remove rows, so we only need a read lock for the table. - t.mu.RLock() - defer t.mu.RUnlock() - - // Gather GC rules we'll apply. 
- rules := make(map[string]*bttdpb.GcRule) // keyed by "fam" - for fam, cf := range t.families { - if cf.gcRule != nil { - rules[fam] = cf.gcRule - } - } - if len(rules) == 0 { - return - } - - for _, r := range t.rows { - r.mu.Lock() - r.gc(rules) - r.mu.Unlock() - } -} - -type byRowKey []*row - -func (b byRowKey) Len() int { return len(b) } -func (b byRowKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byRowKey) Less(i, j int) bool { return b[i].key < b[j].key } - -type row struct { - key string - - mu sync.Mutex - cells map[string][]cell // keyed by full column name; cells are in descending timestamp order -} - -func newRow(key string) *row { - return &row{ - key: key, - cells: make(map[string][]cell), - } -} - -// copy returns a copy of the row. -// Cell values are aliased. -// r.mu should be held. -func (r *row) copy() *row { - nr := &row{ - key: r.key, - cells: make(map[string][]cell, len(r.cells)), - } - for col, cs := range r.cells { - // Copy the []cell slice, but not the []byte inside each cell. - nr.cells[col] = append([]cell(nil), cs...) - } - return nr -} - -// gc applies the given GC rules to the row. -// r.mu should be held. -func (r *row) gc(rules map[string]*bttdpb.GcRule) { - for col, cs := range r.cells { - fam := col[:strings.Index(col, ":")] - rule, ok := rules[fam] - if !ok { - continue - } - r.cells[col] = applyGC(cs, rule) - } -} - -var gcTypeWarn sync.Once - -// applyGC applies the given GC rule to the cells. -func applyGC(cells []cell, rule *bttdpb.GcRule) []cell { - switch rule := rule.Rule.(type) { - default: - // TODO(dsymonds): Support GcRule_Intersection_ - gcTypeWarn.Do(func() { - log.Printf("Unsupported GC rule type %T", rule) - }) - case *bttdpb.GcRule_Union_: - for _, sub := range rule.Union.Rules { - cells = applyGC(cells, sub) - } - return cells - case *bttdpb.GcRule_MaxAge: - // Timestamps are in microseconds. - cutoff := time.Now().UnixNano() / 1e3 - cutoff -= rule.MaxAge.Seconds * 1e6 - cutoff -= int64(rule.MaxAge.Nanos) / 1e3 - // The slice of cells in in descending timestamp order. - // This sort.Search will return the index of the first cell whose timestamp is chronologically before the cutoff. 
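// (Worked example with hypothetical values: for cells at descending
// timestamps [900 700 500 300 100] µs and a cutoff of 400 µs, the
// predicate first holds at index 3, so si == 3 and cells[:3], the three
// newest cells, survive the MaxAge rule.)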
- si := sort.Search(len(cells), func(i int) bool { return cells[i].ts < cutoff }) - if si < len(cells) { - log.Printf("bttest: GC MaxAge(%v) deleted %d cells.", rule.MaxAge, len(cells)-si) - } - return cells[:si] - case *bttdpb.GcRule_MaxNumVersions: - n := int(rule.MaxNumVersions) - if len(cells) > n { - log.Printf("bttest: GC MaxNumVersions(%d) deleted %d cells.", n, len(cells)-n) - cells = cells[:n] - } - return cells - } - return cells -} - -type cell struct { - ts int64 - value []byte -} - -type byDescTS []cell - -func (b byDescTS) Len() int { return len(b) } -func (b byDescTS) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byDescTS) Less(i, j int) bool { return b[i].ts > b[j].ts } - -type columnFamily struct { - name string - gcRule *bttdpb.GcRule -} - -func (c *columnFamily) proto() *bttdpb.ColumnFamily { - return &bttdpb.ColumnFamily{ - Name: c.name, - GcRule: c.gcRule, - } -} - -func toColumnFamilies(families map[string]*columnFamily) map[string]*bttdpb.ColumnFamily { - f := make(map[string]*bttdpb.ColumnFamily) - for k, v := range families { - f[k] = v.proto() - } - return f -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbt.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbt.go deleted file mode 100644 index 504686b709..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbt.go +++ /dev/null @@ -1,580 +0,0 @@ -/* -Copyright 2015 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// Command docs are in cbtdoc.go. 
- -import ( - "bytes" - "flag" - "fmt" - "go/format" - "log" - "os" - "regexp" - "sort" - "strconv" - "strings" - "text/tabwriter" - "text/template" - "time" - - "golang.org/x/net/context" - "google.golang.org/cloud/bigtable" - "google.golang.org/cloud/bigtable/internal/cbtrc" -) - -var ( - oFlag = flag.String("o", "", "if set, redirect stdout to this file") - - config *cbtrc.Config - client *bigtable.Client - adminClient *bigtable.AdminClient - clusterAdminClient *bigtable.ClusterAdminClient -) - -func getClient() *bigtable.Client { - if client == nil { - var err error - client, err = bigtable.NewClient(context.Background(), config.Project, config.Zone, config.Cluster) - if err != nil { - log.Fatalf("Making bigtable.Client: %v", err) - } - } - return client -} - -func getAdminClient() *bigtable.AdminClient { - if adminClient == nil { - var err error - adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Zone, config.Cluster) - if err != nil { - log.Fatalf("Making bigtable.AdminClient: %v", err) - } - } - return adminClient -} - -func getClusterAdminClient() *bigtable.ClusterAdminClient { - if clusterAdminClient == nil { - var err error - clusterAdminClient, err = bigtable.NewClusterAdminClient(context.Background(), config.Project) - if err != nil { - log.Fatalf("Making bigtable.ClusterAdminClient: %v", err) - } - } - return clusterAdminClient -} - -func main() { - var err error - config, err = cbtrc.Load() - if err != nil { - log.Fatal(err) - } - config.RegisterFlags() - - flag.Usage = usage - flag.Parse() - if err := config.CheckFlags(); err != nil { - log.Fatal(err) - } - if config.Creds != "" { - os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds) - } - if flag.NArg() == 0 { - usage() - os.Exit(1) - } - - if *oFlag != "" { - f, err := os.Create(*oFlag) - if err != nil { - log.Fatal(err) - } - defer func() { - if err := f.Close(); err != nil { - log.Fatal(err) - } - }() - os.Stdout = f - } - - ctx := context.Background() - for _, cmd := range commands { - if cmd.Name == flag.Arg(0) { - cmd.do(ctx, flag.Args()[1:]...) - return - } - } - log.Fatalf("Unknown command %q", flag.Arg(0)) -} - -func usage() { - fmt.Fprintf(os.Stderr, "Usage: %s [flags] ...\n", os.Args[0]) - flag.PrintDefaults() - fmt.Fprintf(os.Stderr, "\n%s", cmdSummary) -} - -var cmdSummary string // generated in init, below - -func init() { - var buf bytes.Buffer - tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0) - for _, cmd := range commands { - fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc) - } - tw.Flush() - buf.WriteString(configHelp) - cmdSummary = buf.String() -} - -var configHelp = ` -For convenience, values of the -project, -zone, -cluster and -creds flags -may be specified in ` + cbtrc.Filename() + ` in this format: - project = my-project-123 - zone = us-central1-b - cluster = my-cluster - creds = path-to-account-key.json -All values are optional, and all will be overridden by flags. -` - -var commands = []struct { - Name, Desc string - do func(context.Context, ...string) - Usage string -}{ - { - Name: "count", - Desc: "Count rows in a table", - do: doCount, - Usage: "cbt count ", - }, - { - Name: "createfamily", - Desc: "Create a column family", - do: doCreateFamily, - Usage: "cbt createfamily
", - }, - { - Name: "createtable", - Desc: "Create a table", - do: doCreateTable, - Usage: "cbt createtable
", - }, - { - Name: "deletefamily", - Desc: "Delete a column family", - do: doDeleteFamily, - Usage: "cbt deletefamily
", - }, - { - Name: "deleterow", - Desc: "Delete a row", - do: doDeleteRow, - Usage: "cbt deleterow
", - }, - { - Name: "deletetable", - Desc: "Delete a table", - do: doDeleteTable, - Usage: "cbt deletetable
", - }, - { - Name: "doc", - Desc: "Print documentation for cbt", - do: doDoc, - Usage: "cbt doc", - }, - { - Name: "help", - Desc: "Print help text", - do: doHelp, - Usage: "cbt help [command]", - }, - { - Name: "listclusters", - Desc: "List clusters in a project", - do: doListClusters, - Usage: "cbt listclusters", - }, - { - Name: "lookup", - Desc: "Read from a single row", - do: doLookup, - Usage: "cbt lookup
", - }, - { - Name: "ls", - Desc: "List tables and column families", - do: doLS, - Usage: "cbt ls List tables\n" + - "cbt ls
<table>    List column families in <table>
", - }, - { - Name: "read", - Desc: "Read rows", - do: doRead, - Usage: "cbt read
<table> [start=<row>] [limit=<row>] [prefix=<prefix>]\n" +
- " start=<row> Start reading at this row\n" +
- " limit=<row> Stop reading before this row\n" +
- " prefix=<prefix> Read rows with this prefix\n",
- },
- {
- Name: "set",
- Desc: "Set value of a cell",
- do: doSet,
- Usage: "cbt set
<table> <row> family:column=val[@ts] ...\n" +
- " family:column=val[@ts] may be repeated to set multiple cells.\n" +
- "\n" +
- " ts is an optional integer timestamp.\n" +
- " If it cannot be parsed, the `@ts` part will be\n" +
- " interpreted as part of the value.",
- },
- /* TODO(dsymonds): Re-enable when there's a ClusterAdmin API.
- {
- Name: "setclustersize",
- Desc: "Set size of a cluster",
- do: doSetClusterSize,
- Usage: "cbt setclustersize <num_nodes>",
- },
- */
-}
-
-func doCount(ctx context.Context, args ...string) {
- if len(args) != 1 {
- log.Fatal("usage: cbt count
") - } - tbl := getClient().Open(args[0]) - - n := 0 - err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool { - n++ - return true - }, bigtable.RowFilter(bigtable.StripValueFilter())) - if err != nil { - log.Fatalf("Reading rows: %v", err) - } - fmt.Println(n) -} - -func doCreateFamily(ctx context.Context, args ...string) { - if len(args) != 2 { - log.Fatal("usage: cbt createfamily
") - } - err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1]) - if err != nil { - log.Fatalf("Creating column family: %v", err) - } -} - -func doCreateTable(ctx context.Context, args ...string) { - if len(args) != 1 { - log.Fatal("usage: cbt createtable
") - } - err := getAdminClient().CreateTable(ctx, args[0]) - if err != nil { - log.Fatalf("Creating table: %v", err) - } -} - -func doDeleteFamily(ctx context.Context, args ...string) { - if len(args) != 2 { - log.Fatal("usage: cbt deletefamily
") - } - err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1]) - if err != nil { - log.Fatalf("Deleting column family: %v", err) - } -} - -func doDeleteRow(ctx context.Context, args ...string) { - if len(args) != 2 { - log.Fatal("usage: cbt deleterow
") - } - tbl := getClient().Open(args[0]) - mut := bigtable.NewMutation() - mut.DeleteRow() - if err := tbl.Apply(ctx, args[1], mut); err != nil { - log.Fatalf("Deleting row: %v", err) - } -} - -func doDeleteTable(ctx context.Context, args ...string) { - if len(args) != 1 { - log.Fatalf("Can't do `cbt deletetable %s`", args) - } - err := getAdminClient().DeleteTable(ctx, args[0]) - if err != nil { - log.Fatalf("Deleting table: %v", err) - } -} - -// to break circular dependencies -var ( - doDocFn func(ctx context.Context, args ...string) - doHelpFn func(ctx context.Context, args ...string) -) - -func init() { - doDocFn = doDocReal - doHelpFn = doHelpReal -} - -func doDoc(ctx context.Context, args ...string) { doDocFn(ctx, args...) } -func doHelp(ctx context.Context, args ...string) { doHelpFn(ctx, args...) } - -func doDocReal(ctx context.Context, args ...string) { - data := map[string]interface{}{ - "Commands": commands, - } - var buf bytes.Buffer - if err := docTemplate.Execute(&buf, data); err != nil { - log.Fatalf("Bad doc template: %v", err) - } - out, err := format.Source(buf.Bytes()) - if err != nil { - log.Fatalf("Bad doc output: %v", err) - } - os.Stdout.Write(out) -} - -var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{ - "indent": func(s, ind string) string { - ss := strings.Split(s, "\n") - for i, p := range ss { - ss[i] = ind + p - } - return strings.Join(ss, "\n") - }, -}). - Parse(` -// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. -// Run "go generate" to regenerate. -//go:generate go run cbt.go -o cbtdoc.go doc - -/* -Cbt is a tool for doing basic interactions with Cloud Bigtable. - -Usage: - - cbt [options] command [arguments] - -The commands are: -{{range .Commands}} - {{printf "%-25s %s" .Name .Desc}}{{end}} - -Use "cbt help " for more information about a command. - -{{range .Commands}} -{{.Desc}} - -Usage: -{{indent .Usage "\t"}} - - - -{{end}} -*/ -package main -`)) - -func doHelpReal(ctx context.Context, args ...string) { - if len(args) == 0 { - fmt.Print(cmdSummary) - return - } - for _, cmd := range commands { - if cmd.Name == args[0] { - fmt.Println(cmd.Usage) - return - } - } - log.Fatalf("Don't know command %q", args[0]) -} - -func doListClusters(ctx context.Context, args ...string) { - if len(args) != 0 { - log.Fatalf("usage: cbt listclusters") - } - cis, err := getClusterAdminClient().Clusters(ctx) - if err != nil { - log.Fatalf("Getting list of clusters: %v", err) - } - tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0) - fmt.Fprintf(tw, "Cluster Name\tZone\tInfo\n") - fmt.Fprintf(tw, "------------\t----\t----\n") - for _, ci := range cis { - fmt.Fprintf(tw, "%s\t%s\t%s (%d serve nodes)\n", ci.Name, ci.Zone, ci.DisplayName, ci.ServeNodes) - } - tw.Flush() -} - -func doLookup(ctx context.Context, args ...string) { - if len(args) != 2 { - log.Fatalf("usage: cbt lookup
") - } - table, row := args[0], args[1] - tbl := getClient().Open(table) - r, err := tbl.ReadRow(ctx, row) - if err != nil { - log.Fatalf("Reading row: %v", err) - } - printRow(r) -} - -func printRow(r bigtable.Row) { - fmt.Println(strings.Repeat("-", 40)) - fmt.Println(r.Key()) - - var fams []string - for fam := range r { - fams = append(fams, fam) - } - sort.Strings(fams) - for _, fam := range fams { - ris := r[fam] - sort.Sort(byColumn(ris)) - for _, ri := range ris { - ts := time.Unix(0, int64(ri.Timestamp)*1e3) - fmt.Printf(" %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000")) - fmt.Printf(" %q\n", ri.Value) - } - } -} - -type byColumn []bigtable.ReadItem - -func (b byColumn) Len() int { return len(b) } -func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column } - -func doLS(ctx context.Context, args ...string) { - switch len(args) { - default: - log.Fatalf("Can't do `cbt ls %s`", args) - case 0: - tables, err := getAdminClient().Tables(ctx) - if err != nil { - log.Fatalf("Getting list of tables: %v", err) - } - sort.Strings(tables) - for _, table := range tables { - fmt.Println(table) - } - case 1: - table := args[0] - ti, err := getAdminClient().TableInfo(ctx, table) - if err != nil { - log.Fatalf("Getting table info: %v", err) - } - sort.Strings(ti.Families) - for _, fam := range ti.Families { - fmt.Println(fam) - } - } -} - -func doRead(ctx context.Context, args ...string) { - if len(args) < 1 { - log.Fatalf("usage: cbt read
[args ...]")
- }
- tbl := getClient().Open(args[0])
-
- parsed := make(map[string]string)
- for _, arg := range args[1:] {
- i := strings.Index(arg, "=")
- if i < 0 {
- log.Fatalf("Bad arg %q", arg)
- }
- key, val := arg[:i], arg[i+1:]
- switch key {
- default:
- log.Fatalf("Unknown arg key %q", key)
- case "start", "limit", "prefix":
- parsed[key] = val
- }
- }
- if (parsed["start"] != "" || parsed["limit"] != "") && parsed["prefix"] != "" {
- log.Fatal(`"start"/"limit" may not be mixed with "prefix"`)
- }
-
- var rr bigtable.RowRange
- if start, limit := parsed["start"], parsed["limit"]; limit != "" {
- rr = bigtable.NewRange(start, limit)
- } else if start != "" {
- rr = bigtable.InfiniteRange(start)
- }
- if prefix := parsed["prefix"]; prefix != "" {
- rr = bigtable.PrefixRange(prefix)
- }
-
- // TODO(dsymonds): Support filters.
- err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
- printRow(r)
- return true
- })
- if err != nil {
- log.Fatalf("Reading rows: %v", err)
- }
-}
-
-var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`)
-
-func doSet(ctx context.Context, args ...string) {
- if len(args) < 3 {
- log.Fatalf("usage: cbt set <table> <row>
family:[column]=val[@ts] ...") - } - tbl := getClient().Open(args[0]) - row := args[1] - mut := bigtable.NewMutation() - for _, arg := range args[2:] { - m := setArg.FindStringSubmatch(arg) - if m == nil { - log.Fatalf("Bad set arg %q", arg) - } - val := m[3] - ts := bigtable.Now() - if i := strings.LastIndex(val, "@"); i >= 0 { - // Try parsing a timestamp. - n, err := strconv.ParseInt(val[i+1:], 0, 64) - if err == nil { - val = val[:i] - ts = bigtable.Timestamp(n) - } - } - mut.Set(m[1], m[2], ts, []byte(val)) - } - if err := tbl.Apply(ctx, row, mut); err != nil { - log.Fatalf("Applying mutation: %v", err) - } -} - -/* TODO(dsymonds): Re-enable when there's a ClusterAdmin API. -func doSetClusterSize(ctx context.Context, args ...string) { - if len(args) != 1 { - log.Fatalf("usage: cbt setclustersize ") - } - n, err := strconv.ParseInt(args[0], 0, 32) - if err != nil { - log.Fatalf("Bad num_nodes value %q: %v", args[0], err) - } - if err := getAdminClient().SetClusterSize(ctx, int(n)); err != nil { - log.Fatalf("Setting cluster size: %v", err) - } -} -*/ diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbtdoc.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbtdoc.go deleted file mode 100644 index 0e00367225..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbtdoc.go +++ /dev/null @@ -1,146 +0,0 @@ -// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. -// Run "go generate" to regenerate. -//go:generate go run cbt.go -o cbtdoc.go doc - -/* -Cbt is a tool for doing basic interactions with Cloud Bigtable. - -Usage: - - cbt [options] command [arguments] - -The commands are: - - count Count rows in a table - createfamily Create a column family - createtable Create a table - deletefamily Delete a column family - deleterow Delete a row - deletetable Delete a table - doc Print documentation for cbt - help Print help text - listclusters List clusters in a project - lookup Read from a single row - ls List tables and column families - read Read rows - set Set value of a cell - -Use "cbt help " for more information about a command. - - -Count rows in a table - -Usage: - cbt count
<table>
-
-
-
-
-Create a column family
-
-Usage:
- cbt createfamily
<table> <family>
-
-
-
-
-Create a table
-
-Usage:
- cbt createtable
<table>
-
-
-
-
-Delete a column family
-
-Usage:
- cbt deletefamily
<table> <family>
-
-
-
-
-Delete a row
-
-Usage:
- cbt deleterow
<table> <row>
-
-
-
-
-Delete a table
-
-Usage:
- cbt deletetable
<table>
-
-
-
-
-Print documentation for cbt
-
-Usage:
- cbt doc
-
-
-
-
-Print help text
-
-Usage:
- cbt help [command]
-
-
-
-
-List clusters in a project
-
-Usage:
- cbt listclusters
-
-
-
-
-Read from a single row
-
-Usage:
- cbt lookup
<table> <row>
-
-
-
-
-List tables and column families
-
-Usage:
- cbt ls List tables
- cbt ls
<table>    List column families in <table>
- - - - -Read rows - -Usage: - cbt read
<table> [start=<row>] [limit=<row>] [prefix=<prefix>]
- start=<row> Start reading at this row
- limit=<row> Stop reading before this row
- prefix=<prefix> Read rows with this prefix
-
-
-
-
-
-Set value of a cell
-
-Usage:
- cbt set <table> <row>
family:column=val[@ts] ... - family:column=val[@ts] may be repeated to set multiple cells. - - ts is an optional integer timestamp. - If it cannot be parsed, the `@ts` part will be - interpreted as part of the value. - - - - -*/ -package main diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/loadtest.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/loadtest.go deleted file mode 100644 index ff5c5a1510..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/loadtest.go +++ /dev/null @@ -1,159 +0,0 @@ -/* -Copyright 2015 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Loadtest does some load testing through the Go client library for Cloud Bigtable. -*/ -package main - -import ( - "bytes" - "flag" - "fmt" - "log" - "math/rand" - "os" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/context" - "google.golang.org/cloud/bigtable" - "google.golang.org/cloud/bigtable/internal/cbtrc" -) - -var ( - runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for") - scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist") - - config *cbtrc.Config - client *bigtable.Client - adminClient *bigtable.AdminClient -) - -func main() { - var err error - config, err = cbtrc.Load() - if err != nil { - log.Fatal(err) - } - config.RegisterFlags() - - flag.Parse() - if err := config.CheckFlags(); err != nil { - log.Fatal(err) - } - if config.Creds != "" { - os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds) - } - if flag.NArg() != 0 { - flag.Usage() - os.Exit(1) - } - - log.Printf("Dialing connections...") - client, err = bigtable.NewClient(context.Background(), config.Project, config.Zone, config.Cluster) - if err != nil { - log.Fatalf("Making bigtable.Client: %v", err) - } - defer client.Close() - adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Zone, config.Cluster) - if err != nil { - log.Fatalf("Making bigtable.AdminClient: %v", err) - } - defer adminClient.Close() - - // Create a scratch table. - log.Printf("Setting up scratch table...") - if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil { - log.Fatalf("Making scratch table %q: %v", *scratchTable, err) - } - if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil { - log.Fatalf("Making scratch table column family: %v", err) - } - // Upon a successful run, delete the table. Don't bother checking for errors. - defer adminClient.DeleteTable(context.Background(), *scratchTable) - - log.Printf("Starting load test... 
(run for %v)", *runFor) - tbl := client.Open(*scratchTable) - sem := make(chan int, 100) // limit the number of requests happening at once - var reads, writes stats - stopTime := time.Now().Add(*runFor) - var wg sync.WaitGroup - for time.Now().Before(stopTime) { - sem <- 1 - wg.Add(1) - go func() { - defer wg.Done() - defer func() { <-sem }() - - ok := true - opStart := time.Now() - var stats *stats - defer func() { - stats.Record(ok, time.Since(opStart)) - }() - - row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows - - switch rand.Intn(10) { - default: - // read - stats = &reads - _, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1))) - if err != nil { - log.Printf("Error doing read: %v", err) - ok = false - } - case 0, 1, 2, 3, 4: - // write - stats = &writes - mut := bigtable.NewMutation() - mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write - if err := tbl.Apply(context.Background(), row, mut); err != nil { - log.Printf("Error doing mutation: %v", err) - ok = false - } - } - }() - } - wg.Wait() - - log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, newAggregate(reads.ds)) - log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, newAggregate(writes.ds)) -} - -var allStats int64 // atomic - -type stats struct { - mu sync.Mutex - tries, ok int - ds []time.Duration -} - -func (s *stats) Record(ok bool, d time.Duration) { - s.mu.Lock() - s.tries++ - if ok { - s.ok++ - } - s.ds = append(s.ds, d) - s.mu.Unlock() - - if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 { - log.Printf("Progress: done %d ops", n) - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/stats.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/stats.go deleted file mode 100644 index 57c12da1e7..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/stats.go +++ /dev/null @@ -1,97 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "math" - "sort" - "text/tabwriter" - "time" -) - -type byDuration []time.Duration - -func (data byDuration) Len() int { return len(data) } -func (data byDuration) Swap(i, j int) { data[i], data[j] = data[j], data[i] } -func (data byDuration) Less(i, j int) bool { return data[i] < data[j] } - -// quantile returns a value representing the kth of q quantiles. -// May alter the order of data. -func quantile(data []time.Duration, k, q int) (quantile time.Duration, ok bool) { - if len(data) < 1 { - return 0, false - } - if k > q { - return 0, false - } - if k < 0 || q < 1 { - return 0, false - } - - sort.Sort(byDuration(data)) - - if k == 0 { - return data[0], true - } - if k == q { - return data[len(data)-1], true - } - - bucketSize := float64(len(data)-1) / float64(q) - i := float64(k) * bucketSize - - lower := int(math.Trunc(i)) - var upper int - if i > float64(lower) && lower+1 < len(data) { - // If the quantile lies between two elements - upper = lower + 1 - } else { - upper = lower - } - weightUpper := i - float64(lower) - weightLower := 1 - weightUpper - return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true -} - -type aggregate struct { - min, median, max time.Duration - p95, p99 time.Duration // percentiles -} - -// newAggregate constructs an aggregate from latencies. Returns nil if latencies does not contain aggregateable data. 
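// (Illustrative call, with made-up durations:
//
//	ds := []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond}
//	p95, _ := quantile(ds, 95, 100)
//
// the index 95 * (3-1)/100 = 1.9 falls between the last two samples, so
// quantile interpolates to 0.1*20ms + 0.9*30ms = 29ms.)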
-func newAggregate(latencies []time.Duration) *aggregate { - var agg aggregate - - if len(latencies) == 0 { - return nil - } - var ok bool - if agg.min, ok = quantile(latencies, 0, 2); !ok { - return nil - } - if agg.median, ok = quantile(latencies, 1, 2); !ok { - return nil - } - if agg.max, ok = quantile(latencies, 2, 2); !ok { - return nil - } - if agg.p95, ok = quantile(latencies, 95, 100); !ok { - return nil - } - if agg.p99, ok = quantile(latencies, 99, 100); !ok { - return nil - } - return &agg -} - -func (agg *aggregate) String() string { - if agg == nil { - return "no data" - } - var buf bytes.Buffer - tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding - fmt.Fprintf(tw, "min:\t%v\nmedian:\t%v\nmax:\t%v\n95th percentile:\t%v\n99th percentile:\t%v\n", - agg.min, agg.median, agg.max, agg.p95, agg.p99) - tw.Flush() - return buf.String() -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/doc.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/doc.go deleted file mode 100644 index 032e501dd4..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/doc.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2015 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package bigtable is an API to Google Cloud Bigtable. - -See https://cloud.google.com/bigtable/docs/ for general product documentation. - -Setup and Credentials - -Use NewClient or NewAdminClient to create a client that can be used to access -the data or admin APIs respectively. Both require credentials that have permission -to access the Cloud Bigtable API. - -If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials -(https://developers.google.com/accounts/docs/application-default-credentials) -is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called. - -To use alternate credentials, pass them to NewClient or NewAdminClient using cloud.WithTokenSource. -For instance, you can use service account credentials by visiting -https://cloud.google.com/console/project/MYPROJECT/apiui/credential, -creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing - jsonKey, err := ioutil.ReadFile(pathToKeyFile) - ... - config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc. - ... - client, err := bigtable.NewClient(ctx, project, zone, cluster, cloud.WithTokenSource(config.TokenSource(ctx))) - ... -Here, `google` means the golang.org/x/oauth2/google package -and `cloud` means the google.golang.org/cloud package. - -Reading - -The principal way to read from a Bigtable is to use the ReadRows method on *Table. -A RowRange specifies a contiguous portion of a table. A Filter may be provided through -RowFilter to limit or transform the data that is returned. - tbl := client.Open("mytable") - ... - // Read all the rows starting with "com.google.", - // but only fetch the columns in the "links" family. 
- rr := bigtable.PrefixRange("com.google.") - err := tbl.ReadRows(ctx, rr, func(r Row) bool { - // do something with r - return true // keep going - }, bigtable.RowFilter(bigtable.FamilyFilter("links"))) - ... - -To read a single row, use the ReadRow helper method. - r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key - ... - -Writing - -This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite. -The former expresses idempotent operations. -The latter expresses non-idempotent operations and returns the new values of updated cells. -These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite), -building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite -methods on a Table. - -For instance, to set a couple of cells in a table, - tbl := client.Open("mytable") - mut := bigtable.NewMutation() - mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1")) - mut.Set("links", "golang.org", bigtable.Now(), []byte("1")) - err := tbl.Apply(ctx, "com.google.cloud", mut) - ... - -To increment an encoded value in one cell, - tbl := client.Open("mytable") - rmw := bigtable.NewReadModifyWrite() - rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org" - r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw) - ... -*/ -package bigtable // import "google.golang.org/cloud/bigtable" - -// Scope constants for authentication credentials. -// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile. -const ( - // Scope is the OAuth scope for Cloud Bigtable data operations. - Scope = "https://www.googleapis.com/auth/bigtable.data" - // ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations. - ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly" - - // AdminScope is the OAuth scope for Cloud Bigtable table admin operations. - AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table" - - // ClusterAdminScope is the OAuth scope for Cloud Bigtable cluster admin operations. - ClusterAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster" -) - -// clientUserAgent identifies the version of this package. -// It should be bumped upon significant changes only. -const clientUserAgent = "cbt-go/20150727" diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/filter.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/filter.go deleted file mode 100644 index dbe247b1bc..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/filter.go +++ /dev/null @@ -1,156 +0,0 @@ -/* -Copyright 2015 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package bigtable - -import ( - "fmt" - "strings" - - btdpb "google.golang.org/cloud/bigtable/internal/data_proto" -) - -// A Filter represents a row filter. 
-type Filter interface { - String() string - proto() *btdpb.RowFilter -} - -// ChainFilters returns a filter that applies a sequence of filters. -func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} } - -type chainFilter struct { - sub []Filter -} - -func (cf chainFilter) String() string { - var ss []string - for _, sf := range cf.sub { - ss = append(ss, sf.String()) - } - return "(" + strings.Join(ss, " | ") + ")" -} - -func (cf chainFilter) proto() *btdpb.RowFilter { - chain := &btdpb.RowFilter_Chain{} - for _, sf := range cf.sub { - chain.Filters = append(chain.Filters, sf.proto()) - } - return &btdpb.RowFilter{ - Filter: &btdpb.RowFilter_Chain_{chain}, - } -} - -// InterleaveFilters returns a filter that applies a set of filters in parallel -// and interleaves the results. -func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} } - -type interleaveFilter struct { - sub []Filter -} - -func (ilf interleaveFilter) String() string { - var ss []string - for _, sf := range ilf.sub { - ss = append(ss, sf.String()) - } - return "(" + strings.Join(ss, " + ") + ")" -} - -func (ilf interleaveFilter) proto() *btdpb.RowFilter { - inter := &btdpb.RowFilter_Interleave{} - for _, sf := range ilf.sub { - inter.Filters = append(inter.Filters, sf.proto()) - } - return &btdpb.RowFilter{ - Filter: &btdpb.RowFilter_Interleave_{inter}, - } -} - -// RowKeyFilter returns a filter that matches cells from rows whose -// key matches the provided RE2 pattern. -// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. -func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) } - -type rowKeyFilter string - -func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) } - -func (rkf rowKeyFilter) proto() *btdpb.RowFilter { - return &btdpb.RowFilter{Filter: &btdpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}} -} - -// FamilyFilter returns a filter that matches cells whose family name -// matches the provided RE2 pattern. -// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. -func FamilyFilter(pattern string) Filter { return familyFilter(pattern) } - -type familyFilter string - -func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) } - -func (ff familyFilter) proto() *btdpb.RowFilter { - return &btdpb.RowFilter{Filter: &btdpb.RowFilter_FamilyNameRegexFilter{string(ff)}} -} - -// ColumnFilter returns a filter that matches cells whose column name -// matches the provided RE2 pattern. -// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. -func ColumnFilter(pattern string) Filter { return columnFilter(pattern) } - -type columnFilter string - -func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) } - -func (cf columnFilter) proto() *btdpb.RowFilter { - return &btdpb.RowFilter{Filter: &btdpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}} -} - -// ValueFilter returns a filter that matches cells whose value -// matches the provided RE2 pattern. -// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. -func ValueFilter(pattern string) Filter { return valueFilter(pattern) } - -type valueFilter string - -func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) } - -func (vf valueFilter) proto() *btdpb.RowFilter { - return &btdpb.RowFilter{Filter: &btdpb.RowFilter_ValueRegexFilter{[]byte(vf)}} -} - -// LatestNFilter returns a filter that matches the most recent N cells in each column. 
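// (Sketch of how these constructors compose; the names are as defined in
// this file, applied through the RowFilter read option shown in doc.go.
// "links", rr and handleRow are placeholders:
//
//	f := bigtable.ChainFilters(
//		bigtable.FamilyFilter("links"),
//		bigtable.LatestNFilter(1),
//	)
//	err := tbl.ReadRows(ctx, rr, handleRow, bigtable.RowFilter(f))
//
// This keeps only the newest cell of each column in the "links" family.)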
-func LatestNFilter(n int) Filter { return latestNFilter(n) } - -type latestNFilter int32 - -func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) } - -func (lnf latestNFilter) proto() *btdpb.RowFilter { - return &btdpb.RowFilter{Filter: &btdpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}} -} - -// StripValueFilter returns a filter that replaces each value with the empty string. -func StripValueFilter() Filter { return stripValueFilter{} } - -type stripValueFilter struct{} - -func (stripValueFilter) String() string { return "strip_value()" } -func (stripValueFilter) proto() *btdpb.RowFilter { - return &btdpb.RowFilter{Filter: &btdpb.RowFilter_StripValueTransformer{true}} -} - -// TODO(dsymonds): More filters: cond, col/ts/value range, sampling diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/gc.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/gc.go deleted file mode 100644 index 84499fcd27..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/gc.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2015 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package bigtable - -import ( - "fmt" - "strings" - "time" - - durpb "google.golang.org/cloud/bigtable/internal/duration_proto" - bttdpb "google.golang.org/cloud/bigtable/internal/table_data_proto" -) - -// A GCPolicy represents a rule that determines which cells are eligible for garbage collection. -type GCPolicy interface { - String() string - proto() *bttdpb.GcRule -} - -// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply. -func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} } - -type intersectionPolicy struct { - sub []GCPolicy -} - -func (ip intersectionPolicy) String() string { - var ss []string - for _, sp := range ip.sub { - ss = append(ss, sp.String()) - } - return "(" + strings.Join(ss, " && ") + ")" -} - -func (ip intersectionPolicy) proto() *bttdpb.GcRule { - inter := &bttdpb.GcRule_Intersection{} - for _, sp := range ip.sub { - inter.Rules = append(inter.Rules, sp.proto()) - } - return &bttdpb.GcRule{ - Rule: &bttdpb.GcRule_Intersection_{inter}, - } -} - -// UnionPolicy returns a GC policy that applies when any of its sub-policies apply. -func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} } - -type unionPolicy struct { - sub []GCPolicy -} - -func (up unionPolicy) String() string { - var ss []string - for _, sp := range up.sub { - ss = append(ss, sp.String()) - } - return "(" + strings.Join(ss, " || ") + ")" -} - -func (up unionPolicy) proto() *bttdpb.GcRule { - union := &bttdpb.GcRule_Union{} - for _, sp := range up.sub { - union.Rules = append(union.Rules, sp.proto()) - } - return &bttdpb.GcRule{ - Rule: &bttdpb.GcRule_Union_{union}, - } -} - -// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell -// except for the most recent n. 
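// (Sketch of composing the policies in this file, with illustrative
// limits; MaxAgePolicy is defined just below:
//
//	policy := bigtable.UnionPolicy(
//		bigtable.MaxVersionsPolicy(5),
//		bigtable.MaxAgePolicy(30*24*time.Hour),
//	)
//	// policy.String() == "(versions() > 5 || age() > 30d)"
//
// Cells matching either rule become eligible for garbage collection.)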
-func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) } - -type maxVersionsPolicy int - -func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) } - -func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule { - return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}} -} - -// MaxAgePolicy returns a GC policy that applies to all cells -// older than the given age. -func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) } - -type maxAgePolicy time.Duration - -var units = []struct { - d time.Duration - suffix string -}{ - {24 * time.Hour, "d"}, - {time.Hour, "h"}, - {time.Minute, "m"}, -} - -func (ma maxAgePolicy) String() string { - d := time.Duration(ma) - for _, u := range units { - if d%u.d == 0 { - return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix) - } - } - return fmt.Sprintf("age() > %d", d/time.Microsecond) -} - -func (ma maxAgePolicy) proto() *bttdpb.GcRule { - // This doesn't handle overflows, etc. - // Fix this if people care about GC policies over 290 years. - ns := time.Duration(ma).Nanoseconds() - return &bttdpb.GcRule{ - Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{ - Seconds: ns / 1e9, - Nanos: int32(ns % 1e9), - }}, - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cbtrc/cbtrc.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cbtrc/cbtrc.go deleted file mode 100644 index fec4ab3fd2..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cbtrc/cbtrc.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright 2015 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cbtrc encapsulates common code for reading .cbtrc files. -package cbtrc - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" -) - -// Config represents a configuration. -type Config struct { - Project, Zone, Cluster string // required - Creds string // optional -} - -// RegisterFlags registers a set of standard flags for this config. -// It should be called before flag.Parse. -func (c *Config) RegisterFlags() { - flag.StringVar(&c.Project, "project", c.Project, "project ID") - flag.StringVar(&c.Zone, "zone", c.Zone, "CBT zone") - flag.StringVar(&c.Cluster, "cluster", c.Cluster, "CBT cluster") - flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file") -} - -// CheckFlags checks that the required config values are set. -func (c *Config) CheckFlags() error { - var missing []string - if c.Project == "" { - missing = append(missing, "-project") - } - if c.Zone == "" { - missing = append(missing, "-zone") - } - if c.Cluster == "" { - missing = append(missing, "-cluster") - } - if len(missing) > 0 { - return fmt.Errorf("Missing %s", strings.Join(missing, " and ")) - } - return nil -} - -// Filename returns the filename consulted for standard configuration. -func Filename() string { - // TODO(dsymonds): Might need tweaking for Windows. 
- return filepath.Join(os.Getenv("HOME"), ".cbtrc") -} - -// Load loads a .cbtrc file. -// If the file is not present, an empty config is returned. -func Load() (*Config, error) { - filename := Filename() - data, err := ioutil.ReadFile(filename) - if err != nil { - // silent fail if the file isn't there - if os.IsNotExist(err) { - return &Config{}, nil - } - return nil, fmt.Errorf("Reading %s: %v", filename, err) - } - c := new(Config) - s := bufio.NewScanner(bytes.NewReader(data)) - for s.Scan() { - line := s.Text() - i := strings.Index(line, "=") - if i < 0 { - return nil, fmt.Errorf("Bad line in %s: %q", filename, line) - } - key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:]) - switch key { - default: - return nil, fmt.Errorf("Unknown key in %s: %q", filename, key) - case "project": - c.Project = val - case "zone": - c.Zone = val - case "cluster": - c.Cluster = val - case "creds": - c.Creds = val - } - } - return c, s.Err() -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.pb.go deleted file mode 100644 index 4d5a27c765..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.pb.go +++ /dev/null @@ -1,119 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto -// DO NOT EDIT! - -/* -Package google_bigtable_admin_cluster_v1 is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto - -It has these top-level messages: - Zone - Cluster -*/ -package google_bigtable_admin_cluster_v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type StorageType int32 - -const ( - // The storage type used is unspecified. - StorageType_STORAGE_UNSPECIFIED StorageType = 0 - // Data will be stored in SSD, providing low and consistent latencies. - StorageType_STORAGE_SSD StorageType = 1 -) - -var StorageType_name = map[int32]string{ - 0: "STORAGE_UNSPECIFIED", - 1: "STORAGE_SSD", -} -var StorageType_value = map[string]int32{ - "STORAGE_UNSPECIFIED": 0, - "STORAGE_SSD": 1, -} - -func (x StorageType) String() string { - return proto.EnumName(StorageType_name, int32(x)) -} - -// Possible states of a zone. -type Zone_Status int32 - -const ( - // The state of the zone is unknown or unspecified. - Zone_UNKNOWN Zone_Status = 0 - // The zone is in a good state. - Zone_OK Zone_Status = 1 - // The zone is down for planned maintenance. - Zone_PLANNED_MAINTENANCE Zone_Status = 2 - // The zone is down for emergency or unplanned maintenance. - Zone_EMERGENCY_MAINENANCE Zone_Status = 3 -) - -var Zone_Status_name = map[int32]string{ - 0: "UNKNOWN", - 1: "OK", - 2: "PLANNED_MAINTENANCE", - 3: "EMERGENCY_MAINENANCE", -} -var Zone_Status_value = map[string]int32{ - "UNKNOWN": 0, - "OK": 1, - "PLANNED_MAINTENANCE": 2, - "EMERGENCY_MAINENANCE": 3, -} - -func (x Zone_Status) String() string { - return proto.EnumName(Zone_Status_name, int32(x)) -} - -// A physical location in which a particular project can allocate Cloud BigTable -// resources. 
-type Zone struct { - // A permanent unique identifier for the zone. - // Values are of the form projects//zones/[a-z][-a-z0-9]* - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The name of this zone as it appears in UIs. - DisplayName string `protobuf:"bytes,2,opt,name=display_name" json:"display_name,omitempty"` - // The current state of this zone. - Status Zone_Status `protobuf:"varint,3,opt,name=status,enum=google.bigtable.admin.cluster.v1.Zone_Status" json:"status,omitempty"` -} - -func (m *Zone) Reset() { *m = Zone{} } -func (m *Zone) String() string { return proto.CompactTextString(m) } -func (*Zone) ProtoMessage() {} - -// An isolated set of Cloud BigTable resources on which tables can be hosted. -type Cluster struct { - // A permanent unique identifier for the cluster. For technical reasons, the - // zone in which the cluster resides is included here. - // Values are of the form - // projects//zones//clusters/[a-z][-a-z0-9]* - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The descriptive name for this cluster as it appears in UIs. - // Must be unique per zone. - DisplayName string `protobuf:"bytes,4,opt,name=display_name" json:"display_name,omitempty"` - // The number of serve nodes allocated to this cluster. - ServeNodes int32 `protobuf:"varint,5,opt,name=serve_nodes" json:"serve_nodes,omitempty"` - // What storage type to use for tables in this cluster. Only configurable at - // cluster creation time. If unspecified, STORAGE_SSD will be used. - DefaultStorageType StorageType `protobuf:"varint,8,opt,name=default_storage_type,enum=google.bigtable.admin.cluster.v1.StorageType" json:"default_storage_type,omitempty"` -} - -func (m *Cluster) Reset() { *m = Cluster{} } -func (m *Cluster) String() string { return proto.CompactTextString(m) } -func (*Cluster) ProtoMessage() {} - -func init() { - proto.RegisterEnum("google.bigtable.admin.cluster.v1.StorageType", StorageType_name, StorageType_value) - proto.RegisterEnum("google.bigtable.admin.cluster.v1.Zone_Status", Zone_Status_name, Zone_Status_value) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto deleted file mode 100644 index af39559b31..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - - -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterDataProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// A physical location in which a particular project can allocate Cloud BigTable -// resources. -message Zone { - // Possible states of a zone. 
- enum Status { - // The state of the zone is unknown or unspecified. - UNKNOWN = 0; - - // The zone is in a good state. - OK = 1; - - // The zone is down for planned maintenance. - PLANNED_MAINTENANCE = 2; - - // The zone is down for emergency or unplanned maintenance. - EMERGENCY_MAINENANCE = 3; - } - - // A permanent unique identifier for the zone. - // Values are of the form projects//zones/[a-z][-a-z0-9]* - string name = 1; - - // The name of this zone as it appears in UIs. - string display_name = 2; - - // The current state of this zone. - Status status = 3; -} - -// An isolated set of Cloud BigTable resources on which tables can be hosted. -message Cluster { - // A permanent unique identifier for the cluster. For technical reasons, the - // zone in which the cluster resides is included here. - // Values are of the form - // projects//zones//clusters/[a-z][-a-z0-9]* - string name = 1; - - // If this cluster has been deleted, the time at which its backup will - // be irrevocably destroyed. Omitted otherwise. - // This cannot be set directly, only through DeleteCluster. - - // The operation currently running on the cluster, if any. - // This cannot be set directly, only through CreateCluster, UpdateCluster, - // or UndeleteCluster. Calls to these methods will be rejected if - // "current_operation" is already set. - - // The descriptive name for this cluster as it appears in UIs. - // Must be unique per zone. - string display_name = 4; - - // The number of serve nodes allocated to this cluster. - int32 serve_nodes = 5; - - // What storage type to use for tables in this cluster. Only configurable at - // cluster creation time. If unspecified, STORAGE_SSD will be used. - StorageType default_storage_type = 8; -} - -enum StorageType { - // The storage type used is unspecified. - STORAGE_UNSPECIFIED = 0; - - // Data will be stored in SSD, providing low and consistent latencies. - STORAGE_SSD = 1; -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.pb.go deleted file mode 100644 index 8de4500a38..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.pb.go +++ /dev/null @@ -1,331 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto -// DO NOT EDIT! - -package google_bigtable_admin_cluster_v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_bigtable_admin_cluster_v11 "google.golang.org/cloud/bigtable/internal/cluster_data_proto" -import google_protobuf "google.golang.org/cloud/bigtable/internal/empty" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// Client API for BigtableClusterService service - -type BigtableClusterServiceClient interface { - // Lists the supported zones for the given project. - ListZones(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error) - // Gets information about a particular cluster. 
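The generated client deleted in this hunk turns each RPC into a grpc.Invoke call against the cluster-admin service. A minimal sketch of driving it, assuming an already-dialed, authenticated *grpc.ClientConn and a placeholder import alias clusterpb for this package:

package example

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	clusterpb "google.golang.org/cloud/bigtable/internal/cluster_service_proto"
)

// listZones prints the zones visible to a project; "projects/my-project"
// is a placeholder project name.
func listZones(conn *grpc.ClientConn) {
	client := clusterpb.NewBigtableClusterServiceClient(conn)
	resp, err := client.ListZones(context.Background(),
		&clusterpb.ListZonesRequest{Name: "projects/my-project"})
	if err != nil {
		log.Fatalf("ListZones: %v", err)
	}
	for _, zone := range resp.Zones {
		fmt.Printf("%s (%s)\n", zone.DisplayName, zone.Status)
	}
}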
- GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) - // Lists all clusters in the given project, along with any zones for which - // cluster information could not be retrieved. - ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) - // Creates a cluster and begins preparing it to begin serving. The returned - // cluster embeds as its "current_operation" a long-running operation which - // can be used to track the progress of turning up the new cluster. - // Immediately upon completion of this request: - // * The cluster will be readable via the API, with all requested attributes - // but no allocated resources. - // Until completion of the embedded operation: - // * Cancelling the operation will render the cluster immediately unreadable - // via the API. - // * All other attempts to modify or delete the cluster will be rejected. - // Upon completion of the embedded operation: - // * Billing for all successfully-allocated resources will begin (some types - // may have lower than the requested levels). - // * New tables can be created in the cluster. - // * The cluster's allocated resource levels will be readable via the API. - // The embedded operation's "metadata" field type is - // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) - // Updates a cluster, and begins allocating or releasing resources as - // requested. The returned cluster embeds as its "current_operation" a - // long-running operation which can be used to track the progress of updating - // the cluster. - // Immediately upon completion of this request: - // * For resource types where a decrease in the cluster's allocation has been - // requested, billing will be based on the newly-requested level. - // Until completion of the embedded operation: - // * Cancelling the operation will set its metadata's "cancelled_at_time", - // and begin restoring resources to their pre-request values. The operation - // is guaranteed to succeed at undoing all resource changes, after which - // point it will terminate with a CANCELLED status. - // * All other attempts to modify or delete the cluster will be rejected. - // * Reading the cluster via the API will continue to give the pre-request - // resource levels. - // Upon completion of the embedded operation: - // * Billing will begin for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources will be available for serving the cluster's - // tables. - // * The cluster's new resource levels will be readable via the API. - // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - UpdateCluster(ctx context.Context, in *google_bigtable_admin_cluster_v11.Cluster, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) - // Marks a cluster and all of its tables for permanent deletion in 7 days. - // Immediately upon completion of the request: - // * Billing will cease for all of the cluster's reserved resources. 
- // * The cluster's "delete_time" field will be set 7 days in the future. - // Soon afterward: - // * All tables within the cluster will become unavailable. - // Prior to the cluster's "delete_time": - // * The cluster can be recovered with a call to UndeleteCluster. - // * All other attempts to modify or delete the cluster will be rejected. - // At the cluster's "delete_time": - // * The cluster and *all of its tables* will immediately and irrevocably - // disappear from the API, and their data will be permanently deleted. - DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) -} - -type bigtableClusterServiceClient struct { - cc *grpc.ClientConn -} - -func NewBigtableClusterServiceClient(cc *grpc.ClientConn) BigtableClusterServiceClient { - return &bigtableClusterServiceClient{cc} -} - -func (c *bigtableClusterServiceClient) ListZones(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error) { - out := new(ListZonesResponse) - err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListZones", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableClusterServiceClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) { - out := new(google_bigtable_admin_cluster_v11.Cluster) - err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/GetCluster", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableClusterServiceClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { - out := new(ListClustersResponse) - err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListClusters", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableClusterServiceClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) { - out := new(google_bigtable_admin_cluster_v11.Cluster) - err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/CreateCluster", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableClusterServiceClient) UpdateCluster(ctx context.Context, in *google_bigtable_admin_cluster_v11.Cluster, opts ...grpc.CallOption) (*google_bigtable_admin_cluster_v11.Cluster, error) { - out := new(google_bigtable_admin_cluster_v11.Cluster) - err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/UpdateCluster", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableClusterServiceClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { - out := new(google_protobuf.Empty) - err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/DeleteCluster", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for BigtableClusterService service - -type BigtableClusterServiceServer interface { - // Lists the supported zones for the given project. - ListZones(context.Context, *ListZonesRequest) (*ListZonesResponse, error) - // Gets information about a particular cluster. 
- GetCluster(context.Context, *GetClusterRequest) (*google_bigtable_admin_cluster_v11.Cluster, error) - // Lists all clusters in the given project, along with any zones for which - // cluster information could not be retrieved. - ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) - // Creates a cluster and begins preparing it to begin serving. The returned - // cluster embeds as its "current_operation" a long-running operation which - // can be used to track the progress of turning up the new cluster. - // Immediately upon completion of this request: - // * The cluster will be readable via the API, with all requested attributes - // but no allocated resources. - // Until completion of the embedded operation: - // * Cancelling the operation will render the cluster immediately unreadable - // via the API. - // * All other attempts to modify or delete the cluster will be rejected. - // Upon completion of the embedded operation: - // * Billing for all successfully-allocated resources will begin (some types - // may have lower than the requested levels). - // * New tables can be created in the cluster. - // * The cluster's allocated resource levels will be readable via the API. - // The embedded operation's "metadata" field type is - // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - CreateCluster(context.Context, *CreateClusterRequest) (*google_bigtable_admin_cluster_v11.Cluster, error) - // Updates a cluster, and begins allocating or releasing resources as - // requested. The returned cluster embeds as its "current_operation" a - // long-running operation which can be used to track the progress of updating - // the cluster. - // Immediately upon completion of this request: - // * For resource types where a decrease in the cluster's allocation has been - // requested, billing will be based on the newly-requested level. - // Until completion of the embedded operation: - // * Cancelling the operation will set its metadata's "cancelled_at_time", - // and begin restoring resources to their pre-request values. The operation - // is guaranteed to succeed at undoing all resource changes, after which - // point it will terminate with a CANCELLED status. - // * All other attempts to modify or delete the cluster will be rejected. - // * Reading the cluster via the API will continue to give the pre-request - // resource levels. - // Upon completion of the embedded operation: - // * Billing will begin for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources will be available for serving the cluster's - // tables. - // * The cluster's new resource levels will be readable via the API. - // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - UpdateCluster(context.Context, *google_bigtable_admin_cluster_v11.Cluster) (*google_bigtable_admin_cluster_v11.Cluster, error) - // Marks a cluster and all of its tables for permanent deletion in 7 days. - // Immediately upon completion of the request: - // * Billing will cease for all of the cluster's reserved resources. - // * The cluster's "delete_time" field will be set 7 days in the future. 
- // Soon afterward: - // * All tables within the cluster will become unavailable. - // Prior to the cluster's "delete_time": - // * The cluster can be recovered with a call to UndeleteCluster. - // * All other attempts to modify or delete the cluster will be rejected. - // At the cluster's "delete_time": - // * The cluster and *all of its tables* will immediately and irrevocably - // disappear from the API, and their data will be permanently deleted. - DeleteCluster(context.Context, *DeleteClusterRequest) (*google_protobuf.Empty, error) -} - -func RegisterBigtableClusterServiceServer(s *grpc.Server, srv BigtableClusterServiceServer) { - s.RegisterService(&_BigtableClusterService_serviceDesc, srv) -} - -func _BigtableClusterService_ListZones_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(ListZonesRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableClusterServiceServer).ListZones(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableClusterService_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(GetClusterRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableClusterServiceServer).GetCluster(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableClusterService_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(ListClustersRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableClusterServiceServer).ListClusters(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableClusterService_CreateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(CreateClusterRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableClusterServiceServer).CreateCluster(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableClusterService_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(google_bigtable_admin_cluster_v11.Cluster) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableClusterServiceServer).UpdateCluster(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableClusterService_DeleteCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(DeleteClusterRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableClusterServiceServer).DeleteCluster(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -var _BigtableClusterService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.bigtable.admin.cluster.v1.BigtableClusterService", - HandlerType: (*BigtableClusterServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ListZones", - Handler: _BigtableClusterService_ListZones_Handler, - }, - { - MethodName: "GetCluster", - Handler: _BigtableClusterService_GetCluster_Handler, - }, - { - MethodName: "ListClusters", - Handler: _BigtableClusterService_ListClusters_Handler, - }, - { - MethodName: "CreateCluster", - Handler: _BigtableClusterService_CreateCluster_Handler, - }, - { - MethodName: "UpdateCluster", - Handler: 
_BigtableClusterService_UpdateCluster_Handler, - }, - { - MethodName: "DeleteCluster", - Handler: _BigtableClusterService_DeleteCluster_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto deleted file mode 100644 index 6243dcfb58..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto"; -import "google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto"; -import "google.golang.org/cloud/bigtable/internal/empty/empty.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServicesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Service for managing zonal Cloud Bigtable resources. -service BigtableClusterService { - // Lists the supported zones for the given project. - rpc ListZones(ListZonesRequest) returns (ListZonesResponse) { - } - - // Gets information about a particular cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - } - - // Lists all clusters in the given project, along with any zones for which - // cluster information could not be retrieved. - rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - } - - // Creates a cluster and begins preparing it to begin serving. The returned - // cluster embeds as its "current_operation" a long-running operation which - // can be used to track the progress of turning up the new cluster. - // Immediately upon completion of this request: - // * The cluster will be readable via the API, with all requested attributes - // but no allocated resources. - // Until completion of the embedded operation: - // * Cancelling the operation will render the cluster immediately unreadable - // via the API. - // * All other attempts to modify or delete the cluster will be rejected. - // Upon completion of the embedded operation: - // * Billing for all successfully-allocated resources will begin (some types - // may have lower than the requested levels). - // * New tables can be created in the cluster. - // * The cluster's allocated resource levels will be readable via the API. - // The embedded operation's "metadata" field type is - // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. 
- rpc CreateCluster(CreateClusterRequest) returns (Cluster) { - } - - // Updates a cluster, and begins allocating or releasing resources as - // requested. The returned cluster embeds as its "current_operation" a - // long-running operation which can be used to track the progress of updating - // the cluster. - // Immediately upon completion of this request: - // * For resource types where a decrease in the cluster's allocation has been - // requested, billing will be based on the newly-requested level. - // Until completion of the embedded operation: - // * Cancelling the operation will set its metadata's "cancelled_at_time", - // and begin restoring resources to their pre-request values. The operation - // is guaranteed to succeed at undoing all resource changes, after which - // point it will terminate with a CANCELLED status. - // * All other attempts to modify or delete the cluster will be rejected. - // * Reading the cluster via the API will continue to give the pre-request - // resource levels. - // Upon completion of the embedded operation: - // * Billing will begin for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources will be available for serving the cluster's - // tables. - // * The cluster's new resource levels will be readable via the API. - // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UpdateCluster(Cluster) returns (Cluster) { - } - - // Marks a cluster and all of its tables for permanent deletion in 7 days. - // Immediately upon completion of the request: - // * Billing will cease for all of the cluster's reserved resources. - // * The cluster's "delete_time" field will be set 7 days in the future. - // Soon afterward: - // * All tables within the cluster will become unavailable. - // Prior to the cluster's "delete_time": - // * The cluster can be recovered with a call to UndeleteCluster. - // * All other attempts to modify or delete the cluster will be rejected. - // At the cluster's "delete_time": - // * The cluster and *all of its tables* will immediately and irrevocably - // disappear from the API, and their data will be permanently deleted. - rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - } - - // Cancels the scheduled deletion of a cluster and begins preparing it to - // resume serving. The returned operation will also be embedded as the - // cluster's "current_operation". - // Immediately upon completion of this request: - // * The cluster's "delete_time" field will be unset, protecting it from - // automatic deletion. - // Until completion of the returned operation: - // * The operation cannot be cancelled. - // Upon completion of the returned operation: - // * Billing for the cluster's resources will resume. - // * All tables within the cluster will be available. - // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
-} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.pb.go deleted file mode 100644 index c60c14a84f..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.pb.go +++ /dev/null @@ -1,205 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto -// DO NOT EDIT! - -/* -Package google_bigtable_admin_cluster_v1 is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto - google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service.proto - -It has these top-level messages: - ListZonesRequest - ListZonesResponse - GetClusterRequest - ListClustersRequest - ListClustersResponse - CreateClusterRequest - CreateClusterMetadata - UpdateClusterMetadata - DeleteClusterRequest - UndeleteClusterRequest - UndeleteClusterMetadata -*/ -package google_bigtable_admin_cluster_v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_bigtable_admin_cluster_v11 "google.golang.org/cloud/bigtable/internal/cluster_data_proto" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// Request message for BigtableClusterService.ListZones. -type ListZonesRequest struct { - // The unique name of the project for which a list of supported zones is - // requested. - // Values are of the form projects/ - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *ListZonesRequest) Reset() { *m = ListZonesRequest{} } -func (m *ListZonesRequest) String() string { return proto.CompactTextString(m) } -func (*ListZonesRequest) ProtoMessage() {} - -// Response message for BigtableClusterService.ListZones. -type ListZonesResponse struct { - // The list of requested zones. - Zones []*google_bigtable_admin_cluster_v11.Zone `protobuf:"bytes,1,rep,name=zones" json:"zones,omitempty"` -} - -func (m *ListZonesResponse) Reset() { *m = ListZonesResponse{} } -func (m *ListZonesResponse) String() string { return proto.CompactTextString(m) } -func (*ListZonesResponse) ProtoMessage() {} - -func (m *ListZonesResponse) GetZones() []*google_bigtable_admin_cluster_v11.Zone { - if m != nil { - return m.Zones - } - return nil -} - -// Request message for BigtableClusterService.GetCluster. -type GetClusterRequest struct { - // The unique name of the requested cluster. - // Values are of the form projects//zones//clusters/ - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } -func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) } -func (*GetClusterRequest) ProtoMessage() {} - -// Request message for BigtableClusterService.ListClusters. -type ListClustersRequest struct { - // The unique name of the project for which a list of clusters is requested. 
- // Values are of the form projects/ - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } -func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) } -func (*ListClustersRequest) ProtoMessage() {} - -// Response message for BigtableClusterService.ListClusters. -type ListClustersResponse struct { - // The list of requested Clusters. - Clusters []*google_bigtable_admin_cluster_v11.Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"` - // The zones for which clusters could not be retrieved. - FailedZones []*google_bigtable_admin_cluster_v11.Zone `protobuf:"bytes,2,rep,name=failed_zones" json:"failed_zones,omitempty"` -} - -func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } -func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) } -func (*ListClustersResponse) ProtoMessage() {} - -func (m *ListClustersResponse) GetClusters() []*google_bigtable_admin_cluster_v11.Cluster { - if m != nil { - return m.Clusters - } - return nil -} - -func (m *ListClustersResponse) GetFailedZones() []*google_bigtable_admin_cluster_v11.Zone { - if m != nil { - return m.FailedZones - } - return nil -} - -// Request message for BigtableClusterService.CreateCluster. -type CreateClusterRequest struct { - // The unique name of the zone in which to create the cluster. - // Values are of the form projects//zones/ - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The id to be used when referring to the new cluster within its zone, - // e.g. just the "test-cluster" section of the full name - // "projects//zones//clusters/test-cluster". - ClusterId string `protobuf:"bytes,2,opt,name=cluster_id" json:"cluster_id,omitempty"` - // The cluster to create. - // The "name", "delete_time", and "current_operation" fields must be left - // blank. - Cluster *google_bigtable_admin_cluster_v11.Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"` -} - -func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} } -func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) } -func (*CreateClusterRequest) ProtoMessage() {} - -func (m *CreateClusterRequest) GetCluster() *google_bigtable_admin_cluster_v11.Cluster { - if m != nil { - return m.Cluster - } - return nil -} - -// Metadata type for the operation returned by -// BigtableClusterService.CreateCluster. -type CreateClusterMetadata struct { - // The request which prompted the creation of this operation. - OriginalRequest *CreateClusterRequest `protobuf:"bytes,1,opt,name=original_request" json:"original_request,omitempty"` -} - -func (m *CreateClusterMetadata) Reset() { *m = CreateClusterMetadata{} } -func (m *CreateClusterMetadata) String() string { return proto.CompactTextString(m) } -func (*CreateClusterMetadata) ProtoMessage() {} - -func (m *CreateClusterMetadata) GetOriginalRequest() *CreateClusterRequest { - if m != nil { - return m.OriginalRequest - } - return nil -} - -// Metadata type for the operation returned by -// BigtableClusterService.UpdateCluster. -type UpdateClusterMetadata struct { - // The request which prompted the creation of this operation. 
- OriginalRequest *google_bigtable_admin_cluster_v11.Cluster `protobuf:"bytes,1,opt,name=original_request" json:"original_request,omitempty"` -} - -func (m *UpdateClusterMetadata) Reset() { *m = UpdateClusterMetadata{} } -func (m *UpdateClusterMetadata) String() string { return proto.CompactTextString(m) } -func (*UpdateClusterMetadata) ProtoMessage() {} - -func (m *UpdateClusterMetadata) GetOriginalRequest() *google_bigtable_admin_cluster_v11.Cluster { - if m != nil { - return m.OriginalRequest - } - return nil -} - -// Request message for BigtableClusterService.DeleteCluster. -type DeleteClusterRequest struct { - // The unique name of the cluster to be deleted. - // Values are of the form projects//zones//clusters/ - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} } -func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteClusterRequest) ProtoMessage() {} - -// Request message for BigtableClusterService.UndeleteCluster. -type UndeleteClusterRequest struct { - // The unique name of the cluster to be un-deleted. - // Values are of the form projects//zones//clusters/ - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *UndeleteClusterRequest) Reset() { *m = UndeleteClusterRequest{} } -func (m *UndeleteClusterRequest) String() string { return proto.CompactTextString(m) } -func (*UndeleteClusterRequest) ProtoMessage() {} - -// Metadata type for the operation returned by -// BigtableClusterService.UndeleteCluster. -type UndeleteClusterMetadata struct { -} - -func (m *UndeleteClusterMetadata) Reset() { *m = UndeleteClusterMetadata{} } -func (m *UndeleteClusterMetadata) String() string { return proto.CompactTextString(m) } -func (*UndeleteClusterMetadata) ProtoMessage() {} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto deleted file mode 100644 index 2e5d4a72d7..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_service_proto/bigtable_cluster_service_messages.proto +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Request message for BigtableClusterService.ListZones. -message ListZonesRequest { - // The unique name of the project for which a list of supported zones is - // requested. 
- // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListZones. -message ListZonesResponse { - // The list of requested zones. - repeated Zone zones = 1; -} - -// Request message for BigtableClusterService.GetCluster. -message GetClusterRequest { - // The unique name of the requested cluster. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.ListClusters. -message ListClustersRequest { - // The unique name of the project for which a list of clusters is requested. - // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListClusters. -message ListClustersResponse { - // The list of requested Clusters. - repeated Cluster clusters = 1; - - // The zones for which clusters could not be retrieved. - repeated Zone failed_zones = 2; -} - -// Request message for BigtableClusterService.CreateCluster. -message CreateClusterRequest { - // The unique name of the zone in which to create the cluster. - // Values are of the form projects//zones/ - string name = 1; - - // The id to be used when referring to the new cluster within its zone, - // e.g. just the "test-cluster" section of the full name - // "projects//zones//clusters/test-cluster". - string cluster_id = 2; - - // The cluster to create. - // The "name", "delete_time", and "current_operation" fields must be left - // blank. - Cluster cluster = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.CreateCluster. -message CreateClusterMetadata { - // The request which prompted the creation of this operation. - CreateClusterRequest original_request = 1; - - // The time at which original_request was received. - - // The time at which this operation failed or was completed successfully. -} - -// Metadata type for the operation returned by -// BigtableClusterService.UpdateCluster. -message UpdateClusterMetadata { - // The request which prompted the creation of this operation. - Cluster original_request = 1; - - // The time at which original_request was received. - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. - - // The time at which this operation failed or was completed successfully. -} - -// Request message for BigtableClusterService.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.UndeleteCluster. -message UndeleteClusterRequest { - // The unique name of the cluster to be un-deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UndeleteCluster. -message UndeleteClusterMetadata { - // The time at which the original request was received. - - // The time at which this operation failed or was completed successfully. 
-} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.pb.go deleted file mode 100644 index c68471ac58..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.pb.go +++ /dev/null @@ -1,1462 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto -// DO NOT EDIT! - -/* -Package google_bigtable_v1 is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto - -It has these top-level messages: - Row - Family - Column - Cell - RowRange - ColumnRange - TimestampRange - ValueRange - RowFilter - Mutation - ReadModifyWriteRule -*/ -package google_bigtable_v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// Specifies the complete (requested) contents of a single row of a table. -// Rows which exceed 256MiB in size cannot be read in full. -type Row struct { - // The unique key which identifies this row within its table. This is the same - // key that's used to identify the row in, for example, a MutateRowRequest. - // May contain any non-empty byte string up to 4KiB in length. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // May be empty, but only if the entire row is empty. - // The mutual ordering of column families is not specified. - Families []*Family `protobuf:"bytes,2,rep,name=families" json:"families,omitempty"` -} - -func (m *Row) Reset() { *m = Row{} } -func (m *Row) String() string { return proto.CompactTextString(m) } -func (*Row) ProtoMessage() {} - -func (m *Row) GetFamilies() []*Family { - if m != nil { - return m.Families - } - return nil -} - -// Specifies (some of) the contents of a single row/column family of a table. -type Family struct { - // The unique key which identifies this family within its row. This is the - // same key that's used to identify the family in, for example, a RowFilter - // which sets its "family_name_regex_filter" field. - // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may - // produce cells in a sentinel family with an empty name. - // Must be no greater than 64 characters in length. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Must not be empty. Sorted in order of increasing "qualifier". - Columns []*Column `protobuf:"bytes,2,rep,name=columns" json:"columns,omitempty"` -} - -func (m *Family) Reset() { *m = Family{} } -func (m *Family) String() string { return proto.CompactTextString(m) } -func (*Family) ProtoMessage() {} - -func (m *Family) GetColumns() []*Column { - if m != nil { - return m.Columns - } - return nil -} - -// Specifies (some of) the contents of a single row/column of a table. -type Column struct { - // The unique key which identifies this column within its family. This is the - // same key that's used to identify the column in, for example, a RowFilter - // which sets its "column_qualifier_regex_filter" field. - // May contain any byte string, including the empty string, up to 16kiB in - // length. - Qualifier []byte `protobuf:"bytes,1,opt,name=qualifier,proto3" json:"qualifier,omitempty"` - // Must not be empty. 
Sorted in order of decreasing "timestamp_micros". - Cells []*Cell `protobuf:"bytes,2,rep,name=cells" json:"cells,omitempty"` -} - -func (m *Column) Reset() { *m = Column{} } -func (m *Column) String() string { return proto.CompactTextString(m) } -func (*Column) ProtoMessage() {} - -func (m *Column) GetCells() []*Cell { - if m != nil { - return m.Cells - } - return nil -} - -// Specifies (some of) the contents of a single row/column/timestamp of a table. -type Cell struct { - // The cell's stored timestamp, which also uniquely identifies it within - // its column. - // Values are always expressed in microseconds, but individual tables may set - // a coarser "granularity" to further restrict the allowed values. For - // example, a table which specifies millisecond granularity will only allow - // values of "timestamp_micros" which are multiples of 1000. - TimestampMicros int64 `protobuf:"varint,1,opt,name=timestamp_micros" json:"timestamp_micros,omitempty"` - // The value stored in the cell. - // May contain any byte string, including the empty string, up to 100MiB in - // length. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter]. - Labels []string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty"` -} - -func (m *Cell) Reset() { *m = Cell{} } -func (m *Cell) String() string { return proto.CompactTextString(m) } -func (*Cell) ProtoMessage() {} - -// Specifies a contiguous range of rows. -type RowRange struct { - // Inclusive lower bound. If left empty, interpreted as the empty string. - StartKey []byte `protobuf:"bytes,2,opt,name=start_key,proto3" json:"start_key,omitempty"` - // Exclusive upper bound. If left empty, interpreted as infinity. - EndKey []byte `protobuf:"bytes,3,opt,name=end_key,proto3" json:"end_key,omitempty"` -} - -func (m *RowRange) Reset() { *m = RowRange{} } -func (m *RowRange) String() string { return proto.CompactTextString(m) } -func (*RowRange) ProtoMessage() {} - -// Specifies a contiguous range of columns within a single column family. -// The range spans from : to -// :, where both bounds can be either inclusive or -// exclusive. -type ColumnRange struct { - // The name of the column family within which this range falls. - FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` - // The column qualifier at which to start the range (within 'column_family'). - // If neither field is set, interpreted as the empty string, inclusive. - // - // Types that are valid to be assigned to StartQualifier: - // *ColumnRange_StartQualifierInclusive - // *ColumnRange_StartQualifierExclusive - StartQualifier isColumnRange_StartQualifier `protobuf_oneof:"start_qualifier"` - // The column qualifier at which to end the range (within 'column_family'). - // If neither field is set, interpreted as the infinite string, exclusive. 
- // - // Types that are valid to be assigned to EndQualifier: - // *ColumnRange_EndQualifierInclusive - // *ColumnRange_EndQualifierExclusive - EndQualifier isColumnRange_EndQualifier `protobuf_oneof:"end_qualifier"` -} - -func (m *ColumnRange) Reset() { *m = ColumnRange{} } -func (m *ColumnRange) String() string { return proto.CompactTextString(m) } -func (*ColumnRange) ProtoMessage() {} - -type isColumnRange_StartQualifier interface { - isColumnRange_StartQualifier() -} -type isColumnRange_EndQualifier interface { - isColumnRange_EndQualifier() -} - -type ColumnRange_StartQualifierInclusive struct { - StartQualifierInclusive []byte `protobuf:"bytes,2,opt,name=start_qualifier_inclusive,proto3,oneof"` -} -type ColumnRange_StartQualifierExclusive struct { - StartQualifierExclusive []byte `protobuf:"bytes,3,opt,name=start_qualifier_exclusive,proto3,oneof"` -} -type ColumnRange_EndQualifierInclusive struct { - EndQualifierInclusive []byte `protobuf:"bytes,4,opt,name=end_qualifier_inclusive,proto3,oneof"` -} -type ColumnRange_EndQualifierExclusive struct { - EndQualifierExclusive []byte `protobuf:"bytes,5,opt,name=end_qualifier_exclusive,proto3,oneof"` -} - -func (*ColumnRange_StartQualifierInclusive) isColumnRange_StartQualifier() {} -func (*ColumnRange_StartQualifierExclusive) isColumnRange_StartQualifier() {} -func (*ColumnRange_EndQualifierInclusive) isColumnRange_EndQualifier() {} -func (*ColumnRange_EndQualifierExclusive) isColumnRange_EndQualifier() {} - -func (m *ColumnRange) GetStartQualifier() isColumnRange_StartQualifier { - if m != nil { - return m.StartQualifier - } - return nil -} -func (m *ColumnRange) GetEndQualifier() isColumnRange_EndQualifier { - if m != nil { - return m.EndQualifier - } - return nil -} - -func (m *ColumnRange) GetStartQualifierInclusive() []byte { - if x, ok := m.GetStartQualifier().(*ColumnRange_StartQualifierInclusive); ok { - return x.StartQualifierInclusive - } - return nil -} - -func (m *ColumnRange) GetStartQualifierExclusive() []byte { - if x, ok := m.GetStartQualifier().(*ColumnRange_StartQualifierExclusive); ok { - return x.StartQualifierExclusive - } - return nil -} - -func (m *ColumnRange) GetEndQualifierInclusive() []byte { - if x, ok := m.GetEndQualifier().(*ColumnRange_EndQualifierInclusive); ok { - return x.EndQualifierInclusive - } - return nil -} - -func (m *ColumnRange) GetEndQualifierExclusive() []byte { - if x, ok := m.GetEndQualifier().(*ColumnRange_EndQualifierExclusive); ok { - return x.EndQualifierExclusive - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
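Each oneof case above is set by assigning its generated wrapper struct, and read back through a typed getter that returns the zero value when a different case is active. A minimal sketch, assuming it sits in the same package as these generated types; the family name and qualifier bounds are placeholders:

import "fmt"

// exampleColumnRange selects qualifiers in ["a", "m") within family "cf1".
func exampleColumnRange() {
	cr := &ColumnRange{
		FamilyName:     "cf1",
		StartQualifier: &ColumnRange_StartQualifierInclusive{StartQualifierInclusive: []byte("a")},
		EndQualifier:   &ColumnRange_EndQualifierExclusive{EndQualifierExclusive: []byte("m")},
	}
	if start := cr.GetStartQualifierInclusive(); start != nil {
		fmt.Printf("start bound %q is inclusive\n", start)
	}
}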
-func (*ColumnRange) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _ColumnRange_OneofMarshaler, _ColumnRange_OneofUnmarshaler, []interface{}{ - (*ColumnRange_StartQualifierInclusive)(nil), - (*ColumnRange_StartQualifierExclusive)(nil), - (*ColumnRange_EndQualifierInclusive)(nil), - (*ColumnRange_EndQualifierExclusive)(nil), - } -} - -func _ColumnRange_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ColumnRange) - // start_qualifier - switch x := m.StartQualifier.(type) { - case *ColumnRange_StartQualifierInclusive: - b.EncodeVarint(2<<3 | proto.WireBytes) - b.EncodeRawBytes(x.StartQualifierInclusive) - case *ColumnRange_StartQualifierExclusive: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeRawBytes(x.StartQualifierExclusive) - case nil: - default: - return fmt.Errorf("ColumnRange.StartQualifier has unexpected type %T", x) - } - // end_qualifier - switch x := m.EndQualifier.(type) { - case *ColumnRange_EndQualifierInclusive: - b.EncodeVarint(4<<3 | proto.WireBytes) - b.EncodeRawBytes(x.EndQualifierInclusive) - case *ColumnRange_EndQualifierExclusive: - b.EncodeVarint(5<<3 | proto.WireBytes) - b.EncodeRawBytes(x.EndQualifierExclusive) - case nil: - default: - return fmt.Errorf("ColumnRange.EndQualifier has unexpected type %T", x) - } - return nil -} - -func _ColumnRange_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ColumnRange) - switch tag { - case 2: // start_qualifier.start_qualifier_inclusive - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.StartQualifier = &ColumnRange_StartQualifierInclusive{x} - return true, err - case 3: // start_qualifier.start_qualifier_exclusive - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.StartQualifier = &ColumnRange_StartQualifierExclusive{x} - return true, err - case 4: // end_qualifier.end_qualifier_inclusive - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.EndQualifier = &ColumnRange_EndQualifierInclusive{x} - return true, err - case 5: // end_qualifier.end_qualifier_exclusive - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.EndQualifier = &ColumnRange_EndQualifierExclusive{x} - return true, err - default: - return false, nil - } -} - -// Specifies a contiguous range of microsecond timestamps. -type TimestampRange struct { - // Inclusive lower bound. If left empty, interpreted as 0. - StartTimestampMicros int64 `protobuf:"varint,1,opt,name=start_timestamp_micros" json:"start_timestamp_micros,omitempty"` - // Exclusive upper bound. If left empty, interpreted as infinity. - EndTimestampMicros int64 `protobuf:"varint,2,opt,name=end_timestamp_micros" json:"end_timestamp_micros,omitempty"` -} - -func (m *TimestampRange) Reset() { *m = TimestampRange{} } -func (m *TimestampRange) String() string { return proto.CompactTextString(m) } -func (*TimestampRange) ProtoMessage() {} - -// Specifies a contiguous range of raw byte values. -type ValueRange struct { - // The value at which to start the range. - // If neither field is set, interpreted as the empty string, inclusive.
- // - // Types that are valid to be assigned to StartValue: - // *ValueRange_StartValueInclusive - // *ValueRange_StartValueExclusive - StartValue isValueRange_StartValue `protobuf_oneof:"start_value"` - // The value at which to end the range. - // If neither field is set, interpreted as the infinite string, exclusive. - // - // Types that are valid to be assigned to EndValue: - // *ValueRange_EndValueInclusive - // *ValueRange_EndValueExclusive - EndValue isValueRange_EndValue `protobuf_oneof:"end_value"` -} - -func (m *ValueRange) Reset() { *m = ValueRange{} } -func (m *ValueRange) String() string { return proto.CompactTextString(m) } -func (*ValueRange) ProtoMessage() {} - -type isValueRange_StartValue interface { - isValueRange_StartValue() -} -type isValueRange_EndValue interface { - isValueRange_EndValue() -} - -type ValueRange_StartValueInclusive struct { - StartValueInclusive []byte `protobuf:"bytes,1,opt,name=start_value_inclusive,proto3,oneof"` -} -type ValueRange_StartValueExclusive struct { - StartValueExclusive []byte `protobuf:"bytes,2,opt,name=start_value_exclusive,proto3,oneof"` -} -type ValueRange_EndValueInclusive struct { - EndValueInclusive []byte `protobuf:"bytes,3,opt,name=end_value_inclusive,proto3,oneof"` -} -type ValueRange_EndValueExclusive struct { - EndValueExclusive []byte `protobuf:"bytes,4,opt,name=end_value_exclusive,proto3,oneof"` -} - -func (*ValueRange_StartValueInclusive) isValueRange_StartValue() {} -func (*ValueRange_StartValueExclusive) isValueRange_StartValue() {} -func (*ValueRange_EndValueInclusive) isValueRange_EndValue() {} -func (*ValueRange_EndValueExclusive) isValueRange_EndValue() {} - -func (m *ValueRange) GetStartValue() isValueRange_StartValue { - if m != nil { - return m.StartValue - } - return nil -} -func (m *ValueRange) GetEndValue() isValueRange_EndValue { - if m != nil { - return m.EndValue - } - return nil -} - -func (m *ValueRange) GetStartValueInclusive() []byte { - if x, ok := m.GetStartValue().(*ValueRange_StartValueInclusive); ok { - return x.StartValueInclusive - } - return nil -} - -func (m *ValueRange) GetStartValueExclusive() []byte { - if x, ok := m.GetStartValue().(*ValueRange_StartValueExclusive); ok { - return x.StartValueExclusive - } - return nil -} - -func (m *ValueRange) GetEndValueInclusive() []byte { - if x, ok := m.GetEndValue().(*ValueRange_EndValueInclusive); ok { - return x.EndValueInclusive - } - return nil -} - -func (m *ValueRange) GetEndValueExclusive() []byte { - if x, ok := m.GetEndValue().(*ValueRange_EndValueExclusive); ok { - return x.EndValueExclusive - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*ValueRange) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _ValueRange_OneofMarshaler, _ValueRange_OneofUnmarshaler, []interface{}{ - (*ValueRange_StartValueInclusive)(nil), - (*ValueRange_StartValueExclusive)(nil), - (*ValueRange_EndValueInclusive)(nil), - (*ValueRange_EndValueExclusive)(nil), - } -} - -func _ValueRange_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ValueRange) - // start_value - switch x := m.StartValue.(type) { - case *ValueRange_StartValueInclusive: - b.EncodeVarint(1<<3 | proto.WireBytes) - b.EncodeRawBytes(x.StartValueInclusive) - case *ValueRange_StartValueExclusive: - b.EncodeVarint(2<<3 | proto.WireBytes) - b.EncodeRawBytes(x.StartValueExclusive) - case nil: - default: - return fmt.Errorf("ValueRange.StartValue has unexpected type %T", x) - } - // end_value - switch x := m.EndValue.(type) { - case *ValueRange_EndValueInclusive: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeRawBytes(x.EndValueInclusive) - case *ValueRange_EndValueExclusive: - b.EncodeVarint(4<<3 | proto.WireBytes) - b.EncodeRawBytes(x.EndValueExclusive) - case nil: - default: - return fmt.Errorf("ValueRange.EndValue has unexpected type %T", x) - } - return nil -} - -func _ValueRange_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ValueRange) - switch tag { - case 1: // start_value.start_value_inclusive - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.StartValue = &ValueRange_StartValueInclusive{x} - return true, err - case 2: // start_value.start_value_exclusive - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.StartValue = &ValueRange_StartValueExclusive{x} - return true, err - case 3: // end_value.end_value_inclusive - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.EndValue = &ValueRange_EndValueInclusive{x} - return true, err - case 4: // end_value.end_value_exclusive - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.EndValue = &ValueRange_EndValueExclusive{x} - return true, err - default: - return false, nil - } -} - -// Takes a row as input and produces an alternate view of the row based on -// specified rules. For example, a RowFilter might trim down a row to include -// just the cells from columns matching a given regular expression, or might -// return all the cells of a row but not their values. More complicated filters -// can be composed out of these components to express requests such as, "within -// every column of a particular family, give just the two most recent cells -// which are older than timestamp X." -// -// There are two broad categories of RowFilters (true filters and transformers), -// as well as two ways to compose simple filters into more complex ones -// (chains and interleaves). They work as follows: -// -// * True filters alter the input row by excluding some of its cells wholesale -// from the output row. An example of a true filter is the "value_regex_filter", -// which excludes cells whose values don't match the specified pattern. All -// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) -// in raw byte mode (RE2::Latin1), and are evaluated as full matches. 
An -// important point to keep in mind is that RE2(.) is equivalent by default to -// RE2([^\n]), meaning that it does not match newlines. When attempting to match -// an arbitrary byte, you should therefore use the escape sequence '\C', which -// may need to be further escaped as '\\C' in your client language. -// -// * Transformers alter the input row by changing the values of some of its -// cells in the output, without excluding them completely. Currently, the only -// supported transformer is the "strip_value_transformer", which replaces every -// cell's value with the empty string. -// -// * Chains and interleaves are described in more detail in the -// RowFilter.Chain and RowFilter.Interleave documentation. -// -// The total serialized size of a RowFilter message must not -// exceed 4096 bytes, and RowFilters may not be nested within each other -// (in Chains or Interleaves) to a depth of more than 20. -type RowFilter struct { - // Which of the possible RowFilter types to apply. If none are set, this - // RowFilter returns all cells in the input row. - // - // Types that are valid to be assigned to Filter: - // *RowFilter_Chain_ - // *RowFilter_Interleave_ - // *RowFilter_Condition_ - // *RowFilter_Sink - // *RowFilter_PassAllFilter - // *RowFilter_BlockAllFilter - // *RowFilter_RowKeyRegexFilter - // *RowFilter_RowSampleFilter - // *RowFilter_FamilyNameRegexFilter - // *RowFilter_ColumnQualifierRegexFilter - // *RowFilter_ColumnRangeFilter - // *RowFilter_TimestampRangeFilter - // *RowFilter_ValueRegexFilter - // *RowFilter_ValueRangeFilter - // *RowFilter_CellsPerRowOffsetFilter - // *RowFilter_CellsPerRowLimitFilter - // *RowFilter_CellsPerColumnLimitFilter - // *RowFilter_StripValueTransformer - // *RowFilter_ApplyLabelTransformer - Filter isRowFilter_Filter `protobuf_oneof:"filter"` -} - -func (m *RowFilter) Reset() { *m = RowFilter{} } -func (m *RowFilter) String() string { return proto.CompactTextString(m) } -func (*RowFilter) ProtoMessage() {} - -type isRowFilter_Filter interface { - isRowFilter_Filter() -} - -type RowFilter_Chain_ struct { - Chain *RowFilter_Chain `protobuf:"bytes,1,opt,name=chain,oneof"` -} -type RowFilter_Interleave_ struct { - Interleave *RowFilter_Interleave `protobuf:"bytes,2,opt,name=interleave,oneof"` -} -type RowFilter_Condition_ struct { - Condition *RowFilter_Condition `protobuf:"bytes,3,opt,name=condition,oneof"` -} -type RowFilter_Sink struct { - Sink bool `protobuf:"varint,16,opt,name=sink,oneof"` -} -type RowFilter_PassAllFilter struct { - PassAllFilter bool `protobuf:"varint,17,opt,name=pass_all_filter,oneof"` -} -type RowFilter_BlockAllFilter struct { - BlockAllFilter bool `protobuf:"varint,18,opt,name=block_all_filter,oneof"` -} -type RowFilter_RowKeyRegexFilter struct { - RowKeyRegexFilter []byte `protobuf:"bytes,4,opt,name=row_key_regex_filter,proto3,oneof"` -} -type RowFilter_RowSampleFilter struct { - RowSampleFilter float64 `protobuf:"fixed64,14,opt,name=row_sample_filter,oneof"` -} -type RowFilter_FamilyNameRegexFilter struct { - FamilyNameRegexFilter string `protobuf:"bytes,5,opt,name=family_name_regex_filter,oneof"` -} -type RowFilter_ColumnQualifierRegexFilter struct { - ColumnQualifierRegexFilter []byte `protobuf:"bytes,6,opt,name=column_qualifier_regex_filter,proto3,oneof"` -} -type RowFilter_ColumnRangeFilter struct { - ColumnRangeFilter *ColumnRange `protobuf:"bytes,7,opt,name=column_range_filter,oneof"` -} -type RowFilter_TimestampRangeFilter struct { - TimestampRangeFilter *TimestampRange 
`protobuf:"bytes,8,opt,name=timestamp_range_filter,oneof"` -} -type RowFilter_ValueRegexFilter struct { - ValueRegexFilter []byte `protobuf:"bytes,9,opt,name=value_regex_filter,proto3,oneof"` -} -type RowFilter_ValueRangeFilter struct { - ValueRangeFilter *ValueRange `protobuf:"bytes,15,opt,name=value_range_filter,oneof"` -} -type RowFilter_CellsPerRowOffsetFilter struct { - CellsPerRowOffsetFilter int32 `protobuf:"varint,10,opt,name=cells_per_row_offset_filter,oneof"` -} -type RowFilter_CellsPerRowLimitFilter struct { - CellsPerRowLimitFilter int32 `protobuf:"varint,11,opt,name=cells_per_row_limit_filter,oneof"` -} -type RowFilter_CellsPerColumnLimitFilter struct { - CellsPerColumnLimitFilter int32 `protobuf:"varint,12,opt,name=cells_per_column_limit_filter,oneof"` -} -type RowFilter_StripValueTransformer struct { - StripValueTransformer bool `protobuf:"varint,13,opt,name=strip_value_transformer,oneof"` -} -type RowFilter_ApplyLabelTransformer struct { - ApplyLabelTransformer string `protobuf:"bytes,19,opt,name=apply_label_transformer,oneof"` -} - -func (*RowFilter_Chain_) isRowFilter_Filter() {} -func (*RowFilter_Interleave_) isRowFilter_Filter() {} -func (*RowFilter_Condition_) isRowFilter_Filter() {} -func (*RowFilter_Sink) isRowFilter_Filter() {} -func (*RowFilter_PassAllFilter) isRowFilter_Filter() {} -func (*RowFilter_BlockAllFilter) isRowFilter_Filter() {} -func (*RowFilter_RowKeyRegexFilter) isRowFilter_Filter() {} -func (*RowFilter_RowSampleFilter) isRowFilter_Filter() {} -func (*RowFilter_FamilyNameRegexFilter) isRowFilter_Filter() {} -func (*RowFilter_ColumnQualifierRegexFilter) isRowFilter_Filter() {} -func (*RowFilter_ColumnRangeFilter) isRowFilter_Filter() {} -func (*RowFilter_TimestampRangeFilter) isRowFilter_Filter() {} -func (*RowFilter_ValueRegexFilter) isRowFilter_Filter() {} -func (*RowFilter_ValueRangeFilter) isRowFilter_Filter() {} -func (*RowFilter_CellsPerRowOffsetFilter) isRowFilter_Filter() {} -func (*RowFilter_CellsPerRowLimitFilter) isRowFilter_Filter() {} -func (*RowFilter_CellsPerColumnLimitFilter) isRowFilter_Filter() {} -func (*RowFilter_StripValueTransformer) isRowFilter_Filter() {} -func (*RowFilter_ApplyLabelTransformer) isRowFilter_Filter() {} - -func (m *RowFilter) GetFilter() isRowFilter_Filter { - if m != nil { - return m.Filter - } - return nil -} - -func (m *RowFilter) GetChain() *RowFilter_Chain { - if x, ok := m.GetFilter().(*RowFilter_Chain_); ok { - return x.Chain - } - return nil -} - -func (m *RowFilter) GetInterleave() *RowFilter_Interleave { - if x, ok := m.GetFilter().(*RowFilter_Interleave_); ok { - return x.Interleave - } - return nil -} - -func (m *RowFilter) GetCondition() *RowFilter_Condition { - if x, ok := m.GetFilter().(*RowFilter_Condition_); ok { - return x.Condition - } - return nil -} - -func (m *RowFilter) GetSink() bool { - if x, ok := m.GetFilter().(*RowFilter_Sink); ok { - return x.Sink - } - return false -} - -func (m *RowFilter) GetPassAllFilter() bool { - if x, ok := m.GetFilter().(*RowFilter_PassAllFilter); ok { - return x.PassAllFilter - } - return false -} - -func (m *RowFilter) GetBlockAllFilter() bool { - if x, ok := m.GetFilter().(*RowFilter_BlockAllFilter); ok { - return x.BlockAllFilter - } - return false -} - -func (m *RowFilter) GetRowKeyRegexFilter() []byte { - if x, ok := m.GetFilter().(*RowFilter_RowKeyRegexFilter); ok { - return x.RowKeyRegexFilter - } - return nil -} - -func (m *RowFilter) GetRowSampleFilter() float64 { - if x, ok := m.GetFilter().(*RowFilter_RowSampleFilter); ok { - return 
x.RowSampleFilter - } - return 0 -} - -func (m *RowFilter) GetFamilyNameRegexFilter() string { - if x, ok := m.GetFilter().(*RowFilter_FamilyNameRegexFilter); ok { - return x.FamilyNameRegexFilter - } - return "" -} - -func (m *RowFilter) GetColumnQualifierRegexFilter() []byte { - if x, ok := m.GetFilter().(*RowFilter_ColumnQualifierRegexFilter); ok { - return x.ColumnQualifierRegexFilter - } - return nil -} - -func (m *RowFilter) GetColumnRangeFilter() *ColumnRange { - if x, ok := m.GetFilter().(*RowFilter_ColumnRangeFilter); ok { - return x.ColumnRangeFilter - } - return nil -} - -func (m *RowFilter) GetTimestampRangeFilter() *TimestampRange { - if x, ok := m.GetFilter().(*RowFilter_TimestampRangeFilter); ok { - return x.TimestampRangeFilter - } - return nil -} - -func (m *RowFilter) GetValueRegexFilter() []byte { - if x, ok := m.GetFilter().(*RowFilter_ValueRegexFilter); ok { - return x.ValueRegexFilter - } - return nil -} - -func (m *RowFilter) GetValueRangeFilter() *ValueRange { - if x, ok := m.GetFilter().(*RowFilter_ValueRangeFilter); ok { - return x.ValueRangeFilter - } - return nil -} - -func (m *RowFilter) GetCellsPerRowOffsetFilter() int32 { - if x, ok := m.GetFilter().(*RowFilter_CellsPerRowOffsetFilter); ok { - return x.CellsPerRowOffsetFilter - } - return 0 -} - -func (m *RowFilter) GetCellsPerRowLimitFilter() int32 { - if x, ok := m.GetFilter().(*RowFilter_CellsPerRowLimitFilter); ok { - return x.CellsPerRowLimitFilter - } - return 0 -} - -func (m *RowFilter) GetCellsPerColumnLimitFilter() int32 { - if x, ok := m.GetFilter().(*RowFilter_CellsPerColumnLimitFilter); ok { - return x.CellsPerColumnLimitFilter - } - return 0 -} - -func (m *RowFilter) GetStripValueTransformer() bool { - if x, ok := m.GetFilter().(*RowFilter_StripValueTransformer); ok { - return x.StripValueTransformer - } - return false -} - -func (m *RowFilter) GetApplyLabelTransformer() string { - if x, ok := m.GetFilter().(*RowFilter_ApplyLabelTransformer); ok { - return x.ApplyLabelTransformer - } - return "" -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
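The block above is the complete client-facing surface of the `filter` oneof: exactly one wrapper struct may be assigned to `Filter`, and each typed `Get*` accessor returns its arm's value only when that arm is set, and a zero value otherwise. A minimal usage sketch, assuming the generated package is importable from its vendored path and aliased as `btdpb` (the alias and the `main` harness are illustrative, not part of this diff):

```go
package main

import (
	"fmt"

	// Assumed vendored import path for the generated package in this hunk.
	btdpb "google.golang.org/cloud/bigtable/internal/data_proto"
)

func main() {
	// Chain two stages: keep only family "cf1", then keep the two most
	// recent cells per column. Each stage is itself a RowFilter whose
	// Filter field holds exactly one oneof wrapper.
	f := &btdpb.RowFilter{
		Filter: &btdpb.RowFilter_Chain_{
			Chain: &btdpb.RowFilter_Chain{
				Filters: []*btdpb.RowFilter{
					{Filter: &btdpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: "cf1"}},
					{Filter: &btdpb.RowFilter_CellsPerColumnLimitFilter{CellsPerColumnLimitFilter: 2}},
				},
			},
		},
	}

	// The typed getters probe the oneof without a type switch.
	if c := f.GetChain(); c != nil {
		fmt.Println(len(c.Filters)) // 2
	}
	fmt.Println(f.GetRowSampleFilter()) // 0: a different arm is set
}
```

The wrapper-per-arm design is what lets the type system reject a RowFilter with two arms set at once, which a flat struct of optional fields could not.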
-func (*RowFilter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _RowFilter_OneofMarshaler, _RowFilter_OneofUnmarshaler, []interface{}{ - (*RowFilter_Chain_)(nil), - (*RowFilter_Interleave_)(nil), - (*RowFilter_Condition_)(nil), - (*RowFilter_Sink)(nil), - (*RowFilter_PassAllFilter)(nil), - (*RowFilter_BlockAllFilter)(nil), - (*RowFilter_RowKeyRegexFilter)(nil), - (*RowFilter_RowSampleFilter)(nil), - (*RowFilter_FamilyNameRegexFilter)(nil), - (*RowFilter_ColumnQualifierRegexFilter)(nil), - (*RowFilter_ColumnRangeFilter)(nil), - (*RowFilter_TimestampRangeFilter)(nil), - (*RowFilter_ValueRegexFilter)(nil), - (*RowFilter_ValueRangeFilter)(nil), - (*RowFilter_CellsPerRowOffsetFilter)(nil), - (*RowFilter_CellsPerRowLimitFilter)(nil), - (*RowFilter_CellsPerColumnLimitFilter)(nil), - (*RowFilter_StripValueTransformer)(nil), - (*RowFilter_ApplyLabelTransformer)(nil), - } -} - -func _RowFilter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*RowFilter) - // filter - switch x := m.Filter.(type) { - case *RowFilter_Chain_: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Chain); err != nil { - return err - } - case *RowFilter_Interleave_: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Interleave); err != nil { - return err - } - case *RowFilter_Condition_: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Condition); err != nil { - return err - } - case *RowFilter_Sink: - t := uint64(0) - if x.Sink { - t = 1 - } - b.EncodeVarint(16<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *RowFilter_PassAllFilter: - t := uint64(0) - if x.PassAllFilter { - t = 1 - } - b.EncodeVarint(17<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *RowFilter_BlockAllFilter: - t := uint64(0) - if x.BlockAllFilter { - t = 1 - } - b.EncodeVarint(18<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *RowFilter_RowKeyRegexFilter: - b.EncodeVarint(4<<3 | proto.WireBytes) - b.EncodeRawBytes(x.RowKeyRegexFilter) - case *RowFilter_RowSampleFilter: - b.EncodeVarint(14<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.RowSampleFilter)) - case *RowFilter_FamilyNameRegexFilter: - b.EncodeVarint(5<<3 | proto.WireBytes) - b.EncodeStringBytes(x.FamilyNameRegexFilter) - case *RowFilter_ColumnQualifierRegexFilter: - b.EncodeVarint(6<<3 | proto.WireBytes) - b.EncodeRawBytes(x.ColumnQualifierRegexFilter) - case *RowFilter_ColumnRangeFilter: - b.EncodeVarint(7<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ColumnRangeFilter); err != nil { - return err - } - case *RowFilter_TimestampRangeFilter: - b.EncodeVarint(8<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.TimestampRangeFilter); err != nil { - return err - } - case *RowFilter_ValueRegexFilter: - b.EncodeVarint(9<<3 | proto.WireBytes) - b.EncodeRawBytes(x.ValueRegexFilter) - case *RowFilter_ValueRangeFilter: - b.EncodeVarint(15<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ValueRangeFilter); err != nil { - return err - } - case *RowFilter_CellsPerRowOffsetFilter: - b.EncodeVarint(10<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.CellsPerRowOffsetFilter)) - case *RowFilter_CellsPerRowLimitFilter: - b.EncodeVarint(11<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.CellsPerRowLimitFilter)) - case *RowFilter_CellsPerColumnLimitFilter: - b.EncodeVarint(12<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.CellsPerColumnLimitFilter)) - case 
*RowFilter_StripValueTransformer: - t := uint64(0) - if x.StripValueTransformer { - t = 1 - } - b.EncodeVarint(13<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *RowFilter_ApplyLabelTransformer: - b.EncodeVarint(19<<3 | proto.WireBytes) - b.EncodeStringBytes(x.ApplyLabelTransformer) - case nil: - default: - return fmt.Errorf("RowFilter.Filter has unexpected type %T", x) - } - return nil -} - -func _RowFilter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*RowFilter) - switch tag { - case 1: // filter.chain - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(RowFilter_Chain) - err := b.DecodeMessage(msg) - m.Filter = &RowFilter_Chain_{msg} - return true, err - case 2: // filter.interleave - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(RowFilter_Interleave) - err := b.DecodeMessage(msg) - m.Filter = &RowFilter_Interleave_{msg} - return true, err - case 3: // filter.condition - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(RowFilter_Condition) - err := b.DecodeMessage(msg) - m.Filter = &RowFilter_Condition_{msg} - return true, err - case 16: // filter.sink - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Filter = &RowFilter_Sink{x != 0} - return true, err - case 17: // filter.pass_all_filter - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Filter = &RowFilter_PassAllFilter{x != 0} - return true, err - case 18: // filter.block_all_filter - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Filter = &RowFilter_BlockAllFilter{x != 0} - return true, err - case 4: // filter.row_key_regex_filter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Filter = &RowFilter_RowKeyRegexFilter{x} - return true, err - case 14: // filter.row_sample_filter - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Filter = &RowFilter_RowSampleFilter{math.Float64frombits(x)} - return true, err - case 5: // filter.family_name_regex_filter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Filter = &RowFilter_FamilyNameRegexFilter{x} - return true, err - case 6: // filter.column_qualifier_regex_filter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Filter = &RowFilter_ColumnQualifierRegexFilter{x} - return true, err - case 7: // filter.column_range_filter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ColumnRange) - err := b.DecodeMessage(msg) - m.Filter = &RowFilter_ColumnRangeFilter{msg} - return true, err - case 8: // filter.timestamp_range_filter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(TimestampRange) - err := b.DecodeMessage(msg) - m.Filter = &RowFilter_TimestampRangeFilter{msg} - return true, err - case 9: // filter.value_regex_filter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Filter = &RowFilter_ValueRegexFilter{x} - return true, err - case 15: // filter.value_range_filter - if wire != proto.WireBytes { - return true, 
proto.ErrInternalBadWireType - } - msg := new(ValueRange) - err := b.DecodeMessage(msg) - m.Filter = &RowFilter_ValueRangeFilter{msg} - return true, err - case 10: // filter.cells_per_row_offset_filter - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Filter = &RowFilter_CellsPerRowOffsetFilter{int32(x)} - return true, err - case 11: // filter.cells_per_row_limit_filter - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Filter = &RowFilter_CellsPerRowLimitFilter{int32(x)} - return true, err - case 12: // filter.cells_per_column_limit_filter - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Filter = &RowFilter_CellsPerColumnLimitFilter{int32(x)} - return true, err - case 13: // filter.strip_value_transformer - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Filter = &RowFilter_StripValueTransformer{x != 0} - return true, err - case 19: // filter.apply_label_transformer - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Filter = &RowFilter_ApplyLabelTransformer{x} - return true, err - default: - return false, nil - } -} - -// A RowFilter which sends rows through several RowFilters in sequence. -type RowFilter_Chain struct { - // The elements of "filters" are chained together to process the input row: - // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row - // The full chain is executed atomically. - Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` -} - -func (m *RowFilter_Chain) Reset() { *m = RowFilter_Chain{} } -func (m *RowFilter_Chain) String() string { return proto.CompactTextString(m) } -func (*RowFilter_Chain) ProtoMessage() {} - -func (m *RowFilter_Chain) GetFilters() []*RowFilter { - if m != nil { - return m.Filters - } - return nil -} - -// A RowFilter which sends each row to each of several component -// RowFilters and interleaves the results. -type RowFilter_Interleave struct { - // The elements of "filters" all process a copy of the input row, and the - // results are pooled, sorted, and combined into a single output row. - // If multiple cells are produced with the same column and timestamp, - // they will all appear in the output row in an unspecified mutual order. - // Consider the following example, with three filters: - // - // input row - // | - // ----------------------------------------------------- - // | | | - // f(0) f(1) f(2) - // | | | - // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a - // 2: foo,blah,11,z far,blah,5,x far,blah,5,x - // | | | - // ----------------------------------------------------- - // | - // 1: foo,bar,10,z // could have switched with #2 - // 2: foo,bar,10,x // could have switched with #1 - // 3: foo,blah,11,z - // 4: far,bar,7,a - // 5: far,blah,5,x // identical to #6 - // 6: far,blah,5,x // identical to #5 - // All interleaved filters are executed atomically. 
- Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` -} - -func (m *RowFilter_Interleave) Reset() { *m = RowFilter_Interleave{} } -func (m *RowFilter_Interleave) String() string { return proto.CompactTextString(m) } -func (*RowFilter_Interleave) ProtoMessage() {} - -func (m *RowFilter_Interleave) GetFilters() []*RowFilter { - if m != nil { - return m.Filters - } - return nil -} - -// A RowFilter which evaluates one of two possible RowFilters, depending on -// whether or not a predicate RowFilter outputs any cells from the input row. -// -// IMPORTANT NOTE: The predicate filter does not execute atomically with the -// true and false filters, which may lead to inconsistent or unexpected -// results. Additionally, Condition filters have poor performance, especially -// when filters are set for the false condition. -type RowFilter_Condition struct { - // If "predicate_filter" outputs any cells, then "true_filter" will be - // evaluated on the input row. Otherwise, "false_filter" will be evaluated. - PredicateFilter *RowFilter `protobuf:"bytes,1,opt,name=predicate_filter" json:"predicate_filter,omitempty"` - // The filter to apply to the input row if "predicate_filter" returns any - // results. If not provided, no results will be returned in the true case. - TrueFilter *RowFilter `protobuf:"bytes,2,opt,name=true_filter" json:"true_filter,omitempty"` - // The filter to apply to the input row if "predicate_filter" does not - // return any results. If not provided, no results will be returned in the - // false case. - FalseFilter *RowFilter `protobuf:"bytes,3,opt,name=false_filter" json:"false_filter,omitempty"` -} - -func (m *RowFilter_Condition) Reset() { *m = RowFilter_Condition{} } -func (m *RowFilter_Condition) String() string { return proto.CompactTextString(m) } -func (*RowFilter_Condition) ProtoMessage() {} - -func (m *RowFilter_Condition) GetPredicateFilter() *RowFilter { - if m != nil { - return m.PredicateFilter - } - return nil -} - -func (m *RowFilter_Condition) GetTrueFilter() *RowFilter { - if m != nil { - return m.TrueFilter - } - return nil -} - -func (m *RowFilter_Condition) GetFalseFilter() *RowFilter { - if m != nil { - return m.FalseFilter - } - return nil -} - -// Specifies a particular change to be made to the contents of a row. -type Mutation struct { - // Which of the possible Mutation types to apply. 
- // - // Types that are valid to be assigned to Mutation: - // *Mutation_SetCell_ - // *Mutation_DeleteFromColumn_ - // *Mutation_DeleteFromFamily_ - // *Mutation_DeleteFromRow_ - Mutation isMutation_Mutation `protobuf_oneof:"mutation"` -} - -func (m *Mutation) Reset() { *m = Mutation{} } -func (m *Mutation) String() string { return proto.CompactTextString(m) } -func (*Mutation) ProtoMessage() {} - -type isMutation_Mutation interface { - isMutation_Mutation() -} - -type Mutation_SetCell_ struct { - SetCell *Mutation_SetCell `protobuf:"bytes,1,opt,name=set_cell,oneof"` -} -type Mutation_DeleteFromColumn_ struct { - DeleteFromColumn *Mutation_DeleteFromColumn `protobuf:"bytes,2,opt,name=delete_from_column,oneof"` -} -type Mutation_DeleteFromFamily_ struct { - DeleteFromFamily *Mutation_DeleteFromFamily `protobuf:"bytes,3,opt,name=delete_from_family,oneof"` -} -type Mutation_DeleteFromRow_ struct { - DeleteFromRow *Mutation_DeleteFromRow `protobuf:"bytes,4,opt,name=delete_from_row,oneof"` -} - -func (*Mutation_SetCell_) isMutation_Mutation() {} -func (*Mutation_DeleteFromColumn_) isMutation_Mutation() {} -func (*Mutation_DeleteFromFamily_) isMutation_Mutation() {} -func (*Mutation_DeleteFromRow_) isMutation_Mutation() {} - -func (m *Mutation) GetMutation() isMutation_Mutation { - if m != nil { - return m.Mutation - } - return nil -} - -func (m *Mutation) GetSetCell() *Mutation_SetCell { - if x, ok := m.GetMutation().(*Mutation_SetCell_); ok { - return x.SetCell - } - return nil -} - -func (m *Mutation) GetDeleteFromColumn() *Mutation_DeleteFromColumn { - if x, ok := m.GetMutation().(*Mutation_DeleteFromColumn_); ok { - return x.DeleteFromColumn - } - return nil -} - -func (m *Mutation) GetDeleteFromFamily() *Mutation_DeleteFromFamily { - if x, ok := m.GetMutation().(*Mutation_DeleteFromFamily_); ok { - return x.DeleteFromFamily - } - return nil -} - -func (m *Mutation) GetDeleteFromRow() *Mutation_DeleteFromRow { - if x, ok := m.GetMutation().(*Mutation_DeleteFromRow_); ok { - return x.DeleteFromRow - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
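The `Mutation` oneof follows the same wrapper pattern, and `proto.Marshal` routes through the `_Mutation_OneofMarshaler` defined just below, emitting the varint key `fieldNumber<<3 | wireType` followed by the embedded message. A hedged sketch of building the common single-cell write (the `btdpb` alias and the cell coordinates are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	// Assumed vendored import path for the generated package in this hunk.
	btdpb "google.golang.org/cloud/bigtable/internal/data_proto"
)

func main() {
	// The canonical single-cell write: set cf1:qual to "v" at server time.
	m := &btdpb.Mutation{
		Mutation: &btdpb.Mutation_SetCell_{
			SetCell: &btdpb.Mutation_SetCell{
				FamilyName:      "cf1",
				ColumnQualifier: []byte("qual"),
				TimestampMicros: -1, // per the SetCell docs: current Bigtable server time
				Value:           []byte("v"),
			},
		},
	}

	// Field 1 (set_cell), wire type 2 (bytes): the first key byte is
	// 1<<3|2 = 0x0a, followed by the length-delimited SetCell message.
	buf, err := proto.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Printf("wire bytes: % x\n", buf)
}
```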
-func (*Mutation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _Mutation_OneofMarshaler, _Mutation_OneofUnmarshaler, []interface{}{ - (*Mutation_SetCell_)(nil), - (*Mutation_DeleteFromColumn_)(nil), - (*Mutation_DeleteFromFamily_)(nil), - (*Mutation_DeleteFromRow_)(nil), - } -} - -func _Mutation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Mutation) - // mutation - switch x := m.Mutation.(type) { - case *Mutation_SetCell_: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.SetCell); err != nil { - return err - } - case *Mutation_DeleteFromColumn_: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.DeleteFromColumn); err != nil { - return err - } - case *Mutation_DeleteFromFamily_: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.DeleteFromFamily); err != nil { - return err - } - case *Mutation_DeleteFromRow_: - b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.DeleteFromRow); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Mutation.Mutation has unexpected type %T", x) - } - return nil -} - -func _Mutation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Mutation) - switch tag { - case 1: // mutation.set_cell - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Mutation_SetCell) - err := b.DecodeMessage(msg) - m.Mutation = &Mutation_SetCell_{msg} - return true, err - case 2: // mutation.delete_from_column - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Mutation_DeleteFromColumn) - err := b.DecodeMessage(msg) - m.Mutation = &Mutation_DeleteFromColumn_{msg} - return true, err - case 3: // mutation.delete_from_family - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Mutation_DeleteFromFamily) - err := b.DecodeMessage(msg) - m.Mutation = &Mutation_DeleteFromFamily_{msg} - return true, err - case 4: // mutation.delete_from_row - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Mutation_DeleteFromRow) - err := b.DecodeMessage(msg) - m.Mutation = &Mutation_DeleteFromRow_{msg} - return true, err - default: - return false, nil - } -} - -// A Mutation which sets the value of the specified cell. -type Mutation_SetCell struct { - // The name of the family into which new data should be written. - // Must match [-_.a-zA-Z0-9]+ - FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` - // The qualifier of the column into which new data should be written. - // Can be any byte string, including the empty string. - ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,proto3" json:"column_qualifier,omitempty"` - // The timestamp of the cell into which new data should be written. - // Use -1 for current Bigtable server time. - // Otherwise, the client should set this value itself, noting that the - // default value is a timestamp of zero if the field is left unspecified. - // Values must match the "granularity" of the table (e.g. micros, millis). - TimestampMicros int64 `protobuf:"varint,3,opt,name=timestamp_micros" json:"timestamp_micros,omitempty"` - // The value to be written into the specified cell. 
- Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *Mutation_SetCell) Reset() { *m = Mutation_SetCell{} } -func (m *Mutation_SetCell) String() string { return proto.CompactTextString(m) } -func (*Mutation_SetCell) ProtoMessage() {} - -// A Mutation which deletes cells from the specified column, optionally -// restricting the deletions to a given timestamp range. -type Mutation_DeleteFromColumn struct { - // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ - FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` - // The qualifier of the column from which cells should be deleted. - // Can be any byte string, including the empty string. - ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,proto3" json:"column_qualifier,omitempty"` - // The range of timestamps within which cells should be deleted. - TimeRange *TimestampRange `protobuf:"bytes,3,opt,name=time_range" json:"time_range,omitempty"` -} - -func (m *Mutation_DeleteFromColumn) Reset() { *m = Mutation_DeleteFromColumn{} } -func (m *Mutation_DeleteFromColumn) String() string { return proto.CompactTextString(m) } -func (*Mutation_DeleteFromColumn) ProtoMessage() {} - -func (m *Mutation_DeleteFromColumn) GetTimeRange() *TimestampRange { - if m != nil { - return m.TimeRange - } - return nil -} - -// A Mutation which deletes all cells from the specified column family. -type Mutation_DeleteFromFamily struct { - // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ - FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` -} - -func (m *Mutation_DeleteFromFamily) Reset() { *m = Mutation_DeleteFromFamily{} } -func (m *Mutation_DeleteFromFamily) String() string { return proto.CompactTextString(m) } -func (*Mutation_DeleteFromFamily) ProtoMessage() {} - -// A Mutation which deletes all cells from the containing row. -type Mutation_DeleteFromRow struct { -} - -func (m *Mutation_DeleteFromRow) Reset() { *m = Mutation_DeleteFromRow{} } -func (m *Mutation_DeleteFromRow) String() string { return proto.CompactTextString(m) } -func (*Mutation_DeleteFromRow) ProtoMessage() {} - -// Specifies an atomic read/modify/write operation on the latest value of the -// specified column. -type ReadModifyWriteRule struct { - // The name of the family to which the read/modify/write should be applied. - // Must match [-_.a-zA-Z0-9]+ - FamilyName string `protobuf:"bytes,1,opt,name=family_name" json:"family_name,omitempty"` - // The qualifier of the column to which the read/modify/write should be - // applied. - // Can be any byte string, including the empty string. - ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,proto3" json:"column_qualifier,omitempty"` - // The rule used to determine the column's new latest value from its current - // latest value. 
- // - // Types that are valid to be assigned to Rule: - // *ReadModifyWriteRule_AppendValue - // *ReadModifyWriteRule_IncrementAmount - Rule isReadModifyWriteRule_Rule `protobuf_oneof:"rule"` -} - -func (m *ReadModifyWriteRule) Reset() { *m = ReadModifyWriteRule{} } -func (m *ReadModifyWriteRule) String() string { return proto.CompactTextString(m) } -func (*ReadModifyWriteRule) ProtoMessage() {} - -type isReadModifyWriteRule_Rule interface { - isReadModifyWriteRule_Rule() -} - -type ReadModifyWriteRule_AppendValue struct { - AppendValue []byte `protobuf:"bytes,3,opt,name=append_value,proto3,oneof"` -} -type ReadModifyWriteRule_IncrementAmount struct { - IncrementAmount int64 `protobuf:"varint,4,opt,name=increment_amount,oneof"` -} - -func (*ReadModifyWriteRule_AppendValue) isReadModifyWriteRule_Rule() {} -func (*ReadModifyWriteRule_IncrementAmount) isReadModifyWriteRule_Rule() {} - -func (m *ReadModifyWriteRule) GetRule() isReadModifyWriteRule_Rule { - if m != nil { - return m.Rule - } - return nil -} - -func (m *ReadModifyWriteRule) GetAppendValue() []byte { - if x, ok := m.GetRule().(*ReadModifyWriteRule_AppendValue); ok { - return x.AppendValue - } - return nil -} - -func (m *ReadModifyWriteRule) GetIncrementAmount() int64 { - if x, ok := m.GetRule().(*ReadModifyWriteRule_IncrementAmount); ok { - return x.IncrementAmount - } - return 0 -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ReadModifyWriteRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _ReadModifyWriteRule_OneofMarshaler, _ReadModifyWriteRule_OneofUnmarshaler, []interface{}{ - (*ReadModifyWriteRule_AppendValue)(nil), - (*ReadModifyWriteRule_IncrementAmount)(nil), - } -} - -func _ReadModifyWriteRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ReadModifyWriteRule) - // rule - switch x := m.Rule.(type) { - case *ReadModifyWriteRule_AppendValue: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeRawBytes(x.AppendValue) - case *ReadModifyWriteRule_IncrementAmount: - b.EncodeVarint(4<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.IncrementAmount)) - case nil: - default: - return fmt.Errorf("ReadModifyWriteRule.Rule has unexpected type %T", x) - } - return nil -} - -func _ReadModifyWriteRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ReadModifyWriteRule) - switch tag { - case 3: // rule.append_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Rule = &ReadModifyWriteRule_AppendValue{x} - return true, err - case 4: // rule.increment_amount - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Rule = &ReadModifyWriteRule_IncrementAmount{int64(x)} - return true, err - default: - return false, nil - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto deleted file mode 100644 index 86234d22d5..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.bigtable.v1;
-
-option java_multiple_files = true;
-option java_outer_classname = "BigtableDataProto";
-option java_package = "com.google.bigtable.v1";
-
-
-// Specifies the complete (requested) contents of a single row of a table.
-// Rows which exceed 256MiB in size cannot be read in full.
-message Row {
-  // The unique key which identifies this row within its table. This is the same
-  // key that's used to identify the row in, for example, a MutateRowRequest.
-  // May contain any non-empty byte string up to 4KiB in length.
-  bytes key = 1;
-
-  // May be empty, but only if the entire row is empty.
-  // The mutual ordering of column families is not specified.
-  repeated Family families = 2;
-}
-
-// Specifies (some of) the contents of a single row/column family of a table.
-message Family {
-  // The unique key which identifies this family within its row. This is the
-  // same key that's used to identify the family in, for example, a RowFilter
-  // which sets its "family_name_regex_filter" field.
-  // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may
-  // produce cells in a sentinel family with an empty name.
-  // Must be no greater than 64 characters in length.
-  string name = 1;
-
-  // Must not be empty. Sorted in order of increasing "qualifier".
-  repeated Column columns = 2;
-}
-
-// Specifies (some of) the contents of a single row/column of a table.
-message Column {
-  // The unique key which identifies this column within its family. This is the
-  // same key that's used to identify the column in, for example, a RowFilter
-  // which sets its "column_qualifier_regex_filter" field.
-  // May contain any byte string, including the empty string, up to 16kiB in
-  // length.
-  bytes qualifier = 1;
-
-  // Must not be empty. Sorted in order of decreasing "timestamp_micros".
-  repeated Cell cells = 2;
-}
-
-// Specifies (some of) the contents of a single row/column/timestamp of a table.
-message Cell {
-  // The cell's stored timestamp, which also uniquely identifies it within
-  // its column.
-  // Values are always expressed in microseconds, but individual tables may set
-  // a coarser "granularity" to further restrict the allowed values. For
-  // example, a table which specifies millisecond granularity will only allow
-  // values of "timestamp_micros" which are multiples of 1000.
-  int64 timestamp_micros = 1;
-
-  // The value stored in the cell.
-  // May contain any byte string, including the empty string, up to 100MiB in
-  // length.
-  bytes value = 2;
-
-  // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter].
-  repeated string labels = 3;
-}
-
-// Specifies a contiguous range of rows.
-message RowRange {
-  // Inclusive lower bound. If left empty, interpreted as the empty string.
-  bytes start_key = 2;
-
-  // Exclusive upper bound. If left empty, interpreted as infinity.
-  bytes end_key = 3;
-}
-
-// Specifies a contiguous range of columns within a single column family.
-// The range spans from <family_name>:<start_qualifier> to
-// <family_name>:<end_qualifier>, where both bounds can be either inclusive or
-// exclusive.
-message ColumnRange {
-  // The name of the column family within which this range falls.
-  string family_name = 1;
-
-  // The column qualifier at which to start the range (within 'column_family').
-  // If neither field is set, interpreted as the empty string, inclusive.
-  oneof start_qualifier {
-    // Used when giving an inclusive lower bound for the range.
-    bytes start_qualifier_inclusive = 2;
-
-    // Used when giving an exclusive lower bound for the range.
-    bytes start_qualifier_exclusive = 3;
-  }
-
-  // The column qualifier at which to end the range (within 'column_family').
-  // If neither field is set, interpreted as the infinite string, exclusive.
-  oneof end_qualifier {
-    // Used when giving an inclusive upper bound for the range.
-    bytes end_qualifier_inclusive = 4;
-
-    // Used when giving an exclusive upper bound for the range.
-    bytes end_qualifier_exclusive = 5;
-  }
-}
-
-// Specifies a contiguous range of microsecond timestamps.
-message TimestampRange {
-  // Inclusive lower bound. If left empty, interpreted as 0.
-  int64 start_timestamp_micros = 1;
-
-  // Exclusive upper bound. If left empty, interpreted as infinity.
-  int64 end_timestamp_micros = 2;
-}
-
-// Specifies a contiguous range of raw byte values.
-message ValueRange {
-  // The value at which to start the range.
-  // If neither field is set, interpreted as the empty string, inclusive.
-  oneof start_value {
-    // Used when giving an inclusive lower bound for the range.
-    bytes start_value_inclusive = 1;
-
-    // Used when giving an exclusive lower bound for the range.
-    bytes start_value_exclusive = 2;
-  }
-
-  // The value at which to end the range.
-  // If neither field is set, interpreted as the infinite string, exclusive.
-  oneof end_value {
-    // Used when giving an inclusive upper bound for the range.
-    bytes end_value_inclusive = 3;
-
-    // Used when giving an exclusive upper bound for the range.
-    bytes end_value_exclusive = 4;
-  }
-}
-
-// Takes a row as input and produces an alternate view of the row based on
-// specified rules. For example, a RowFilter might trim down a row to include
-// just the cells from columns matching a given regular expression, or might
-// return all the cells of a row but not their values. More complicated filters
-// can be composed out of these components to express requests such as, "within
-// every column of a particular family, give just the two most recent cells
-// which are older than timestamp X."
-//
-// There are two broad categories of RowFilters (true filters and transformers),
-// as well as two ways to compose simple filters into more complex ones
-// (chains and interleaves). They work as follows:
-//
-// * True filters alter the input row by excluding some of its cells wholesale
-// from the output row. An example of a true filter is the "value_regex_filter",
-// which excludes cells whose values don't match the specified pattern. All
-// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax)
-// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An
-// important point to keep in mind is that RE2(.) is equivalent by default to
-// RE2([^\n]), meaning that it does not match newlines. When attempting to match
-// an arbitrary byte, you should therefore use the escape sequence '\C', which
-// may need to be further escaped as '\\C' in your client language.
-//
-// * Transformers alter the input row by changing the values of some of its
-// cells in the output, without excluding them completely.
Currently, the only -// supported transformer is the "strip_value_transformer", which replaces every -// cell's value with the empty string. -// -// * Chains and interleaves are described in more detail in the -// RowFilter.Chain and RowFilter.Interleave documentation. -// -// The total serialized size of a RowFilter message must not -// exceed 4096 bytes, and RowFilters may not be nested within each other -// (in Chains or Interleaves) to a depth of more than 20. -message RowFilter { - // A RowFilter which sends rows through several RowFilters in sequence. - message Chain { - // The elements of "filters" are chained together to process the input row: - // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row - // The full chain is executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which sends each row to each of several component - // RowFilters and interleaves the results. - message Interleave { - // The elements of "filters" all process a copy of the input row, and the - // results are pooled, sorted, and combined into a single output row. - // If multiple cells are produced with the same column and timestamp, - // they will all appear in the output row in an unspecified mutual order. - // Consider the following example, with three filters: - // - // input row - // | - // ----------------------------------------------------- - // | | | - // f(0) f(1) f(2) - // | | | - // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a - // 2: foo,blah,11,z far,blah,5,x far,blah,5,x - // | | | - // ----------------------------------------------------- - // | - // 1: foo,bar,10,z // could have switched with #2 - // 2: foo,bar,10,x // could have switched with #1 - // 3: foo,blah,11,z - // 4: far,bar,7,a - // 5: far,blah,5,x // identical to #6 - // 6: far,blah,5,x // identical to #5 - // All interleaved filters are executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which evaluates one of two possible RowFilters, depending on - // whether or not a predicate RowFilter outputs any cells from the input row. - // - // IMPORTANT NOTE: The predicate filter does not execute atomically with the - // true and false filters, which may lead to inconsistent or unexpected - // results. Additionally, Condition filters have poor performance, especially - // when filters are set for the false condition. - message Condition { - // If "predicate_filter" outputs any cells, then "true_filter" will be - // evaluated on the input row. Otherwise, "false_filter" will be evaluated. - RowFilter predicate_filter = 1; - - // The filter to apply to the input row if "predicate_filter" returns any - // results. If not provided, no results will be returned in the true case. - RowFilter true_filter = 2; - - // The filter to apply to the input row if "predicate_filter" does not - // return any results. If not provided, no results will be returned in the - // false case. - RowFilter false_filter = 3; - } - - // Which of the possible RowFilter types to apply. If none are set, this - // RowFilter returns all cells in the input row. - oneof filter { - // Applies several RowFilters to the data in sequence, progressively - // narrowing the results. - Chain chain = 1; - - // Applies several RowFilters to the data in parallel and combines the - // results. - Interleave interleave = 2; - - // Applies one of two possible RowFilters to the data based on the output of - // a predicate RowFilter. - Condition condition = 3; - - // ADVANCED USE ONLY. 
- // Hook for introspection into the RowFilter. Outputs all cells directly to - // the output of the read rather than to any parent filter. Consider the - // following example: - // - // Chain( - // FamilyRegex("A"), - // Interleave( - // All(), - // Chain(Label("foo"), Sink()) - // ), - // QualifierRegex("B") - // ) - // - // A,A,1,w - // A,B,2,x - // B,B,4,z - // | - // FamilyRegex("A") - // | - // A,A,1,w - // A,B,2,x - // | - // +------------+-------------+ - // | | - // All() Label(foo) - // | | - // A,A,1,w A,A,1,w,labels:[foo] - // A,B,2,x A,B,2,x,labels:[foo] - // | | - // | Sink() --------------+ - // | | | - // +------------+ x------+ A,A,1,w,labels:[foo] - // | A,B,2,x,labels:[foo] - // A,A,1,w | - // A,B,2,x | - // | | - // QualifierRegex("B") | - // | | - // A,B,2,x | - // | | - // +--------------------------------+ - // | - // A,A,1,w,labels:[foo] - // A,B,2,x,labels:[foo] // could be switched - // A,B,2,x // could be switched - // - // Despite being excluded by the qualifier filter, a copy of every cell - // that reaches the sink is present in the final result. - // - // As with an [Interleave][google.bigtable.v1.RowFilter.Interleave], - // duplicate cells are possible, and appear in an unspecified mutual order. - // In this case we have a duplicate with column "A:B" and timestamp 2, - // because one copy passed through the all filter while the other was - // passed through the label and sink. Note that one copy has label "foo", - // while the other does not. - // - // Cannot be used within the `predicate_filter`, `true_filter`, or - // `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition]. - bool sink = 16; - - // Matches all cells, regardless of input. Functionally equivalent to - // leaving `filter` unset, but included for completeness. - bool pass_all_filter = 17; - - // Does not match any cells, regardless of input. Useful for temporarily - // disabling just part of a filter. - bool block_all_filter = 18; - - // Matches only cells from rows whose keys satisfy the given RE2 regex. In - // other words, passes through the entire row when the key matches, and - // otherwise produces an empty row. - // Note that, since row keys can contain arbitrary bytes, the '\C' escape - // sequence must be used if a true wildcard is desired. The '.' character - // will not match the new line character '\n', which may be present in a - // binary key. - bytes row_key_regex_filter = 4; - - // Matches all cells from a row with probability p, and matches no cells - // from the row with probability 1-p. - double row_sample_filter = 14; - - // Matches only cells from columns whose families satisfy the given RE2 - // regex. For technical reasons, the regex must not contain the ':' - // character, even if it is not being used as a literal. - // Note that, since column families cannot contain the new line character - // '\n', it is sufficient to use '.' as a full wildcard when matching - // column family names. - string family_name_regex_filter = 5; - - // Matches only cells from columns whose qualifiers satisfy the given RE2 - // regex. - // Note that, since column qualifiers can contain arbitrary bytes, the '\C' - // escape sequence must be used if a true wildcard is desired. The '.' - // character will not match the new line character '\n', which may be - // present in a binary qualifier. - bytes column_qualifier_regex_filter = 6; - - // Matches only cells from columns within the given range. 
-    ColumnRange column_range_filter = 7;
-
-    // Matches only cells with timestamps within the given range.
-    TimestampRange timestamp_range_filter = 8;
-
-    // Matches only cells with values that satisfy the given regular expression.
-    // Note that, since cell values can contain arbitrary bytes, the '\C' escape
-    // sequence must be used if a true wildcard is desired. The '.' character
-    // will not match the new line character '\n', which may be present in a
-    // binary value.
-    bytes value_regex_filter = 9;
-
-    // Matches only cells with values that fall within the given range.
-    ValueRange value_range_filter = 15;
-
-    // Skips the first N cells of each row, matching all subsequent cells.
-    int32 cells_per_row_offset_filter = 10;
-
-    // Matches only the first N cells of each row.
-    int32 cells_per_row_limit_filter = 11;
-
-    // Matches only the most recent N cells within each column. For example,
-    // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9,
-    // skip all earlier cells in "foo:bar", and then begin matching again in
-    // column "foo:bar2".
-    int32 cells_per_column_limit_filter = 12;
-
-    // Replaces each cell's value with the empty string.
-    bool strip_value_transformer = 13;
-
-    // Applies the given label to all cells in the output row. This allows
-    // the client to determine which results were produced from which part of
-    // the filter.
-    //
-    // Values must be at most 15 characters in length, and match the RE2
-    // pattern [a-z0-9\\-]+
-    //
-    // Due to a technical limitation, it is not currently possible to apply
-    // multiple labels to a cell. As a result, a Chain may have no more than
-    // one sub-filter which contains an apply_label_transformer. It is okay for
-    // an Interleave to contain multiple apply_label_transformers, as they will
-    // be applied to separate copies of the input. This may be relaxed in the
-    // future.
-    string apply_label_transformer = 19;
-  }
-}
-
-// Specifies a particular change to be made to the contents of a row.
-message Mutation {
-  // A Mutation which sets the value of the specified cell.
-  message SetCell {
-    // The name of the family into which new data should be written.
-    // Must match [-_.a-zA-Z0-9]+
-    string family_name = 1;
-
-    // The qualifier of the column into which new data should be written.
-    // Can be any byte string, including the empty string.
-    bytes column_qualifier = 2;
-
-    // The timestamp of the cell into which new data should be written.
-    // Use -1 for current Bigtable server time.
-    // Otherwise, the client should set this value itself, noting that the
-    // default value is a timestamp of zero if the field is left unspecified.
-    // Values must match the "granularity" of the table (e.g. micros, millis).
-    int64 timestamp_micros = 3;
-
-    // The value to be written into the specified cell.
-    bytes value = 4;
-  }
-
-  // A Mutation which deletes cells from the specified column, optionally
-  // restricting the deletions to a given timestamp range.
-  message DeleteFromColumn {
-    // The name of the family from which cells should be deleted.
-    // Must match [-_.a-zA-Z0-9]+
-    string family_name = 1;
-
-    // The qualifier of the column from which cells should be deleted.
-    // Can be any byte string, including the empty string.
-    bytes column_qualifier = 2;
-
-    // The range of timestamps within which cells should be deleted.
-    TimestampRange time_range = 3;
-  }
-
-  // A Mutation which deletes all cells from the specified column family.
- message DeleteFromFamily { - // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - } - - // A Mutation which deletes all cells from the containing row. - message DeleteFromRow { - - } - - // Which of the possible Mutation types to apply. - oneof mutation { - // Set a cell's value. - SetCell set_cell = 1; - - // Deletes cells from a column. - DeleteFromColumn delete_from_column = 2; - - // Deletes cells from a column family. - DeleteFromFamily delete_from_family = 3; - - // Deletes cells from the entire row. - DeleteFromRow delete_from_row = 4; - } -} - -// Specifies an atomic read/modify/write operation on the latest value of the -// specified column. -message ReadModifyWriteRule { - // The name of the family to which the read/modify/write should be applied. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column to which the read/modify/write should be - // applied. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The rule used to determine the column's new latest value from its current - // latest value. - oneof rule { - // Rule specifying that "append_value" be appended to the existing value. - // If the targeted cell is unset, it will be treated as containing the - // empty string. - bytes append_value = 3; - - // Rule specifying that "increment_amount" be added to the existing value. - // If the targeted cell is unset, it will be treated as containing a zero. - // Otherwise, the targeted cell must contain an 8-byte value (interpreted - // as a 64-bit big-endian signed integer), or the entire request will fail. - int64 increment_amount = 4; - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/duration_proto/duration.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/duration_proto/duration.pb.go deleted file mode 100644 index 63d6727e36..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/duration_proto/duration.pb.go +++ /dev/null @@ -1,81 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/cloud/bigtable/internal/duration_proto/duration.proto -// DO NOT EDIT! - -/* -Package google_protobuf is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/cloud/bigtable/internal/duration_proto/duration.proto - -It has these top-level messages: - Duration -*/ -package google_protobuf - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// Example 1: Compute Duration from two Timestamps in pseudo code. 
-//
-//     Timestamp start = ...;
-//     Timestamp end = ...;
-//     Duration duration = ...;
-//
-//     duration.seconds = end.seconds - start.seconds;
-//     duration.nanos = end.nanos - start.nanos;
-//
-//     if (duration.seconds < 0 && duration.nanos > 0) {
-//       duration.seconds += 1;
-//       duration.nanos -= 1000000000;
-//     } else if (duration.seconds > 0 && duration.nanos < 0) {
-//       duration.seconds -= 1;
-//       duration.nanos += 1000000000;
-//     }
-//
-// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
-//
-//     Timestamp start = ...;
-//     Duration duration = ...;
-//     Timestamp end = ...;
-//
-//     end.seconds = start.seconds + duration.seconds;
-//     end.nanos = start.nanos + duration.nanos;
-//
-//     if (end.nanos < 0) {
-//       end.seconds -= 1;
-//       end.nanos += 1000000000;
-//     } else if (end.nanos >= 1000000000) {
-//       end.seconds += 1;
-//       end.nanos -= 1000000000;
-//     }
-//
-type Duration struct {
-	// Signed seconds of the span of time. Must be from -315,576,000,000
-	// to +315,576,000,000 inclusive.
-	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
-	// Signed fractions of a second at nanosecond resolution of the span
-	// of time. Durations less than one second are represented with a 0
-	// `seconds` field and a positive or negative `nanos` field. For durations
-	// of one second or more, a non-zero value for the `nanos` field must be
-	// of the same sign as the `seconds` field. Must be from -999,999,999
-	// to +999,999,999 inclusive.
-	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
-}
-
-func (m *Duration) Reset() { *m = Duration{} }
-func (m *Duration) String() string { return proto.CompactTextString(m) }
-func (*Duration) ProtoMessage() {}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/duration_proto/duration.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/duration_proto/duration.proto
deleted file mode 100644
index 15e9d44d3b..0000000000
--- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/duration_proto/duration.proto
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2015, Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option java_generate_equals_and_hash = true;
-option java_multiple_files = true;
-option java_outer_classname = "DurationProto";
-option java_package = "com.google.protobuf";
-
-
-// A Duration represents a signed, fixed-length span of time represented
-// as a count of seconds and fractions of seconds at nanosecond
-// resolution. It is independent of any calendar and concepts like "day"
-// or "month". It is related to Timestamp in that the difference between
-// two Timestamp values is a Duration and it can be added or subtracted
-// from a Timestamp. Range is approximately +-10,000 years.
-//
-// Example 1: Compute Duration from two Timestamps in pseudo code.
-//
-//     Timestamp start = ...;
-//     Timestamp end = ...;
-//     Duration duration = ...;
-//
-//     duration.seconds = end.seconds - start.seconds;
-//     duration.nanos = end.nanos - start.nanos;
-//
-//     if (duration.seconds < 0 && duration.nanos > 0) {
-//       duration.seconds += 1;
-//       duration.nanos -= 1000000000;
-//     } else if (duration.seconds > 0 && duration.nanos < 0) {
-//       duration.seconds -= 1;
-//       duration.nanos += 1000000000;
-//     }
-//
-// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
-//
-//     Timestamp start = ...;
-//     Duration duration = ...;
-//     Timestamp end = ...;
-//
-//     end.seconds = start.seconds + duration.seconds;
-//     end.nanos = start.nanos + duration.nanos;
-//
-//     if (end.nanos < 0) {
-//       end.seconds -= 1;
-//       end.nanos += 1000000000;
-//     } else if (end.nanos >= 1000000000) {
-//       end.seconds += 1;
-//       end.nanos -= 1000000000;
-//     }
-//
-message Duration {
-  // Signed seconds of the span of time. Must be from -315,576,000,000
-  // to +315,576,000,000 inclusive.
-  int64 seconds = 1;
-
-  // Signed fractions of a second at nanosecond resolution of the span
-  // of time. Durations less than one second are represented with a 0
-  // `seconds` field and a positive or negative `nanos` field. For durations
-  // of one second or more, a non-zero value for the `nanos` field must be
-  // of the same sign as the `seconds` field. Must be from -999,999,999
-  // to +999,999,999 inclusive.
-  int32 nanos = 2;
-}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/empty/empty.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/empty/empty.pb.go
deleted file mode 100644
index 89bb423481..0000000000
--- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/empty/empty.pb.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: google.golang.org/cloud/bigtable/internal/empty/empty.proto
-// DO NOT EDIT!
-
-/*
-Package google_protobuf is a generated protocol buffer package.
-
-It is generated from these files:
-	google.golang.org/cloud/bigtable/internal/empty/empty.proto
-
-It has these top-level messages:
-	Empty
-*/
-package google_protobuf
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// A generic empty message that you can re-use to avoid defining duplicated
-// empty messages in your APIs. A typical example is to use it as the request
-// or the response type of an API method. For instance:
-//
-//     service Foo {
-//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
-//     }
-//
-type Empty struct {
-}
-
-func (m *Empty) Reset() { *m = Empty{} }
-func (m *Empty) String() string { return proto.CompactTextString(m) }
-func (*Empty) ProtoMessage() {}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/empty/empty.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/empty/empty.proto
deleted file mode 100644
index 43b06e87bd..0000000000
--- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/empty/empty.proto
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2015, Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.protobuf; - -option java_multiple_files = true; -option java_outer_classname = "EmptyProto"; -option java_package = "com.google.protobuf"; - - -// A generic empty message that you can re-use to avoid defining duplicated -// empty messages in your APIs. A typical example is to use it as the request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); -// } -// -message Empty { - -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/regen.sh b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/regen.sh deleted file mode 100755 index 6cfa1688b4..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/regen.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -e -# -# This script rebuilds the generated code for the protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. -# You also need Go and Git installed. - -PKG=google.golang.org/cloud/bigtable -UPSTREAM=https://github.com/GoogleCloudPlatform/cloud-bigtable-client -UPSTREAM_SUBDIR=bigtable-protos/src/main/proto - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go git protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -tmpdir=$(mktemp -d -t regen-cbt.XXXXXX) -trap 'rm -rf $tmpdir' EXIT - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd $base - -echo 1>&2 "fetching latest protos... " -git clone -q $UPSTREAM $tmpdir -# Pass 1: build mapping from upstream filename to our filename. -declare -A filename_map -for f in $(cd $PKG && find internal -name '*.proto'); do - echo -n 1>&2 "looking for latest version of $f... " - up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f)) - echo 1>&2 $up - if [ $(echo $up | wc -w) != "1" ]; then - die "not exactly one match" - fi - filename_map[$up]=$f -done -# Pass 2: build sed script for fixing imports. -import_fixes=$tmpdir/fix_imports.sed -for up in "${!filename_map[@]}"; do - f=${filename_map[$up]} - echo >>$import_fixes "s,\"$up\",\"$PKG/$f\"," -done -cat $import_fixes | sed 's,^,### ,' 1>&2 -# Pass 3: copy files, making necessary adjustments. -for up in "${!filename_map[@]}"; do - f=${filename_map[$up]} - cat $tmpdir/$UPSTREAM_SUBDIR/$up | - # Adjust proto imports. - sed -f $import_fixes | - # Drop the UndeleteCluster RPC method. It returns a google.longrunning.Operation. - sed '/^ rpc UndeleteCluster(/,/^ }$/d' | - # Drop annotations, long-running operations and timestamps. They aren't supported (yet). 
- sed '/"google\/longrunning\/operations.proto"/d' | - sed '/google.longrunning.Operation/d' | - sed '/"google\/protobuf\/timestamp.proto"/d' | - sed '/google\.protobuf\.Timestamp/d' | - sed '/"google\/api\/annotations.proto"/d' | - sed '/option.*google\.api\.http.*{.*};$/d' | - cat > $PKG/$f -done - -# Run protoc once per package. -for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do - echo 1>&2 "* $dir" - protoc --go_out=plugins=grpc:. $dir/*.proto -done -echo 1>&2 "All OK" diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.pb.go deleted file mode 100644 index 74b3837669..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.pb.go +++ /dev/null @@ -1,287 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto -// DO NOT EDIT! - -package google_bigtable_v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_bigtable_v11 "google.golang.org/cloud/bigtable/internal/data_proto" -import google_protobuf "google.golang.org/cloud/bigtable/internal/empty" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// Client API for BigtableService service - -type BigtableServiceClient interface { - // Streams back the contents of all requested rows, optionally applying - // the same Reader filter to each. Depending on their size, rows may be - // broken up across multiple responses, but atomicity of each row will still - // be preserved. - ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigtableService_ReadRowsClient, error) - // Returns a sample of row keys in the table. The returned row keys will - // delimit contiguous sections of the table of approximately equal size, - // which can be used to break up the data for distributed tasks like - // mapreduces. - SampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (BigtableService_SampleRowKeysClient, error) - // Mutates a row atomically. Cells already present in the row are left - // unchanged unless explicitly changed by 'mutation'. - MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) - // Mutates a row atomically based on the output of a predicate Reader filter. - CheckAndMutateRow(ctx context.Context, in *CheckAndMutateRowRequest, opts ...grpc.CallOption) (*CheckAndMutateRowResponse, error) - // Modifies a row atomically, reading the latest existing timestamp/value from - // the specified columns and writing a new value at - // max(existing timestamp, current server time) based on pre-defined - // read/modify/write rules. Returns the new contents of all modified cells. 
- ReadModifyWriteRow(ctx context.Context, in *ReadModifyWriteRowRequest, opts ...grpc.CallOption) (*google_bigtable_v11.Row, error) -} - -type bigtableServiceClient struct { - cc *grpc.ClientConn -} - -func NewBigtableServiceClient(cc *grpc.ClientConn) BigtableServiceClient { - return &bigtableServiceClient{cc} -} - -func (c *bigtableServiceClient) ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigtableService_ReadRowsClient, error) { - stream, err := grpc.NewClientStream(ctx, &_BigtableService_serviceDesc.Streams[0], c.cc, "/google.bigtable.v1.BigtableService/ReadRows", opts...) - if err != nil { - return nil, err - } - x := &bigtableServiceReadRowsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type BigtableService_ReadRowsClient interface { - Recv() (*ReadRowsResponse, error) - grpc.ClientStream -} - -type bigtableServiceReadRowsClient struct { - grpc.ClientStream -} - -func (x *bigtableServiceReadRowsClient) Recv() (*ReadRowsResponse, error) { - m := new(ReadRowsResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *bigtableServiceClient) SampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (BigtableService_SampleRowKeysClient, error) { - stream, err := grpc.NewClientStream(ctx, &_BigtableService_serviceDesc.Streams[1], c.cc, "/google.bigtable.v1.BigtableService/SampleRowKeys", opts...) - if err != nil { - return nil, err - } - x := &bigtableServiceSampleRowKeysClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type BigtableService_SampleRowKeysClient interface { - Recv() (*SampleRowKeysResponse, error) - grpc.ClientStream -} - -type bigtableServiceSampleRowKeysClient struct { - grpc.ClientStream -} - -func (x *bigtableServiceSampleRowKeysClient) Recv() (*SampleRowKeysResponse, error) { - m := new(SampleRowKeysResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *bigtableServiceClient) MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { - out := new(google_protobuf.Empty) - err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/MutateRow", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableServiceClient) CheckAndMutateRow(ctx context.Context, in *CheckAndMutateRowRequest, opts ...grpc.CallOption) (*CheckAndMutateRowResponse, error) { - out := new(CheckAndMutateRowResponse) - err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/CheckAndMutateRow", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableServiceClient) ReadModifyWriteRow(ctx context.Context, in *ReadModifyWriteRowRequest, opts ...grpc.CallOption) (*google_bigtable_v11.Row, error) { - out := new(google_bigtable_v11.Row) - err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/ReadModifyWriteRow", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for BigtableService service - -type BigtableServiceServer interface { - // Streams back the contents of all requested rows, optionally applying - // the same Reader filter to each. 
Depending on their size, rows may be - // broken up across multiple responses, but atomicity of each row will still - // be preserved. - ReadRows(*ReadRowsRequest, BigtableService_ReadRowsServer) error - // Returns a sample of row keys in the table. The returned row keys will - // delimit contiguous sections of the table of approximately equal size, - // which can be used to break up the data for distributed tasks like - // mapreduces. - SampleRowKeys(*SampleRowKeysRequest, BigtableService_SampleRowKeysServer) error - // Mutates a row atomically. Cells already present in the row are left - // unchanged unless explicitly changed by 'mutation'. - MutateRow(context.Context, *MutateRowRequest) (*google_protobuf.Empty, error) - // Mutates a row atomically based on the output of a predicate Reader filter. - CheckAndMutateRow(context.Context, *CheckAndMutateRowRequest) (*CheckAndMutateRowResponse, error) - // Modifies a row atomically, reading the latest existing timestamp/value from - // the specified columns and writing a new value at - // max(existing timestamp, current server time) based on pre-defined - // read/modify/write rules. Returns the new contents of all modified cells. - ReadModifyWriteRow(context.Context, *ReadModifyWriteRowRequest) (*google_bigtable_v11.Row, error) -} - -func RegisterBigtableServiceServer(s *grpc.Server, srv BigtableServiceServer) { - s.RegisterService(&_BigtableService_serviceDesc, srv) -} - -func _BigtableService_ReadRows_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ReadRowsRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(BigtableServiceServer).ReadRows(m, &bigtableServiceReadRowsServer{stream}) -} - -type BigtableService_ReadRowsServer interface { - Send(*ReadRowsResponse) error - grpc.ServerStream -} - -type bigtableServiceReadRowsServer struct { - grpc.ServerStream -} - -func (x *bigtableServiceReadRowsServer) Send(m *ReadRowsResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _BigtableService_SampleRowKeys_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SampleRowKeysRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(BigtableServiceServer).SampleRowKeys(m, &bigtableServiceSampleRowKeysServer{stream}) -} - -type BigtableService_SampleRowKeysServer interface { - Send(*SampleRowKeysResponse) error - grpc.ServerStream -} - -type bigtableServiceSampleRowKeysServer struct { - grpc.ServerStream -} - -func (x *bigtableServiceSampleRowKeysServer) Send(m *SampleRowKeysResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _BigtableService_MutateRow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(MutateRowRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableServiceServer).MutateRow(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableService_CheckAndMutateRow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(CheckAndMutateRowRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableServiceServer).CheckAndMutateRow(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableService_ReadModifyWriteRow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(ReadModifyWriteRowRequest) - if err := dec(in); err != nil { - return nil, err 
- } - out, err := srv.(BigtableServiceServer).ReadModifyWriteRow(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -var _BigtableService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.bigtable.v1.BigtableService", - HandlerType: (*BigtableServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "MutateRow", - Handler: _BigtableService_MutateRow_Handler, - }, - { - MethodName: "CheckAndMutateRow", - Handler: _BigtableService_CheckAndMutateRow_Handler, - }, - { - MethodName: "ReadModifyWriteRow", - Handler: _BigtableService_ReadModifyWriteRow_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ReadRows", - Handler: _BigtableService_ReadRows_Handler, - ServerStreams: true, - }, - { - StreamName: "SampleRowKeys", - Handler: _BigtableService_SampleRowKeys_Handler, - ServerStreams: true, - }, - }, -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto deleted file mode 100644 index 814940a18b..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -import "google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto"; -import "google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto"; -import "google.golang.org/cloud/bigtable/internal/empty/empty.proto"; - -option java_generic_services = true; -option java_multiple_files = true; -option java_outer_classname = "BigtableServicesProto"; -option java_package = "com.google.bigtable.v1"; - - -// Service for reading from and writing to existing Bigtables. -service BigtableService { - // Streams back the contents of all requested rows, optionally applying - // the same Reader filter to each. Depending on their size, rows may be - // broken up across multiple responses, but atomicity of each row will still - // be preserved. - rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { - } - - // Returns a sample of row keys in the table. The returned row keys will - // delimit contiguous sections of the table of approximately equal size, - // which can be used to break up the data for distributed tasks like - // mapreduces. - rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { - } - - // Mutates a row atomically. Cells already present in the row are left - // unchanged unless explicitly changed by 'mutation'. - rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) { - } - - // Mutates a row atomically based on the output of a predicate Reader filter. 
- rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { - } - - // Modifies a row atomically, reading the latest existing timestamp/value from - // the specified columns and writing a new value at - // max(existing timestamp, current server time) based on pre-defined - // read/modify/write rules. Returns the new contents of all modified cells. - rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) { - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.pb.go deleted file mode 100644 index 0b870d00a1..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.pb.go +++ /dev/null @@ -1,444 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto -// DO NOT EDIT! - -/* -Package google_bigtable_v1 is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto - google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service.proto - -It has these top-level messages: - ReadRowsRequest - ReadRowsResponse - SampleRowKeysRequest - SampleRowKeysResponse - MutateRowRequest - CheckAndMutateRowRequest - CheckAndMutateRowResponse - ReadModifyWriteRowRequest -*/ -package google_bigtable_v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_bigtable_v11 "google.golang.org/cloud/bigtable/internal/data_proto" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// Request message for BigtableService.ReadRows. -type ReadRowsRequest struct { - // The unique name of the table from which to read. - TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` - // If neither row_key nor row_range is set, reads from all rows. - // - // Types that are valid to be assigned to Target: - // *ReadRowsRequest_RowKey - // *ReadRowsRequest_RowRange - Target isReadRowsRequest_Target `protobuf_oneof:"target"` - // The filter to apply to the contents of the specified row(s). If unset, - // reads the entire table. - Filter *google_bigtable_v11.RowFilter `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"` - // By default, rows are read sequentially, producing results which are - // guaranteed to arrive in increasing row order. Setting - // "allow_row_interleaving" to true allows multiple rows to be interleaved in - // the response stream, which increases throughput but breaks this guarantee, - // and may force the client to use more memory to buffer partially-received - // rows. Cannot be set to true when specifying "num_rows_limit". - AllowRowInterleaving bool `protobuf:"varint,6,opt,name=allow_row_interleaving" json:"allow_row_interleaving,omitempty"` - // The read will terminate after committing to N rows' worth of results. The - // default (zero) is to return all results. - // Note that "allow_row_interleaving" cannot be set to true when this is set.
- NumRowsLimit int64 `protobuf:"varint,7,opt,name=num_rows_limit" json:"num_rows_limit,omitempty"` -} - -func (m *ReadRowsRequest) Reset() { *m = ReadRowsRequest{} } -func (m *ReadRowsRequest) String() string { return proto.CompactTextString(m) } -func (*ReadRowsRequest) ProtoMessage() {} - -type isReadRowsRequest_Target interface { - isReadRowsRequest_Target() -} - -type ReadRowsRequest_RowKey struct { - RowKey []byte `protobuf:"bytes,2,opt,name=row_key,proto3,oneof"` -} -type ReadRowsRequest_RowRange struct { - RowRange *google_bigtable_v11.RowRange `protobuf:"bytes,3,opt,name=row_range,oneof"` -} - -func (*ReadRowsRequest_RowKey) isReadRowsRequest_Target() {} -func (*ReadRowsRequest_RowRange) isReadRowsRequest_Target() {} - -func (m *ReadRowsRequest) GetTarget() isReadRowsRequest_Target { - if m != nil { - return m.Target - } - return nil -} - -func (m *ReadRowsRequest) GetRowKey() []byte { - if x, ok := m.GetTarget().(*ReadRowsRequest_RowKey); ok { - return x.RowKey - } - return nil -} - -func (m *ReadRowsRequest) GetRowRange() *google_bigtable_v11.RowRange { - if x, ok := m.GetTarget().(*ReadRowsRequest_RowRange); ok { - return x.RowRange - } - return nil -} - -func (m *ReadRowsRequest) GetFilter() *google_bigtable_v11.RowFilter { - if m != nil { - return m.Filter - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ReadRowsRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _ReadRowsRequest_OneofMarshaler, _ReadRowsRequest_OneofUnmarshaler, []interface{}{ - (*ReadRowsRequest_RowKey)(nil), - (*ReadRowsRequest_RowRange)(nil), - } -} - -func _ReadRowsRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ReadRowsRequest) - // target - switch x := m.Target.(type) { - case *ReadRowsRequest_RowKey: - b.EncodeVarint(2<<3 | proto.WireBytes) - b.EncodeRawBytes(x.RowKey) - case *ReadRowsRequest_RowRange: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RowRange); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ReadRowsRequest.Target has unexpected type %T", x) - } - return nil -} - -func _ReadRowsRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ReadRowsRequest) - switch tag { - case 2: // target.row_key - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Target = &ReadRowsRequest_RowKey{x} - return true, err - case 3: // target.row_range - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(google_bigtable_v11.RowRange) - err := b.DecodeMessage(msg) - m.Target = &ReadRowsRequest_RowRange{msg} - return true, err - default: - return false, nil - } -} - -// Response message for BigtableService.ReadRows. -type ReadRowsResponse struct { - // The key of the row for which we're receiving data. - // Results will be received in increasing row key order, unless - // "allow_row_interleaving" was specified in the request. - RowKey []byte `protobuf:"bytes,1,opt,name=row_key,proto3" json:"row_key,omitempty"` - // One or more chunks of the row specified by "row_key". 
- Chunks []*ReadRowsResponse_Chunk `protobuf:"bytes,2,rep,name=chunks" json:"chunks,omitempty"` -} - -func (m *ReadRowsResponse) Reset() { *m = ReadRowsResponse{} } -func (m *ReadRowsResponse) String() string { return proto.CompactTextString(m) } -func (*ReadRowsResponse) ProtoMessage() {} - -func (m *ReadRowsResponse) GetChunks() []*ReadRowsResponse_Chunk { - if m != nil { - return m.Chunks - } - return nil -} - -// Specifies a piece of a row's contents returned as part of the read -// response stream. -type ReadRowsResponse_Chunk struct { - // Types that are valid to be assigned to Chunk: - // *ReadRowsResponse_Chunk_RowContents - // *ReadRowsResponse_Chunk_ResetRow - // *ReadRowsResponse_Chunk_CommitRow - Chunk isReadRowsResponse_Chunk_Chunk `protobuf_oneof:"chunk"` -} - -func (m *ReadRowsResponse_Chunk) Reset() { *m = ReadRowsResponse_Chunk{} } -func (m *ReadRowsResponse_Chunk) String() string { return proto.CompactTextString(m) } -func (*ReadRowsResponse_Chunk) ProtoMessage() {} - -type isReadRowsResponse_Chunk_Chunk interface { - isReadRowsResponse_Chunk_Chunk() -} - -type ReadRowsResponse_Chunk_RowContents struct { - RowContents *google_bigtable_v11.Family `protobuf:"bytes,1,opt,name=row_contents,oneof"` -} -type ReadRowsResponse_Chunk_ResetRow struct { - ResetRow bool `protobuf:"varint,2,opt,name=reset_row,oneof"` -} -type ReadRowsResponse_Chunk_CommitRow struct { - CommitRow bool `protobuf:"varint,3,opt,name=commit_row,oneof"` -} - -func (*ReadRowsResponse_Chunk_RowContents) isReadRowsResponse_Chunk_Chunk() {} -func (*ReadRowsResponse_Chunk_ResetRow) isReadRowsResponse_Chunk_Chunk() {} -func (*ReadRowsResponse_Chunk_CommitRow) isReadRowsResponse_Chunk_Chunk() {} - -func (m *ReadRowsResponse_Chunk) GetChunk() isReadRowsResponse_Chunk_Chunk { - if m != nil { - return m.Chunk - } - return nil -} - -func (m *ReadRowsResponse_Chunk) GetRowContents() *google_bigtable_v11.Family { - if x, ok := m.GetChunk().(*ReadRowsResponse_Chunk_RowContents); ok { - return x.RowContents - } - return nil -} - -func (m *ReadRowsResponse_Chunk) GetResetRow() bool { - if x, ok := m.GetChunk().(*ReadRowsResponse_Chunk_ResetRow); ok { - return x.ResetRow - } - return false -} - -func (m *ReadRowsResponse_Chunk) GetCommitRow() bool { - if x, ok := m.GetChunk().(*ReadRowsResponse_Chunk_CommitRow); ok { - return x.CommitRow - } - return false -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*ReadRowsResponse_Chunk) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _ReadRowsResponse_Chunk_OneofMarshaler, _ReadRowsResponse_Chunk_OneofUnmarshaler, []interface{}{ - (*ReadRowsResponse_Chunk_RowContents)(nil), - (*ReadRowsResponse_Chunk_ResetRow)(nil), - (*ReadRowsResponse_Chunk_CommitRow)(nil), - } -} - -func _ReadRowsResponse_Chunk_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ReadRowsResponse_Chunk) - // chunk - switch x := m.Chunk.(type) { - case *ReadRowsResponse_Chunk_RowContents: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RowContents); err != nil { - return err - } - case *ReadRowsResponse_Chunk_ResetRow: - t := uint64(0) - if x.ResetRow { - t = 1 - } - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *ReadRowsResponse_Chunk_CommitRow: - t := uint64(0) - if x.CommitRow { - t = 1 - } - b.EncodeVarint(3<<3 | proto.WireVarint) - b.EncodeVarint(t) - case nil: - default: - return fmt.Errorf("ReadRowsResponse_Chunk.Chunk has unexpected type %T", x) - } - return nil -} - -func _ReadRowsResponse_Chunk_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ReadRowsResponse_Chunk) - switch tag { - case 1: // chunk.row_contents - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(google_bigtable_v11.Family) - err := b.DecodeMessage(msg) - m.Chunk = &ReadRowsResponse_Chunk_RowContents{msg} - return true, err - case 2: // chunk.reset_row - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Chunk = &ReadRowsResponse_Chunk_ResetRow{x != 0} - return true, err - case 3: // chunk.commit_row - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Chunk = &ReadRowsResponse_Chunk_CommitRow{x != 0} - return true, err - default: - return false, nil - } -} - -// Request message for BigtableService.SampleRowKeys. -type SampleRowKeysRequest struct { - // The unique name of the table from which to sample row keys. - TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` -} - -func (m *SampleRowKeysRequest) Reset() { *m = SampleRowKeysRequest{} } -func (m *SampleRowKeysRequest) String() string { return proto.CompactTextString(m) } -func (*SampleRowKeysRequest) ProtoMessage() {} - -// Response message for BigtableService.SampleRowKeys. -type SampleRowKeysResponse struct { - // Sorted streamed sequence of sample row keys in the table. The table might - // have contents before the first row key in the list and after the last one, - // but a key containing the empty string indicates "end of table" and will be - // the last response given, if present. - // Note that row keys in this list may not have ever been written to or read - // from, and users should therefore not make any assumptions about the row key - // structure that are specific to their use case. - RowKey []byte `protobuf:"bytes,1,opt,name=row_key,proto3" json:"row_key,omitempty"` - // Approximate total storage space used by all rows in the table which precede - // "row_key". Buffering the contents of all rows between two subsequent - // samples would require space roughly equal to the difference in their - // "offset_bytes" fields. 
- OffsetBytes int64 `protobuf:"varint,2,opt,name=offset_bytes" json:"offset_bytes,omitempty"` -} - -func (m *SampleRowKeysResponse) Reset() { *m = SampleRowKeysResponse{} } -func (m *SampleRowKeysResponse) String() string { return proto.CompactTextString(m) } -func (*SampleRowKeysResponse) ProtoMessage() {} - -// Request message for BigtableService.MutateRow. -type MutateRowRequest struct { - // The unique name of the table to which the mutation should be applied. - TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` - // The key of the row to which the mutation should be applied. - RowKey []byte `protobuf:"bytes,2,opt,name=row_key,proto3" json:"row_key,omitempty"` - // Changes to be atomically applied to the specified row. Entries are applied - // in order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry and at most 100000. - Mutations []*google_bigtable_v11.Mutation `protobuf:"bytes,3,rep,name=mutations" json:"mutations,omitempty"` -} - -func (m *MutateRowRequest) Reset() { *m = MutateRowRequest{} } -func (m *MutateRowRequest) String() string { return proto.CompactTextString(m) } -func (*MutateRowRequest) ProtoMessage() {} - -func (m *MutateRowRequest) GetMutations() []*google_bigtable_v11.Mutation { - if m != nil { - return m.Mutations - } - return nil -} - -// Request message for BigtableService.CheckAndMutateRowRequest -type CheckAndMutateRowRequest struct { - // The unique name of the table to which the conditional mutation should be - // applied. - TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` - // The key of the row to which the conditional mutation should be applied. - RowKey []byte `protobuf:"bytes,2,opt,name=row_key,proto3" json:"row_key,omitempty"` - // The filter to be applied to the contents of the specified row. Depending - // on whether or not any results are yielded, either "true_mutations" or - // "false_mutations" will be executed. If unset, checks that the row contains - // any values at all. - PredicateFilter *google_bigtable_v11.RowFilter `protobuf:"bytes,6,opt,name=predicate_filter" json:"predicate_filter,omitempty"` - // Changes to be atomically applied to the specified row if "predicate_filter" - // yields at least one cell when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "false_mutations" is empty, and at most - // 100000. - TrueMutations []*google_bigtable_v11.Mutation `protobuf:"bytes,4,rep,name=true_mutations" json:"true_mutations,omitempty"` - // Changes to be atomically applied to the specified row if "predicate_filter" - // does not yield any cells when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "true_mutations" is empty, and at most - // 100000. 
- FalseMutations []*google_bigtable_v11.Mutation `protobuf:"bytes,5,rep,name=false_mutations" json:"false_mutations,omitempty"` -} - -func (m *CheckAndMutateRowRequest) Reset() { *m = CheckAndMutateRowRequest{} } -func (m *CheckAndMutateRowRequest) String() string { return proto.CompactTextString(m) } -func (*CheckAndMutateRowRequest) ProtoMessage() {} - -func (m *CheckAndMutateRowRequest) GetPredicateFilter() *google_bigtable_v11.RowFilter { - if m != nil { - return m.PredicateFilter - } - return nil -} - -func (m *CheckAndMutateRowRequest) GetTrueMutations() []*google_bigtable_v11.Mutation { - if m != nil { - return m.TrueMutations - } - return nil -} - -func (m *CheckAndMutateRowRequest) GetFalseMutations() []*google_bigtable_v11.Mutation { - if m != nil { - return m.FalseMutations - } - return nil -} - -// Response message for BigtableService.CheckAndMutateRowRequest. -type CheckAndMutateRowResponse struct { - // Whether or not the request's "predicate_filter" yielded any results for - // the specified row. - PredicateMatched bool `protobuf:"varint,1,opt,name=predicate_matched" json:"predicate_matched,omitempty"` -} - -func (m *CheckAndMutateRowResponse) Reset() { *m = CheckAndMutateRowResponse{} } -func (m *CheckAndMutateRowResponse) String() string { return proto.CompactTextString(m) } -func (*CheckAndMutateRowResponse) ProtoMessage() {} - -// Request message for BigtableService.ReadModifyWriteRowRequest. -type ReadModifyWriteRowRequest struct { - // The unique name of the table to which the read/modify/write rules should be - // applied. - TableName string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` - // The key of the row to which the read/modify/write rules should be applied. - RowKey []byte `protobuf:"bytes,2,opt,name=row_key,proto3" json:"row_key,omitempty"` - // Rules specifying how the specified row's contents are to be transformed - // into writes. Entries are applied in order, meaning that earlier rules will - // affect the results of later ones. - Rules []*google_bigtable_v11.ReadModifyWriteRule `protobuf:"bytes,3,rep,name=rules" json:"rules,omitempty"` -} - -func (m *ReadModifyWriteRowRequest) Reset() { *m = ReadModifyWriteRowRequest{} } -func (m *ReadModifyWriteRowRequest) String() string { return proto.CompactTextString(m) } -func (*ReadModifyWriteRowRequest) ProtoMessage() {} - -func (m *ReadModifyWriteRowRequest) GetRules() []*google_bigtable_v11.ReadModifyWriteRule { - if m != nil { - return m.Rules - } - return nil -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto deleted file mode 100644 index 661310ad4a..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/service_proto/bigtable_service_messages.proto +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
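For orientation before the wire definitions below: the generated request types removed above are plain structs, and the only non-obvious part of using them is the oneof handling. A oneof field such as ReadRowsRequest.Target is populated by assigning one of the generated single-variant wrapper structs, and read back through the Get accessors. A minimal sketch, assuming the pre-removal vendored import path and a hypothetical table name (btpb is an illustrative alias, not a name from this repository):

package main

import (
	"fmt"

	// Pre-removal vendored package shown above; alias is hypothetical.
	btpb "google.golang.org/cloud/bigtable/internal/service_proto"
)

func main() {
	// Read a single row: wrap the key in the generated oneof variant type.
	read := &btpb.ReadRowsRequest{
		TableName: "projects/p/zones/z/clusters/c/tables/t", // hypothetical
		Target:    &btpb.ReadRowsRequest_RowKey{RowKey: []byte("row-1")},
	}

	// GetRowKey unwraps the oneof and returns nil if the row_range
	// variant (or no variant) was set instead.
	fmt.Printf("target row: %q\n", read.GetRowKey())
}

The wrapper-struct encoding is what the hand-written _OneofMarshaler and _OneofUnmarshaler functions above serialize: each variant carries its own field number and wire type, so exactly one branch of the oneof can be present on the wire.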
- -syntax = "proto3"; - -package google.bigtable.v1; - -import "google.golang.org/cloud/bigtable/internal/data_proto/bigtable_data.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableServiceMessagesProto"; -option java_package = "com.google.bigtable.v1"; - - -// Request message for BigtableServer.ReadRows. -message ReadRowsRequest { - // The unique name of the table from which to read. - string table_name = 1; - - // If neither row_key nor row_range is set, reads from all rows. - oneof target { - // The key of a single row from which to read. - bytes row_key = 2; - - // A range of rows from which to read. - RowRange row_range = 3; - } - - // The filter to apply to the contents of the specified row(s). If unset, - // reads the entire table. - RowFilter filter = 5; - - // By default, rows are read sequentially, producing results which are - // guaranteed to arrive in increasing row order. Setting - // "allow_row_interleaving" to true allows multiple rows to be interleaved in - // the response stream, which increases throughput but breaks this guarantee, - // and may force the client to use more memory to buffer partially-received - // rows. Cannot be set to true when specifying "num_rows_limit". - bool allow_row_interleaving = 6; - - // The read will terminate after committing to N rows' worth of results. The - // default (zero) is to return all results. - // Note that "allow_row_interleaving" cannot be set to true when this is set. - int64 num_rows_limit = 7; -} - -// Response message for BigtableService.ReadRows. -message ReadRowsResponse { - // Specifies a piece of a row's contents returned as part of the read - // response stream. - message Chunk { - oneof chunk { - // A subset of the data from a particular row. As long as no "reset_row" - // is received in between, multiple "row_contents" from the same row are - // from the same atomic view of that row, and will be received in the - // expected family/column/timestamp order. - Family row_contents = 1; - - // Indicates that the client should drop all previous chunks for - // "row_key", as it will be re-read from the beginning. - bool reset_row = 2; - - // Indicates that the client can safely process all previous chunks for - // "row_key", as its data has been fully read. - bool commit_row = 3; - } - } - - // The key of the row for which we're receiving data. - // Results will be received in increasing row key order, unless - // "allow_row_interleaving" was specified in the request. - bytes row_key = 1; - - // One or more chunks of the row specified by "row_key". - repeated Chunk chunks = 2; -} - -// Request message for BigtableService.SampleRowKeys. -message SampleRowKeysRequest { - // The unique name of the table from which to sample row keys. - string table_name = 1; -} - -// Response message for BigtableService.SampleRowKeys. -message SampleRowKeysResponse { - // Sorted streamed sequence of sample row keys in the table. The table might - // have contents before the first row key in the list and after the last one, - // but a key containing the empty string indicates "end of table" and will be - // the last response given, if present. - // Note that row keys in this list may not have ever been written to or read - // from, and users should therefore not make any assumptions about the row key - // structure that are specific to their use case. - bytes row_key = 1; - - // Approximate total storage space used by all rows in the table which precede - // "row_key". 
Buffering the contents of all rows between two subsequent - // samples would require space roughly equal to the difference in their - // "offset_bytes" fields. - int64 offset_bytes = 2; -} - -// Request message for BigtableService.MutateRow. -message MutateRowRequest { - // The unique name of the table to which the mutation should be applied. - string table_name = 1; - - // The key of the row to which the mutation should be applied. - bytes row_key = 2; - - // Changes to be atomically applied to the specified row. Entries are applied - // in order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry and at most 100000. - repeated Mutation mutations = 3; -} - -// Request message for BigtableService.CheckAndMutateRowRequest -message CheckAndMutateRowRequest { - // The unique name of the table to which the conditional mutation should be - // applied. - string table_name = 1; - - // The key of the row to which the conditional mutation should be applied. - bytes row_key = 2; - - // The filter to be applied to the contents of the specified row. Depending - // on whether or not any results are yielded, either "true_mutations" or - // "false_mutations" will be executed. If unset, checks that the row contains - // any values at all. - RowFilter predicate_filter = 6; - - // Changes to be atomically applied to the specified row if "predicate_filter" - // yields at least one cell when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "false_mutations" is empty, and at most - // 100000. - repeated Mutation true_mutations = 4; - - // Changes to be atomically applied to the specified row if "predicate_filter" - // does not yield any cells when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "true_mutations" is empty, and at most - // 100000. - repeated Mutation false_mutations = 5; -} - -// Response message for BigtableService.CheckAndMutateRowRequest. -message CheckAndMutateRowResponse { - // Whether or not the request's "predicate_filter" yielded any results for - // the specified row. - bool predicate_matched = 1; -} - -// Request message for BigtableService.ReadModifyWriteRowRequest. -message ReadModifyWriteRowRequest { - // The unique name of the table to which the read/modify/write rules should be - // applied. - string table_name = 1; - - // The key of the row to which the read/modify/write rules should be applied. - bytes row_key = 2; - - // Rules specifying how the specified row's contents are to be transformed - // into writes. Entries are applied in order, meaning that earlier rules will - // affect the results of later ones. - repeated ReadModifyWriteRule rules = 3; -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.pb.go deleted file mode 100644 index 42d8b7bb31..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.pb.go +++ /dev/null @@ -1,308 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto -// DO NOT EDIT! - -/* -Package google_bigtable_admin_table_v1 is a generated protocol buffer package. 
- -It is generated from these files: - google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto - -It has these top-level messages: - Table - ColumnFamily - GcRule -*/ -package google_bigtable_admin_table_v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "google.golang.org/cloud/bigtable/internal/duration_proto" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type Table_TimestampGranularity int32 - -const ( - Table_MILLIS Table_TimestampGranularity = 0 -) - -var Table_TimestampGranularity_name = map[int32]string{ - 0: "MILLIS", -} - -var Table_TimestampGranularity_value = map[string]int32{ - "MILLIS": 0, -} - -func (x Table_TimestampGranularity) String() string { - return proto.EnumName(Table_TimestampGranularity_name, int32(x)) -} - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. -type Table struct { - // A unique identifier of the form - // <cluster_name>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The column families configured for this table, mapped by column family id. - ColumnFamilies map[string]*ColumnFamily `protobuf:"bytes,3,rep,name=column_families" json:"column_families,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // Cannot be changed once the table is created. - Granularity Table_TimestampGranularity `protobuf:"varint,4,opt,name=granularity,enum=google.bigtable.admin.table.v1.Table_TimestampGranularity" json:"granularity,omitempty"` -} - -func (m *Table) Reset() { *m = Table{} } -func (m *Table) String() string { return proto.CompactTextString(m) } -func (*Table) ProtoMessage() {} - -func (m *Table) GetColumnFamilies() map[string]*ColumnFamily { - if m != nil { - return m.ColumnFamilies - } - return nil -} - -// A set of columns within a table which share a common configuration. -type ColumnFamily struct { - // A unique identifier of the form <table_name>/columnFamilies/[-_.a-zA-Z0-9]+ - // The last segment is the same as the "name" field in - // google.bigtable.v1.Family.
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Garbage collection expression specified by the following grammar: - // GC = EXPR - // | "" ; - // EXPR = EXPR, "||", EXPR (* lowest precedence *) - // | EXPR, "&&", EXPR - // | "(", EXPR, ")" (* highest precedence *) - // | PROP ; - // PROP = "version() >", NUM32 - // | "age() >", NUM64, [ UNIT ] ; - // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) - // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) - // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) - // GC expressions can be up to 500 characters in length - // - // The different types of PROP are defined as follows: - // version() - cell index, counting from most recent and starting at 1 - // age() - age of the cell (current time minus cell timestamp) - // - // Example: "version() > 3 || (age() > 3d && version() > 1)" - // drop cells beyond the most recent three, and drop cells older than three - // days unless they're the most recent cell in the row/column - // - // Garbage collection executes opportunistically in the background, and so - // it's possible for reads to return a cell even if it matches the active GC - // expression for its family. - GcExpression string `protobuf:"bytes,2,opt,name=gc_expression" json:"gc_expression,omitempty"` - // Garbage collection rule specified as a protobuf. - // Supersedes `gc_expression`. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule *GcRule `protobuf:"bytes,3,opt,name=gc_rule" json:"gc_rule,omitempty"` -} - -func (m *ColumnFamily) Reset() { *m = ColumnFamily{} } -func (m *ColumnFamily) String() string { return proto.CompactTextString(m) } -func (*ColumnFamily) ProtoMessage() {} - -func (m *ColumnFamily) GetGcRule() *GcRule { - if m != nil { - return m.GcRule - } - return nil -} - -// Rule for determining which cells to delete during garbage collection. 
-type GcRule struct { - // Types that are valid to be assigned to Rule: - // *GcRule_MaxNumVersions - // *GcRule_MaxAge - // *GcRule_Intersection_ - // *GcRule_Union_ - Rule isGcRule_Rule `protobuf_oneof:"rule"` -} - -func (m *GcRule) Reset() { *m = GcRule{} } -func (m *GcRule) String() string { return proto.CompactTextString(m) } -func (*GcRule) ProtoMessage() {} - -type isGcRule_Rule interface { - isGcRule_Rule() -} - -type GcRule_MaxNumVersions struct { - MaxNumVersions int32 `protobuf:"varint,1,opt,name=max_num_versions,oneof"` -} -type GcRule_MaxAge struct { - MaxAge *google_protobuf.Duration `protobuf:"bytes,2,opt,name=max_age,oneof"` -} -type GcRule_Intersection_ struct { - Intersection *GcRule_Intersection `protobuf:"bytes,3,opt,name=intersection,oneof"` -} -type GcRule_Union_ struct { - Union *GcRule_Union `protobuf:"bytes,4,opt,name=union,oneof"` -} - -func (*GcRule_MaxNumVersions) isGcRule_Rule() {} -func (*GcRule_MaxAge) isGcRule_Rule() {} -func (*GcRule_Intersection_) isGcRule_Rule() {} -func (*GcRule_Union_) isGcRule_Rule() {} - -func (m *GcRule) GetRule() isGcRule_Rule { - if m != nil { - return m.Rule - } - return nil -} - -func (m *GcRule) GetMaxNumVersions() int32 { - if x, ok := m.GetRule().(*GcRule_MaxNumVersions); ok { - return x.MaxNumVersions - } - return 0 -} - -func (m *GcRule) GetMaxAge() *google_protobuf.Duration { - if x, ok := m.GetRule().(*GcRule_MaxAge); ok { - return x.MaxAge - } - return nil -} - -func (m *GcRule) GetIntersection() *GcRule_Intersection { - if x, ok := m.GetRule().(*GcRule_Intersection_); ok { - return x.Intersection - } - return nil -} - -func (m *GcRule) GetUnion() *GcRule_Union { - if x, ok := m.GetRule().(*GcRule_Union_); ok { - return x.Union - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*GcRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _GcRule_OneofMarshaler, _GcRule_OneofUnmarshaler, []interface{}{ - (*GcRule_MaxNumVersions)(nil), - (*GcRule_MaxAge)(nil), - (*GcRule_Intersection_)(nil), - (*GcRule_Union_)(nil), - } -} - -func _GcRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*GcRule) - // rule - switch x := m.Rule.(type) { - case *GcRule_MaxNumVersions: - b.EncodeVarint(1<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.MaxNumVersions)) - case *GcRule_MaxAge: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.MaxAge); err != nil { - return err - } - case *GcRule_Intersection_: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Intersection); err != nil { - return err - } - case *GcRule_Union_: - b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Union); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("GcRule.Rule has unexpected type %T", x) - } - return nil -} - -func _GcRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*GcRule) - switch tag { - case 1: // rule.max_num_versions - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Rule = &GcRule_MaxNumVersions{int32(x)} - return true, err - case 2: // rule.max_age - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(google_protobuf.Duration) - err := b.DecodeMessage(msg) - m.Rule = &GcRule_MaxAge{msg} - return true, err - case 3: // rule.intersection - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(GcRule_Intersection) - err := b.DecodeMessage(msg) - m.Rule = &GcRule_Intersection_{msg} - return true, err - case 4: // rule.union - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(GcRule_Union) - err := b.DecodeMessage(msg) - m.Rule = &GcRule_Union_{msg} - return true, err - default: - return false, nil - } -} - -// A GcRule which deletes cells matching all of the given rules. -type GcRule_Intersection struct { - // Only delete cells which would be deleted by every element of `rules`. - Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` -} - -func (m *GcRule_Intersection) Reset() { *m = GcRule_Intersection{} } -func (m *GcRule_Intersection) String() string { return proto.CompactTextString(m) } -func (*GcRule_Intersection) ProtoMessage() {} - -func (m *GcRule_Intersection) GetRules() []*GcRule { - if m != nil { - return m.Rules - } - return nil -} - -// A GcRule which deletes cells matching any of the given rules. -type GcRule_Union struct { - // Delete cells which would be deleted by any element of `rules`. 
- Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` -} - -func (m *GcRule_Union) Reset() { *m = GcRule_Union{} } -func (m *GcRule_Union) String() string { return proto.CompactTextString(m) } -func (*GcRule_Union) ProtoMessage() {} - -func (m *GcRule_Union) GetRules() []*GcRule { - if m != nil { - return m.Rules - } - return nil -} - -func init() { - proto.RegisterEnum("google.bigtable.admin.table.v1.Table_TimestampGranularity", Table_TimestampGranularity_name, Table_TimestampGranularity_value) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto deleted file mode 100644 index a815152d16..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google.golang.org/cloud/bigtable/internal/duration_proto/duration.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableTableDataProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. -message Table { - enum TimestampGranularity { - MILLIS = 0; - } - - // A unique identifier of the form - // <cluster_name>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* - string name = 1; - - // If this Table is in the process of being created, the Operation used to - // track its progress. As long as this operation is present, the Table will - // not accept any Table Admin or Read/Write requests. - - // The column families configured for this table, mapped by column family id. - map<string, ColumnFamily> column_families = 3; - - // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // Cannot be changed once the table is created. - TimestampGranularity granularity = 4; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // A unique identifier of the form <table_name>/columnFamilies/[-_.a-zA-Z0-9]+ - // The last segment is the same as the "name" field in - // google.bigtable.v1.Family.
- string name = 1; - - // Garbage collection expression specified by the following grammar: - // GC = EXPR - // | "" ; - // EXPR = EXPR, "||", EXPR (* lowest precedence *) - // | EXPR, "&&", EXPR - // | "(", EXPR, ")" (* highest precedence *) - // | PROP ; - // PROP = "version() >", NUM32 - // | "age() >", NUM64, [ UNIT ] ; - // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) - // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) - // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) - // GC expressions can be up to 500 characters in length - // - // The different types of PROP are defined as follows: - // version() - cell index, counting from most recent and starting at 1 - // age() - age of the cell (current time minus cell timestamp) - // - // Example: "version() > 3 || (age() > 3d && version() > 1)" - // drop cells beyond the most recent three, and drop cells older than three - // days unless they're the most recent cell in the row/column - // - // Garbage collection executes opportunistically in the background, and so - // it's possible for reads to return a cell even if it matches the active GC - // expression for its family. - string gc_expression = 2; - - // Garbage collection rule specified as a protobuf. - // Supersedes `gc_expression`. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule gc_rule = 3; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. - repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. - message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. - Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.pb.go deleted file mode 100644 index 756f188892..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.pb.go +++ /dev/null @@ -1,293 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto -// DO NOT EDIT! 
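Stepping back to the table-data definitions that were just deleted: the GC expression grammar in the ColumnFamily comments and the structured GcRule message express the same policies. As a hedged illustration (the tdpb and durpb aliases are hypothetical; the import paths refer to the pre-removal vendored packages shown above), the documented example expression "version() > 3 || (age() > 3d && version() > 1)" maps onto the generated Go types as a union of a max-versions rule and an intersection:

package main

import (
	"fmt"

	proto "github.com/golang/protobuf/proto"
	durpb "google.golang.org/cloud/bigtable/internal/duration_proto"
	tdpb "google.golang.org/cloud/bigtable/internal/table_data_proto"
)

// gcExample builds the structured form of
// "version() > 3 || (age() > 3d && version() > 1)".
func gcExample() *tdpb.GcRule {
	// Drop cells beyond the most recent three...
	keepThree := &tdpb.GcRule{
		Rule: &tdpb.GcRule_MaxNumVersions{MaxNumVersions: 3},
	}
	// ...and drop cells older than three days unless they are the most
	// recent cell in the row/column.
	oldAndStale := &tdpb.GcRule{
		Rule: &tdpb.GcRule_Intersection_{
			Intersection: &tdpb.GcRule_Intersection{
				Rules: []*tdpb.GcRule{
					{Rule: &tdpb.GcRule_MaxAge{
						MaxAge: &durpb.Duration{Seconds: 3 * 24 * 60 * 60}, // "3d"
					}},
					{Rule: &tdpb.GcRule_MaxNumVersions{MaxNumVersions: 1}},
				},
			},
		},
	}
	return &tdpb.GcRule{
		Rule: &tdpb.GcRule_Union_{
			Union: &tdpb.GcRule_Union{Rules: []*tdpb.GcRule{keepThree, oldAndStale}},
		},
	}
}

func main() {
	fmt.Println(proto.CompactTextString(gcExample()))
}

The Union/Intersection nesting mirrors the || and && structure of the expression grammar, with max_num_versions standing in for the version() bounds and max_age for the age() bound.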
- -package google_bigtable_admin_table_v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_bigtable_admin_table_v11 "google.golang.org/cloud/bigtable/internal/table_data_proto" -import google_protobuf1 "google.golang.org/cloud/bigtable/internal/empty" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// Client API for BigtableTableService service - -type BigtableTableServiceClient interface { - // Creates a new table, to be served from a specified cluster. - // The table can be created with a full set of initial column families, - // specified in the request. - CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) - // Lists the names of all tables served from a specified cluster. - ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) - // Gets the schema of the specified table, including its column families. - GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) - // Permanently deletes a specified table and all of its data. - DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) - // Changes the name of a specified table. - // Cannot be used to move tables between clusters, zones, or projects. - RenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) - // Creates a new column family within a specified table. - CreateColumnFamily(ctx context.Context, in *CreateColumnFamilyRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) - // Changes the configuration of a specified column family. - UpdateColumnFamily(ctx context.Context, in *google_bigtable_admin_table_v11.ColumnFamily, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) - // Permanently deletes a specified column family and all of its data. - DeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) -} - -type bigtableTableServiceClient struct { - cc *grpc.ClientConn -} - -func NewBigtableTableServiceClient(cc *grpc.ClientConn) BigtableTableServiceClient { - return &bigtableTableServiceClient{cc} -} - -func (c *bigtableTableServiceClient) CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) { - out := new(google_bigtable_admin_table_v11.Table) - err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/CreateTable", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableTableServiceClient) ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) { - out := new(ListTablesResponse) - err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/ListTables", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableTableServiceClient) GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.Table, error) { - out := new(google_bigtable_admin_table_v11.Table) - err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/GetTable", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableTableServiceClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { - out := new(google_protobuf1.Empty) - err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/DeleteTable", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableTableServiceClient) RenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { - out := new(google_protobuf1.Empty) - err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/RenameTable", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableTableServiceClient) CreateColumnFamily(ctx context.Context, in *CreateColumnFamilyRequest, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) { - out := new(google_bigtable_admin_table_v11.ColumnFamily) - err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/CreateColumnFamily", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableTableServiceClient) UpdateColumnFamily(ctx context.Context, in *google_bigtable_admin_table_v11.ColumnFamily, opts ...grpc.CallOption) (*google_bigtable_admin_table_v11.ColumnFamily, error) { - out := new(google_bigtable_admin_table_v11.ColumnFamily) - err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/UpdateColumnFamily", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *bigtableTableServiceClient) DeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { - out := new(google_protobuf1.Empty) - err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/DeleteColumnFamily", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for BigtableTableService service - -type BigtableTableServiceServer interface { - // Creates a new table, to be served from a specified cluster. - // The table can be created with a full set of initial column families, - // specified in the request. - CreateTable(context.Context, *CreateTableRequest) (*google_bigtable_admin_table_v11.Table, error) - // Lists the names of all tables served from a specified cluster. - ListTables(context.Context, *ListTablesRequest) (*ListTablesResponse, error) - // Gets the schema of the specified table, including its column families. - GetTable(context.Context, *GetTableRequest) (*google_bigtable_admin_table_v11.Table, error) - // Permanently deletes a specified table and all of its data. - DeleteTable(context.Context, *DeleteTableRequest) (*google_protobuf1.Empty, error) - // Changes the name of a specified table. - // Cannot be used to move tables between clusters, zones, or projects. 
- RenameTable(context.Context, *RenameTableRequest) (*google_protobuf1.Empty, error) - // Creates a new column family within a specified table. - CreateColumnFamily(context.Context, *CreateColumnFamilyRequest) (*google_bigtable_admin_table_v11.ColumnFamily, error) - // Changes the configuration of a specified column family. - UpdateColumnFamily(context.Context, *google_bigtable_admin_table_v11.ColumnFamily) (*google_bigtable_admin_table_v11.ColumnFamily, error) - // Permanently deletes a specified column family and all of its data. - DeleteColumnFamily(context.Context, *DeleteColumnFamilyRequest) (*google_protobuf1.Empty, error) -} - -func RegisterBigtableTableServiceServer(s *grpc.Server, srv BigtableTableServiceServer) { - s.RegisterService(&_BigtableTableService_serviceDesc, srv) -} - -func _BigtableTableService_CreateTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(CreateTableRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableTableServiceServer).CreateTable(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableTableService_ListTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(ListTablesRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableTableServiceServer).ListTables(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableTableService_GetTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(GetTableRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableTableServiceServer).GetTable(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableTableService_DeleteTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(DeleteTableRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableTableServiceServer).DeleteTable(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableTableService_RenameTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(RenameTableRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableTableServiceServer).RenameTable(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableTableService_CreateColumnFamily_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(CreateColumnFamilyRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableTableServiceServer).CreateColumnFamily(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableTableService_UpdateColumnFamily_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(google_bigtable_admin_table_v11.ColumnFamily) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(BigtableTableServiceServer).UpdateColumnFamily(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -func _BigtableTableService_DeleteColumnFamily_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(DeleteColumnFamilyRequest) - if err := dec(in); 
err != nil { - return nil, err - } - out, err := srv.(BigtableTableServiceServer).DeleteColumnFamily(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - -var _BigtableTableService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.bigtable.admin.table.v1.BigtableTableService", - HandlerType: (*BigtableTableServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateTable", - Handler: _BigtableTableService_CreateTable_Handler, - }, - { - MethodName: "ListTables", - Handler: _BigtableTableService_ListTables_Handler, - }, - { - MethodName: "GetTable", - Handler: _BigtableTableService_GetTable_Handler, - }, - { - MethodName: "DeleteTable", - Handler: _BigtableTableService_DeleteTable_Handler, - }, - { - MethodName: "RenameTable", - Handler: _BigtableTableService_RenameTable_Handler, - }, - { - MethodName: "CreateColumnFamily", - Handler: _BigtableTableService_CreateColumnFamily_Handler, - }, - { - MethodName: "UpdateColumnFamily", - Handler: _BigtableTableService_UpdateColumnFamily_Handler, - }, - { - MethodName: "DeleteColumnFamily", - Handler: _BigtableTableService_DeleteColumnFamily_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto deleted file mode 100644 index 1ccdfa24a9..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto"; -import "google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto"; -import "google.golang.org/cloud/bigtable/internal/empty/empty.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServicesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// Provides access to the table schemas only, not the data stored within the tables. -service BigtableTableService { - // Creates a new table, to be served from a specified cluster. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - } - - // Lists the names of all tables served from a specified cluster. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - } - - // Gets the schema of the specified table, including its column families. - rpc GetTable(GetTableRequest) returns (Table) { - } - - // Permanently deletes a specified table and all of its data. 
- rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - } - - // Changes the name of a specified table. - // Cannot be used to move tables between clusters, zones, or projects. - rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) { - } - - // Creates a new column family within a specified table. - rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) { - } - - // Changes the configuration of a specified column family. - rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) { - } - - // Permanently deletes a specified column family and all of its data. - rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) { - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.pb.go deleted file mode 100644 index 268f4ea0a5..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.pb.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto -// DO NOT EDIT! - -/* -Package google_bigtable_admin_table_v1 is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto - google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service.proto - -It has these top-level messages: - CreateTableRequest - ListTablesRequest - ListTablesResponse - GetTableRequest - DeleteTableRequest - RenameTableRequest - CreateColumnFamilyRequest - DeleteColumnFamilyRequest -*/ -package google_bigtable_admin_table_v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_bigtable_admin_table_v11 "google.golang.org/cloud/bigtable/internal/table_data_proto" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type CreateTableRequest struct { - // The unique name of the cluster in which to create the new table. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The name by which the new table should be referred to within the cluster, - // e.g. "foobar" rather than "/tables/foobar". - TableId string `protobuf:"bytes,2,opt,name=table_id" json:"table_id,omitempty"` - // The Table to create. The `name` field of the Table and all of its - // ColumnFamilies must be left blank, and will be populated in the response. - Table *google_bigtable_admin_table_v11.Table `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"` - // The optional list of row keys that will be used to initially split the - // table into several tablets (Tablets are similar to HBase regions). - // Given two split keys, "s1" and "s2", three tablets will be created, - // spanning the key ranges: [, s1), [s1, s2), [s2, ). - // - // Example: - // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", - // "other", "zz"] - // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] - // * Key assignment: - // - Tablet 1 [, apple) => {"a"}. - // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. - // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. 
- // - Tablet 4 [customer_2, other) => {"customer_2"}. - // - Tablet 5 [other, ) => {"other", "zz"}. - InitialSplitKeys []string `protobuf:"bytes,4,rep,name=initial_split_keys" json:"initial_split_keys,omitempty"` -} - -func (m *CreateTableRequest) Reset() { *m = CreateTableRequest{} } -func (m *CreateTableRequest) String() string { return proto.CompactTextString(m) } -func (*CreateTableRequest) ProtoMessage() {} - -func (m *CreateTableRequest) GetTable() *google_bigtable_admin_table_v11.Table { - if m != nil { - return m.Table - } - return nil -} - -type ListTablesRequest struct { - // The unique name of the cluster for which tables should be listed. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *ListTablesRequest) Reset() { *m = ListTablesRequest{} } -func (m *ListTablesRequest) String() string { return proto.CompactTextString(m) } -func (*ListTablesRequest) ProtoMessage() {} - -type ListTablesResponse struct { - // The tables present in the requested cluster. - // At present, only the names of the tables are populated. - Tables []*google_bigtable_admin_table_v11.Table `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` -} - -func (m *ListTablesResponse) Reset() { *m = ListTablesResponse{} } -func (m *ListTablesResponse) String() string { return proto.CompactTextString(m) } -func (*ListTablesResponse) ProtoMessage() {} - -func (m *ListTablesResponse) GetTables() []*google_bigtable_admin_table_v11.Table { - if m != nil { - return m.Tables - } - return nil -} - -type GetTableRequest struct { - // The unique name of the requested table. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *GetTableRequest) Reset() { *m = GetTableRequest{} } -func (m *GetTableRequest) String() string { return proto.CompactTextString(m) } -func (*GetTableRequest) ProtoMessage() {} - -type DeleteTableRequest struct { - // The unique name of the table to be deleted. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *DeleteTableRequest) Reset() { *m = DeleteTableRequest{} } -func (m *DeleteTableRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteTableRequest) ProtoMessage() {} - -type RenameTableRequest struct { - // The current unique name of the table. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The new name by which the table should be referred to within its containing - // cluster, e.g. "foobar" rather than "/tables/foobar". - NewId string `protobuf:"bytes,2,opt,name=new_id" json:"new_id,omitempty"` -} - -func (m *RenameTableRequest) Reset() { *m = RenameTableRequest{} } -func (m *RenameTableRequest) String() string { return proto.CompactTextString(m) } -func (*RenameTableRequest) ProtoMessage() {} - -type CreateColumnFamilyRequest struct { - // The unique name of the table in which to create the new column family. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The name by which the new column family should be referred to within the - // table, e.g. "foobar" rather than "/columnFamilies/foobar". - ColumnFamilyId string `protobuf:"bytes,2,opt,name=column_family_id" json:"column_family_id,omitempty"` - // The column family to create. The `name` field must be left blank. 
- ColumnFamily *google_bigtable_admin_table_v11.ColumnFamily `protobuf:"bytes,3,opt,name=column_family" json:"column_family,omitempty"` -} - -func (m *CreateColumnFamilyRequest) Reset() { *m = CreateColumnFamilyRequest{} } -func (m *CreateColumnFamilyRequest) String() string { return proto.CompactTextString(m) } -func (*CreateColumnFamilyRequest) ProtoMessage() {} - -func (m *CreateColumnFamilyRequest) GetColumnFamily() *google_bigtable_admin_table_v11.ColumnFamily { - if m != nil { - return m.ColumnFamily - } - return nil -} - -type DeleteColumnFamilyRequest struct { - // The unique name of the column family to be deleted. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *DeleteColumnFamilyRequest) Reset() { *m = DeleteColumnFamilyRequest{} } -func (m *DeleteColumnFamilyRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteColumnFamilyRequest) ProtoMessage() {} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto b/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto deleted file mode 100644 index 9fa1b6a1c4..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/table_service_proto/bigtable_table_service_messages.proto +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google.golang.org/cloud/bigtable/internal/table_data_proto/bigtable_table_data.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -message CreateTableRequest { - // The unique name of the cluster in which to create the new table. - string name = 1; - - // The name by which the new table should be referred to within the cluster, - // e.g. "foobar" rather than "/tables/foobar". - string table_id = 2; - - // The Table to create. The `name` field of the Table and all of its - // ColumnFamilies must be left blank, and will be populated in the response. - Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (Tablets are similar to HBase regions). - // Given two split keys, "s1" and "s2", three tablets will be created, - // spanning the key ranges: [, s1), [s1, s2), [s2, ). - // - // Example: - // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", - // "other", "zz"] - // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] - // * Key assignment: - // - Tablet 1 [, apple) => {"a"}. - // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. - // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. - // - Tablet 4 [customer_2, other) => {"customer_2"}. - // - Tablet 5 [other, ) => {"other", "zz"}. 
- repeated string initial_split_keys = 4; -} - -message ListTablesRequest { - // The unique name of the cluster for which tables should be listed. - string name = 1; -} - -message ListTablesResponse { - // The tables present in the requested cluster. - // At present, only the names of the tables are populated. - repeated Table tables = 1; -} - -message GetTableRequest { - // The unique name of the requested table. - string name = 1; -} - -message DeleteTableRequest { - // The unique name of the table to be deleted. - string name = 1; -} - -message RenameTableRequest { - // The current unique name of the table. - string name = 1; - - // The new name by which the table should be referred to within its containing - // cluster, e.g. "foobar" rather than "/tables/foobar". - string new_id = 2; -} - -message CreateColumnFamilyRequest { - // The unique name of the table in which to create the new column family. - string name = 1; - - // The name by which the new column family should be referred to within the - // table, e.g. "foobar" rather than "/columnFamilies/foobar". - string column_family_id = 2; - - // The column family to create. The `name` field must be left blank. - ColumnFamily column_family = 3; -} - -message DeleteColumnFamilyRequest { - // The unique name of the column family to be deleted. - string name = 1; -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/cloud.go b/Godeps/_workspace/src/google.golang.org/cloud/cloud.go deleted file mode 100644 index 96d36baf2c..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/cloud.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cloud contains Google Cloud Platform APIs related types -// and common functions. -package cloud // import "google.golang.org/cloud" - -import ( - "net/http" - - "golang.org/x/net/context" - "google.golang.org/cloud/internal" -) - -// NewContext returns a new context that uses the provided http.Client. -// Provided http.Client is responsible to authorize and authenticate -// the requests made to the Google Cloud APIs. -// It mutates the client's original Transport to append the cloud -// package's user-agent to the outgoing requests. -// You can obtain the project ID from the Google Developers Console, -// https://console.developers.google.com. -func NewContext(projID string, c *http.Client) context.Context { - if c == nil { - panic("invalid nil *http.Client passed to NewContext") - } - return WithContext(context.Background(), projID, c) -} - -// WithContext returns a new context in a similar way NewContext does, -// but initiates the new context with the specified parent. -func WithContext(parent context.Context, projID string, c *http.Client) context.Context { - // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. - // Do User-Agent some other way. 
- if _, ok := c.Transport.(*internal.Transport); !ok { - c.Transport = &internal.Transport{Base: c.Transport} - } - return internal.WithContext(parent, projID, c) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/cloud_test.go b/Godeps/_workspace/src/google.golang.org/cloud/cloud_test.go deleted file mode 100644 index 2561fb030e..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/cloud_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cloud - -import ( - "net/http" - "testing" - - "google.golang.org/cloud/internal" -) - -func TestClientTransportMutate(t *testing.T) { - c := &http.Client{Transport: http.DefaultTransport} - NewContext("project-id", c) - NewContext("project-id", c) - - tr, ok := c.Transport.(*internal.Transport) - if !ok { - t.Errorf("Transport is expected to be an internal.Transport, found to be a %T", c.Transport) - } - if _, ok := tr.Base.(*internal.Transport); ok { - t.Errorf("Transport's Base shouldn't have been an internal.Transport, found to be a %T", tr.Base) - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go index 972972dd76..3dd684e088 100644 --- a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go +++ b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go @@ -17,7 +17,7 @@ // // This package is a wrapper around the GCE metadata service, // as documented at https://developers.google.com/compute/docs/metadata. -package metadata // import "google.golang.org/cloud/compute/metadata" +package metadata import ( "encoding/json" diff --git a/Godeps/_workspace/src/google.golang.org/cloud/container/container.go b/Godeps/_workspace/src/google.golang.org/cloud/container/container.go deleted file mode 100644 index b57bd95158..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/container/container.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package container contains a Google Container Engine client. 
-// -// For more information about the API, -// see https://cloud.google.com/container-engine/docs -package container // import "google.golang.org/cloud/container" - -import ( - "errors" - "net/http" - "time" - - "golang.org/x/net/context" - raw "google.golang.org/api/container/v1" - "google.golang.org/cloud/internal" -) - -type Type string - -var ( - TypeCreate Type = Type("createCluster") - TypeDelete Type = Type("deleteCluster") -) - -type Status string - -var ( - Done = Status("done") - Pending = Status("pending") - Running = Status("running") - Error = Status("error") - Provisioning = Status("provisioning") - Stopping = Status("stopping") -) - -// Resource is a Google Container Engine cluster resource. -type Resource struct { - // Name is the name of this cluster. The name must be unique - // within this project and zone, and can be up to 40 characters. - Name string - - // Description is the description of the cluster. Optional. - Description string - - // Zone is the Google Compute Engine zone in which the cluster resides. - Zone string - - // Status is the current status of the cluster. It could either be - // StatusError, StatusProvisioning, StatusRunning or StatusStopping. - Status Status - - // Num is the number of the nodes in this cluster resource. - Num int64 - - // APIVersion is the version of the Kubernetes master and kubelets running - // in this cluster. Allowed value is 0.4.2, or leave blank to - // pick up the latest stable release. - APIVersion string - - // Endpoint is the IP address of this cluster's Kubernetes master. - // The endpoint can be accessed at https://username:password@endpoint/. - // See Username and Password fields for the username and password information. - Endpoint string - - // Username is the username to use when accessing the Kubernetes master endpoint. - Username string - - // Password is the password to use when accessing the Kubernetes master endpoint. - Password string - - // ContainerIPv4CIDR is the IP addresses of the container pods in - // this cluster, in CIDR notation (e.g. 1.2.3.4/29). - ContainerIPv4CIDR string - - // ServicesIPv4CIDR is the IP addresses of the Kubernetes services in this - // cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are - // always in the 10.0.0.0/16 range. - ServicesIPv4CIDR string - - // MachineType is a Google Compute Engine machine type (e.g. n1-standard-1). - // If none set, the default type is used while creating a new cluster. - MachineType string - - // This field is ignored. It was removed from the underlying container API in v1. - SourceImage string - - // Created is the creation time of this cluster. - Created time.Time -} - -func resourceFromRaw(c *raw.Cluster) *Resource { - if c == nil { - return nil - } - r := &Resource{ - Name: c.Name, - Description: c.Description, - Zone: c.Zone, - Status: Status(c.Status), - Num: c.InitialNodeCount, - APIVersion: c.InitialClusterVersion, - Endpoint: c.Endpoint, - Username: c.MasterAuth.Username, - Password: c.MasterAuth.Password, - ContainerIPv4CIDR: c.ClusterIpv4Cidr, - ServicesIPv4CIDR: c.ServicesIpv4Cidr, - MachineType: c.NodeConfig.MachineType, - } - r.Created, _ = time.Parse(time.RFC3339, c.CreateTime) - return r -} - -func resourcesFromRaw(c []*raw.Cluster) []*Resource { - r := make([]*Resource, len(c)) - for i, val := range c { - r[i] = resourceFromRaw(val) - } - return r -} - -// Op represents a Google Container Engine API operation. -type Op struct { - // Name is the name of the operation. 
- Name string - - // Zone is the Google Compute Engine zone. - Zone string - - // This field is ignored. It was removed from the underlying container API in v1. - TargetURL string - - // Type is the operation type. It could be either be TypeCreate or TypeDelete. - Type Type - - // Status is the current status of this operation. It could be either - // OpDone or OpPending. - Status Status -} - -func opFromRaw(o *raw.Operation) *Op { - if o == nil { - return nil - } - return &Op{ - Name: o.Name, - Zone: o.Zone, - Type: Type(o.OperationType), - Status: Status(o.Status), - } -} - -func opsFromRaw(o []*raw.Operation) []*Op { - ops := make([]*Op, len(o)) - for i, val := range o { - ops[i] = opFromRaw(val) - } - return ops -} - -// Clusters returns a list of cluster resources from the specified zone. -// If no zone is specified, it returns all clusters under the user project. -func Clusters(ctx context.Context, zone string) ([]*Resource, error) { - s := rawService(ctx) - if zone == "" { - resp, err := s.Projects.Zones.Clusters.List(internal.ProjID(ctx), "-").Do() - if err != nil { - return nil, err - } - return resourcesFromRaw(resp.Clusters), nil - } - resp, err := s.Projects.Zones.Clusters.List(internal.ProjID(ctx), zone).Do() - if err != nil { - return nil, err - } - return resourcesFromRaw(resp.Clusters), nil -} - -// Cluster returns metadata about the specified cluster. -func Cluster(ctx context.Context, zone, name string) (*Resource, error) { - s := rawService(ctx) - resp, err := s.Projects.Zones.Clusters.Get(internal.ProjID(ctx), zone, name).Do() - if err != nil { - return nil, err - } - return resourceFromRaw(resp), nil -} - -// CreateCluster creates a new cluster with the provided metadata -// in the specified zone. -func CreateCluster(ctx context.Context, zone string, resource *Resource) (*Resource, error) { - panic("not implemented") -} - -// DeleteCluster deletes a cluster. -func DeleteCluster(ctx context.Context, zone, name string) error { - s := rawService(ctx) - _, err := s.Projects.Zones.Clusters.Delete(internal.ProjID(ctx), zone, name).Do() - return err -} - -// Operations returns a list of operations from the specified zone. -// If no zone is specified, it looks up for all of the operations -// that are running under the user's project. -func Operations(ctx context.Context, zone string) ([]*Op, error) { - s := rawService(ctx) - if zone == "" { - resp, err := s.Projects.Zones.Operations.List(internal.ProjID(ctx), "-").Do() - if err != nil { - return nil, err - } - return opsFromRaw(resp.Operations), nil - } - resp, err := s.Projects.Zones.Operations.List(internal.ProjID(ctx), zone).Do() - if err != nil { - return nil, err - } - return opsFromRaw(resp.Operations), nil -} - -// Operation returns an operation. 
-func Operation(ctx context.Context, zone, name string) (*Op, error) { - s := rawService(ctx) - resp, err := s.Projects.Zones.Operations.Get(internal.ProjID(ctx), zone, name).Do() - if err != nil { - return nil, err - } - if resp.StatusMessage != "" { - return nil, errors.New(resp.StatusMessage) - } - return opFromRaw(resp), nil -} - -func rawService(ctx context.Context) *raw.Service { - return internal.Service(ctx, "container", func(hc *http.Client) interface{} { - svc, _ := raw.New(hc) - return svc - }).(*raw.Service) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/datastore.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/datastore.go deleted file mode 100644 index 35e704b61d..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/datastore.go +++ /dev/null @@ -1,496 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package datastore contains a Google Cloud Datastore client. -// -// This package is experimental and may make backwards-incompatible changes. -package datastore // import "google.golang.org/cloud/datastore" - -import ( - "errors" - "fmt" - "reflect" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/cloud" - pb "google.golang.org/cloud/internal/datastore" - "google.golang.org/cloud/internal/transport" -) - -const prodAddr = "https://www.googleapis.com/datastore/v1beta2/datasets/" - -const userAgent = "gcloud-golang-datastore/20150727" - -const ( - // ScopeDatastore grants permissions to view and/or manage datastore entities - ScopeDatastore = "https://www.googleapis.com/auth/datastore" - - // ScopeUserEmail grants permission to view the user's email address. - // It is required to access the datastore. - ScopeUserEmail = "https://www.googleapis.com/auth/userinfo.email" -) - -// protoClient is an interface for *transport.ProtoClient to support injecting -// fake clients in tests. -type protoClient interface { - Call(context.Context, string, proto.Message, proto.Message) error -} - -// Client is a client for reading and writing data in a datastore dataset. -type Client struct { - client protoClient - endpoint string - dataset string // Called dataset by the datastore API, synonym for project ID. -} - -// NewClient creates a new Client for a given dataset. -func NewClient(ctx context.Context, projectID string, opts ...cloud.ClientOption) (*Client, error) { - o := []cloud.ClientOption{ - cloud.WithEndpoint(prodAddr), - cloud.WithScopes(ScopeDatastore, ScopeUserEmail), - cloud.WithUserAgent(userAgent), - } - o = append(o, opts...) - client, err := transport.NewProtoClient(ctx, o...) - if err != nil { - return nil, fmt.Errorf("dialing: %v", err) - } - return &Client{ - client: client, - dataset: projectID, - }, nil - -} - -var ( - // ErrInvalidEntityType is returned when functions like Get or Next are - // passed a dst or src argument of invalid type. 
- ErrInvalidEntityType = errors.New("datastore: invalid entity type") - // ErrInvalidKey is returned when an invalid key is presented. - ErrInvalidKey = errors.New("datastore: invalid key") - // ErrNoSuchEntity is returned when no entity was found for a given key. - ErrNoSuchEntity = errors.New("datastore: no such entity") -) - -type multiArgType int - -const ( - multiArgTypeInvalid multiArgType = iota - multiArgTypePropertyLoadSaver - multiArgTypeStruct - multiArgTypeStructPtr - multiArgTypeInterface -) - -// nsKey is the type of the context.Context key to store the datastore -// namespace. -type nsKey struct{} - -// WithNamespace returns a new context that limits the scope its parent -// context with a Datastore namespace. -func WithNamespace(parent context.Context, namespace string) context.Context { - return context.WithValue(parent, nsKey{}, namespace) -} - -// ctxNamespace returns the active namespace for a context. -// It defaults to "" if no namespace was specified. -func ctxNamespace(ctx context.Context) string { - v, _ := ctx.Value(nsKey{}).(string) - return v -} - -// ErrFieldMismatch is returned when a field is to be loaded into a different -// type than the one it was stored from, or when a field is missing or -// unexported in the destination struct. -// StructType is the type of the struct pointed to by the destination argument -// passed to Get or to Iterator.Next. -type ErrFieldMismatch struct { - StructType reflect.Type - FieldName string - Reason string -} - -func (e *ErrFieldMismatch) Error() string { - return fmt.Sprintf("datastore: cannot load field %q into a %q: %s", - e.FieldName, e.StructType, e.Reason) -} - -func (c *Client) call(ctx context.Context, method string, req, resp proto.Message) error { - return c.client.Call(ctx, c.dataset+"/"+method, req, resp) -} - -func keyToProto(k *Key) *pb.Key { - if k == nil { - return nil - } - - // TODO(jbd): Eliminate unrequired allocations. - path := []*pb.Key_PathElement(nil) - for { - el := &pb.Key_PathElement{ - Kind: proto.String(k.kind), - } - if k.id != 0 { - el.Id = proto.Int64(k.id) - } - if k.name != "" { - el.Name = proto.String(k.name) - } - path = append([]*pb.Key_PathElement{el}, path...) - if k.parent == nil { - break - } - k = k.parent - } - key := &pb.Key{ - PathElement: path, - } - if k.namespace != "" { - key.PartitionId = &pb.PartitionId{ - Namespace: proto.String(k.namespace), - } - } - return key -} - -func protoToKey(p *pb.Key) *Key { - keys := make([]*Key, len(p.GetPathElement())) - for i, el := range p.GetPathElement() { - keys[i] = &Key{ - namespace: p.GetPartitionId().GetNamespace(), - kind: el.GetKind(), - id: el.GetId(), - name: el.GetName(), - } - } - for i := 0; i < len(keys)-1; i++ { - keys[i+1].parent = keys[i] - } - return keys[len(keys)-1] -} - -// multiKeyToProto is a batch version of keyToProto. -func multiKeyToProto(keys []*Key) []*pb.Key { - ret := make([]*pb.Key, len(keys)) - for i, k := range keys { - ret[i] = keyToProto(k) - } - return ret -} - -// multiKeyToProto is a batch version of keyToProto. -func multiProtoToKey(keys []*pb.Key) []*Key { - ret := make([]*Key, len(keys)) - for i, k := range keys { - ret[i] = protoToKey(k) - } - return ret -} - -// multiValid is a batch version of Key.valid. It returns an error, not a -// []bool. 
-func multiValid(key []*Key) error { - invalid := false - for _, k := range key { - if !k.valid() { - invalid = true - break - } - } - if !invalid { - return nil - } - err := make(MultiError, len(key)) - for i, k := range key { - if !k.valid() { - err[i] = ErrInvalidKey - } - } - return err -} - -// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct -// type S, for some interface type I, or some non-interface non-pointer type P -// such that P or *P implements PropertyLoadSaver. -// -// It returns what category the slice's elements are, and the reflect.Type -// that represents S, I or P. -// -// As a special case, PropertyList is an invalid type for v. -func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { - if v.Kind() != reflect.Slice { - return multiArgTypeInvalid, nil - } - if v.Type() == typeOfPropertyList { - return multiArgTypeInvalid, nil - } - elemType = v.Type().Elem() - if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) { - return multiArgTypePropertyLoadSaver, elemType - } - switch elemType.Kind() { - case reflect.Struct: - return multiArgTypeStruct, elemType - case reflect.Interface: - return multiArgTypeInterface, elemType - case reflect.Ptr: - elemType = elemType.Elem() - if elemType.Kind() == reflect.Struct { - return multiArgTypeStructPtr, elemType - } - } - return multiArgTypeInvalid, nil -} - -// Get loads the entity stored for key into dst, which must be a struct pointer -// or implement PropertyLoadSaver. If there is no such entity for the key, Get -// returns ErrNoSuchEntity. -// -// The values of dst's unmatched struct fields are not modified, and matching -// slice-typed fields are not reset before appending to them. In particular, it -// is recommended to pass a pointer to a zero valued struct on each Get call. -// -// ErrFieldMismatch is returned when a field is to be loaded into a different -// type than the one it was stored from, or when a field is missing or -// unexported in the destination struct. ErrFieldMismatch is only returned if -// dst is a struct pointer. -func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error { - err := c.get(ctx, []*Key{key}, []interface{}{dst}, nil) - if me, ok := err.(MultiError); ok { - return me[0] - } - return err -} - -// GetMulti is a batch version of Get. -// -// dst must be a []S, []*S, []I or []P, for some struct type S, some interface -// type I, or some non-interface non-pointer type P such that P or *P -// implements PropertyLoadSaver. If an []I, each element must be a valid dst -// for Get: it must be a struct pointer or implement PropertyLoadSaver. -// -// As a special case, PropertyList is an invalid type for dst, even though a -// PropertyList is a slice of structs. It is treated as invalid to avoid being -// mistakenly passed when []PropertyList was intended. 
-func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) error { - return c.get(ctx, keys, dst, nil) -} - -func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error { - v := reflect.ValueOf(dst) - multiArgType, _ := checkMultiArg(v) - - // Sanity checks - if multiArgType == multiArgTypeInvalid { - return errors.New("datastore: dst has invalid type") - } - if len(keys) != v.Len() { - return errors.New("datastore: keys and dst slices have different length") - } - if len(keys) == 0 { - return nil - } - - // Go through keys, validate them, serialize then, and create a dict mapping them to their index - multiErr, any := make(MultiError, len(keys)), false - keyMap := make(map[string]int) - pbKeys := make([]*pb.Key, len(keys)) - for i, k := range keys { - if !k.valid() { - multiErr[i] = ErrInvalidKey - any = true - } else { - keyMap[k.String()] = i - pbKeys[i] = keyToProto(k) - } - } - if any { - return multiErr - } - req := &pb.LookupRequest{ - Key: pbKeys, - ReadOptions: opts, - } - resp := &pb.LookupResponse{} - if err := c.call(ctx, "lookup", req, resp); err != nil { - return err - } - if len(resp.Deferred) > 0 { - // TODO(jbd): Assess whether we should retry the deferred keys. - return errors.New("datastore: some entities temporarily unavailable") - } - if len(keys) != len(resp.Found)+len(resp.Missing) { - return errors.New("datastore: internal error: server returned the wrong number of entities") - } - for _, e := range resp.Found { - k := protoToKey(e.Entity.Key) - index := keyMap[k.String()] - elem := v.Index(index) - if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { - elem = elem.Addr() - } - err := loadEntity(elem.Interface(), e.Entity) - if err != nil { - multiErr[index] = err - any = true - } - } - for _, e := range resp.Missing { - k := protoToKey(e.Entity.Key) - multiErr[keyMap[k.String()]] = ErrNoSuchEntity - any = true - } - if any { - return multiErr - } - return nil -} - -// Put saves the entity src into the datastore with key k. src must be a struct -// pointer or implement PropertyLoadSaver; if a struct pointer then any -// unexported fields of that struct will be skipped. If k is an incomplete key, -// the returned key will be a unique key generated by the datastore. -func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) { - k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src}) - if err != nil { - if me, ok := err.(MultiError); ok { - return nil, me[0] - } - return nil, err - } - return k[0], nil -} - -// PutMulti is a batch version of Put. -// -// src must satisfy the same conditions as the dst argument to GetMulti. -func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) { - mutation, err := putMutation(keys, src) - if err != nil { - return nil, err - } - - // Make the request. - req := &pb.CommitRequest{ - Mutation: mutation, - Mode: pb.CommitRequest_NON_TRANSACTIONAL.Enum(), - } - resp := &pb.CommitResponse{} - if err := c.call(ctx, "commit", req, resp); err != nil { - return nil, err - } - - // Copy any newly minted keys into the returned keys. - newKeys := make(map[int]int) // Map of index in returned slice to index in response. - ret := make([]*Key, len(keys)) - var idx int - for i, key := range keys { - if key.Incomplete() { - // This key will be in the mutation result. 
- newKeys[i] = idx - idx++ - } else { - ret[i] = key - } - } - if len(newKeys) != len(resp.MutationResult.InsertAutoIdKey) { - return nil, errors.New("datastore: internal error: server returned the wrong number of keys") - } - for retI, respI := range newKeys { - ret[retI] = protoToKey(resp.MutationResult.InsertAutoIdKey[respI]) - } - return ret, nil -} - -func putMutation(keys []*Key, src interface{}) (*pb.Mutation, error) { - v := reflect.ValueOf(src) - multiArgType, _ := checkMultiArg(v) - if multiArgType == multiArgTypeInvalid { - return nil, errors.New("datastore: src has invalid type") - } - if len(keys) != v.Len() { - return nil, errors.New("datastore: key and src slices have different length") - } - if len(keys) == 0 { - return nil, nil - } - if err := multiValid(keys); err != nil { - return nil, err - } - var upsert, insert []*pb.Entity - for i, k := range keys { - val := reflect.ValueOf(src).Index(i) - // If src is an interface slice []interface{}{ent1, ent2} - if val.Kind() == reflect.Interface && val.Elem().Kind() == reflect.Slice { - val = val.Elem() - } - // If src is a slice of ptrs []*T{ent1, ent2} - if val.Kind() == reflect.Ptr && val.Elem().Kind() == reflect.Slice { - val = val.Elem() - } - p, err := saveEntity(k, val.Interface()) - if err != nil { - return nil, fmt.Errorf("datastore: Error while saving %v: %v", k.String(), err) - } - if k.Incomplete() { - insert = append(insert, p) - } else { - upsert = append(upsert, p) - } - } - - return &pb.Mutation{ - InsertAutoId: insert, - Upsert: upsert, - }, nil -} - -// Delete deletes the entity for the given key. -func (c *Client) Delete(ctx context.Context, key *Key) error { - err := c.DeleteMulti(ctx, []*Key{key}) - if me, ok := err.(MultiError); ok { - return me[0] - } - return err -} - -// DeleteMulti is a batch version of Delete. -func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error { - mutation, err := deleteMutation(keys) - if err != nil { - return err - } - - req := &pb.CommitRequest{ - Mutation: mutation, - Mode: pb.CommitRequest_NON_TRANSACTIONAL.Enum(), - } - resp := &pb.CommitResponse{} - return c.call(ctx, "commit", req, resp) -} - -func deleteMutation(keys []*Key) (*pb.Mutation, error) { - protoKeys := make([]*pb.Key, len(keys)) - for i, k := range keys { - if k.Incomplete() { - return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k) - } - protoKeys[i] = keyToProto(k) - } - - return &pb.Mutation{ - Delete: protoKeys, - }, nil -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/datastore_test.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/datastore_test.go deleted file mode 100644 index 86896bf0fd..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/datastore_test.go +++ /dev/null @@ -1,1359 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
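Taken together, the Client deleted above is a thin RPC wrapper: Get/Put/Delete delegate to their Multi variants, which validate keys, build a pb.LookupRequest or pb.CommitRequest, and map the results back by key. A minimal round trip through that API might have looked like the sketch below — NewKey is assumed from the package's key.go (not part of this hunk), the project ID is a placeholder, and credential ClientOptions are elided:

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/cloud/datastore"
)

type Article struct {
	Title string
	Views int64
}

func main() {
	ctx := context.Background()
	// "my-project" is a placeholder; real use would pass auth ClientOptions.
	client, err := datastore.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	// NewKey(ctx, kind, name, id, parent) is assumed from the package's key.go.
	key := datastore.NewKey(ctx, "Article", "intro", 0, nil)

	if _, err := client.Put(ctx, key, &Article{Title: "Intro", Views: 1}); err != nil {
		log.Fatalf("Put: %v", err) // a single-key Put unwraps the MultiError from PutMulti
	}

	var got Article
	if err := client.Get(ctx, key, &got); err != nil {
		log.Fatalf("Get: %v", err) // ErrNoSuchEntity if the key is absent
	}

	if err := client.Delete(ctx, key); err != nil {
		log.Fatalf("Delete: %v", err)
	}
}

An incomplete key passed to Put would instead come back completed from the commit response, via the InsertAutoIdKey mapping handled in PutMulti above.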
- -package datastore - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "strings" - "testing" - "time" -) - -type ( - myBlob []byte - myByte byte - myString string -) - -func makeMyByteSlice(n int) []myByte { - b := make([]myByte, n) - for i := range b { - b[i] = myByte(i) - } - return b -} - -func makeInt8Slice(n int) []int8 { - b := make([]int8, n) - for i := range b { - b[i] = int8(i) - } - return b -} - -func makeUint8Slice(n int) []uint8 { - b := make([]uint8, n) - for i := range b { - b[i] = uint8(i) - } - return b -} - -func newKey(stringID string, parent *Key) *Key { - return &Key{ - kind: "kind", - name: stringID, - id: 0, - parent: parent, - } -} - -var ( - testKey0 = newKey("name0", nil) - testKey1a = newKey("name1", nil) - testKey1b = newKey("name1", nil) - testKey2a = newKey("name2", testKey0) - testKey2b = newKey("name2", testKey0) -) - -type B0 struct { - B []byte `datastore:",noindex"` -} - -type B1 struct { - B []int8 -} - -type B2 struct { - B myBlob `datastore:",noindex"` -} - -type B3 struct { - B []myByte `datastore:",noindex"` -} - -type B4 struct { - B [][]byte -} - -type C0 struct { - I int - C chan int -} - -type C1 struct { - I int - C *chan int -} - -type C2 struct { - I int - C []chan int -} - -type C3 struct { - C string -} - -type E struct{} - -type K0 struct { - K *Key -} - -type K1 struct { - K []*Key -} - -type N0 struct { - X0 - Nonymous X0 - Ignore string `datastore:"-"` - Other string -} - -type N1 struct { - X0 - Nonymous []X0 - Ignore string `datastore:"-"` - Other string -} - -type N2 struct { - N1 `datastore:"red"` - Green N1 `datastore:"green"` - Blue N1 - White N1 `datastore:"-"` -} - -type O0 struct { - I int64 -} - -type O1 struct { - I int32 -} - -type U0 struct { - U uint -} - -type U1 struct { - U string -} - -type T struct { - T time.Time -} - -type X0 struct { - S string - I int - i int -} - -type X1 struct { - S myString - I int32 - J int64 -} - -type X2 struct { - Z string - i int -} - -type X3 struct { - S bool - I int -} - -type Y0 struct { - B bool - F []float64 - G []float64 -} - -type Y1 struct { - B bool - F float64 -} - -type Y2 struct { - B bool - F []int64 -} - -type Tagged struct { - A int `datastore:"a,noindex"` - B []int `datastore:"b"` - C int `datastore:",noindex"` - D int `datastore:""` - E int - I int `datastore:"-"` - J int `datastore:",noindex" json:"j"` - - Y0 `datastore:"-"` - Z chan int `datastore:"-,"` -} - -type InvalidTagged1 struct { - I int `datastore:"\t"` -} - -type InvalidTagged2 struct { - I int - J int `datastore:"I"` -} - -type Inner1 struct { - W int32 - X string -} - -type Inner2 struct { - Y float64 -} - -type Inner3 struct { - Z bool -} - -type Outer struct { - A int16 - I []Inner1 - J Inner2 - Inner3 -} - -type OuterEquivalent struct { - A int16 - IDotW []int32 `datastore:"I.W"` - IDotX []string `datastore:"I.X"` - JDotY float64 `datastore:"J.Y"` - Z bool -} - -type Dotted struct { - A DottedA `datastore:"A0.A1.A2"` -} - -type DottedA struct { - B DottedB `datastore:"B3"` -} - -type DottedB struct { - C int `datastore:"C4.C5"` -} - -type SliceOfSlices struct { - I int - S []struct { - J int - F []float64 - } -} - -type Recursive struct { - I int - R []Recursive -} - -type MutuallyRecursive0 struct { - I int - R []MutuallyRecursive1 -} - -type MutuallyRecursive1 struct { - I int - R []MutuallyRecursive0 -} - -type Doubler struct { - S string - I int64 - B bool -} - -func (d *Doubler) Load(props []Property) error { - return LoadStruct(d, props) -} - -func (d *Doubler) Save() ([]Property, 
error) { - // Save the default Property slice to an in-memory buffer (a PropertyList). - props, err := SaveStruct(d) - if err != nil { - return nil, err - } - var list PropertyList - if err := list.Load(props); err != nil { - return nil, err - } - - // Edit that PropertyList, and send it on. - for i := range list { - switch v := list[i].Value.(type) { - case string: - // + means string concatenation. - list[i].Value = v + v - case int64: - // + means integer addition. - list[i].Value = v + v - } - } - return list.Save() -} - -var _ PropertyLoadSaver = (*Doubler)(nil) - -type Deriver struct { - S, Derived, Ignored string -} - -func (e *Deriver) Load(props []Property) error { - for _, p := range props { - if p.Name != "S" { - continue - } - e.S = p.Value.(string) - e.Derived = "derived+" + e.S - } - return nil -} - -func (e *Deriver) Save() ([]Property, error) { - return []Property{ - { - Name: "S", - Value: e.S, - }, - }, nil -} - -var _ PropertyLoadSaver = (*Deriver)(nil) - -type BadMultiPropEntity struct{} - -func (e *BadMultiPropEntity) Load(props []Property) error { - return errors.New("unimplemented") -} - -func (e *BadMultiPropEntity) Save() ([]Property, error) { - // Write multiple properties with the same name "I", but Multiple is false. - var props []Property - for i := 0; i < 3; i++ { - props = append(props, Property{ - Name: "I", - Value: int64(i), - }) - } - return props, nil -} - -var _ PropertyLoadSaver = (*BadMultiPropEntity)(nil) - -type testCase struct { - desc string - src interface{} - want interface{} - putErr string - getErr string -} - -var testCases = []testCase{ - { - "chan save fails", - &C0{I: -1}, - &E{}, - "unsupported struct field", - "", - }, - { - "*chan save fails", - &C1{I: -1}, - &E{}, - "unsupported struct field", - "", - }, - { - "[]chan save fails", - &C2{I: -1, C: make([]chan int, 8)}, - &E{}, - "unsupported struct field", - "", - }, - { - "chan load fails", - &C3{C: "not a chan"}, - &C0{}, - "", - "type mismatch", - }, - { - "*chan load fails", - &C3{C: "not a *chan"}, - &C1{}, - "", - "type mismatch", - }, - { - "[]chan load fails", - &C3{C: "not a []chan"}, - &C2{}, - "", - "type mismatch", - }, - { - "empty struct", - &E{}, - &E{}, - "", - "", - }, - { - "key", - &K0{K: testKey1a}, - &K0{K: testKey1b}, - "", - "", - }, - { - "key with parent", - &K0{K: testKey2a}, - &K0{K: testKey2b}, - "", - "", - }, - { - "nil key", - &K0{}, - &K0{}, - "", - "", - }, - { - "all nil keys in slice", - &K1{[]*Key{nil, nil}}, - &K1{[]*Key{nil, nil}}, - "", - "", - }, - { - "some nil keys in slice", - &K1{[]*Key{testKey1a, nil, testKey2a}}, - &K1{[]*Key{testKey1b, nil, testKey2b}}, - "", - "", - }, - { - "overflow", - &O0{I: 1 << 48}, - &O1{}, - "", - "overflow", - }, - { - "time", - &T{T: time.Unix(1e9, 0)}, - &T{T: time.Unix(1e9, 0)}, - "", - "", - }, - { - "time as props", - &T{T: time.Unix(1e9, 0)}, - &PropertyList{ - Property{Name: "T", Value: time.Unix(1e9, 0), NoIndex: false}, - }, - "", - "", - }, - { - "uint save", - &U0{U: 1}, - &U0{}, - "unsupported struct field", - "", - }, - { - "uint load", - &U1{U: "not a uint"}, - &U0{}, - "", - "type mismatch", - }, - { - "zero", - &X0{}, - &X0{}, - "", - "", - }, - { - "basic", - &X0{S: "one", I: 2, i: 3}, - &X0{S: "one", I: 2}, - "", - "", - }, - { - "save string/int load myString/int32", - &X0{S: "one", I: 2, i: 3}, - &X1{S: "one", I: 2}, - "", - "", - }, - { - "missing fields", - &X0{S: "one", I: 2, i: 3}, - &X2{}, - "", - "no such struct field", - }, - { - "save string load bool", - &X0{S: "one", I: 2, i: 3}, - 
&X3{I: 2}, - "", - "type mismatch", - }, - { - "basic slice", - &Y0{B: true, F: []float64{7, 8, 9}}, - &Y0{B: true, F: []float64{7, 8, 9}}, - "", - "", - }, - { - "save []float64 load float64", - &Y0{B: true, F: []float64{7, 8, 9}}, - &Y1{B: true}, - "", - "requires a slice", - }, - { - "save []float64 load []int64", - &Y0{B: true, F: []float64{7, 8, 9}}, - &Y2{B: true}, - "", - "type mismatch", - }, - { - "single slice is too long", - &Y0{F: make([]float64, maxIndexedProperties+1)}, - &Y0{}, - "too many indexed properties", - "", - }, - { - "two slices are too long", - &Y0{F: make([]float64, maxIndexedProperties), G: make([]float64, maxIndexedProperties)}, - &Y0{}, - "too many indexed properties", - "", - }, - { - "one slice and one scalar are too long", - &Y0{F: make([]float64, maxIndexedProperties), B: true}, - &Y0{}, - "too many indexed properties", - "", - }, - { - "long blob", - &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, - &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, - "", - "", - }, - { - "long []int8 is too long", - &B1{B: makeInt8Slice(maxIndexedProperties + 1)}, - &B1{}, - "too many indexed properties", - "", - }, - { - "short []int8", - &B1{B: makeInt8Slice(3)}, - &B1{B: makeInt8Slice(3)}, - "", - "", - }, - { - "long myBlob", - &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, - &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, - "", - "", - }, - { - "short myBlob", - &B2{B: makeUint8Slice(3)}, - &B2{B: makeUint8Slice(3)}, - "", - "", - }, - { - "long []myByte", - &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, - &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, - "", - "", - }, - { - "short []myByte", - &B3{B: makeMyByteSlice(3)}, - &B3{B: makeMyByteSlice(3)}, - "", - "", - }, - { - "slice of blobs", - &B4{B: [][]byte{ - makeUint8Slice(3), - makeUint8Slice(4), - makeUint8Slice(5), - }}, - &B4{B: [][]byte{ - makeUint8Slice(3), - makeUint8Slice(4), - makeUint8Slice(5), - }}, - "", - "", - }, - { - "[]byte must be noindex", - &PropertyList{ - Property{Name: "B", Value: makeUint8Slice(1501), NoIndex: false}, - }, - nil, - "cannot index a Property", - "", - }, - { - "save tagged load props", - &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, - &PropertyList{ - // A and B are renamed to a and b; A and C are noindex, I is ignored. 
- // Order is alphabetical - Property{Name: "a", Value: int64(1), NoIndex: true}, - Property{Name: "b", Value: int64(21), NoIndex: false, Multiple: true}, - Property{Name: "b", Value: int64(22), NoIndex: false, Multiple: true}, - Property{Name: "b", Value: int64(23), NoIndex: false, Multiple: true}, - Property{Name: "C", Value: int64(3), NoIndex: true}, - Property{Name: "D", Value: int64(4), NoIndex: false}, - Property{Name: "E", Value: int64(5), NoIndex: false}, - Property{Name: "J", Value: int64(7), NoIndex: true}, - }, - "", - "", - }, - { - "save tagged load tagged", - &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, - &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, J: 7}, - "", - "", - }, - { - "save props load tagged", - &PropertyList{ - Property{Name: "A", Value: int64(11), NoIndex: true}, - Property{Name: "a", Value: int64(12), NoIndex: true}, - }, - &Tagged{A: 12}, - "", - `cannot load field "A"`, - }, - { - "invalid tagged1", - &InvalidTagged1{I: 1}, - &InvalidTagged1{}, - "struct tag has invalid property name", - "", - }, - { - "invalid tagged2", - &InvalidTagged2{I: 1, J: 2}, - &InvalidTagged2{}, - "struct tag has repeated property name", - "", - }, - { - "doubler", - &Doubler{S: "s", I: 1, B: true}, - &Doubler{S: "ss", I: 2, B: true}, - "", - "", - }, - { - "save struct load props", - &X0{S: "s", I: 1}, - &PropertyList{ - Property{Name: "S", Value: "s", NoIndex: false}, - Property{Name: "I", Value: int64(1), NoIndex: false}, - }, - "", - "", - }, - { - "save props load struct", - &PropertyList{ - Property{Name: "S", Value: "s", NoIndex: false}, - Property{Name: "I", Value: int64(1), NoIndex: false}, - }, - &X0{S: "s", I: 1}, - "", - "", - }, - { - "nil-value props", - &PropertyList{ - Property{Name: "I", Value: nil, NoIndex: false}, - Property{Name: "B", Value: nil, NoIndex: false}, - Property{Name: "S", Value: nil, NoIndex: false}, - Property{Name: "F", Value: nil, NoIndex: false}, - Property{Name: "K", Value: nil, NoIndex: false}, - Property{Name: "T", Value: nil, NoIndex: false}, - Property{Name: "J", Value: nil, NoIndex: false}, - Property{Name: "J", Value: int64(7), NoIndex: false}, - Property{Name: "J", Value: nil, NoIndex: false}, - }, - &struct { - I int64 - B bool - S string - F float64 - K *Key - T time.Time - J []int64 - }{ - J: []int64{0, 7, 0}, - }, - "", - "", - }, - { - "save outer load props", - &Outer{ - A: 1, - I: []Inner1{ - {10, "ten"}, - {20, "twenty"}, - {30, "thirty"}, - }, - J: Inner2{ - Y: 3.14, - }, - Inner3: Inner3{ - Z: true, - }, - }, - &PropertyList{ - Property{Name: "A", Value: int64(1), NoIndex: false}, - Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true}, - Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true}, - Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true}, - Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true}, - Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true}, - Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true}, - Property{Name: "J.Y", Value: float64(3.14), NoIndex: false}, - Property{Name: "Z", Value: true, NoIndex: false}, - }, - "", - "", - }, - { - "save props load outer-equivalent", - &PropertyList{ - Property{Name: "A", Value: int64(1), NoIndex: false}, - Property{Name: "I.W", Value: int64(10), NoIndex: false}, - Property{Name: "I.X", Value: "ten", NoIndex: false}, - Property{Name: "I.W", Value: int64(20), NoIndex: false}, - Property{Name: "I.X", Value: "twenty", NoIndex: false}, - 
Property{Name: "I.W", Value: int64(30), NoIndex: false}, - Property{Name: "I.X", Value: "thirty", NoIndex: false}, - Property{Name: "J.Y", Value: float64(3.14), NoIndex: false}, - Property{Name: "Z", Value: true, NoIndex: false}, - }, - &OuterEquivalent{ - A: 1, - IDotW: []int32{10, 20, 30}, - IDotX: []string{"ten", "twenty", "thirty"}, - JDotY: 3.14, - Z: true, - }, - "", - "", - }, - { - "save outer-equivalent load outer", - &OuterEquivalent{ - A: 1, - IDotW: []int32{10, 20, 30}, - IDotX: []string{"ten", "twenty", "thirty"}, - JDotY: 3.14, - Z: true, - }, - &Outer{ - A: 1, - I: []Inner1{ - {10, "ten"}, - {20, "twenty"}, - {30, "thirty"}, - }, - J: Inner2{ - Y: 3.14, - }, - Inner3: Inner3{ - Z: true, - }, - }, - "", - "", - }, - { - "dotted names save", - &Dotted{A: DottedA{B: DottedB{C: 88}}}, - &PropertyList{ - Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(88), NoIndex: false}, - }, - "", - "", - }, - { - "dotted names load", - &PropertyList{ - Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(99), NoIndex: false}, - }, - &Dotted{A: DottedA{B: DottedB{C: 99}}}, - "", - "", - }, - { - "save struct load deriver", - &X0{S: "s", I: 1}, - &Deriver{S: "s", Derived: "derived+s"}, - "", - "", - }, - { - "save deriver load struct", - &Deriver{S: "s", Derived: "derived+s", Ignored: "ignored"}, - &X0{S: "s"}, - "", - "", - }, - { - "zero time.Time", - &T{T: time.Time{}}, - &T{T: time.Time{}}, - "", - "", - }, - { - "time.Time near Unix zero time", - &T{T: time.Unix(0, 4e3)}, - &T{T: time.Unix(0, 4e3)}, - "", - "", - }, - { - "time.Time, far in the future", - &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, - &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, - "", - "", - }, - { - "time.Time, very far in the past", - &T{T: time.Date(-300000, 1, 1, 0, 0, 0, 0, time.UTC)}, - &T{}, - "time value out of range", - "", - }, - { - "time.Time, very far in the future", - &T{T: time.Date(294248, 1, 1, 0, 0, 0, 0, time.UTC)}, - &T{}, - "time value out of range", - "", - }, - { - "structs", - &N0{ - X0: X0{S: "one", I: 2, i: 3}, - Nonymous: X0{S: "four", I: 5, i: 6}, - Ignore: "ignore", - Other: "other", - }, - &N0{ - X0: X0{S: "one", I: 2}, - Nonymous: X0{S: "four", I: 5}, - Other: "other", - }, - "", - "", - }, - { - "slice of structs", - &N1{ - X0: X0{S: "one", I: 2, i: 3}, - Nonymous: []X0{ - {S: "four", I: 5, i: 6}, - {S: "seven", I: 8, i: 9}, - {S: "ten", I: 11, i: 12}, - {S: "thirteen", I: 14, i: 15}, - }, - Ignore: "ignore", - Other: "other", - }, - &N1{ - X0: X0{S: "one", I: 2}, - Nonymous: []X0{ - {S: "four", I: 5}, - {S: "seven", I: 8}, - {S: "ten", I: 11}, - {S: "thirteen", I: 14}, - }, - Other: "other", - }, - "", - "", - }, - { - "structs with slices of structs", - &N2{ - N1: N1{ - X0: X0{S: "rouge"}, - Nonymous: []X0{ - {S: "rosso0"}, - {S: "rosso1"}, - }, - }, - Green: N1{ - X0: X0{S: "vert"}, - Nonymous: []X0{ - {S: "verde0"}, - {S: "verde1"}, - {S: "verde2"}, - }, - }, - Blue: N1{ - X0: X0{S: "bleu"}, - Nonymous: []X0{ - {S: "blu0"}, - {S: "blu1"}, - {S: "blu2"}, - {S: "blu3"}, - }, - }, - }, - &N2{ - N1: N1{ - X0: X0{S: "rouge"}, - Nonymous: []X0{ - {S: "rosso0"}, - {S: "rosso1"}, - }, - }, - Green: N1{ - X0: X0{S: "vert"}, - Nonymous: []X0{ - {S: "verde0"}, - {S: "verde1"}, - {S: "verde2"}, - }, - }, - Blue: N1{ - X0: X0{S: "bleu"}, - Nonymous: []X0{ - {S: "blu0"}, - {S: "blu1"}, - {S: "blu2"}, - {S: "blu3"}, - }, - }, - }, - "", - "", - }, - { - "save structs load props", - &N2{ - N1: N1{ - X0: X0{S: "rouge"}, - Nonymous: []X0{ - {S: "rosso0"}, - {S: "rosso1"}, - }, - }, - Green: 
N1{ - X0: X0{S: "vert"}, - Nonymous: []X0{ - {S: "verde0"}, - {S: "verde1"}, - {S: "verde2"}, - }, - }, - Blue: N1{ - X0: X0{S: "bleu"}, - Nonymous: []X0{ - {S: "blu0"}, - {S: "blu1"}, - {S: "blu2"}, - {S: "blu3"}, - }, - }, - }, - &PropertyList{ - Property{Name: "red.S", Value: "rouge", NoIndex: false}, - Property{Name: "red.I", Value: int64(0), NoIndex: false}, - Property{Name: "red.Nonymous.S", Value: "rosso0", NoIndex: false, Multiple: true}, - Property{Name: "red.Nonymous.S", Value: "rosso1", NoIndex: false, Multiple: true}, - Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, - Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, - Property{Name: "red.Other", Value: "", NoIndex: false}, - Property{Name: "green.S", Value: "vert", NoIndex: false}, - Property{Name: "green.I", Value: int64(0), NoIndex: false}, - Property{Name: "green.Nonymous.S", Value: "verde0", NoIndex: false, Multiple: true}, - Property{Name: "green.Nonymous.S", Value: "verde1", NoIndex: false, Multiple: true}, - Property{Name: "green.Nonymous.S", Value: "verde2", NoIndex: false, Multiple: true}, - Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, - Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, - Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, - Property{Name: "green.Other", Value: "", NoIndex: false}, - Property{Name: "Blue.S", Value: "bleu", NoIndex: false}, - Property{Name: "Blue.I", Value: int64(0), NoIndex: false}, - Property{Name: "Blue.Nonymous.S", Value: "blu0", NoIndex: false, Multiple: true}, - Property{Name: "Blue.Nonymous.S", Value: "blu1", NoIndex: false, Multiple: true}, - Property{Name: "Blue.Nonymous.S", Value: "blu2", NoIndex: false, Multiple: true}, - Property{Name: "Blue.Nonymous.S", Value: "blu3", NoIndex: false, Multiple: true}, - Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, - Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, - Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, - Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, - Property{Name: "Blue.Other", Value: "", NoIndex: false}, - }, - "", - "", - }, - { - "save props load structs with ragged fields", - &PropertyList{ - Property{Name: "red.S", Value: "rot", NoIndex: false}, - Property{Name: "green.Nonymous.I", Value: int64(10), NoIndex: false}, - Property{Name: "green.Nonymous.I", Value: int64(11), NoIndex: false}, - Property{Name: "green.Nonymous.I", Value: int64(12), NoIndex: false}, - Property{Name: "green.Nonymous.I", Value: int64(13), NoIndex: false}, - Property{Name: "Blue.Nonymous.S", Value: "blau0", NoIndex: false}, - Property{Name: "Blue.Nonymous.I", Value: int64(20), NoIndex: false}, - Property{Name: "Blue.Nonymous.S", Value: "blau1", NoIndex: false}, - Property{Name: "Blue.Nonymous.I", Value: int64(21), NoIndex: false}, - Property{Name: "Blue.Nonymous.S", Value: "blau2", NoIndex: false}, - }, - &N2{ - N1: N1{ - X0: X0{S: "rot"}, - }, - Green: N1{ - Nonymous: []X0{ - {I: 10}, - {I: 11}, - {I: 12}, - {I: 13}, - }, - }, - Blue: N1{ - Nonymous: []X0{ - {S: "blau0", I: 20}, - {S: "blau1", I: 21}, - {S: "blau2"}, - }, - }, - }, - "", - "", - }, - { - "save structs with noindex tags", - &struct { - A struct { - X string `datastore:",noindex"` - Y string - } `datastore:",noindex"` - B struct { - X string `datastore:",noindex"` - 
Y string - } - }{}, - &PropertyList{ - Property{Name: "A.X", Value: "", NoIndex: true}, - Property{Name: "A.Y", Value: "", NoIndex: true}, - Property{Name: "B.X", Value: "", NoIndex: true}, - Property{Name: "B.Y", Value: "", NoIndex: false}, - }, - "", - "", - }, - { - "embedded struct with name override", - &struct { - Inner1 `datastore:"foo"` - }{}, - &PropertyList{ - Property{Name: "foo.W", Value: int64(0), NoIndex: false}, - Property{Name: "foo.X", Value: "", NoIndex: false}, - }, - "", - "", - }, - { - "slice of slices", - &SliceOfSlices{}, - nil, - "flattening nested structs leads to a slice of slices", - "", - }, - { - "recursive struct", - &Recursive{}, - nil, - "recursive struct", - "", - }, - { - "mutually recursive struct", - &MutuallyRecursive0{}, - nil, - "recursive struct", - "", - }, - { - "non-exported struct fields", - &struct { - i, J int64 - }{i: 1, J: 2}, - &PropertyList{ - Property{Name: "J", Value: int64(2), NoIndex: false}, - }, - "", - "", - }, - { - "json.RawMessage", - &struct { - J json.RawMessage - }{ - J: json.RawMessage("rawr"), - }, - &PropertyList{ - Property{Name: "J", Value: []byte("rawr"), NoIndex: false}, - }, - "", - "", - }, - { - "json.RawMessage to myBlob", - &struct { - B json.RawMessage - }{ - B: json.RawMessage("rawr"), - }, - &B2{B: myBlob("rawr")}, - "", - "", - }, -} - -// checkErr returns the empty string if either both want and err are zero, -// or if want is a non-empty substring of err's string representation. -func checkErr(want string, err error) string { - if err != nil { - got := err.Error() - if want == "" || strings.Index(got, want) == -1 { - return got - } - } else if want != "" { - return fmt.Sprintf("want error %q", want) - } - return "" -} - -func TestRoundTrip(t *testing.T) { - for _, tc := range testCases { - p, err := saveEntity(testKey0, tc.src) - if s := checkErr(tc.putErr, err); s != "" { - t.Errorf("%s: save: %s", tc.desc, s) - continue - } - if p == nil { - continue - } - var got interface{} - if _, ok := tc.want.(*PropertyList); ok { - got = new(PropertyList) - } else { - got = reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() - } - err = loadEntity(got, p) - if s := checkErr(tc.getErr, err); s != "" { - t.Errorf("%s: load: %s", tc.desc, s) - continue - } - equal := false - if gotT, ok := got.(*T); ok { - // Round tripping a time.Time can result in a different time.Location: Local instead of UTC. - // We therefore test equality explicitly, instead of relying on reflect.DeepEqual. - equal = gotT.T.Equal(tc.want.(*T).T) - } else { - equal = reflect.DeepEqual(got, tc.want) - } - if !equal { - t.Errorf("%s: compare: got %v want %v", tc.desc, got, tc.want) - continue - } - } -} - -func TestQueryConstruction(t *testing.T) { - tests := []struct { - q, exp *Query - err string - }{ - { - q: NewQuery("Foo"), - exp: &Query{ - kind: "Foo", - limit: -1, - }, - }, - { - // Regular filtered query with standard spacing. - q: NewQuery("Foo").Filter("foo >", 7), - exp: &Query{ - kind: "Foo", - filter: []filter{ - { - FieldName: "foo", - Op: greaterThan, - Value: 7, - }, - }, - limit: -1, - }, - }, - { - // Filtered query with no spacing. - q: NewQuery("Foo").Filter("foo=", 6), - exp: &Query{ - kind: "Foo", - filter: []filter{ - { - FieldName: "foo", - Op: equal, - Value: 6, - }, - }, - limit: -1, - }, - }, - { - // Filtered query with funky spacing. 
- q: NewQuery("Foo").Filter(" foo< ", 8), - exp: &Query{ - kind: "Foo", - filter: []filter{ - { - FieldName: "foo", - Op: lessThan, - Value: 8, - }, - }, - limit: -1, - }, - }, - { - // Filtered query with multicharacter op. - q: NewQuery("Foo").Filter("foo >=", 9), - exp: &Query{ - kind: "Foo", - filter: []filter{ - { - FieldName: "foo", - Op: greaterEq, - Value: 9, - }, - }, - limit: -1, - }, - }, - { - // Query with ordering. - q: NewQuery("Foo").Order("bar"), - exp: &Query{ - kind: "Foo", - order: []order{ - { - FieldName: "bar", - Direction: ascending, - }, - }, - limit: -1, - }, - }, - { - // Query with reverse ordering, and funky spacing. - q: NewQuery("Foo").Order(" - bar"), - exp: &Query{ - kind: "Foo", - order: []order{ - { - FieldName: "bar", - Direction: descending, - }, - }, - limit: -1, - }, - }, - { - // Query with an empty ordering. - q: NewQuery("Foo").Order(""), - err: "empty order", - }, - { - // Query with a + ordering. - q: NewQuery("Foo").Order("+bar"), - err: "invalid order", - }, - } - for i, test := range tests { - if test.q.err != nil { - got := test.q.err.Error() - if !strings.Contains(got, test.err) { - t.Errorf("%d: error mismatch: got %q want something containing %q", i, got, test.err) - } - continue - } - if !reflect.DeepEqual(test.q, test.exp) { - t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp) - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/errors.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/errors.go deleted file mode 100644 index 3077f80d3b..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/errors.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file provides error functions for common API failure modes. - -package datastore - -import ( - "fmt" -) - -// MultiError is returned by batch operations when there are errors with -// particular elements. Errors will be in a one-to-one correspondence with -// the input elements; successful elements will have a nil entry. -type MultiError []error - -func (m MultiError) Error() string { - s, n := "", 0 - for _, e := range m { - if e != nil { - if n == 0 { - s = e.Error() - } - n++ - } - } - switch n { - case 0: - return "(0 errors)" - case 1: - return s - case 2: - return s + " (and 1 other error)" - } - return fmt.Sprintf("%s (and %d other errors)", s, n-1) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/example_test.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/example_test.go deleted file mode 100644 index 391991c95a..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/example_test.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package datastore_test - -import ( - "io/ioutil" - "log" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2/google" - "google.golang.org/cloud" - "google.golang.org/cloud/datastore" -) - -// TODO(djd): reevaluate this example given new Client config. -func Example_auth() *datastore.Client { - // Initialize an authorized context with Google Developers Console - // JSON key. Read the google package examples to learn more about - // different authorization flows you can use. - // http://godoc.org/golang.org/x/oauth2/google - jsonKey, err := ioutil.ReadFile("/path/to/json/keyfile.json") - if err != nil { - log.Fatal(err) - } - conf, err := google.JWTConfigFromJSON( - jsonKey, - datastore.ScopeDatastore, - datastore.ScopeUserEmail, - ) - if err != nil { - log.Fatal(err) - } - ctx := context.Background() - client, err := datastore.NewClient(ctx, "project-id", cloud.WithTokenSource(conf.TokenSource(ctx))) - if err != nil { - log.Fatal(err) - } - // Use the client (see other examples). - return client -} - -func ExampleGet() { - ctx := context.Background() - client, err := datastore.NewClient(ctx, "project-id") - if err != nil { - log.Fatal(err) - } - - type Article struct { - Title string - Description string - Body string `datastore:",noindex"` - Author *datastore.Key - PublishedAt time.Time - } - key := datastore.NewKey(ctx, "Article", "articled1", 0, nil) - article := &Article{} - if err := client.Get(ctx, key, article); err != nil { - log.Fatal(err) - } -} - -func ExamplePut() { - ctx := context.Background() - client, err := datastore.NewClient(ctx, "project-id") - if err != nil { - log.Fatal(err) - } - - type Article struct { - Title string - Description string - Body string `datastore:",noindex"` - Author *datastore.Key - PublishedAt time.Time - } - newKey := datastore.NewIncompleteKey(ctx, "Article", nil) - _, err = client.Put(ctx, newKey, &Article{ - Title: "The title of the article", - Description: "The description of the article...", - Body: "...", - Author: datastore.NewKey(ctx, "Author", "jbd", 0, nil), - PublishedAt: time.Now(), - }) - if err != nil { - log.Fatal(err) - } -} - -func ExampleDelete() { - ctx := context.Background() - client, err := datastore.NewClient(ctx, "project-id") - if err != nil { - log.Fatal(err) - } - - key := datastore.NewKey(ctx, "Article", "articled1", 0, nil) - if err := client.Delete(ctx, key); err != nil { - log.Fatal(err) - } -} - -type Post struct { - Title string - PublishedAt time.Time - Comments int -} - -func ExampleGetMulti() { - ctx := context.Background() - client, err := datastore.NewClient(ctx, "project-id") - if err != nil { - log.Fatal(err) - } - - keys := []*datastore.Key{ - datastore.NewKey(ctx, "Post", "post1", 0, nil), - datastore.NewKey(ctx, "Post", "post2", 0, nil), - datastore.NewKey(ctx, "Post", "post3", 0, nil), - } - posts := make([]Post, 3) - if err := client.GetMulti(ctx, keys, posts); err != nil { - log.Println(err) - } -} - -func ExamplePutMulti_slice() { - ctx := context.Background() - client, err := datastore.NewClient(ctx, "project-id") - if err != nil { - log.Fatal(err) - } - - keys := 
[]*datastore.Key{ - datastore.NewKey(ctx, "Post", "post1", 0, nil), - datastore.NewKey(ctx, "Post", "post2", 0, nil), - } - - // PutMulti with a Post slice. - posts := []*Post{ - {Title: "Post 1", PublishedAt: time.Now()}, - {Title: "Post 2", PublishedAt: time.Now()}, - } - if _, err := client.PutMulti(ctx, keys, posts); err != nil { - log.Fatal(err) - } -} - -func ExamplePutMulti_interfaceSlice() { - ctx := context.Background() - client, err := datastore.NewClient(ctx, "project-id") - if err != nil { - log.Fatal(err) - } - - keys := []*datastore.Key{ - datastore.NewKey(ctx, "Post", "post1", 0, nil), - datastore.NewKey(ctx, "Post", "post2", 0, nil), - } - - // PutMulti with an empty interface slice. - posts := []interface{}{ - &Post{Title: "Post 1", PublishedAt: time.Now()}, - &Post{Title: "Post 2", PublishedAt: time.Now()}, - } - if _, err := client.PutMulti(ctx, keys, posts); err != nil { - log.Fatal(err) - } -} - -func ExampleQuery() { - ctx := context.Background() - client, err := datastore.NewClient(ctx, "project-id") - if err != nil { - log.Fatal(err) - } - - // Count the number of the post entities. - q := datastore.NewQuery("Post") - n, err := client.Count(ctx, q) - if err != nil { - log.Fatal(err) - } - log.Printf("There are %d posts.", n) - - // List the posts published since yesterday. - yesterday := time.Now().Add(-24 * time.Hour) - q = datastore.NewQuery("Post").Filter("PublishedAt >", yesterday) - it := client.Run(ctx, q) - // Use the iterator. - _ = it - - // Order the posts by the number of comments they have received. - datastore.NewQuery("Post").Order("-Comments") - - // Start listing from an offset and limit the results. - datastore.NewQuery("Post").Offset(20).Limit(10) -} - -func ExampleTransaction() { - ctx := context.Background() - client, err := datastore.NewClient(ctx, "project-id") - if err != nil { - log.Fatal(err) - } - const retries = 3 - - // Increment a counter. - // See https://cloud.google.com/appengine/articles/sharding_counters for - // a more scalable solution. - type Counter struct { - Count int - } - - key := datastore.NewKey(ctx, "counter", "CounterA", 0, nil) - - for i := 0; i < retries; i++ { - tx, err := client.NewTransaction(ctx) - if err != nil { - break - } - - var c Counter - if err := tx.Get(key, &c); err != nil && err != datastore.ErrNoSuchEntity { - break - } - c.Count++ - if _, err := tx.Put(key, &c); err != nil { - break - } - - // Attempt to commit the transaction. If there's a conflict, try again. - if _, err := tx.Commit(); err != datastore.ErrConcurrentTransaction { - break - } - } - -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/integration_test.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/integration_test.go deleted file mode 100644 index 01d52ab654..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/integration_test.go +++ /dev/null @@ -1,569 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
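The retry loop in ExampleTransaction above is a reusable pattern. A hedged sketch of a wrapper, not part of the package, assuming NewTransaction returns a *datastore.Transaction with the Get, Put, and Commit methods used in the example:

package example

import (
	"golang.org/x/net/context"

	"google.golang.org/cloud/datastore"
)

// runInTransaction retries fn while Commit reports ErrConcurrentTransaction,
// mirroring the loop in ExampleTransaction.
func runInTransaction(ctx context.Context, client *datastore.Client, retries int, fn func(tx *datastore.Transaction) error) error {
	for i := 0; i < retries; i++ {
		tx, err := client.NewTransaction(ctx)
		if err != nil {
			return err
		}
		if err := fn(tx); err != nil {
			return err
		}
		// Any outcome other than a concurrency conflict, including success
		// (a nil error), ends the retry loop.
		if _, err := tx.Commit(); err != datastore.ErrConcurrentTransaction {
			return err
		}
	}
	return datastore.ErrConcurrentTransaction
}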
- -// +build integration - -package datastore - -import ( - "fmt" - "log" - - "reflect" - "sort" - "strings" - "testing" - "time" - - "golang.org/x/net/context" - "google.golang.org/cloud" - "google.golang.org/cloud/internal/testutil" -) - -func newClient(ctx context.Context) *Client { - ts := testutil.TokenSource(ctx, ScopeDatastore, ScopeUserEmail) - client, err := NewClient(ctx, testutil.ProjID(), cloud.WithTokenSource(ts)) - if err != nil { - log.Fatal(err) - } - return client -} - -func TestBasics(t *testing.T) { - type X struct { - I int - S string - T time.Time - } - ctx := context.Background() - client := newClient(ctx) - x0 := X{66, "99", time.Now().Truncate(time.Millisecond)} - k, err := client.Put(ctx, NewIncompleteKey(ctx, "BasicsX", nil), &x0) - if err != nil { - t.Fatalf("client.Put: %v", err) - } - x1 := X{} - err = client.Get(ctx, k, &x1) - if err != nil { - t.Errorf("client.Get: %v", err) - } - err = client.Delete(ctx, k) - if err != nil { - t.Errorf("client.Delete: %v", err) - } - if !reflect.DeepEqual(x0, x1) { - t.Errorf("compare: x0=%v, x1=%v", x0, x1) - } -} - -func TestListValues(t *testing.T) { - p0 := PropertyList{ - {Name: "L", Value: int64(12), Multiple: true}, - {Name: "L", Value: "string", Multiple: true}, - {Name: "L", Value: true, Multiple: true}, - } - ctx := context.Background() - client := newClient(ctx) - k, err := client.Put(ctx, NewIncompleteKey(ctx, "ListValue", nil), &p0) - if err != nil { - t.Fatalf("client.Put: %v", err) - } - var p1 PropertyList - if err := client.Get(ctx, k, &p1); err != nil { - t.Errorf("client.Get: %v", err) - } - if !reflect.DeepEqual(p0, p1) { - t.Errorf("compare:\np0=%v\np1=%#v", p0, p1) - } - if err = client.Delete(ctx, k); err != nil { - t.Errorf("client.Delete: %v", err) - } -} - -func TestGetMulti(t *testing.T) { - type X struct { - I int - } - ctx := context.Background() - client := newClient(ctx) - p := NewKey(ctx, "X", "", time.Now().Unix(), nil) - - cases := []struct { - key *Key - put bool - }{ - {key: NewKey(ctx, "X", "item1", 0, p), put: true}, - {key: NewKey(ctx, "X", "item2", 0, p), put: false}, - {key: NewKey(ctx, "X", "item3", 0, p), put: false}, - {key: NewKey(ctx, "X", "item4", 0, p), put: true}, - } - - var src, dst []*X - var srcKeys, dstKeys []*Key - for _, c := range cases { - dst = append(dst, &X{}) - dstKeys = append(dstKeys, c.key) - if c.put { - src = append(src, &X{}) - srcKeys = append(srcKeys, c.key) - } - } - if _, err := client.PutMulti(ctx, srcKeys, src); err != nil { - t.Error(err) - } - err := client.GetMulti(ctx, dstKeys, dst) - if err == nil { - t.Errorf("client.GetMulti got %v, expected error", err) - } - e, ok := err.(MultiError) - if !ok { - t.Errorf("client.GetMulti got %T, expected MultiError", err) - } - for i, err := range e { - got, want := err, (error)(nil) - if !cases[i].put { - got, want = err, ErrNoSuchEntity - } - if got != want { - t.Errorf("MultiError[%d] == %v, want %v", i, got, want) - } - } -} - -type Z struct { - S string - T string `datastore:",noindex"` - P []byte - K []byte `datastore:",noindex"` -} - -func (z Z) String() string { - var lens []string - v := reflect.ValueOf(z) - for i := 0; i < v.NumField(); i++ { - if l := v.Field(i).Len(); l > 0 { - lens = append(lens, fmt.Sprintf("len(%s)=%d", v.Type().Field(i).Name, l)) - } - } - return fmt.Sprintf("Z{ %s }", strings.Join(lens, ",")) -} - -func TestUnindexableValues(t *testing.T) { - x1500 := strings.Repeat("x", 1500) - x1501 := strings.Repeat("x", 1501) - testCases := []struct { - in Z - wantErr bool - }{ - {in:
Z{S: x1500}, wantErr: false}, - {in: Z{S: x1501}, wantErr: true}, - {in: Z{T: x1500}, wantErr: false}, - {in: Z{T: x1501}, wantErr: false}, - {in: Z{P: []byte(x1500)}, wantErr: false}, - {in: Z{P: []byte(x1501)}, wantErr: true}, - {in: Z{K: []byte(x1500)}, wantErr: false}, - {in: Z{K: []byte(x1501)}, wantErr: false}, - } - ctx := context.Background() - client := newClient(ctx) - for _, tt := range testCases { - _, err := client.Put(ctx, NewIncompleteKey(ctx, "BasicsZ", nil), &tt.in) - if (err != nil) != tt.wantErr { - t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr) - } - } -} - -type SQChild struct { - I, J int - T, U int64 -} - -type SQTestCase struct { - desc string - q *Query - wantCount int - wantSum int -} - -func testSmallQueries(t *testing.T, ctx context.Context, client *Client, parent *Key, children []*SQChild, - testCases []SQTestCase, extraTests ...func()) { - keys := make([]*Key, len(children)) - for i := range keys { - keys[i] = NewIncompleteKey(ctx, "SQChild", parent) - } - keys, err := client.PutMulti(ctx, keys, children) - if err != nil { - t.Fatalf("client.PutMulti: %v", err) - } - defer func() { - err := client.DeleteMulti(ctx, keys) - if err != nil { - t.Errorf("client.DeleteMulti: %v", err) - } - }() - - for _, tc := range testCases { - count, err := client.Count(ctx, tc.q) - if err != nil { - t.Errorf("Count %q: %v", tc.desc, err) - continue - } - if count != tc.wantCount { - t.Errorf("Count %q: got %d want %d", tc.desc, count, tc.wantCount) - continue - } - } - - for _, tc := range testCases { - var got []SQChild - _, err := client.GetAll(ctx, tc.q, &got) - if err != nil { - t.Errorf("client.GetAll %q: %v", tc.desc, err) - continue - } - sum := 0 - for _, c := range got { - sum += c.I + c.J - } - if sum != tc.wantSum { - t.Errorf("sum %q: got %d want %d", tc.desc, sum, tc.wantSum) - continue - } - } - for _, x := range extraTests { - x() - } -} - -func TestFilters(t *testing.T) { - ctx := context.Background() - client := newClient(ctx) - parent := NewKey(ctx, "SQParent", "TestFilters", 0, nil) - now := time.Now().Truncate(time.Millisecond).Unix() - children := []*SQChild{ - {I: 0, T: now, U: now}, - {I: 1, T: now, U: now}, - {I: 2, T: now, U: now}, - {I: 3, T: now, U: now}, - {I: 4, T: now, U: now}, - {I: 5, T: now, U: now}, - {I: 6, T: now, U: now}, - {I: 7, T: now, U: now}, - } - baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now) - testSmallQueries(t, ctx, client, parent, children, []SQTestCase{ - { - "I>1", - baseQuery.Filter("I>", 1), - 6, - 2 + 3 + 4 + 5 + 6 + 7, - }, - { - "I>2 AND I<=5", - baseQuery.Filter("I>", 2).Filter("I<=", 5), - 3, - 3 + 4 + 5, - }, - { - "I>=3 AND I<3", - baseQuery.Filter("I>=", 3).Filter("I<", 3), - 0, - 0, - }, - { - "I=4", - baseQuery.Filter("I=", 4), - 1, - 4, - }, - }, func() { - got := []*SQChild{} - want := []*SQChild{ - {I: 0, T: now, U: now}, - {I: 1, T: now, U: now}, - {I: 2, T: now, U: now}, - {I: 3, T: now, U: now}, - {I: 4, T: now, U: now}, - {I: 5, T: now, U: now}, - {I: 6, T: now, U: now}, - {I: 7, T: now, U: now}, - } - _, err := client.GetAll(ctx, baseQuery.Order("I"), &got) - if err != nil { - t.Errorf("client.GetAll: %v", err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("compare: got=%v, want=%v", got, want) - } - }, func() { - got := []*SQChild{} - want := []*SQChild{ - {I: 7, T: now, U: now}, - {I: 6, T: now, U: now}, - {I: 5, T: now, U: now}, - {I: 4, T: now, U: now}, - {I: 3, T: now, U: now}, - {I: 2, T: now, U: now}, - {I: 1, T: now, U: now}, - {I: 0, T: now, U: 
now}, - } - _, err := client.GetAll(ctx, baseQuery.Order("-I"), &got) - if err != nil { - t.Errorf("client.GetAll: %v", err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("compare: got=%v, want=%v", got, want) - } - }) -} - -func TestEventualConsistency(t *testing.T) { - ctx := context.Background() - client := newClient(ctx) - parent := NewKey(ctx, "SQParent", "TestEventualConsistency", 0, nil) - now := time.Now().Truncate(time.Millisecond).Unix() - children := []*SQChild{ - {I: 0, T: now, U: now}, - {I: 1, T: now, U: now}, - {I: 2, T: now, U: now}, - } - query := NewQuery("SQChild").Ancestor(parent).Filter("T =", now).EventualConsistency() - testSmallQueries(t, ctx, client, parent, children, nil, func() { - got, err := client.Count(ctx, query) - if err != nil { - t.Fatalf("Count: %v", err) - } - if got < 0 || 3 < got { - t.Errorf("Count: got %d, want [0,3]", got) - } - }) -} - -func TestProjection(t *testing.T) { - ctx := context.Background() - client := newClient(ctx) - parent := NewKey(ctx, "SQParent", "TestProjection", 0, nil) - now := time.Now().Truncate(time.Millisecond).Unix() - children := []*SQChild{ - {I: 1 << 0, J: 100, T: now, U: now}, - {I: 1 << 1, J: 100, T: now, U: now}, - {I: 1 << 2, J: 200, T: now, U: now}, - {I: 1 << 3, J: 300, T: now, U: now}, - {I: 1 << 4, J: 300, T: now, U: now}, - } - baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Filter("J>", 150) - testSmallQueries(t, ctx, client, parent, children, []SQTestCase{ - { - "project", - baseQuery.Project("J"), - 3, - 200 + 300 + 300, - }, - { - "distinct", - baseQuery.Project("J").Distinct(), - 2, - 200 + 300, - }, - { - "project on meaningful (GD_WHEN) field", - baseQuery.Project("U"), - 3, - 0, - }, - }) -} - -func TestAllocateIDs(t *testing.T) { - ctx := context.Background() - client := newClient(ctx) - keys := make([]*Key, 5) - for i := range keys { - keys[i] = NewIncompleteKey(ctx, "AllocID", nil) - } - keys, err := client.AllocateIDs(ctx, keys) - if err != nil { - t.Errorf("AllocID #0 failed: %v", err) - } - if want := len(keys); want != 5 { - t.Errorf("Expected to allocate 5 keys, got %d", want) - } - for _, k := range keys { - if k.Incomplete() { - t.Errorf("Unexpected incomplete key found: %v", k) - } - } -} - -func TestGetAllWithFieldMismatch(t *testing.T) { - type Fat struct { - X, Y int - } - type Thin struct { - X int - } - - ctx := context.Background() - client := newClient(ctx) - // Ancestor queries (those within an entity group) are strongly consistent - // by default, which prevents a test from being flaky. - // See https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Data_consistency - // for more information.
- parent := NewKey(ctx, "SQParent", "TestGetAllWithFieldMismatch", 0, nil) - putKeys := make([]*Key, 3) - for i := range putKeys { - putKeys[i] = NewKey(ctx, "GetAllThing", "", int64(10+i), parent) - _, err := client.Put(ctx, putKeys[i], &Fat{X: 20 + i, Y: 30 + i}) - if err != nil { - t.Fatalf("client.Put: %v", err) - } - } - - var got []Thin - want := []Thin{ - {X: 20}, - {X: 21}, - {X: 22}, - } - getKeys, err := client.GetAll(ctx, NewQuery("GetAllThing").Ancestor(parent), &got) - if len(getKeys) != 3 && !reflect.DeepEqual(getKeys, putKeys) { - t.Errorf("client.GetAll: keys differ\ngetKeys=%v\nputKeys=%v", getKeys, putKeys) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("client.GetAll: entities differ\ngot =%v\nwant=%v", got, want) - } - if _, ok := err.(*ErrFieldMismatch); !ok { - t.Errorf("client.GetAll: got err=%v, want ErrFieldMismatch", err) - } -} - -func TestKindlessQueries(t *testing.T) { - type Dee struct { - I int - Why string - } - type Dum struct { - I int - Pling string - } - - ctx := context.Background() - client := newClient(ctx) - parent := NewKey(ctx, "Tweedle", "tweedle", 0, nil) - - keys := []*Key{ - NewKey(ctx, "Dee", "dee0", 0, parent), - NewKey(ctx, "Dum", "dum1", 0, parent), - NewKey(ctx, "Dum", "dum2", 0, parent), - NewKey(ctx, "Dum", "dum3", 0, parent), - } - src := []interface{}{ - &Dee{1, "binary0001"}, - &Dum{2, "binary0010"}, - &Dum{4, "binary0100"}, - &Dum{8, "binary1000"}, - } - keys, err := client.PutMulti(ctx, keys, src) - if err != nil { - t.Fatalf("put: %v", err) - } - - testCases := []struct { - desc string - query *Query - want []int - wantErr string - }{ - { - desc: "Dee", - query: NewQuery("Dee"), - want: []int{1}, - }, - { - desc: "Doh", - query: NewQuery("Doh"), - want: nil}, - { - desc: "Dum", - query: NewQuery("Dum"), - want: []int{2, 4, 8}, - }, - { - desc: "", - query: NewQuery(""), - want: []int{1, 2, 4, 8}, - }, - { - desc: "Kindless filter", - query: NewQuery("").Filter("__key__ =", keys[2]), - want: []int{4}, - }, - { - desc: "Kindless order", - query: NewQuery("").Order("__key__"), - want: []int{1, 2, 4, 8}, - }, - { - desc: "Kindless bad filter", - query: NewQuery("").Filter("I =", 4), - wantErr: "kind is required for filter: I", - }, - { - desc: "Kindless bad order", - query: NewQuery("").Order("-__key__"), - wantErr: "kind is required for all orders except __key__ ascending", - }, - } -loop: - for _, tc := range testCases { - q := tc.query.Ancestor(parent) - gotCount, err := client.Count(ctx, q) - if err != nil { - if tc.wantErr == "" || !strings.Contains(err.Error(), tc.wantErr) { - t.Errorf("count %q: err %v, want err %q", tc.desc, err, tc.wantErr) - } - continue - } - if tc.wantErr != "" { - t.Errorf("count %q: want err %q", tc.desc, tc.wantErr) - continue - } - if gotCount != len(tc.want) { - t.Errorf("count %q: got %d want %d", tc.desc, gotCount, len(tc.want)) - continue - } - var got []int - for iter := client.Run(ctx, q); ; { - var dst struct { - I int - Why, Pling string - } - _, err := iter.Next(&dst) - if err == Done { - break - } - if err != nil { - t.Errorf("iter.Next %q: %v", tc.desc, err) - continue loop - } - got = append(got, dst.I) - } - sort.Ints(got) - if !reflect.DeepEqual(got, tc.want) { - t.Errorf("elems %q: got %+v want %+v", tc.desc, got, tc.want) - continue - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/key.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/key.go deleted file mode 100644 index a1947d0c03..0000000000 --- 
a/Godeps/_workspace/src/google.golang.org/cloud/datastore/key.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package datastore - -import ( - "bytes" - "encoding/base64" - "encoding/gob" - "errors" - "strconv" - "strings" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - pb "google.golang.org/cloud/internal/datastore" -) - -// Key represents the datastore key for a stored entity, and is immutable. -type Key struct { - kind string - id int64 - name string - parent *Key - - namespace string -} - -func (k *Key) Kind() string { - return k.kind -} - -func (k *Key) ID() int64 { - return k.id -} - -func (k *Key) Name() string { - return k.name -} - -func (k *Key) Parent() *Key { - return k.parent -} - -func (k *Key) SetParent(v *Key) { - if v.Incomplete() { - panic("can't set an incomplete key as parent") - } - k.parent = v -} - -func (k *Key) Namespace() string { - return k.namespace -} - -// Incomplete returns whether the key does not refer to a stored entity. -func (k *Key) Incomplete() bool { - return k.name == "" && k.id == 0 -} - -// valid returns whether the key is valid. -func (k *Key) valid() bool { - if k == nil { - return false - } - for ; k != nil; k = k.parent { - if k.kind == "" { - return false - } - if k.name != "" && k.id != 0 { - return false - } - if k.parent != nil { - if k.parent.Incomplete() { - return false - } - if k.parent.namespace != k.namespace { - return false - } - } - } - return true -} - -func (k *Key) Equal(o *Key) bool { - for { - if k == nil || o == nil { - return k == o // if either is nil, both must be nil - } - if k.namespace != o.namespace || k.name != o.name || k.id != o.id || k.kind != o.kind { - return false - } - if k.parent == nil && o.parent == nil { - return true - } - k = k.parent - o = o.parent - } -} - -// marshal marshals the key's string representation to the buffer. -func (k *Key) marshal(b *bytes.Buffer) { - if k.parent != nil { - k.parent.marshal(b) - } - b.WriteByte('/') - b.WriteString(k.kind) - b.WriteByte(',') - if k.name != "" { - b.WriteString(k.name) - } else { - b.WriteString(strconv.FormatInt(k.id, 10)) - } -} - -// String returns a string representation of the key.
-func (k *Key) String() string { - if k == nil { - return "" - } - b := bytes.NewBuffer(make([]byte, 0, 512)) - k.marshal(b) - return b.String() -} - -// Note: Fields not renamed compared to appengine gobKey struct -// This ensures gobs created by appengine can be read here, and vice versa -type gobKey struct { - Kind string - StringID string - IntID int64 - Parent *gobKey - AppID string - Namespace string -} - -func keyToGobKey(k *Key) *gobKey { - if k == nil { - return nil - } - return &gobKey{ - Kind: k.kind, - StringID: k.name, - IntID: k.id, - Parent: keyToGobKey(k.parent), - Namespace: k.namespace, - } -} - -func gobKeyToKey(gk *gobKey) *Key { - if gk == nil { - return nil - } - return &Key{ - kind: gk.Kind, - name: gk.StringID, - id: gk.IntID, - parent: gobKeyToKey(gk.Parent), - namespace: gk.Namespace, - } -} - -func (k *Key) GobEncode() ([]byte, error) { - buf := new(bytes.Buffer) - if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (k *Key) GobDecode(buf []byte) error { - gk := new(gobKey) - if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil { - return err - } - *k = *gobKeyToKey(gk) - return nil -} - -func (k *Key) MarshalJSON() ([]byte, error) { - return []byte(`"` + k.Encode() + `"`), nil -} - -func (k *Key) UnmarshalJSON(buf []byte) error { - if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' { - return errors.New("datastore: bad JSON key") - } - k2, err := DecodeKey(string(buf[1 : len(buf)-1])) - if err != nil { - return err - } - *k = *k2 - return nil -} - -// Encode returns an opaque representation of the key -// suitable for use in HTML and URLs. -// This is compatible with the Python and Java runtimes. -func (k *Key) Encode() string { - pKey := keyToProto(k) - - b, err := proto.Marshal(pKey) - if err != nil { - panic(err) - } - - // Trailing padding is stripped. - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// DecodeKey decodes a key from the opaque representation returned by Encode. -func DecodeKey(encoded string) (*Key, error) { - // Re-add padding. - if m := len(encoded) % 4; m != 0 { - encoded += strings.Repeat("=", 4-m) - } - - b, err := base64.URLEncoding.DecodeString(encoded) - if err != nil { - return nil, err - } - - pKey := new(pb.Key) - if err := proto.Unmarshal(b, pKey); err != nil { - return nil, err - } - - return protoToKey(pKey), nil -} - -// NewIncompleteKey creates a new incomplete key. -// kind cannot be empty. -func NewIncompleteKey(ctx context.Context, kind string, parent *Key) *Key { - return NewKey(ctx, kind, "", 0, parent) -} - -// NewKey creates a new key. -// kind cannot be empty. -// Either one or both of name and id must be zero. If both are zero, -// the key returned is incomplete. -// parent must either be a complete key or nil.
-func NewKey(ctx context.Context, kind, name string, id int64, parent *Key) *Key { - return &Key{ - kind: kind, - name: name, - id: id, - parent: parent, - namespace: ctxNamespace(ctx), - } -} - -// AllocateIDs accepts a slice of incomplete keys and returns a -// slice of complete keys that are guaranteed to be valid in the datastore -func (c *Client) AllocateIDs(ctx context.Context, keys []*Key) ([]*Key, error) { - if keys == nil { - return nil, nil - } - - req := &pb.AllocateIdsRequest{Key: multiKeyToProto(keys)} - res := &pb.AllocateIdsResponse{} - if err := c.call(ctx, "allocateIds", req, res); err != nil { - return nil, err - } - - return multiProtoToKey(res.Key), nil -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/key_test.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/key_test.go deleted file mode 100644 index e134ffd586..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/key_test.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package datastore - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "testing" - - "golang.org/x/net/context" -) - -func TestNamespace(t *testing.T) { - c := context.Background() - k := NewIncompleteKey(c, "foo", nil) - if got, want := k.Namespace(), ""; got != want { - t.Errorf("No namespace, k.Namespace() = %q, want %q", got, want) - } - - c = WithNamespace(c, "gopherspace") - k = NewIncompleteKey(c, "foo", nil) - if got, want := k.Namespace(), "gopherspace"; got != want { - t.Errorf("No namespace, k.Namespace() = %q, want %q", got, want) - } -} - -func TestParent(t *testing.T) { - c := context.Background() - k := NewIncompleteKey(c, "foo", nil) - par := NewKey(c, "foomum", "", 1248, nil) - k.SetParent(par) - if got := k.Parent(); got != par { - t.Errorf("k.Parent() = %v; want %v", got, par) - } -} - -func TestEqual(t *testing.T) { - c := context.Background() - cN := WithNamespace(c, "gopherspace") - - testCases := []struct { - x, y *Key - equal bool - }{ - { - x: nil, - y: nil, - equal: true, - }, - { - x: NewKey(c, "kindA", "", 0, nil), - y: NewIncompleteKey(c, "kindA", nil), - equal: true, - }, - { - x: NewKey(c, "kindA", "nameA", 0, nil), - y: NewKey(c, "kindA", "nameA", 0, nil), - equal: true, - }, - { - x: NewKey(cN, "kindA", "nameA", 0, nil), - y: NewKey(cN, "kindA", "nameA", 0, nil), - equal: true, - }, - { - x: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)), - y: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)), - equal: true, - }, - { - x: NewKey(c, "kindA", "nameA", 0, nil), - y: NewKey(c, "kindB", "nameA", 0, nil), - equal: false, - }, - { - x: NewKey(c, "kindA", "nameA", 0, nil), - y: NewKey(c, "kindA", "nameB", 0, nil), - equal: false, - }, - { - x: NewKey(c, "kindA", "nameA", 0, nil), - y: NewKey(c, "kindA", "", 1337, nil), - equal: false, - }, - { - x: NewKey(c, "kindA", "nameA", 0, nil), - y: NewKey(cN, "kindA", "nameA", 0, nil), - equal: 
false, - }, - { - x: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)), - y: NewKey(c, "kindA", "", 1337, NewKey(c, "kindY", "nameX", 0, nil)), - equal: false, - }, - { - x: NewKey(c, "kindA", "", 1337, NewKey(c, "kindX", "nameX", 0, nil)), - y: NewKey(c, "kindA", "", 1337, nil), - equal: false, - }, - } - - for _, tt := range testCases { - if got := tt.x.Equal(tt.y); got != tt.equal { - t.Errorf("Equal(%v, %v) = %t; want %t", tt.x, tt.y, got, tt.equal) - } - if got := tt.y.Equal(tt.x); got != tt.equal { - t.Errorf("Equal(%v, %v) = %t; want %t", tt.y, tt.x, got, tt.equal) - } - } -} - -func TestEncoding(t *testing.T) { - c := context.Background() - cN := WithNamespace(c, "gopherspace") - - testCases := []struct { - k *Key - valid bool - }{ - { - k: nil, - valid: false, - }, - { - k: NewKey(c, "", "", 0, nil), - valid: false, - }, - { - k: NewKey(c, "kindA", "", 0, nil), - valid: true, - }, - { - k: NewKey(cN, "kindA", "", 0, nil), - valid: true, - }, - { - k: NewKey(c, "kindA", "nameA", 0, nil), - valid: true, - }, - { - k: NewKey(c, "kindA", "", 1337, nil), - valid: true, - }, - { - k: NewKey(c, "kindA", "nameA", 1337, nil), - valid: false, - }, - { - k: NewKey(c, "kindA", "", 0, NewKey(c, "kindB", "nameB", 0, nil)), - valid: true, - }, - { - k: NewKey(c, "kindA", "", 0, NewKey(c, "kindB", "", 0, nil)), - valid: false, - }, - { - k: NewKey(c, "kindA", "", 0, NewKey(cN, "kindB", "nameB", 0, nil)), - valid: false, - }, - } - - for _, tt := range testCases { - if got := tt.k.valid(); got != tt.valid { - t.Errorf("valid(%v) = %t; want %t", tt.k, got, tt.valid) - } - - // Check encoding/decoding for valid keys. - if !tt.valid { - continue - } - enc := tt.k.Encode() - dec, err := DecodeKey(enc) - if err != nil { - t.Errorf("DecodeKey(%q) from %v: %v", enc, tt.k, err) - continue - } - if !tt.k.Equal(dec) { - t.Errorf("Decoded key %v not equal to %v", dec, tt.k) - } - - b, err := json.Marshal(tt.k) - if err != nil { - t.Errorf("json.Marshal(%v): %v", tt.k, err) - continue - } - key := &Key{} - if err := json.Unmarshal(b, key); err != nil { - t.Errorf("json.Unmarshal(%s) for key %v: %v", b, tt.k, err) - continue - } - if !tt.k.Equal(key) { - t.Errorf("JSON decoded key %v not equal to %v", dec, tt.k) - } - - buf := &bytes.Buffer{} - gobEnc := gob.NewEncoder(buf) - if err := gobEnc.Encode(tt.k); err != nil { - t.Errorf("gobEnc.Encode(%v): %v", tt.k, err) - continue - } - gobDec := gob.NewDecoder(buf) - key = &Key{} - if err := gobDec.Decode(key); err != nil { - t.Errorf("gobDec.Decode() for key %v: %v", tt.k, err) - } - if !tt.k.Equal(key) { - t.Errorf("gob decoded key %v not equal to %v", dec, tt.k) - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/load.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/load.go deleted file mode 100644 index ef2e211bf5..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/load.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package datastore - -import ( - "fmt" - "reflect" - "time" - - pb "google.golang.org/cloud/internal/datastore" -) - -var ( - typeOfByteSlice = reflect.TypeOf([]byte(nil)) - typeOfTime = reflect.TypeOf(time.Time{}) -) - -// typeMismatchReason returns a string explaining why the property p could not -// be stored in an entity field of type v.Type(). -func typeMismatchReason(p Property, v reflect.Value) string { - entityType := "empty" - switch p.Value.(type) { - case int64: - entityType = "int" - case bool: - entityType = "bool" - case string: - entityType = "string" - case float64: - entityType = "float" - case *Key: - entityType = "*datastore.Key" - case time.Time: - entityType = "time.Time" - case []byte: - entityType = "[]byte" - } - - return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type()) -} - -type propertyLoader struct { - // m holds the number of times a substruct field like "Foo.Bar.Baz" has - // been seen so far. The map is constructed lazily. - m map[string]int -} - -func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, prev map[string]struct{}) string { - var sliceOk bool - var v reflect.Value - // Traverse a struct's struct-typed fields. - for name := p.Name; ; { - decoder, ok := codec.byName[name] - if !ok { - return "no such struct field" - } - v = structValue.Field(decoder.index) - if !v.IsValid() { - return "no such struct field" - } - if !v.CanSet() { - return "cannot set struct field" - } - - if decoder.substructCodec == nil { - break - } - - if v.Kind() == reflect.Slice { - if l.m == nil { - l.m = make(map[string]int) - } - index := l.m[p.Name] - l.m[p.Name] = index + 1 - for v.Len() <= index { - v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem())) - } - structValue = v.Index(index) - sliceOk = true - } else { - structValue = v - } - // Strip the "I." from "I.X". 
- name = name[len(codec.byIndex[decoder.index].name):] - codec = decoder.substructCodec - } - - var slice reflect.Value - if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { - slice = v - v = reflect.New(v.Type().Elem()).Elem() - } else if _, ok := prev[p.Name]; ok && !sliceOk { - // The property has turned out to be multi-valued, but the field is not a - // slice, so zero out whatever was set previously and report the mismatch. - v.Set(reflect.Zero(v.Type())) - - return "multiple-valued property requires a slice field type" - } - - prev[p.Name] = struct{}{} - - pValue := p.Value - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x, ok := pValue.(int64) - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - if v.OverflowInt(x) { - return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) - } - v.SetInt(x) - case reflect.Bool: - x, ok := pValue.(bool) - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - v.SetBool(x) - case reflect.String: - x, ok := pValue.(string) - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - v.SetString(x) - case reflect.Float32, reflect.Float64: - x, ok := pValue.(float64) - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - if v.OverflowFloat(x) { - return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) - } - v.SetFloat(x) - case reflect.Ptr: - x, ok := pValue.(*Key) - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - if _, ok := v.Interface().(*Key); !ok { - return typeMismatchReason(p, v) - } - v.Set(reflect.ValueOf(x)) - case reflect.Struct: - switch v.Type() { - case typeOfTime: - x, ok := pValue.(time.Time) - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - v.Set(reflect.ValueOf(x)) - default: - return typeMismatchReason(p, v) - } - case reflect.Slice: - x, ok := pValue.([]byte) - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - if v.Type().Elem().Kind() != reflect.Uint8 { - return typeMismatchReason(p, v) - } - v.SetBytes(x) - default: - return typeMismatchReason(p, v) - } - if slice.IsValid() { - slice.Set(reflect.Append(slice, v)) - } - return "" -} - -// loadEntity loads an entity protobuf into a PropertyLoadSaver or struct pointer. -func loadEntity(dst interface{}, src *pb.Entity) (err error) { - props := protoToProperties(src) - if e, ok := dst.(PropertyLoadSaver); ok { - return e.Load(props) - } - return LoadStruct(dst, props) -} - -func (s structPLS) Load(props []Property) error { - var fieldName, reason string - var l propertyLoader - - prev := make(map[string]struct{}) - for _, p := range props { - if errStr := l.load(s.codec, s.v, p, prev); errStr != "" { - // We don't return early, as we try to load as many properties as possible. - // It is valid to load an entity into a struct that cannot fully represent it. - // That case returns an error, but the caller is free to ignore it.
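// A minimal sketch of the tolerant-loading contract described above: LoadStruct
// keeps going past mismatched properties and surfaces the failure as
// *ErrFieldMismatch, which a caller may deliberately ignore. The Narrow type
// and the property values here are hypothetical.
//
//	type Narrow struct{ Name string } // has no Height field
//	var n Narrow
//	err := LoadStruct(&n, []Property{
//		{Name: "Name", Value: "George"},
//		{Name: "Height", Value: int64(32)},
//	})
//	if _, ok := err.(*ErrFieldMismatch); ok {
//		err = nil // n.Name is set; the unmatched property is reported, not fatal
//	}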
- fieldName, reason = p.Name, errStr - } - } - if reason != "" { - return &ErrFieldMismatch{ - StructType: s.v.Type(), - FieldName: fieldName, - Reason: reason, - } - } - return nil -} - -func protoToProperties(src *pb.Entity) []Property { - props := src.Property - out := make([]Property, 0, len(props)) - for len(props) > 0 { - x := props[0] - props = props[1:] - noIndex := !x.GetValue().GetIndexed() - if x.Value.ListValue == nil { - out = append(out, Property{ - Name: x.GetName(), - Value: propValue(x.Value), - NoIndex: noIndex, - Multiple: false, - }) - } else { - for _, v := range x.Value.ListValue { - out = append(out, Property{ - Name: x.GetName(), - Value: propValue(v), - NoIndex: noIndex, - Multiple: true, - }) - } - } - } - return out -} - -// propValue returns a Go value that combines the raw PropertyValue with a -// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time. -func propValue(v *pb.Value) interface{} { - // TODO(PSG-Luna): Support EntityValue. - // TODO(PSG-Luna): GeoPoint seems to be gone from the v1 proto; reimplement it once it's re-added. - switch { - case v.IntegerValue != nil: - return *v.IntegerValue - case v.TimestampMicrosecondsValue != nil: - return fromUnixMicro(*v.TimestampMicrosecondsValue) - case v.BooleanValue != nil: - return *v.BooleanValue - case v.StringValue != nil: - return *v.StringValue - case v.BlobValue != nil: - return []byte(v.BlobValue) - case v.BlobKeyValue != nil: - return *v.BlobKeyValue - case v.DoubleValue != nil: - return *v.DoubleValue - case v.KeyValue != nil: - return protoToKey(v.KeyValue) - } - return nil -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/prop.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/prop.go deleted file mode 100644 index 43b6c22937..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/prop.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package datastore - -import ( - "fmt" - "reflect" - "strings" - "sync" - "unicode" -) - -// Entities with more than this many indexed properties will not be saved. -const maxIndexedProperties = 5000 - -// []byte fields more than 1 megabyte long will not be loaded or saved. -const maxBlobLen = 1 << 20 - -// Property is a name/value pair plus some metadata. A datastore entity's -// contents are loaded and saved as a sequence of Properties. An entity can -// have multiple Properties with the same name, provided that p.Multiple is -// true on all of that entity's Properties with that name. -type Property struct { - // Name is the property name. - Name string - // Value is the property value. The valid types are: - // - int64 - // - bool - // - string - // - float64 - // - *Key - // - time.Time - // - []byte (up to 1 megabyte in length) - // This set is smaller than the set of valid struct field types that the - // datastore can load and save.
A Property Value cannot be a slice (apart - // from []byte); use multiple Properties instead. Also, a Value's type - // must be explicitly on the list above; it is not sufficient for the - // underlying type to be on that list. For example, a Value of "type - // myInt64 int64" is invalid. Smaller-width integers and floats are also - // invalid. Again, this is more restrictive than the set of valid struct - // field types. - // - // A Value will have an opaque type when loading entities from an index, - // such as via a projection query. Load entities into a struct instead - // of a PropertyLoadSaver when using a projection query. - // - // A Value may also be the nil interface value; this is equivalent to - // Python's None but not directly representable by a Go struct. Loading - // a nil-valued property into a struct will set that field to the zero - // value. - Value interface{} - // NoIndex is whether the datastore cannot index this property. - // If NoIndex is set to false, []byte values are limited to 1500 bytes and - // string values are limited to 1500 bytes. - NoIndex bool - // Multiple is whether the entity can have multiple properties with - // the same name. Even if a particular instance only has one property with - // a certain name, Multiple should be true if a struct would best represent - // it as a field of type []T instead of type T. - Multiple bool -} - -// PropertyLoadSaver can be converted from and to a slice of Properties. -type PropertyLoadSaver interface { - Load([]Property) error - Save() ([]Property, error) -} - -// PropertyList converts a []Property to implement PropertyLoadSaver. -type PropertyList []Property - -var ( - typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem() - typeOfPropertyList = reflect.TypeOf(PropertyList(nil)) -) - -// Load loads all of the provided properties into l. -// It does not first reset *l to an empty slice. -func (l *PropertyList) Load(p []Property) error { - *l = append(*l, p...) - return nil -} - -// Save saves all of l's properties as a slice of Properties. -func (l *PropertyList) Save() ([]Property, error) { - return *l, nil -} - -// validPropertyName returns whether name consists of one or more valid Go -// identifiers joined by ".". -func validPropertyName(name string) bool { - if name == "" { - return false - } - for _, s := range strings.Split(name, ".") { - if s == "" { - return false - } - first := true - for _, c := range s { - if first { - first = false - if c != '_' && !unicode.IsLetter(c) { - return false - } - } else { - if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - } - return true -} - -// structTag is the parsed `datastore:"name,options"` tag of a struct field. -// If a field has no tag, or the tag has an empty name, then the structTag's -// name is just the field name. A "-" name means that the datastore ignores -// that field. -type structTag struct { - name string - noIndex bool -} - -// structCodec describes how to convert a struct to and from a sequence of -// properties. -type structCodec struct { - // byIndex gives the structTag for the i'th field. - byIndex []structTag - // byName gives the field codec for the structTag with the given name. - byName map[string]fieldCodec - // hasSlice is whether a struct or any of its nested or embedded structs - // has a slice-typed field (other than []byte). - hasSlice bool - // complete is whether the structCodec is complete. An incomplete - // structCodec may be encountered when walking a recursive struct. 
- complete bool -} - -// fieldCodec is a struct field's index and, if that struct field's type is -// itself a struct, that substruct's structCodec. -type fieldCodec struct { - index int - substructCodec *structCodec -} - -// structCodecs collects the structCodecs that have already been calculated. -var ( - structCodecsMutex sync.Mutex - structCodecs = make(map[reflect.Type]*structCodec) -) - -// getStructCodec returns the structCodec for the given struct type. -func getStructCodec(t reflect.Type) (*structCodec, error) { - structCodecsMutex.Lock() - defer structCodecsMutex.Unlock() - return getStructCodecLocked(t) -} - -// getStructCodecLocked implements getStructCodec. The structCodecsMutex must -// be held when calling this function. -func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) { - c, ok := structCodecs[t] - if ok { - return c, nil - } - c = &structCodec{ - byIndex: make([]structTag, t.NumField()), - byName: make(map[string]fieldCodec), - } - - // Add c to the structCodecs map before we are sure it is good. If t is - // a recursive type, it needs to find the incomplete entry for itself in - // the map. - structCodecs[t] = c - defer func() { - if retErr != nil { - delete(structCodecs, t) - } - }() - - for i := range c.byIndex { - f := t.Field(i) - name, opts := f.Tag.Get("datastore"), "" - if i := strings.Index(name, ","); i != -1 { - name, opts = name[:i], name[i+1:] - } - if name == "" { - if !f.Anonymous { - name = f.Name - } - } else if name == "-" { - c.byIndex[i] = structTag{name: name} - continue - } else if !validPropertyName(name) { - return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name) - } - - substructType, fIsSlice := reflect.Type(nil), false - switch f.Type.Kind() { - case reflect.Struct: - substructType = f.Type - case reflect.Slice: - if f.Type.Elem().Kind() == reflect.Struct { - substructType = f.Type.Elem() - } - fIsSlice = f.Type != typeOfByteSlice - c.hasSlice = c.hasSlice || fIsSlice - } - - if substructType != nil && substructType != typeOfTime { - if name != "" { - name = name + "." - } - sub, err := getStructCodecLocked(substructType) - if err != nil { - return nil, err - } - if !sub.complete { - return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name) - } - if fIsSlice && sub.hasSlice { - return nil, fmt.Errorf( - "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name) - } - c.hasSlice = c.hasSlice || sub.hasSlice - for relName := range sub.byName { - absName := name + relName - if _, ok := c.byName[absName]; ok { - return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", absName) - } - c.byName[absName] = fieldCodec{index: i, substructCodec: sub} - } - } else { - if _, ok := c.byName[name]; ok { - return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name) - } - c.byName[name] = fieldCodec{index: i} - } - - c.byIndex[i] = structTag{ - name: name, - noIndex: opts == "noindex", - } - } - c.complete = true - return c, nil -} - -// structPLS adapts a struct to be a PropertyLoadSaver. -type structPLS struct { - v reflect.Value - codec *structCodec -} - -// newStructPLS returns a PropertyLoadSaver for the struct pointer p. 
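// newStructPLS (below) adapts a plain struct pointer to the PropertyLoadSaver
// interface. When the schema is not known ahead of time, PropertyList already
// implements the interface directly, so a sketch like the following (with
// hypothetical property values) needs no struct at all:
//
//	var pl PropertyList
//	_ = pl.Load([]Property{
//		{Name: "Name", Value: "George"},
//		{Name: "Height", Value: int64(32)},
//	})
//	props, _ := pl.Save() // returns both properties unchanged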
-func newStructPLS(p interface{}) (PropertyLoadSaver, error) { - v := reflect.ValueOf(p) - if v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct { - return nil, ErrInvalidEntityType - } - v = v.Elem() - codec, err := getStructCodec(v.Type()) - if err != nil { - return nil, err - } - return structPLS{v, codec}, nil -} - -// LoadStruct loads the properties from p to dst. -// dst must be a struct pointer. -func LoadStruct(dst interface{}, p []Property) error { - x, err := newStructPLS(dst) - if err != nil { - return err - } - return x.Load(p) -} - -// SaveStruct returns the properties from src as a slice of Properties. -// src must be a struct pointer. -func SaveStruct(src interface{}) ([]Property, error) { - x, err := newStructPLS(src) - if err != nil { - return nil, err - } - return x.Save() -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/query.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/query.go deleted file mode 100644 index 3eb8a8500d..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/query.go +++ /dev/null @@ -1,755 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package datastore - -import ( - "encoding/base64" - "errors" - "fmt" - "math" - "reflect" - "strconv" - "strings" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - pb "google.golang.org/cloud/internal/datastore" -) - -type operator int - -const ( - lessThan operator = iota - lessEq - equal - greaterEq - greaterThan - - keyFieldName = "__key__" -) - -var operatorToProto = map[operator]*pb.PropertyFilter_Operator{ - lessThan: pb.PropertyFilter_LESS_THAN.Enum(), - lessEq: pb.PropertyFilter_LESS_THAN_OR_EQUAL.Enum(), - equal: pb.PropertyFilter_EQUAL.Enum(), - greaterEq: pb.PropertyFilter_GREATER_THAN_OR_EQUAL.Enum(), - greaterThan: pb.PropertyFilter_GREATER_THAN.Enum(), -} - -// filter is a conditional filter on query results. -type filter struct { - FieldName string - Op operator - Value interface{} -} - -type sortDirection int - -const ( - ascending sortDirection = iota - descending -) - -var sortDirectionToProto = map[sortDirection]*pb.PropertyOrder_Direction{ - ascending: pb.PropertyOrder_ASCENDING.Enum(), - descending: pb.PropertyOrder_DESCENDING.Enum(), -} - -// order is a sort order on query results. -type order struct { - FieldName string - Direction sortDirection -} - -// NewQuery creates a new Query for a specific entity kind. -// -// An empty kind means to return all entities, including entities created and -// managed by other App Engine features, and is called a kindless query. -// Kindless queries cannot include filters or sort orders on property values. -func NewQuery(kind string) *Query { - return &Query{ - kind: kind, - limit: -1, - } -} - -// Query represents a datastore query. 
-type Query struct { - kind string - ancestor *Key - filter []filter - order []order - projection []string - - distinct bool - keysOnly bool - eventual bool - limit int32 - offset int32 - start []byte - end []byte - - trans *Transaction - - err error -} - -func (q *Query) clone() *Query { - x := *q - // Copy the contents of the slice-typed fields to a new backing store. - if len(q.filter) > 0 { - x.filter = make([]filter, len(q.filter)) - copy(x.filter, q.filter) - } - if len(q.order) > 0 { - x.order = make([]order, len(q.order)) - copy(x.order, q.order) - } - return &x -} - -// Ancestor returns a derivative query with an ancestor filter. -// The ancestor should not be nil. -func (q *Query) Ancestor(ancestor *Key) *Query { - q = q.clone() - if ancestor == nil { - q.err = errors.New("datastore: nil query ancestor") - return q - } - q.ancestor = ancestor - return q -} - -// EventualConsistency returns a derivative query that returns eventually -// consistent results. -// It only has an effect on ancestor queries. -func (q *Query) EventualConsistency() *Query { - q = q.clone() - q.eventual = true - return q -} - -// Transaction returns a derivative query that is associated with the given -// transaction. -// -// All reads performed as part of the transaction will come from a single -// consistent snapshot. Furthermore, if the transaction is set to a -// serializable isolation level, another transaction cannot concurrently modify -// the data that is read or modified by this transaction. -func (q *Query) Transaction(t *Transaction) *Query { - q = q.clone() - q.trans = t - return q -} - -// Filter returns a derivative query with a field-based filter. -// The filterStr argument must be a field name followed by optional space, -// followed by an operator, one of ">", "<", ">=", "<=", or "=". -// Fields are compared against the provided value using the operator. -// Multiple filters are AND'ed together. -// Field names which contain spaces, quote marks, or operator characters -// should be passed as quoted Go string literals as returned by strconv.Quote -// or the fmt package's %q verb. -func (q *Query) Filter(filterStr string, value interface{}) *Query { - q = q.clone() - filterStr = strings.TrimSpace(filterStr) - if filterStr == "" { - q.err = fmt.Errorf("datastore: invalid filter %q", filterStr) - return q - } - f := filter{ - FieldName: strings.TrimRight(filterStr, " ><=!"), - Value: value, - } - switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op { - case "<=": - f.Op = lessEq - case ">=": - f.Op = greaterEq - case "<": - f.Op = lessThan - case ">": - f.Op = greaterThan - case "=": - f.Op = equal - default: - q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr) - return q - } - var err error - f.FieldName, err = unquote(f.FieldName) - if err != nil { - q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", f.FieldName) - return q - } - q.filter = append(q.filter, f) - return q -} - -// Order returns a derivative query with a field-based sort order. Orders are -// applied in the order they are added. The default order is ascending; to sort -// in descending order prefix the fieldName with a minus sign (-). -// Field names which contain spaces, quote marks, or the minus sign -// should be passed as quoted Go string literals as returned by strconv.Quote -// or the fmt package's %q verb. 
-func (q *Query) Order(fieldName string) *Query { - q = q.clone() - fieldName, dir := strings.TrimSpace(fieldName), ascending - if strings.HasPrefix(fieldName, "-") { - fieldName, dir = strings.TrimSpace(fieldName[1:]), descending - } else if strings.HasPrefix(fieldName, "+") { - q.err = fmt.Errorf("datastore: invalid order: %q", fieldName) - return q - } - fieldName, err := unquote(fieldName) - if err != nil { - q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", fieldName) - return q - } - if fieldName == "" { - q.err = errors.New("datastore: empty order") - return q - } - q.order = append(q.order, order{ - Direction: dir, - FieldName: fieldName, - }) - return q -} - -// unquote optionally interprets s as a double-quoted or backquoted Go -// string literal if it begins with the relevant character. -func unquote(s string) (string, error) { - if s == "" || (s[0] != '`' && s[0] != '"') { - return s, nil - } - return strconv.Unquote(s) -} - -// Project returns a derivative query that yields only the given fields. It -// cannot be used with KeysOnly. -func (q *Query) Project(fieldNames ...string) *Query { - q = q.clone() - q.projection = append([]string(nil), fieldNames...) - return q -} - -// Distinct returns a derivative query that yields de-duplicated entities with -// respect to the set of projected fields. It is only used for projection -// queries. -func (q *Query) Distinct() *Query { - q = q.clone() - q.distinct = true - return q -} - -// KeysOnly returns a derivative query that yields only keys, not keys and -// entities. It cannot be used with projection queries. -func (q *Query) KeysOnly() *Query { - q = q.clone() - q.keysOnly = true - return q -} - -// Limit returns a derivative query that has a limit on the number of results -// returned. A negative value means unlimited. -func (q *Query) Limit(limit int) *Query { - q = q.clone() - if limit < math.MinInt32 || limit > math.MaxInt32 { - q.err = errors.New("datastore: query limit overflow") - return q - } - q.limit = int32(limit) - return q -} - -// Offset returns a derivative query that has an offset of how many keys to -// skip over before returning results. A negative value is invalid. -func (q *Query) Offset(offset int) *Query { - q = q.clone() - if offset < 0 { - q.err = errors.New("datastore: negative query offset") - return q - } - if offset > math.MaxInt32 { - q.err = errors.New("datastore: query offset overflow") - return q - } - q.offset = int32(offset) - return q -} - -// Start returns a derivative query with the given start point. -func (q *Query) Start(c Cursor) *Query { - q = q.clone() - if c.cc == nil { - q.err = errors.New("datastore: invalid cursor") - return q - } - q.start = c.cc - return q -} - -// End returns a derivative query with the given end point. -func (q *Query) End(c Cursor) *Query { - q = q.clone() - if c.cc == nil { - q.err = errors.New("datastore: invalid cursor") - return q - } - q.end = c.cc - return q -} - -// toProto converts the query to a protocol buffer. 
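// The builder methods above (Filter, Order, Project, Limit and friends) are
// all derivative: each one clones the receiver, so a base query can be reused
// safely. A sketch with hypothetical kind and field names:
//
//	base := NewQuery("Gopher").Filter("Height >=", 10).Order("-Height")
//	tallest := base.Limit(5)                  // five tallest gophers
//	george := base.Filter("Name =", "George") // base itself is unchanged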
-func (q *Query) toProto(req *pb.RunQueryRequest) error { - dst := pb.Query{} - if len(q.projection) != 0 && q.keysOnly { - return errors.New("datastore: query cannot both project and be keys-only") - } - dst.Reset() - if q.kind != "" { - dst.Kind = []*pb.KindExpression{&pb.KindExpression{Name: proto.String(q.kind)}} - } - if q.projection != nil { - for _, propertyName := range q.projection { - dst.Projection = append(dst.Projection, &pb.PropertyExpression{Property: &pb.PropertyReference{Name: proto.String(propertyName)}}) - } - - if q.distinct { - for _, propertyName := range q.projection { - dst.GroupBy = append(dst.GroupBy, &pb.PropertyReference{Name: proto.String(propertyName)}) - } - } - } - if q.keysOnly { - dst.Projection = []*pb.PropertyExpression{&pb.PropertyExpression{Property: &pb.PropertyReference{Name: proto.String(keyFieldName)}}} - } - - var filters []*pb.Filter - for _, qf := range q.filter { - if qf.FieldName == "" { - return errors.New("datastore: empty query filter field name") - } - v, errStr := interfaceToProto(reflect.ValueOf(qf.Value).Interface()) - if errStr != "" { - return errors.New("datastore: bad query filter value type: " + errStr) - } - xf := &pb.PropertyFilter{ - Operator: operatorToProto[qf.Op], - Property: &pb.PropertyReference{Name: proto.String(qf.FieldName)}, - Value: v, - } - if xf.Operator == nil { - return errors.New("datastore: unknown query filter operator") - } - filters = append(filters, &pb.Filter{PropertyFilter: xf}) - } - - if q.ancestor != nil { - filters = append(filters, &pb.Filter{ - PropertyFilter: &pb.PropertyFilter{ - Property: &pb.PropertyReference{Name: proto.String("__key__")}, - Operator: pb.PropertyFilter_HAS_ANCESTOR.Enum(), - Value: &pb.Value{KeyValue: keyToProto(q.ancestor)}, - }}) - } - - if len(filters) == 1 { - dst.Filter = filters[0] - } else if len(filters) > 1 { - dst.Filter = &pb.Filter{CompositeFilter: &pb.CompositeFilter{ - Operator: pb.CompositeFilter_AND.Enum(), - Filter: filters, - }} - } - - for _, qo := range q.order { - if qo.FieldName == "" { - return errors.New("datastore: empty query order field name") - } - xo := &pb.PropertyOrder{ - Property: &pb.PropertyReference{Name: proto.String(qo.FieldName)}, - Direction: sortDirectionToProto[qo.Direction], - } - if xo.Direction == nil { - return errors.New("datastore: unknown query order direction") - } - dst.Order = append(dst.Order, xo) - } - if q.limit >= 0 { - dst.Limit = proto.Int32(q.limit) - } - if q.offset != 0 { - dst.Offset = proto.Int32(q.offset) - } - dst.StartCursor = q.start - dst.EndCursor = q.end - - if t := q.trans; t != nil { - if t.id == nil { - return errExpiredTransaction - } - req.ReadOptions = &pb.ReadOptions{Transaction: t.id} - } - - req.Query = &dst - return nil -} - -// Count returns the number of results for the given query. -func (c *Client) Count(ctx context.Context, q *Query) (int, error) { - // Check that the query is well-formed. - if q.err != nil { - return 0, q.err - } - - // Run a copy of the query, with keysOnly true (if we're not a projection, - // since the two are incompatible). 
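// toProto (above) rejects a query that is both a projection and keys-only,
// which is why the keysOnly flag is only flipped on here when the query has no
// projection. A hypothetical caller, assuming a *Client and a context.Context
// in scope, would hit the same guard:
//
//	q := NewQuery("Gopher").Project("Name").KeysOnly()
//	_, err := client.GetAll(ctx, q, nil)
//	// err: "datastore: query cannot both project and be keys-only"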
- newQ := q.clone() - newQ.keysOnly = len(newQ.projection) == 0 - req := &pb.RunQueryRequest{} - - if ns := ctxNamespace(ctx); ns != "" { - req.PartitionId = &pb.PartitionId{ - Namespace: proto.String(ns), - } - } - if err := newQ.toProto(req); err != nil { - return 0, err - } - res := &pb.RunQueryResponse{} - if err := c.call(ctx, "runQuery", req, res); err != nil { - return 0, err - } - var n int - b := res.Batch - for { - n += len(b.GetEntityResult()) - if b.GetMoreResults() != pb.QueryResultBatch_NOT_FINISHED { - break - } - // TODO(jbd): Support count queries that have a limit and an offset. - if err := callNext(ctx, c, req, res, 0, 0); err != nil { - return 0, err - } - // callNext resets res and refills it, so re-fetch the batch before - // reading from it again. - b = res.GetBatch() - } - return n, nil -} - -func callNext(ctx context.Context, client *Client, req *pb.RunQueryRequest, res *pb.RunQueryResponse, offset, limit int32) error { - if res.GetBatch().EndCursor == nil { - return errors.New("datastore: internal error: server did not return a cursor") - } - req.Query.StartCursor = res.GetBatch().GetEndCursor() - if limit >= 0 { - req.Query.Limit = proto.Int32(limit) - } - if offset != 0 { - req.Query.Offset = proto.Int32(offset) - } - res.Reset() - return client.call(ctx, "runQuery", req, res) -} - -// GetAll runs the provided query in the given context and returns all keys -// that match that query, as well as appending the values to dst. -// -// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non- -// interface, non-pointer type P such that P or *P implements PropertyLoadSaver. -// -// As a special case, *PropertyList is an invalid type for dst, even though a -// PropertyList is a slice of structs. It is treated as invalid to avoid being -// mistakenly passed when *[]PropertyList was intended. -// -// The keys returned by GetAll will be in a 1-1 correspondence with the entities -// added to dst. -// -// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys. -func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) ([]*Key, error) { - var ( - dv reflect.Value - mat multiArgType - elemType reflect.Type - errFieldMismatch error - ) - if !q.keysOnly { - dv = reflect.ValueOf(dst) - if dv.Kind() != reflect.Ptr || dv.IsNil() { - return nil, ErrInvalidEntityType - } - dv = dv.Elem() - mat, elemType = checkMultiArg(dv) - if mat == multiArgTypeInvalid || mat == multiArgTypeInterface { - return nil, ErrInvalidEntityType - } - } - - var keys []*Key - for t := c.Run(ctx, q); ; { - k, e, err := t.next() - if err == Done { - break - } - if err != nil { - return keys, err - } - if !q.keysOnly { - ev := reflect.New(elemType) - if elemType.Kind() == reflect.Map { - // This is a special case. The zero values of a map type are - // not immediately useful; they have to be make'd. - // - // Funcs and channels are similar, in that a zero value is not useful, - // but even a freshly make'd channel isn't useful: there's no fixed - // channel buffer size that is always going to be large enough, and - // there's no goroutine to drain the other end. Theoretically, these - // types could be supported, for example by sniffing for a constructor - // method or requiring prior registration, but for now it's not a - // frequent enough concern to be worth it. Programmers can work around - // it by explicitly using Iterator.Next instead of the Query.GetAll - // convenience method.
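// The explicit Iterator.Next loop mentioned in the comment above, spelled out
// as a sketch. Gopher is the struct used by this package's tests; client and
// ctx are assumed to be a *Client and a context.Context in scope:
//
//	it := client.Run(ctx, NewQuery("Gopher"))
//	for {
//		var g Gopher
//		key, err := it.Next(&g)
//		if err == Done {
//			break // no more results
//		}
//		if err != nil {
//			return err
//		}
//		_ = key // use key and g here
//	}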
- x := reflect.MakeMap(elemType) - ev.Elem().Set(x) - } - if err = loadEntity(ev.Interface(), e); err != nil { - if _, ok := err.(*ErrFieldMismatch); ok { - // We continue loading entities even in the face of field mismatch errors. - // If we encounter any other error, that other error is returned. Otherwise, - // an ErrFieldMismatch is returned. - errFieldMismatch = err - } else { - return keys, err - } - } - if mat != multiArgTypeStructPtr { - ev = ev.Elem() - } - dv.Set(reflect.Append(dv, ev)) - } - keys = append(keys, k) - } - return keys, errFieldMismatch -} - -// Run runs the given query in the given context. -func (c *Client) Run(ctx context.Context, q *Query) *Iterator { - if q.err != nil { - return &Iterator{err: q.err} - } - t := &Iterator{ - ctx: ctx, - client: c, - limit: q.limit, - q: q, - prevCC: q.start, - } - t.req.Reset() - if ns := ctxNamespace(ctx); ns != "" { - t.req.PartitionId = &pb.PartitionId{ - Namespace: proto.String(ns), - } - } - if err := q.toProto(&t.req); err != nil { - t.err = err - return t - } - if err := c.call(ctx, "runQuery", &t.req, &t.res); err != nil { - t.err = err - return t - } - b := t.res.GetBatch() - offset := q.offset - b.GetSkippedResults() - for offset > 0 && b.GetMoreResults() == pb.QueryResultBatch_NOT_FINISHED { - t.prevCC = b.GetEndCursor() - if err := callNext(t.ctx, c, &t.req, &t.res, offset, t.limit); err != nil { - t.err = err - break - } - // callNext resets t.res, so re-fetch the batch before reading from it. - b = t.res.GetBatch() - skip := b.GetSkippedResults() - if skip < 0 { - t.err = errors.New("datastore: internal error: negative number of skipped_results") - break - } - offset -= skip - } - if offset < 0 { - t.err = errors.New("datastore: internal error: query offset was overshot") - } - return t -} - -// Iterator is the result of running a query. -type Iterator struct { - ctx context.Context - client *Client - err error - // req is the request we sent previously; we keep track of it so that it - // can be resent with an updated start cursor. - req pb.RunQueryRequest - // res is the result of the most recent RunQuery or Next API call. - res pb.RunQueryResponse - // i is how many elements of the current result batch we have iterated over. - i int - // limit is the limit on the number of results this iterator should return. - // A negative value means unlimited. - limit int32 - // q is the original query which yielded this iterator. - q *Query - // prevCC is the compiled cursor that marks the end of the previous batch - // of results. - prevCC []byte -} - -// Done is returned when a query iteration has completed. -var Done = errors.New("datastore: query has no more results") - -// Next returns the key of the next result. When there are no more results, -// Done is returned as the error. -// -// If the query is not keys only and dst is non-nil, it also loads the entity -// stored for that key into the struct pointer or PropertyLoadSaver dst, with -// the same semantics and possible errors as for the Get function. -func (t *Iterator) Next(dst interface{}) (*Key, error) { - k, e, err := t.next() - if err != nil { - return nil, err - } - if dst != nil && !t.q.keysOnly { - err = loadEntity(dst, e) - } - return k, err -} - -func (t *Iterator) next() (*Key, *pb.Entity, error) { - if t.err != nil { - return nil, nil, t.err - } - - // Issue further runQuery RPCs as necessary.
- b := t.res.GetBatch() - for t.i == len(b.EntityResult) { - if b.GetMoreResults() != pb.QueryResultBatch_NOT_FINISHED { - t.err = Done - return nil, nil, t.err - } - t.prevCC = b.GetEndCursor() - if err := callNext(t.ctx, t.client, &t.req, &t.res, 0, t.limit); err != nil { - t.err = err - return nil, nil, t.err - } - // callNext resets t.res, so re-fetch the batch before examining it. - b = t.res.GetBatch() - if b.GetSkippedResults() != 0 { - t.err = errors.New("datastore: internal error: iterator has skipped results") - return nil, nil, t.err - } - t.i = 0 - if t.limit >= 0 { - t.limit -= int32(len(b.EntityResult)) - if t.limit < 0 { - t.err = errors.New("datastore: internal error: query returned more results than the limit") - return nil, nil, t.err - } - } - } - - // Extract the key from the t.i'th element of the current batch. - e := b.EntityResult[t.i] - t.i++ - if e.Entity.Key == nil { - return nil, nil, errors.New("datastore: internal error: server did not return a key") - } - k := protoToKey(e.Entity.Key) - if k.Incomplete() { - return nil, nil, errors.New("datastore: internal error: server returned an invalid key") - } - return k, e.Entity, nil -} - -// Cursor returns a cursor for the iterator's current location. -func (t *Iterator) Cursor() (Cursor, error) { - if t.err != nil && t.err != Done { - return Cursor{}, t.err - } - // If we are at either end of the current batch of results, - // return the compiled cursor at that end. - b := t.res.Batch - skipped := b.GetSkippedResults() - if t.i == 0 && skipped == 0 { - if t.prevCC == nil { - // A nil pointer (of type *pb.CompiledCursor) means no constraint: - // passing it as the end cursor of a new query means unlimited results - // (glossing over the integer limit parameter for now). - // A non-nil pointer to an empty pb.CompiledCursor means the start: - // passing it as the end cursor of a new query means 0 results. - // If prevCC was nil, then the original query had no start cursor, but - // Iterator.Cursor should return "the start" instead of unlimited. - return Cursor{}, nil - } - return Cursor{t.prevCC}, nil - } - if t.i == len(b.EntityResult) { - return Cursor{b.EndCursor}, nil - } - // Otherwise, re-run the query offset to this iterator's position, starting from - // the most recent compiled cursor. This is done on a best-effort basis, as it - // is racy; if a concurrent process has added or removed entities, then the - // cursor returned may be inconsistent. - q := t.q.clone() - q.start = t.prevCC - q.offset = skipped + int32(t.i) - q.limit = 0 - q.keysOnly = len(q.projection) == 0 - t1 := t.client.Run(t.ctx, q) - _, _, err := t1.next() - if err != Done { - if err == nil { - err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results") - } - return Cursor{}, err - } - return Cursor{t1.res.Batch.EndCursor}, nil -} - -// Cursor is an iterator's position. It can be converted to and from an opaque -// string. A cursor can be used from different HTTP requests, but only with a -// query with the same kind, ancestor, filter and order constraints. -type Cursor struct { - cc []byte -} - -// String returns a base-64 string representation of a cursor. -func (c Cursor) String() string { - if c.cc == nil { - return "" - } - - return strings.TrimRight(base64.URLEncoding.EncodeToString(c.cc), "=") -} - -// DecodeCursor decodes a cursor from its base-64 string representation.
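// Cursor, String and DecodeCursor (below) combine into a resume-later pattern.
// A sketch, assuming client, ctx and a running Iterator it are in scope:
//
//	cur, err := it.Cursor() // position after the last Next call
//	if err != nil {
//		return err
//	}
//	token := cur.String() // opaque and URL-safe; may be handed to a client
//	dec, err := DecodeCursor(token)
//	if err != nil {
//		return err
//	}
//	resumed := client.Run(ctx, NewQuery("Gopher").Start(dec))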
-func DecodeCursor(s string) (Cursor, error) { - if s == "" { - return Cursor{}, nil - } - if n := len(s) % 4; n != 0 { - s += strings.Repeat("=", 4-n) - } - b, err := base64.URLEncoding.DecodeString(s) - if err != nil { - return Cursor{}, err - } - return Cursor{b}, nil -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/query_test.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/query_test.go deleted file mode 100644 index 5d0caaff5d..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/query_test.go +++ /dev/null @@ -1,482 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package datastore - -import ( - "errors" - "fmt" - "reflect" - "testing" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - pb "google.golang.org/cloud/internal/datastore" -) - -var ( - key1 = &pb.Key{ - PathElement: []*pb.Key_PathElement{ - { - Kind: proto.String("Gopher"), - Id: proto.Int64(6), - }, - }, - } - key2 = &pb.Key{ - PathElement: []*pb.Key_PathElement{ - { - Kind: proto.String("Gopher"), - Id: proto.Int64(6), - }, - { - Kind: proto.String("Gopher"), - Id: proto.Int64(8), - }, - }, - } -) - -type fakeClient func(req, resp proto.Message) (err error) - -func (c fakeClient) Call(ctx context.Context, method string, req, resp proto.Message) error { - return c(req, resp) -} - -func fakeRunQuery(in *pb.RunQueryRequest, out *pb.RunQueryResponse) error { - expectedIn := &pb.RunQueryRequest{ - Query: &pb.Query{ - Kind: []*pb.KindExpression{&pb.KindExpression{Name: proto.String("Gopher")}}, - }, - } - if !proto.Equal(in, expectedIn) { - return fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn) - } - *out = pb.RunQueryResponse{ - Batch: &pb.QueryResultBatch{ - MoreResults: pb.QueryResultBatch_NO_MORE_RESULTS.Enum(), - EntityResultType: pb.EntityResult_FULL.Enum(), - EntityResult: []*pb.EntityResult{ - &pb.EntityResult{ - Entity: &pb.Entity{ - Key: key1, - Property: []*pb.Property{ - { - Name: proto.String("Name"), - Value: &pb.Value{StringValue: proto.String("George")}, - }, - { - Name: proto.String("Height"), - Value: &pb.Value{ - IntegerValue: proto.Int64(32), - }, - }, - }, - }, - }, - &pb.EntityResult{ - Entity: &pb.Entity{ - Key: key2, - Property: []*pb.Property{ - { - Name: proto.String("Name"), - Value: &pb.Value{StringValue: proto.String("Rufus")}, - }, - // No height for Rufus. 
- }, - }, - }, - }, - }, - } - return nil -} - -type StructThatImplementsPLS struct{} - -func (StructThatImplementsPLS) Load(p []Property) error { return nil } -func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil } - -var _ PropertyLoadSaver = StructThatImplementsPLS{} - -type StructPtrThatImplementsPLS struct{} - -func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil } -func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil } - -var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{} - -type PropertyMap map[string]Property - -func (m PropertyMap) Load(props []Property) error { - for _, p := range props { - m[p.Name] = p - } - return nil -} - -func (m PropertyMap) Save() ([]Property, error) { - props := make([]Property, 0, len(m)) - for _, p := range m { - props = append(props, p) - } - return props, nil -} - -var _ PropertyLoadSaver = PropertyMap{} - -type Gopher struct { - Name string - Height int -} - -// typeOfEmptyInterface is the type of interface{}, but we can't use -// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an -// interface{}. -var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem() - -func TestCheckMultiArg(t *testing.T) { - testCases := []struct { - v interface{} - mat multiArgType - elemType reflect.Type - }{ - // Invalid cases. - {nil, multiArgTypeInvalid, nil}, - {Gopher{}, multiArgTypeInvalid, nil}, - {&Gopher{}, multiArgTypeInvalid, nil}, - {PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case. - {PropertyMap{}, multiArgTypeInvalid, nil}, - {[]*PropertyList(nil), multiArgTypeInvalid, nil}, - {[]*PropertyMap(nil), multiArgTypeInvalid, nil}, - {[]**Gopher(nil), multiArgTypeInvalid, nil}, - {[]*interface{}(nil), multiArgTypeInvalid, nil}, - // Valid cases. - { - []PropertyList(nil), - multiArgTypePropertyLoadSaver, - reflect.TypeOf(PropertyList{}), - }, - { - []PropertyMap(nil), - multiArgTypePropertyLoadSaver, - reflect.TypeOf(PropertyMap{}), - }, - { - []StructThatImplementsPLS(nil), - multiArgTypePropertyLoadSaver, - reflect.TypeOf(StructThatImplementsPLS{}), - }, - { - []StructPtrThatImplementsPLS(nil), - multiArgTypePropertyLoadSaver, - reflect.TypeOf(StructPtrThatImplementsPLS{}), - }, - { - []Gopher(nil), - multiArgTypeStruct, - reflect.TypeOf(Gopher{}), - }, - { - []*Gopher(nil), - multiArgTypeStructPtr, - reflect.TypeOf(Gopher{}), - }, - { - []interface{}(nil), - multiArgTypeInterface, - typeOfEmptyInterface, - }, - } - for _, tc := range testCases { - mat, elemType := checkMultiArg(reflect.ValueOf(tc.v)) - if mat != tc.mat || elemType != tc.elemType { - t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v", - tc.v, mat, elemType, tc.mat, tc.elemType) - } - } -} - -func TestSimpleQuery(t *testing.T) { - struct1 := Gopher{Name: "George", Height: 32} - struct2 := Gopher{Name: "Rufus"} - pList1 := PropertyList{ - { - Name: "Name", - Value: "George", - }, - { - Name: "Height", - Value: int64(32), - }, - } - pList2 := PropertyList{ - { - Name: "Name", - Value: "Rufus", - }, - } - pMap1 := PropertyMap{ - "Name": Property{ - Name: "Name", - Value: "George", - }, - "Height": Property{ - Name: "Height", - Value: int64(32), - }, - } - pMap2 := PropertyMap{ - "Name": Property{ - Name: "Name", - Value: "Rufus", - }, - } - - testCases := []struct { - dst interface{} - want interface{} - }{ - // The destination must have type *[]P, *[]S or *[]*S, for some non-interface - // type P such that *P implements PropertyLoadSaver, or for some struct type S. 
- {new([]Gopher), &[]Gopher{struct1, struct2}}, - {new([]*Gopher), &[]*Gopher{&struct1, &struct2}}, - {new([]PropertyList), &[]PropertyList{pList1, pList2}}, - {new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}}, - - // Any other destination type is invalid. - {0, nil}, - {Gopher{}, nil}, - {PropertyList{}, nil}, - {PropertyMap{}, nil}, - {[]int{}, nil}, - {[]Gopher{}, nil}, - {[]PropertyList{}, nil}, - {new(int), nil}, - {new(Gopher), nil}, - {new(PropertyList), nil}, // This is a special case. - {new(PropertyMap), nil}, - {new([]int), nil}, - {new([]map[int]int), nil}, - {new([]map[string]Property), nil}, - {new([]map[string]interface{}), nil}, - {new([]*int), nil}, - {new([]*map[int]int), nil}, - {new([]*map[string]Property), nil}, - {new([]*map[string]interface{}), nil}, - {new([]**Gopher), nil}, - {new([]*PropertyList), nil}, - {new([]*PropertyMap), nil}, - } - for _, tc := range testCases { - nCall := 0 - client := &Client{ - client: fakeClient(func(in, out proto.Message) error { - nCall++ - return fakeRunQuery(in.(*pb.RunQueryRequest), out.(*pb.RunQueryResponse)) - }), - } - ctx := context.Background() - - var ( - expectedErr error - expectedNCall int - ) - if tc.want == nil { - expectedErr = ErrInvalidEntityType - } else { - expectedNCall = 1 - } - keys, err := client.GetAll(ctx, NewQuery("Gopher"), tc.dst) - if err != expectedErr { - t.Errorf("dst type %T: got error %v, want %v", tc.dst, err, expectedErr) - continue - } - if nCall != expectedNCall { - t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall) - continue - } - if err != nil { - continue - } - - key1 := NewKey(ctx, "Gopher", "", 6, nil) - expectedKeys := []*Key{ - key1, - NewKey(ctx, "Gopher", "", 8, key1), - } - if l1, l2 := len(keys), len(expectedKeys); l1 != l2 { - t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2) - continue - } - for i, key := range keys { - if !keysEqual(key, expectedKeys[i]) { - t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i]) - continue - } - } - - if !reflect.DeepEqual(tc.dst, tc.want) { - t.Errorf("dst type %T: Entities got %+v, want %+v", tc.dst, tc.dst, tc.want) - continue - } - } -} - -// keysEqual is like (*Key).Equal, but ignores the App ID. -func keysEqual(a, b *Key) bool { - for a != nil && b != nil { - if a.Kind() != b.Kind() || a.Name() != b.Name() || a.ID() != b.ID() { - return false - } - a, b = a.Parent(), b.Parent() - } - return a == b -} - -func TestQueriesAreImmutable(t *testing.T) { - // Test that deriving q2 from q1 does not modify q1. - q0 := NewQuery("foo") - q1 := NewQuery("foo") - q2 := q1.Offset(2) - if !reflect.DeepEqual(q0, q1) { - t.Errorf("q0 and q1 were not equal") - } - if reflect.DeepEqual(q1, q2) { - t.Errorf("q1 and q2 were equal") - } - - // Test that deriving from q4 twice does not conflict, even though - // q4 has a long list of order clauses. This tests that the arrays - // backed by a query's slice of orders are not shared. - f := func() *Query { - q := NewQuery("bar") - // 47 is an ugly number that is unlikely to be near a re-allocation - // point in repeated append calls. For example, it's not near a power - // of 2 or a multiple of 10. 
- for i := 0; i < 47; i++ { - q = q.Order(fmt.Sprintf("x%d", i)) - } - return q - } - q3 := f().Order("y") - q4 := f() - q5 := q4.Order("y") - q6 := q4.Order("z") - if !reflect.DeepEqual(q3, q5) { - t.Errorf("q3 and q5 were not equal") - } - if reflect.DeepEqual(q5, q6) { - t.Errorf("q5 and q6 were equal") - } -} - -func TestFilterParser(t *testing.T) { - testCases := []struct { - filterStr string - wantOK bool - wantFieldName string - wantOp operator - }{ - // Supported ops. - {"x<", true, "x", lessThan}, - {"x <", true, "x", lessThan}, - {"x <", true, "x", lessThan}, - {" x < ", true, "x", lessThan}, - {"x <=", true, "x", lessEq}, - {"x =", true, "x", equal}, - {"x >=", true, "x", greaterEq}, - {"x >", true, "x", greaterThan}, - {"in >", true, "in", greaterThan}, - {"in>", true, "in", greaterThan}, - // Valid but (currently) unsupported ops. - {"x!=", false, "", 0}, - {"x !=", false, "", 0}, - {" x != ", false, "", 0}, - {"x IN", false, "", 0}, - {"x in", false, "", 0}, - // Invalid ops. - {"x EQ", false, "", 0}, - {"x lt", false, "", 0}, - {"x <>", false, "", 0}, - {"x >>", false, "", 0}, - {"x ==", false, "", 0}, - {"x =<", false, "", 0}, - {"x =>", false, "", 0}, - {"x !", false, "", 0}, - {"x ", false, "", 0}, - {"x", false, "", 0}, - // Quoted and interesting field names. - {"x > y =", true, "x > y", equal}, - {"` x ` =", true, " x ", equal}, - {`" x " =`, true, " x ", equal}, - {`" \"x " =`, true, ` "x `, equal}, - {`" x =`, false, "", 0}, - {`" x ="`, false, "", 0}, - {"` x \" =", false, "", 0}, - } - for _, tc := range testCases { - q := NewQuery("foo").Filter(tc.filterStr, 42) - if ok := q.err == nil; ok != tc.wantOK { - t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK) - continue - } - if !tc.wantOK { - continue - } - if len(q.filter) != 1 { - t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1) - continue - } - got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42} - if got != want { - t.Errorf("%q: got %v, want %v", tc.filterStr, got, want) - continue - } - } -} - -func TestNamespaceQuery(t *testing.T) { - gotNamespace := make(chan string, 1) - ctx := context.Background() - client := &Client{ - client: fakeClient(func(req, resp proto.Message) error { - gotNamespace <- req.(*pb.RunQueryRequest).GetPartitionId().GetNamespace() - return errors.New("not implemented") - }), - } - - var gs []Gopher - - client.GetAll(ctx, NewQuery("gopher"), &gs) - if got, want := <-gotNamespace, ""; got != want { - t.Errorf("GetAll: got namespace %q, want %q", got, want) - } - client.Count(ctx, NewQuery("gopher")) - if got, want := <-gotNamespace, ""; got != want { - t.Errorf("Count: got namespace %q, want %q", got, want) - } - - const ns = "not_default" - ctx = WithNamespace(ctx, ns) - - client.GetAll(ctx, NewQuery("gopher"), &gs) - if got, want := <-gotNamespace, ns; got != want { - t.Errorf("GetAll: got namespace %q, want %q", got, want) - } - client.Count(ctx, NewQuery("gopher")) - if got, want := <-gotNamespace, ns; got != want { - t.Errorf("Count: got namespace %q, want %q", got, want) - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/save.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/save.go deleted file mode 100644 index a208000aa2..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/save.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package datastore - -import ( - "errors" - "fmt" - "reflect" - "time" - - "github.com/golang/protobuf/proto" - pb "google.golang.org/cloud/internal/datastore" -) - -// saveEntity converts a PropertyLoadSaver or struct pointer into an entity protobuf. -func saveEntity(key *Key, src interface{}) (*pb.Entity, error) { - var err error - var props []Property - if e, ok := src.(PropertyLoadSaver); ok { - props, err = e.Save() - } else { - props, err = SaveStruct(src) - } - if err != nil { - return nil, err - } - return propertiesToProto(key, props) -} - -func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error { - p := Property{ - Name: name, - NoIndex: noIndex, - Multiple: multiple, - } - - switch x := v.Interface().(type) { - case *Key, time.Time: - p.Value = x - default: - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p.Value = v.Int() - case reflect.Bool: - p.Value = v.Bool() - case reflect.String: - p.Value = v.String() - case reflect.Float32, reflect.Float64: - p.Value = v.Float() - case reflect.Slice: - if v.Type().Elem().Kind() == reflect.Uint8 { - p.Value = v.Bytes() - } - case reflect.Struct: - if !v.CanAddr() { - return fmt.Errorf("datastore: unsupported struct field: value is unaddressable") - } - sub, err := newStructPLS(v.Addr().Interface()) - if err != nil { - return fmt.Errorf("datastore: unsupported struct field: %v", err) - } - return sub.(structPLS).save(props, name, noIndex, multiple) - } - } - if p.Value == nil { - return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type()) - } - *props = append(*props, p) - return nil -} - -func (s structPLS) Save() ([]Property, error) { - var props []Property - if err := s.save(&props, "", false, false); err != nil { - return nil, err - } - return props, nil -} - -func (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error { - for i, t := range s.codec.byIndex { - if t.name == "-" { - continue - } - name := t.name - if prefix != "" { - name = prefix + name - } - v := s.v.Field(i) - if !v.IsValid() || !v.CanSet() { - continue - } - noIndex1 := noIndex || t.noIndex - // For slice fields that aren't []byte, save each element. - if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { - for j := 0; j < v.Len(); j++ { - if err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil { - return err - } - } - continue - } - // Otherwise, save the field itself.
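// How the `datastore` struct tags parsed by getStructCodecLocked interact with
// the save path above, sketched with a hypothetical Post type:
//
//	type Post struct {
//		Title string `datastore:"title"`        // stored as property "title"
//		Body  string `datastore:"body,noindex"` // stored, but never indexed
//		Draft bool   `datastore:"-"`            // not stored at all
//	}
//	props, _ := SaveStruct(&Post{Title: "hi", Body: "text"})
//	// props holds {Name: "title", Value: "hi"} and
//	// {Name: "body", Value: "text", NoIndex: true}; Draft is absent.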
- if err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil { - return err - } - } - return nil -} - -func propertiesToProto(key *Key, props []Property) (*pb.Entity, error) { - e := &pb.Entity{ - Key: keyToProto(key), - } - indexedProps := 0 - prevMultiple := make(map[string]*pb.Property) - for _, p := range props { - val, err := interfaceToProto(p.Value) - if err != "" { - return nil, fmt.Errorf("datastore: %s for a Property with Name %q", err, p.Name) - } - if !p.NoIndex { - rVal := reflect.ValueOf(p.Value) - if rVal.Kind() == reflect.Slice && rVal.Type().Elem().Kind() != reflect.Uint8 { - indexedProps += rVal.Len() - } else { - indexedProps++ - } - } - if indexedProps > maxIndexedProperties { - return nil, errors.New("datastore: too many indexed properties") - } - // Indexed string and []byte values are limited to 1500 bytes; see the - // Property.NoIndex documentation. - switch v := p.Value.(type) { - case string: - if len(v) > 1500 && !p.NoIndex { - return nil, fmt.Errorf("datastore: cannot index a Property with Name %q", p.Name) - } - case []byte: - if len(v) > 1500 && !p.NoIndex { - return nil, fmt.Errorf("datastore: cannot index a Property with Name %q", p.Name) - } - } - val.Indexed = proto.Bool(!p.NoIndex) - if p.Multiple { - x, ok := prevMultiple[p.Name] - if !ok { - x = &pb.Property{ - Name: proto.String(p.Name), - Value: &pb.Value{}, - } - prevMultiple[p.Name] = x - e.Property = append(e.Property, x) - } - x.Value.ListValue = append(x.Value.ListValue, val) - } else { - e.Property = append(e.Property, &pb.Property{ - Name: proto.String(p.Name), - Value: val, - }) - } - } - return e, nil -} - -func interfaceToProto(iv interface{}) (p *pb.Value, errStr string) { - val := new(pb.Value) - switch v := iv.(type) { - case int: - val.IntegerValue = proto.Int64(int64(v)) - case int32: - val.IntegerValue = proto.Int64(int64(v)) - case int64: - val.IntegerValue = proto.Int64(v) - case bool: - val.BooleanValue = proto.Bool(v) - case string: - val.StringValue = proto.String(v) - case float32: - val.DoubleValue = proto.Float64(float64(v)) - case float64: - val.DoubleValue = proto.Float64(v) - case *Key: - if v != nil { - val.KeyValue = keyToProto(v) - } - case time.Time: - if v.Before(minTime) || v.After(maxTime) { - return nil, "time value out of range" - } - val.TimestampMicrosecondsValue = proto.Int64(toUnixMicro(v)) - case []byte: - val.BlobValue = v - default: - if iv != nil { - return nil, fmt.Sprintf("invalid Value type %T", iv) - } - } - // TODO(jbd): Support ListValue and EntityValue. - // TODO(jbd): Support types whose underlying type is one of the types above.
- return val, "" -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/testdata/index.yaml b/Godeps/_workspace/src/google.golang.org/cloud/datastore/testdata/index.yaml deleted file mode 100644 index 47bc9de867..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/testdata/index.yaml +++ /dev/null @@ -1,41 +0,0 @@ -indexes: - -- kind: SQChild - ancestor: yes - properties: - - name: T - - name: I - -- kind: SQChild - ancestor: yes - properties: - - name: T - - name: I - direction: desc - -- kind: SQChild - ancestor: yes - properties: - - name: I - - name: T - - name: U - -- kind: SQChild - ancestor: yes - properties: - - name: I - - name: T - - name: U - -- kind: SQChild - ancestor: yes - properties: - - name: T - - name: J - -- kind: SQChild - ancestor: yes - properties: - - name: T - - name: J - - name: U \ No newline at end of file diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/time.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/time.go deleted file mode 100644 index e7f6a19314..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/time.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package datastore - -import ( - "math" - "time" -) - -var ( - minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3) - maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3) -) - -func toUnixMicro(t time.Time) int64 { - // We cannot use t.UnixNano() / 1e3 because we want to handle times more than - // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot - // be represented in the numerator of a single int64 divide. - return t.Unix()*1e6 + int64(t.Nanosecond()/1e3) -} - -func fromUnixMicro(t int64) time.Time { - return time.Unix(t/1e6, (t%1e6)*1e3) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/time_test.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/time_test.go deleted file mode 100644 index 5cc846c4cc..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/time_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package datastore - -import ( - "testing" - "time" -) - -func TestUnixMicro(t *testing.T) { - // Test that all these time.Time values survive a round trip to unix micros. 
- testCases := []time.Time{ - {}, - time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC), - time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC), - time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC), - time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC), - time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC), - time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC), - time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC), - time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), - time.Unix(-1e6, -1000), - time.Unix(-1e6, 0), - time.Unix(-1e6, +1000), - time.Unix(-60, -1000), - time.Unix(-60, 0), - time.Unix(-60, +1000), - time.Unix(-1, -1000), - time.Unix(-1, 0), - time.Unix(-1, +1000), - time.Unix(0, -3000), - time.Unix(0, -2000), - time.Unix(0, -1000), - time.Unix(0, 0), - time.Unix(0, +1000), - time.Unix(0, +2000), - time.Unix(+60, -1000), - time.Unix(+60, 0), - time.Unix(+60, +1000), - time.Unix(+1e6, -1000), - time.Unix(+1e6, 0), - time.Unix(+1e6, +1000), - time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC), - time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), - time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC), - time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), - time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC), - } - for _, tc := range testCases { - got := fromUnixMicro(toUnixMicro(tc)) - if !got.Equal(tc) { - t.Errorf("got %q, want %q", got, tc) - } - } - - // Test that a time.Time that isn't an integral number of microseconds - // is not perfectly reconstructed after a round trip. - t0 := time.Unix(0, 123) - t1 := fromUnixMicro(toUnixMicro(t0)) - if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 { - t.Errorf("quantization to Β΅s: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond()) - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/datastore/transaction.go b/Godeps/_workspace/src/google.golang.org/cloud/datastore/transaction.go deleted file mode 100644 index 344c313e90..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/datastore/transaction.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package datastore - -import ( - "errors" - "net/http" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - pb "google.golang.org/cloud/internal/datastore" - "google.golang.org/cloud/internal/transport" -) - -// ErrConcurrentTransaction is returned when a transaction is rolled back due -// to a conflict with a concurrent transaction. -var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction") - -var errExpiredTransaction = errors.New("datastore: transaction expired") - -// A TransactionOption configures the Transaction returned by NewTransaction. -type TransactionOption interface { - apply(*pb.BeginTransactionRequest) -} - -type isolation struct { - level pb.BeginTransactionRequest_IsolationLevel -} - -func (i isolation) apply(req *pb.BeginTransactionRequest) { - req.IsolationLevel = i.level.Enum() -} - -var ( - // Snapshot causes the transaction to enforce a snapshot isolation level. 
- Snapshot TransactionOption = isolation{pb.BeginTransactionRequest_SNAPSHOT} - // Serializable causes the transaction to enforce a serializable isolation level. - Serializable TransactionOption = isolation{pb.BeginTransactionRequest_SERIALIZABLE} -) - -// Transaction represents a set of datastore operations to be committed atomically. -// -// Operations are enqueued by calling the Put and Delete methods on Transaction -// (or their Multi-equivalents). These operations are only committed when the -// Commit method is invoked. To ensure consistency, reads must be performed by -// using Transaction's Get method or by using the Transaction method when -// building a query. -// -// A Transaction must be committed or rolled back exactly once. -type Transaction struct { - id []byte - client *Client - ctx context.Context - mutation *pb.Mutation // The mutations to apply. - pending []*PendingKey // Incomplete keys pending transaction completion. -} - -// NewTransaction starts a new transaction. -func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (*Transaction, error) { - req, resp := &pb.BeginTransactionRequest{}, &pb.BeginTransactionResponse{} - for _, o := range opts { - o.apply(req) - } - if err := c.call(ctx, "beginTransaction", req, resp); err != nil { - return nil, err - } - - return &Transaction{ - id: resp.Transaction, - ctx: ctx, - client: c, - mutation: &pb.Mutation{}, - }, nil -} - -// Commit applies the enqueued operations atomically. -func (t *Transaction) Commit() (*Commit, error) { - if t.id == nil { - return nil, errExpiredTransaction - } - req := &pb.CommitRequest{ - Transaction: t.id, - Mutation: t.mutation, - Mode: pb.CommitRequest_TRANSACTIONAL.Enum(), - } - t.id = nil - resp := &pb.CommitResponse{} - if err := t.client.call(t.ctx, "commit", req, resp); err != nil { - if e, ok := err.(*transport.ErrHTTP); ok && e.StatusCode == http.StatusConflict { - // TODO(jbd): Make sure that we explicitly handle the case where response - // has an HTTP 409 and the error message indicates that it's an concurrent - // transaction error. - return nil, ErrConcurrentTransaction - } - return nil, err - } - - // Copy any newly minted keys into the returned keys. - if len(t.pending) != len(resp.MutationResult.InsertAutoIdKey) { - return nil, errors.New("datastore: internal error: server returned the wrong number of keys") - } - commit := &Commit{} - for i, p := range t.pending { - p.key = protoToKey(resp.MutationResult.InsertAutoIdKey[i]) - p.commit = commit - } - - return commit, nil -} - -// Rollback abandons a pending transaction. -func (t *Transaction) Rollback() error { - if t.id == nil { - return errExpiredTransaction - } - id := t.id - t.id = nil - return t.client.call(t.ctx, "rollback", &pb.RollbackRequest{Transaction: id}, &pb.RollbackResponse{}) -} - -// Get is the transaction-specific version of the package function Get. -// All reads performed during the transaction will come from a single consistent -// snapshot. Furthermore, if the transaction is set to a serializable isolation -// level, another transaction cannot concurrently modify the data that is read -// or modified by this transaction. -func (t *Transaction) Get(key *Key, dst interface{}) error { - err := t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, &pb.ReadOptions{Transaction: t.id}) - if me, ok := err.(MultiError); ok { - return me[0] - } - return err -} - -// GetMulti is a batch version of Get. 
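A hypothetical usage sketch of the transaction API being removed here: the Counter type, the key, and the pre-built client are illustrative assumptions, while NewTransaction, Get, Put (defined just below), Commit, and Rollback are the methods as deleted.

```go
package example

import (
	"golang.org/x/net/context"

	"google.golang.org/cloud/datastore"
)

// Counter is an assumed entity type for the sketch.
type Counter struct {
	Count int64
}

func incrementCounter(ctx context.Context, client *datastore.Client, key *datastore.Key) error {
	tx, err := client.NewTransaction(ctx)
	if err != nil {
		return err
	}
	var c Counter
	if err := tx.Get(key, &c); err != nil {
		tx.Rollback() // a Transaction must be committed or rolled back exactly once
		return err
	}
	c.Count++
	if _, err := tx.Put(key, &c); err != nil {
		tx.Rollback()
		return err
	}
	// Commit applies the queued mutations atomically; a conflicting
	// concurrent transaction surfaces as datastore.ErrConcurrentTransaction,
	// which a caller could use as a retry signal.
	_, err = tx.Commit()
	return err
}
```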
-func (t *Transaction) GetMulti(keys []*Key, dst interface{}) error { - if t.id == nil { - return errExpiredTransaction - } - return t.client.get(t.ctx, keys, dst, &pb.ReadOptions{Transaction: t.id}) -} - -// Put is the transaction-specific version of the package function Put. -// -// Put returns a PendingKey which can be resolved into a Key using the -// return value from a successful Commit. If key is an incomplete key, the -// returned pending key will resolve to a unique key generated by the -// datastore. -func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) { - h, err := t.PutMulti([]*Key{key}, []interface{}{src}) - if err != nil { - if me, ok := err.(MultiError); ok { - return nil, me[0] - } - return nil, err - } - return h[0], nil -} - -// PutMulti is a batch version of Put. One PendingKey is returned for each -// element of src in the same order. -func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) { - if t.id == nil { - return nil, errExpiredTransaction - } - mutation, err := putMutation(keys, src) - if err != nil { - return nil, err - } - proto.Merge(t.mutation, mutation) - - // Prepare the returned handles, pre-populating where possible. - ret := make([]*PendingKey, len(keys)) - for i, key := range keys { - h := &PendingKey{} - if key.Incomplete() { - // This key will be in the final commit result. - t.pending = append(t.pending, h) - } else { - h.key = key - } - - ret[i] = h - } - - return ret, nil -} - -// Delete is the transaction-specific version of the package function Delete. -// Delete enqueues the deletion of the entity for the given key, to be -// committed atomically upon calling Commit. -func (t *Transaction) Delete(key *Key) error { - err := t.DeleteMulti([]*Key{key}) - if me, ok := err.(MultiError); ok { - return me[0] - } - return err -} - -// DeleteMulti is a batch version of Delete. -func (t *Transaction) DeleteMulti(keys []*Key) error { - if t.id == nil { - return errExpiredTransaction - } - mutation, err := deleteMutation(keys) - if err != nil { - return err - } - proto.Merge(t.mutation, mutation) - return nil -} - -// Commit represents the result of a committed transaction. -type Commit struct{} - -// Key resolves a pending key handle into a final key. -func (c *Commit) Key(p *PendingKey) *Key { - if c != p.commit { - panic("PendingKey was not created by corresponding transaction") - } - return p.key -} - -// PendingKey represents the key for newly-inserted entity. It can be -// resolved into a Key by calling the Key method of Commit. -type PendingKey struct { - key *Key - commit *Commit -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/concat_table/main.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/concat_table/main.go deleted file mode 100644 index 978801b043..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/concat_table/main.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// concat_table is an example client of the bigquery client library. -// It concatenates two BigQuery tables and writes the result to another table. -package main - -import ( - "flag" - "fmt" - "log" - "os" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2/google" - "google.golang.org/cloud/bigquery" -) - -var ( - project = flag.String("project", "", "The ID of a Google Cloud Platform project") - dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") - src1 = flag.String("src1", "", "The ID of the first BigQuery table to concatenate") - src2 = flag.String("src2", "", "The ID of the second BigQuery table to concatenate") - dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to") - pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") -) - -func main() { - flag.Parse() - - flagsOk := true - for _, f := range []string{"project", "dataset", "src1", "src2", "dest"} { - if flag.Lookup(f).Value.String() == "" { - fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) - flagsOk = false - } - } - if !flagsOk { - os.Exit(1) - } - if *src1 == *src2 || *src1 == *dest || *src2 == *dest { - log.Fatalf("Different values must be supplied for each of --src1, --src2 and --dest") - } - - httpClient, err := google.DefaultClient(context.Background(), bigquery.Scope) - if err != nil { - log.Fatalf("Creating http client: %v", err) - } - - client, err := bigquery.NewClient(httpClient, *project) - if err != nil { - log.Fatalf("Creating bigquery client: %v", err) - } - - s1 := &bigquery.Table{ - ProjectID: *project, - DatasetID: *dataset, - TableID: *src1, - } - - s2 := &bigquery.Table{ - ProjectID: *project, - DatasetID: *dataset, - TableID: *src2, - } - - d := &bigquery.Table{ - ProjectID: *project, - DatasetID: *dataset, - TableID: *dest, - } - - // Concatenate data. - job, err := client.Copy(context.Background(), d, bigquery.Tables{s1, s2}, bigquery.WriteTruncate) - - if err != nil { - log.Fatalf("Concatenating: %v", err) - } - - fmt.Printf("Job for concatenation operation: %+v\n", job) - fmt.Printf("Waiting for job to complete.\n") - - for range time.Tick(*pollint) { - status, err := job.Status(context.Background()) - if err != nil { - fmt.Printf("Failure determining status: %v", err) - break - } - if !status.Done() { - continue - } - if err := status.Err(); err == nil { - fmt.Printf("Success\n") - } else { - fmt.Printf("Failure: %+v\n", err) - } - break - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/load/main.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/load/main.go deleted file mode 100644 index 30ed9dbba0..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/load/main.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
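The BigQuery examples in this region all repeat the same `for range time.Tick(*pollint)` status loop, which leaks its ticker and ignores cancellation. A generic sketch of the same wait, under the assumption that a `check` callback wraps `job.Status`; `waitForDone` is an invented name, not part of the removed examples:

```go
package example

import (
	"time"

	"golang.org/x/net/context"
)

// waitForDone polls check at the given interval until the job reports
// completion or an error. Unlike the examples' time.Tick loop, it stops
// its ticker on return and honors context cancellation.
func waitForDone(ctx context.Context, interval time.Duration, check func() (done bool, err error)) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			done, err := check()
			if err != nil {
				return err
			}
			if done {
				return nil
			}
		}
	}
}
```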
- -// load is an example client of the bigquery client library. -// It loads a file from Google Cloud Storage into a BigQuery table. -package main - -import ( - "flag" - "fmt" - "log" - "os" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2/google" - "google.golang.org/cloud/bigquery" -) - -var ( - project = flag.String("project", "", "The ID of a Google Cloud Platform project") - dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") - table = flag.String("table", "", "The ID of a BigQuery table to load data into") - bucket = flag.String("bucket", "", "The name of a Google Cloud Storage bucket to load data from") - object = flag.String("object", "", "The name of a Google Cloud Storage object to load data from. Must exist within the bucket specified by --bucket") - skiprows = flag.Int64("skiprows", 0, "The number of rows of the source data to skip when loading") - pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") -) - -func main() { - flag.Parse() - - flagsOk := true - for _, f := range []string{"project", "dataset", "table", "bucket", "object"} { - if flag.Lookup(f).Value.String() == "" { - fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) - flagsOk = false - } - } - if !flagsOk { - os.Exit(1) - } - - httpClient, err := google.DefaultClient(context.Background(), bigquery.Scope) - if err != nil { - log.Fatalf("Creating http client: %v", err) - } - - client, err := bigquery.NewClient(httpClient, *project) - if err != nil { - log.Fatalf("Creating bigquery client: %v", err) - } - - table := &bigquery.Table{ - ProjectID: *project, - DatasetID: *dataset, - TableID: *table, - } - - gcs := client.NewGCSReference(fmt.Sprintf("gs://%s/%s", *bucket, *object)) - gcs.SkipLeadingRows = *skiprows - - // Load data from Google Cloud Storage into a BigQuery table. - job, err := client.Copy( - context.Background(), table, gcs, - bigquery.MaxBadRecords(1), - bigquery.AllowQuotedNewlines(), - bigquery.WriteTruncate) - - if err != nil { - log.Fatalf("Loading data: %v", err) - } - - fmt.Printf("Job for data load operation: %+v\n", job) - fmt.Printf("Waiting for job to complete.\n") - - for range time.Tick(*pollint) { - status, err := job.Status(context.Background()) - if err != nil { - fmt.Printf("Failure determining status: %v", err) - break - } - if !status.Done() { - continue - } - if err := status.Err(); err == nil { - fmt.Printf("Success\n") - } else { - fmt.Printf("Failure: %+v\n", err) - } - break - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/query/main.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/query/main.go deleted file mode 100644 index d6dc0b6a1b..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/query/main.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// query is an example client of the bigquery client library. 
-// It submits a query and writes the result to a table. -package main - -import ( - "flag" - "fmt" - "log" - "os" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2/google" - "google.golang.org/cloud/bigquery" -) - -var ( - project = flag.String("project", "", "The ID of a Google Cloud Platform project") - dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") - q = flag.String("q", "", "The query string") - dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to. If unset, an ephemeral table ID will be generated.") - pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") - wait = flag.Bool("wait", false, "Whether to wait for the query job to complete.") -) - -func main() { - flag.Parse() - - flagsOk := true - for _, f := range []string{"project", "dataset", "q"} { - if flag.Lookup(f).Value.String() == "" { - fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) - flagsOk = false - } - } - if !flagsOk { - os.Exit(1) - } - - httpClient, err := google.DefaultClient(context.Background(), bigquery.Scope) - if err != nil { - log.Fatalf("Creating http client: %v", err) - } - - client, err := bigquery.NewClient(httpClient, *project) - if err != nil { - log.Fatalf("Creating bigquery client: %v", err) - } - - d := &bigquery.Table{} - - if *dest != "" { - d.ProjectID = *project - d.DatasetID = *dataset - d.TableID = *dest - } - - query := &bigquery.Query{ - Q: *q, - DefaultProjectID: *project, - DefaultDatasetID: *dataset, - } - - // Query data. - job, err := client.Copy(context.Background(), d, query, bigquery.WriteTruncate) - - if err != nil { - log.Fatalf("Querying: %v", err) - } - - fmt.Printf("Submitted query. Job ID: %s\n", job.ID()) - if !*wait { - return - } - - fmt.Printf("Waiting for job to complete.\n") - - for range time.Tick(*pollint) { - status, err := job.Status(context.Background()) - if err != nil { - fmt.Printf("Failure determining status: %v", err) - break - } - if !status.Done() { - continue - } - if err := status.Err(); err == nil { - fmt.Printf("Success\n") - } else { - fmt.Printf("Failure: %+v\n", err) - } - break - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/read/main.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/read/main.go deleted file mode 100644 index 181bd7c780..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigquery/read/main.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// read is an example client of the bigquery client library. -// It reads from a table, returning the data via an Iterator. 
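Each of these examples also repeats the same required-flag scan. A sketch that factors it into a helper; `requireFlags` is an invented name and the flags shown are only for the demo:

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

var (
	project = flag.String("project", "", "The ID of a Google Cloud Platform project")
	dataset = flag.String("dataset", "", "The ID of a BigQuery dataset")
)

// requireFlags reports each named flag that was left empty and returns
// false if any was missing, mirroring the loop the examples duplicate.
func requireFlags(names ...string) bool {
	ok := true
	for _, name := range names {
		if f := flag.Lookup(name); f == nil || f.Value.String() == "" {
			fmt.Fprintf(os.Stderr, "Flag --%s is required\n", name)
			ok = false
		}
	}
	return ok
}

func main() {
	flag.Parse()
	if !requireFlags("project", "dataset") {
		os.Exit(1)
	}
	fmt.Printf("project=%s dataset=%s\n", *project, *dataset)
}
```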
-package main - -import ( - "flag" - "fmt" - "log" - "os" - "regexp" - "strings" - "text/tabwriter" - - "golang.org/x/net/context" - "golang.org/x/oauth2/google" - "google.golang.org/cloud/bigquery" -) - -var ( - project = flag.String("project", "", "The ID of a Google Cloud Platform project") - dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") - table = flag.String("table", ".*", "A regular expression to match the IDs of tables to read.") - jobID = flag.String("jobid", "", "The ID of a query job that has already been submitted."+ - " If set, --dataset, --table will be ignored, and results will be read from the specified job.") -) - -func printValues(it *bigquery.Iterator) { - // one-space padding. - tw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) - - for it.Next(context.Background()) { - var vals bigquery.ValueList - if err := it.Get(&vals); err != nil { - fmt.Printf("err calling get: %v\n", err) - } else { - sep := "" - for _, v := range vals { - fmt.Fprintf(tw, "%s%v", sep, v) - sep = "\t" - } - fmt.Fprintf(tw, "\n") - } - } - tw.Flush() - - fmt.Printf("\n") - if err := it.Err(); err != nil { - fmt.Printf("err reading: %v\n", err) - } -} - -func printTable(client *bigquery.Client, t *bigquery.Table) { - it, err := client.Read(context.Background(), t) - if err != nil { - log.Fatalf("Reading: %v", err) - } - - id := t.FullyQualifiedName() - fmt.Printf("%s\n%s\n", id, strings.Repeat("-", len(id))) - printValues(it) -} - -func printQueryResults(client *bigquery.Client, queryJobID string) { - job, err := client.JobFromID(context.Background(), queryJobID) - if err != nil { - log.Fatalf("Loading job: %v", err) - } - - it, err := client.Read(context.Background(), job) - if err != nil { - log.Fatalf("Reading: %v", err) - } - - // TODO: print schema. 
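printValues above leans on text/tabwriter for column alignment: cells are written tab-separated and Flush computes the final widths. A standalone sketch of the trick with made-up rows:

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Same construction as printValues: minwidth 0, tabwidth 0, one-space padding.
	tw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
	rows := [][]string{
		{"word", "count", "corpus"},
		{"brave", "12", "hamlet"},
		{"new", "57", "tempest"},
	}
	for _, row := range rows {
		sep := ""
		for _, cell := range row {
			fmt.Fprintf(tw, "%s%s", sep, cell)
			sep = "\t" // cells are tab-separated; Flush aligns the columns
		}
		fmt.Fprintln(tw)
	}
	tw.Flush()
}
```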
- printValues(it) -} - -func main() { - flag.Parse() - - flagsOk := true - if flag.Lookup("project").Value.String() == "" { - fmt.Fprintf(os.Stderr, "Flag --project is required\n") - flagsOk = false - } - - var sourceFlagCount int - if flag.Lookup("dataset").Value.String() != "" { - sourceFlagCount++ - } - if flag.Lookup("jobid").Value.String() != "" { - sourceFlagCount++ - } - if sourceFlagCount != 1 { - fmt.Fprintf(os.Stderr, "Exactly one of --dataset or --jobid must be set\n") - flagsOk = false - } - - if !flagsOk { - os.Exit(1) - } - - tableRE, err := regexp.Compile(*table) - if err != nil { - fmt.Fprintf(os.Stderr, "--table is not a valid regular expression: %q\n", *table) - os.Exit(1) - } - - httpClient, err := google.DefaultClient(context.Background(), bigquery.Scope) - if err != nil { - log.Fatalf("Creating http client: %v", err) - } - - client, err := bigquery.NewClient(httpClient, *project) - if err != nil { - log.Fatalf("Creating bigquery client: %v", err) - } - - if *jobID != "" { - printQueryResults(client, *jobID) - return - } - ds := client.Dataset(*dataset) - var tables []*bigquery.Table - tables, err = ds.ListTables(context.Background()) - if err != nil { - log.Fatalf("Listing tables: %v", err) - } - for _, t := range tables { - if tableRE.MatchString(t.TableID) { - printTable(client, t) - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/README.md b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/README.md deleted file mode 100644 index 3f3e92155e..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Cloud Bigtable on Managed VMs using Go -# (Hello World for Cloud Bigtable) - -This app counts how often each user visits. - -## Prerequisites - -1. Set up Cloud Console. - 1. Go to the [Cloud Console](https://cloud.google.com/console) and create or select your project. - You will need the project ID later. - 1. Go to **Settings > Project Billing Settings** and enable billing. - 1. Select **APIs & Auth > APIs**. - 1. Enable the **Cloud Bigtable API** and the **Cloud Bigtable Admin API**. - (You may need to search for the API). -1. Set up gcloud. - 1. `gcloud components update` - 1. `gcloud auth login` - 1. `gcloud config set project PROJECT_ID` -1. Download App Engine SDK for Go. - 1. `go get -u google.golang.org/appengine/...` -1. In helloworld.go, change the constants `project`, `zone` and `cluster` - -## Running locally - -1. From the sample project folder, `gcloud preview app run app.yaml` - -## Deploying on Google App Engine Managed VM - -1. Install and start [Docker](https://cloud.google.com/appengine/docs/managed-vms/getting-started#install_docker). -1. From the sample project folder, `aedeploy gcloud preview app deploy app.yaml` diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/app.yaml b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/app.yaml deleted file mode 100644 index 4f5fef0e5a..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/app.yaml +++ /dev/null @@ -1,11 +0,0 @@ -runtime: go -api_version: go1 -vm: true - -manual_scaling: - instances: 1 - -handlers: -# Serve only the web root. 
-- url: / - script: _go_app diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/helloworld.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/helloworld.go deleted file mode 100644 index 8f1bd702e9..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/bigtable-hello/helloworld.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -/* -helloworld tracks how often a user has visited the index page. - -This program demonstrates usage of the Cloud Bigtable API for Managed VMs and Go. -Instructions for running this program are in the README.md. -*/ -package main - -import ( - "bytes" - "encoding/binary" - "html/template" - "log" - "net/http" - - "golang.org/x/net/context" - "google.golang.org/appengine" - aelog "google.golang.org/appengine/log" - "google.golang.org/appengine/user" - "google.golang.org/cloud/bigtable" -) - -// User-provided constants. -const ( - project = "PROJECT_ID" - zone = "CLUSTER_ZONE" - cluster = "CLUSTER_NAME" -) - -var ( - tableName = "bigtable-hello" - familyName = "emails" - - // Client is initialized by main. - client *bigtable.Client -) - -func main() { - ctx := context.Background() - - // Set up admin client, tables, and column families. - // NewAdminClient uses Application Default Credentials to authenticate. - adminClient, err := bigtable.NewAdminClient(ctx, project, zone, cluster) - if err != nil { - log.Fatalf("Unable to create a table admin client. %v", err) - } - tables, err := adminClient.Tables(ctx) - if err != nil { - log.Fatalf("Unable to fetch table list. %v", err) - } - if !sliceContains(tables, tableName) { - if err := adminClient.CreateTable(ctx, tableName); err != nil { - log.Fatalf("Unable to create table: %v. %v", tableName, err) - } - } - tblInfo, err := adminClient.TableInfo(ctx, tableName) - if err != nil { - log.Fatalf("Unable to read info for table: %v. %v", tableName, err) - } - if !sliceContains(tblInfo.Families, familyName) { - if err := adminClient.CreateColumnFamily(ctx, tableName, familyName); err != nil { - log.Fatalf("Unable to create column family: %v. %v", familyName, err) - } - } - adminClient.Close() - - // Set up Bigtable data operations client. - // NewClient uses Application Default Credentials to authenticate. - client, err = bigtable.NewClient(ctx, project, zone, cluster) - if err != nil { - log.Fatalf("Unable to create data operations client. %v", err) - } - - http.Handle("/", appHandler(mainHandler)) - appengine.Main() // Never returns. -} - -// mainHandler tracks how many times each user has visited this page. -func mainHandler(w http.ResponseWriter, r *http.Request) *appError { - if r.URL.Path != "/" { - http.NotFound(w, r) - return nil - } - - ctx := appengine.NewContext(r) - u := user.Current(ctx) - if u == nil { - login, err := user.LoginURL(ctx, r.URL.String()) - if err != nil { - return &appError{err, "Error finding login URL", http.StatusInternalServerError} - } - http.Redirect(w, r, login, http.StatusFound) - return nil - } - logoutURL, err := user.LogoutURL(ctx, "/") - if err != nil { - return &appError{err, "Error finding logout URL", http.StatusInternalServerError} - } - - // Display hello page. 
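The page-display code that follows bumps a per-user counter with a single atomic ReadModifyWrite. Isolated as a sketch: `incrementVisits` is an illustrative wrapper, while the bigtable calls mirror the handler below.

```go
package example

import (
	"encoding/binary"

	"golang.org/x/net/context"
	"google.golang.org/cloud/bigtable"
)

// incrementVisits atomically bumps the counter cell for user and returns the
// new value. Bigtable increments store big-endian int64s, hence the decode.
func incrementVisits(ctx context.Context, tbl *bigtable.Table, family, user string) (uint64, error) {
	rmw := bigtable.NewReadModifyWrite()
	rmw.Increment(family, user, 1)
	row, err := tbl.ApplyReadModifyWrite(ctx, user, rmw)
	if err != nil {
		return 0, err
	}
	// row[family][0] is the most recently written cell for the column.
	return binary.BigEndian.Uint64(row[family][0].Value), nil
}
```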
-	tbl := client.Open(tableName)
-	rmw := bigtable.NewReadModifyWrite()
-	rmw.Increment(familyName, u.Email, 1)
-	row, err := tbl.ApplyReadModifyWrite(ctx, u.Email, rmw)
-	if err != nil {
-		return &appError{err, "Error applying ReadModifyWrite to row: " + u.Email, http.StatusInternalServerError}
-	}
-	data := struct {
-		Username, Logout string
-		Visits           uint64
-	}{
-		Username: u.Email,
-		// Retrieve the most recently edited column.
-		Visits: binary.BigEndian.Uint64(row[familyName][0].Value),
-		Logout: logoutURL,
-	}
-	var buf bytes.Buffer
-	if err := tmpl.Execute(&buf, data); err != nil {
-		return &appError{err, "Error writing template", http.StatusInternalServerError}
-	}
-	buf.WriteTo(w)
-	return nil
-}
-
-var tmpl = template.Must(template.New("").Parse(`
-<html><body>
-<p>
-{{with .Username}} Hello {{.}}{{end}}
-{{with .Logout}}<a href="{{.}}">Sign out</a>{{end}}
-</p>
-<p>
-You have visited {{.Visits}}
-</p>
-</body></html>
- -`)) - -// sliceContains reports whether the provided string is present in the given slice of strings. -func sliceContains(list []string, target string) bool { - for _, s := range list { - if s == target { - return true - } - } - return false -} - -// More info about this method of error handling can be found at: http://blog.golang.org/error-handling-and-go -type appHandler func(http.ResponseWriter, *http.Request) *appError - -type appError struct { - Error error - Message string - Code int -} - -func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if e := fn(w, r); e != nil { - ctx := appengine.NewContext(r) - aelog.Errorf(ctx, "%v", e.Error) - http.Error(w, e.Message, e.Code) - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/search/search.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/search/search.go deleted file mode 100644 index c9b1339d58..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/examples/bigtable/search/search.go +++ /dev/null @@ -1,439 +0,0 @@ -/* -Copyright 2015 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This is a sample web server that uses Cloud Bigtable as the storage layer -// for a simple document-storage and full-text-search service. -// It has three functions: -// - Add a document. This adds the content of a user-supplied document to the -// Bigtable, and adds references to the document to an index in the Bigtable. -// The document is indexed under each unique word in the document. -// - Search the index. This returns documents containing each word in a user -// query, with snippets and links to view the whole document. -// - Clear the table. This deletes and recreates the Bigtable, -package main - -import ( - "bytes" - "flag" - "fmt" - "html/template" - "io" - "log" - "net/http" - "os" - "strings" - "sync" - "time" - "unicode" - - "golang.org/x/net/context" - "google.golang.org/cloud/bigtable" -) - -var ( - project = flag.String("project", "", "The name of the project.") - zone = flag.String("zone", "", "The zone of the project.") - cluster = flag.String("cluster", "", "The name of the Cloud Bigtable cluster.") - tableName = flag.String("table", "docindex", "The name of the table containing the documents and index.") - credFile = flag.String("creds", "", "File containing credentials") - rebuild = flag.Bool("rebuild", false, "Rebuild the table from scratch on startup.") - - client *bigtable.Client - adminClient *bigtable.AdminClient - table *bigtable.Table - - addTemplate = template.Must(template.New("").Parse(` -Added {{.Title}} -`)) - - contentTemplate = template.Must(template.New("").Parse(` -{{.Title}}

-{{.Content}} -`)) - - searchTemplate = template.Must(template.New("").Parse(` -Results for {{.Query}}:

-{{range .Results}}
-<a href="/content?name={{.Title}}">{{.Title}}</a><br>
-{{.Snippet}}<br><br>
-{{end}}
-`))
-)
-
-const (
-	// prototypeTableName is an existing table containing some documents.
-	// Rebuilding a table will populate it with the data from this table.
-	prototypeTableName  = "shakespearetemplate"
-	indexColumnFamily   = "i"
-	contentColumnFamily = "c"
-	mainPage            = `
-<html><head><title>Document Search</title></head><body>
-Search for documents:
-<form action="/search" method="post">
-<input type="text" name="q" size="80">
-<input type="submit" value="Search">
-</form>
-
-Add a document:
-<form action="/add" method="post">
-Document name:<br>
-<input type="text" name="name" size="80"><br>
-Document text:<br>
-<textarea name="content" rows="20" cols="80"></textarea><br>
-<input type="submit" value="Submit">
-</form>
-
-Rebuild table:
-<form action="/clearindex" method="post">
-<input type="submit" value="Rebuild">
-</form>
-</body></html>
- - - - ` -) - -func main() { - flag.Parse() - - if *tableName == prototypeTableName { - log.Fatal("Can't use " + prototypeTableName + " as your table.") - } - - // Let the library get credentials from file. - os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", *credFile) - - // Make an admin client. - var err error - if adminClient, err = bigtable.NewAdminClient(context.Background(), *project, *zone, *cluster); err != nil { - log.Fatal("Bigtable NewAdminClient:", err) - } - - // Make a regular client. - client, err = bigtable.NewClient(context.Background(), *project, *zone, *cluster) - if err != nil { - log.Fatal("Bigtable NewClient:", err) - } - - // Open the table. - table = client.Open(*tableName) - - // Rebuild the table if the command-line flag is set. - if *rebuild { - if err := rebuildTable(); err != nil { - log.Fatal(err) - } - } - - // Set up HTML handlers, and start the web server. - http.HandleFunc("/search", handleSearch) - http.HandleFunc("/content", handleContent) - http.HandleFunc("/add", handleAddDoc) - http.HandleFunc("/clearindex", handleClear) - http.HandleFunc("/", handleMain) - log.Fatal(http.ListenAndServe(":8080", nil)) -} - -// handleMain outputs the home page, containing a search box, an "add document" box, and "clear table" button. -func handleMain(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, mainPage) -} - -// tokenize splits a string into tokens. -// This is very simple, it's not a good tokenization function. -func tokenize(s string) []string { - wordMap := make(map[string]bool) - f := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) }) - for _, word := range f { - word = strings.ToLower(word) - wordMap[word] = true - } - words := make([]string, 0, len(wordMap)) - for word := range wordMap { - words = append(words, word) - } - return words -} - -// handleContent fetches the content of a document from the Bigtable and returns it. -func handleContent(w http.ResponseWriter, r *http.Request) { - ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) - name := r.FormValue("name") - if len(name) == 0 { - http.Error(w, "No document name supplied.", http.StatusBadRequest) - return - } - - row, err := table.ReadRow(ctx, name) - if err != nil { - http.Error(w, "Error reading content: "+err.Error(), http.StatusInternalServerError) - return - } - content := row[contentColumnFamily] - if len(content) == 0 { - http.Error(w, "Document not found.", http.StatusNotFound) - return - } - var buf bytes.Buffer - if err := contentTemplate.ExecuteTemplate(&buf, "", struct{ Title, Content string }{name, string(content[0].Value)}); err != nil { - http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) - return - } - io.Copy(w, &buf) -} - -// handleSearch responds to search queries, returning links and snippets for matching documents. -func handleSearch(w http.ResponseWriter, r *http.Request) { - ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) - query := r.FormValue("q") - // Split the query into words. - words := tokenize(query) - if len(words) == 0 { - http.Error(w, "Empty query.", http.StatusBadRequest) - return - } - - // readRows reads from many rows concurrently. 
- readRows := func(rows []string) ([]bigtable.Row, error) { - results := make([]bigtable.Row, len(rows)) - errors := make([]error, len(rows)) - var wg sync.WaitGroup - for i, row := range rows { - wg.Add(1) - go func(i int, row string) { - defer wg.Done() - results[i], errors[i] = table.ReadRow(ctx, row) - }(i, row) - } - wg.Wait() - for _, err := range errors { - if err != nil { - return nil, err - } - } - return results, nil - } - - // For each query word, get the list of documents containing it. - results, err := readRows(words) - if err != nil { - http.Error(w, "Error reading index: "+err.Error(), http.StatusInternalServerError) - return - } - - // Count how many of the query words each result contained. - hits := make(map[string]int) - for _, r := range results { - for _, r := range r[indexColumnFamily] { - hits[r.Column]++ - } - } - - // Build a slice of all the documents that matched every query word. - var matches []string - for doc, count := range hits { - if count == len(words) { - matches = append(matches, doc[len(indexColumnFamily+":"):]) - } - } - - // Fetch the content of those documents from the Bigtable. - content, err := readRows(matches) - if err != nil { - http.Error(w, "Error reading results: "+err.Error(), http.StatusInternalServerError) - return - } - - type result struct{ Title, Snippet string } - data := struct { - Query string - Results []result - }{query, nil} - - // Output links and snippets. - for i, doc := range matches { - var text string - c := content[i][contentColumnFamily] - if len(c) > 0 { - text = string(c[0].Value) - } - if len(text) > 100 { - text = text[:100] + "..." - } - data.Results = append(data.Results, result{doc, text}) - } - var buf bytes.Buffer - if err := searchTemplate.ExecuteTemplate(&buf, "", data); err != nil { - http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) - return - } - io.Copy(w, &buf) -} - -// handleAddDoc adds a document to the index. -func handleAddDoc(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - http.Error(w, "POST requests only", http.StatusMethodNotAllowed) - return - } - - ctx, _ := context.WithTimeout(context.Background(), time.Minute) - - name := r.FormValue("name") - if len(name) == 0 { - http.Error(w, "Empty document name!", http.StatusBadRequest) - return - } - - content := r.FormValue("content") - if len(content) == 0 { - http.Error(w, "Empty document content!", http.StatusBadRequest) - return - } - - var ( - writeErr error // Set if any write fails. - mu sync.Mutex // Protects writeErr - wg sync.WaitGroup // Used to wait for all writes to finish. - ) - - // writeOneColumn writes one column in one row, updates err if there is an error, - // and signals wg that one operation has finished. - writeOneColumn := func(row, family, column, value string, ts bigtable.Timestamp) { - mut := bigtable.NewMutation() - mut.Set(family, column, ts, []byte(value)) - err := table.Apply(ctx, row, mut) - if err != nil { - mu.Lock() - writeErr = err - mu.Unlock() - } - } - - // Start a write to store the document content. - wg.Add(1) - go func() { - writeOneColumn(name, contentColumnFamily, "", content, bigtable.Now()) - wg.Done() - }() - - // Start writes to store the document name in the index for each word in the document. 
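The readRows closure above is a fan-out/join: one goroutine per row, with results and errors collected by index. The same shape as a reusable sketch; `fetchAll` is an invented name, shown over strings rather than bigtable.Row:

```go
package example

import "sync"

// fetchAll runs fetch once per key concurrently, keeping results in input
// order, and returns the first error encountered once every call finishes.
func fetchAll(keys []string, fetch func(string) (string, error)) ([]string, error) {
	results := make([]string, len(keys))
	errs := make([]error, len(keys))
	var wg sync.WaitGroup
	for i, key := range keys {
		wg.Add(1)
		go func(i int, key string) {
			defer wg.Done()
			results[i], errs[i] = fetch(key)
		}(i, key)
	}
	wg.Wait()
	for _, err := range errs {
		if err != nil {
			return nil, err
		}
	}
	return results, nil
}
```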
- words := tokenize(content) - for _, word := range words { - var ( - row = word - family = indexColumnFamily - column = name - value = "" - ts = bigtable.Now() - ) - wg.Add(1) - go func() { - // TODO: should use a semaphore to limit the number of concurrent writes. - writeOneColumn(row, family, column, value, ts) - wg.Done() - }() - } - wg.Wait() - if writeErr != nil { - http.Error(w, "Error writing to Bigtable: "+writeErr.Error(), http.StatusInternalServerError) - return - } - var buf bytes.Buffer - if err := addTemplate.ExecuteTemplate(&buf, "", struct{ Title string }{name}); err != nil { - http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) - return - } - io.Copy(w, &buf) -} - -// rebuildTable deletes the table if it exists, then creates the table, with the index column family. -func rebuildTable() error { - ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) - adminClient.DeleteTable(ctx, *tableName) - if err := adminClient.CreateTable(ctx, *tableName); err != nil { - return fmt.Errorf("CreateTable: %v", err) - } - time.Sleep(20 * time.Second) - if err := adminClient.CreateColumnFamily(ctx, *tableName, indexColumnFamily); err != nil { - return fmt.Errorf("CreateColumnFamily: %v", err) - } - if err := adminClient.CreateColumnFamily(ctx, *tableName, contentColumnFamily); err != nil { - return fmt.Errorf("CreateColumnFamily: %v", err) - } - - // Open the prototype table. It contains a number of documents to get started with. - prototypeTable := client.Open(prototypeTableName) - - var ( - writeErr error // Set if any write fails. - mu sync.Mutex // Protects writeErr - wg sync.WaitGroup // Used to wait for all writes to finish. - ) - copyRowToTable := func(row bigtable.Row) bool { - mu.Lock() - failed := writeErr != nil - mu.Unlock() - if failed { - return false - } - mut := bigtable.NewMutation() - for family, items := range row { - for _, item := range items { - // Get the column name, excluding the column family name and ':' character. - columnWithoutFamily := item.Column[len(family)+1:] - mut.Set(family, columnWithoutFamily, bigtable.Now(), item.Value) - } - } - wg.Add(1) - go func() { - // TODO: should use a semaphore to limit the number of concurrent writes. - if err := table.Apply(ctx, row.Key(), mut); err != nil { - mu.Lock() - writeErr = err - mu.Unlock() - } - wg.Done() - }() - return true - } - - // Create a filter that only accepts the column families we're interested in. - filter := bigtable.FamilyFilter(indexColumnFamily + "|" + contentColumnFamily) - // Read every row from prototypeTable, and call copyRowToTable to copy it to our table. 
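Both write loops in this file carry a TODO about limiting concurrent writes with a semaphore. A sketch of the conventional buffered-channel semaphore that TODO points at; `boundedGo` and `maxInFlight` are invented names:

```go
package example

import "sync"

// boundedGo runs each task in its own goroutine, but never more than
// maxInFlight at a time: the buffered channel acts as a counting semaphore.
func boundedGo(maxInFlight int, tasks []func()) {
	sem := make(chan struct{}, maxInFlight)
	var wg sync.WaitGroup
	for _, task := range tasks {
		wg.Add(1)
		sem <- struct{}{} // blocks while maxInFlight tasks are running
		go func(task func()) {
			defer wg.Done()
			defer func() { <-sem }()
			task()
		}(task)
	}
	wg.Wait()
}
```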
- err := prototypeTable.ReadRows(ctx, bigtable.InfiniteRange(""), copyRowToTable, bigtable.RowFilter(filter)) - wg.Wait() - if err != nil { - return err - } - return writeErr -} - -// handleClear calls rebuildTable -func handleClear(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - http.Error(w, "POST requests only", http.StatusMethodNotAllowed) - return - } - if err := rebuildTable(); err != nil { - http.Error(w, "Failed to rebuild index: "+err.Error(), http.StatusInternalServerError) - return - } - fmt.Fprint(w, "Rebuilt index.\n") -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/pubsub/cmdline/main.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/pubsub/cmdline/main.go deleted file mode 100644 index c51614d1ce..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/examples/pubsub/cmdline/main.go +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package main contains a simple command line tool for Cloud Pub/Sub -// Cloud Pub/Sub docs: https://cloud.google.com/pubsub/docs -package main - -import ( - "errors" - "flag" - "fmt" - "io/ioutil" - "log" - "net/http" - "os" - "strconv" - "time" - - "golang.org/x/net/context" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "google.golang.org/cloud" - "google.golang.org/cloud/compute/metadata" - "google.golang.org/cloud/pubsub" -) - -var ( - jsonFile = flag.String("j", "", "A path to your JSON key file for your service account downloaded from Google Developer Console, not needed if you run it on Compute Engine instances.") - projID = flag.String("p", "", "The ID of your Google Cloud project.") - reportMPS = flag.Bool("report", false, "Reports the incoming/outgoing message rate in msg/sec if set.") - size = flag.Int("size", 10, "Batch size for pull_messages and publish_messages subcommands.") -) - -const ( - usage = `Available arguments are: - create_topic - delete_topic - create_subscription - delete_subscription - publish - pull_messages - publish_messages -` - tick = 1 * time.Second -) - -func usageAndExit(msg string) { - fmt.Fprintln(os.Stderr, msg) - fmt.Println("Flags:") - flag.PrintDefaults() - fmt.Fprint(os.Stderr, usage) - os.Exit(2) -} - -// Check the length of the arguments. -func checkArgs(argv []string, min int) { - if len(argv) < min { - usageAndExit("Missing arguments") - } -} - -// newClient creates http.Client with a jwt service account when -// jsonFile flag is specified, otherwise by obtaining the GCE service -// account's access token. 
-func newClient(jsonFile string) (*http.Client, error) { - if jsonFile != "" { - jsonKey, err := ioutil.ReadFile(jsonFile) - if err != nil { - return nil, err - } - conf, err := google.JWTConfigFromJSON(jsonKey, pubsub.ScopePubSub) - if err != nil { - return nil, err - } - return conf.Client(oauth2.NoContext), nil - } - if metadata.OnGCE() { - c := &http.Client{ - Transport: &oauth2.Transport{ - Source: google.ComputeTokenSource(""), - }, - } - if *projID == "" { - projectID, err := metadata.ProjectID() - if err != nil { - return nil, fmt.Errorf("ProjectID failed, %v", err) - } - *projID = projectID - } - return c, nil - } - return nil, errors.New("Could not create an authenticated client.") -} - -func listTopics(ctx context.Context, argv []string) { - panic("listTopics not implemented yet") -} - -func createTopic(ctx context.Context, argv []string) { - checkArgs(argv, 2) - topic := argv[1] - err := pubsub.CreateTopic(ctx, topic) - if err != nil { - log.Fatalf("CreateTopic failed, %v", err) - } - fmt.Printf("Topic %s was created.\n", topic) -} - -func deleteTopic(ctx context.Context, argv []string) { - checkArgs(argv, 2) - topic := argv[1] - err := pubsub.DeleteTopic(ctx, topic) - if err != nil { - log.Fatalf("DeleteTopic failed, %v", err) - } - fmt.Printf("Topic %s was deleted.\n", topic) -} - -func listSubscriptions(ctx context.Context, argv []string) { - panic("listSubscriptions not implemented yet") -} - -func createSubscription(ctx context.Context, argv []string) { - checkArgs(argv, 3) - sub := argv[1] - topic := argv[2] - err := pubsub.CreateSub(ctx, sub, topic, 60*time.Second, "") - if err != nil { - log.Fatalf("CreateSub failed, %v", err) - } - fmt.Printf("Subscription %s was created.\n", sub) -} - -func deleteSubscription(ctx context.Context, argv []string) { - checkArgs(argv, 2) - sub := argv[1] - err := pubsub.DeleteSub(ctx, sub) - if err != nil { - log.Fatalf("DeleteSub failed, %v", err) - } - fmt.Printf("Subscription %s was deleted.\n", sub) -} - -func publish(ctx context.Context, argv []string) { - checkArgs(argv, 3) - topic := argv[1] - message := argv[2] - msgIDs, err := pubsub.Publish(ctx, topic, &pubsub.Message{ - Data: []byte(message), - }) - if err != nil { - log.Fatalf("Publish failed, %v", err) - } - fmt.Printf("Message '%s' published to a topic %s and the message id is %s\n", message, topic, msgIDs[0]) -} - -type reporter struct { - reportTitle string - lastC uint64 - c uint64 - result <-chan int -} - -func (r *reporter) report() { - ticker := time.NewTicker(tick) - defer func() { - ticker.Stop() - }() - for { - select { - case <-ticker.C: - n := r.c - r.lastC - r.lastC = r.c - mps := n / uint64(tick/time.Second) - log.Printf("%s ~%d msgs/s, total: %d", r.reportTitle, mps, r.c) - case n := <-r.result: - r.c += uint64(n) - } - } -} - -func ack(ctx context.Context, sub string, ackID ...string) { - err := pubsub.Ack(ctx, sub, ackID...) - if err != nil { - log.Printf("Ack failed, %v\n", err) - } -} - -func pullLoop(ctx context.Context, sub string, result chan<- int) { - for { - msgs, err := pubsub.PullWait(ctx, sub, *size) - if err != nil { - log.Printf("PullWait failed, %v\n", err) - time.Sleep(5 * time.Second) - continue - } - if len(msgs) == 0 { - log.Println("Received no messages") - continue - } - if *reportMPS { - result <- len(msgs) - } - ackIDs := make([]string, len(msgs)) - for i, msg := range msgs { - if !*reportMPS { - fmt.Printf("Got a message: %s\n", msg.Data) - } - ackIDs[i] = msg.AckID - } - go ack(ctx, sub, ackIDs...) 
- } -} - -func pullMessages(ctx context.Context, argv []string) { - checkArgs(argv, 3) - sub := argv[1] - workers, err := strconv.Atoi(argv[2]) - if err != nil { - log.Fatalf("Atoi failed, %v", err) - } - result := make(chan int, 1024) - for i := 0; i < int(workers); i++ { - go pullLoop(ctx, sub, result) - } - if *reportMPS { - r := reporter{reportTitle: "Received", result: result} - r.report() - } else { - select {} - } -} - -func publishLoop(ctx context.Context, topic string, workerid int, result chan<- int) { - var r uint64 - for { - msgs := make([]*pubsub.Message, *size) - for i := 0; i < *size; i++ { - msgs[i] = &pubsub.Message{ - Data: []byte(fmt.Sprintf("Worker: %d, Round: %d, Message: %d", workerid, r, i)), - } - } - _, err := pubsub.Publish(ctx, topic, msgs...) - if err != nil { - log.Printf("Publish failed, %v\n", err) - return - } - r++ - if *reportMPS { - result <- *size - } - } -} - -func publishMessages(ctx context.Context, argv []string) { - checkArgs(argv, 3) - topic := argv[1] - workers, err := strconv.Atoi(argv[2]) - if err != nil { - log.Fatalf("Atoi failed, %v", err) - } - result := make(chan int, 1024) - for i := 0; i < int(workers); i++ { - go publishLoop(ctx, topic, i, result) - } - if *reportMPS { - r := reporter{reportTitle: "Sent", result: result} - r.report() - } else { - select {} - } -} - -// This example demonstrates calling the Cloud Pub/Sub API. As of 22 -// Oct 2014, the Cloud Pub/Sub API is only available if you're -// whitelisted. If you're interested in using it, please apply for the -// Limited Preview program at the following form: -// http://goo.gl/Wql9HL -// -// Also, before running this example, be sure to enable Cloud Pub/Sub -// service on your project in Developer Console at: -// https://console.developers.google.com/ -// -// Unless you run this sample on Compute Engine instance, please -// create a new service account and download a JSON key file for it at -// the developer console: https://console.developers.google.com/ -// -// It has the following subcommands: -// -// create_topic -// delete_topic -// create_subscription -// delete_subscription -// publish -// pull_messages -// publish_messages -// -// You can choose any names for topic and subscription as long as they -// follow the naming rule described at: -// https://cloud.google.com/pubsub/overview#names -// -// You can create/delete topics/subscriptions by self-explanatory -// subcommands. -// -// The "publish" subcommand is for publishing a single message to a -// specified Cloud Pub/Sub topic. -// -// The "pull_messages" subcommand is for continuously pulling messages -// from a specified Cloud Pub/Sub subscription with specified number -// of workers. -// -// The "publish_messages" subcommand is for continuously publishing -// messages to a specified Cloud Pub/Sub topic with specified number -// of workers. 
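The reporter above multiplexes worker counts and a ticker in one select loop to print a msgs/sec rate. A self-contained sketch of that loop with a simulated producer; unlike the original, it also exits when the channel closes:

```go
package main

import (
	"log"
	"time"
)

// report drains per-batch counts from result and logs the rate each tick.
// tick is assumed to be at least one second, as in the original.
func report(title string, tick time.Duration, result <-chan int) {
	ticker := time.NewTicker(tick)
	defer ticker.Stop()
	var total, last uint64
	for {
		select {
		case <-ticker.C:
			rate := (total - last) / uint64(tick/time.Second)
			last = total
			log.Printf("%s ~%d msgs/s, total: %d", title, rate, total)
		case n, ok := <-result:
			if !ok {
				return
			}
			total += uint64(n)
		}
	}
}

func main() {
	result := make(chan int)
	go func() {
		for i := 0; i < 10; i++ {
			result <- 100
			time.Sleep(200 * time.Millisecond)
		}
		close(result)
	}()
	report("Received", time.Second, result)
}
```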
-func main() { - flag.Parse() - argv := flag.Args() - checkArgs(argv, 1) - client, err := newClient(*jsonFile) - if err != nil { - log.Fatalf("clientAndId failed, %v", err) - } - if *projID == "" { - usageAndExit("Please specify Project ID.") - } - ctx := cloud.NewContext(*projID, client) - m := map[string]func(ctx context.Context, argv []string){ - "create_topic": createTopic, - "delete_topic": deleteTopic, - "create_subscription": createSubscription, - "delete_subscription": deleteSubscription, - "publish": publish, - "pull_messages": pullMessages, - "publish_messages": publishMessages, - } - subcommand := argv[0] - f, ok := m[subcommand] - if !ok { - usageAndExit(fmt.Sprintf("Function not found for %s", subcommand)) - } - f(ctx, argv) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appengine/app.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appengine/app.go deleted file mode 100644 index 0889f12562..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appengine/app.go +++ /dev/null @@ -1,401 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package gcsdemo is an example App Engine app using the Google Cloud Storage API. -package gcsdemo - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "google.golang.org/appengine" - "google.golang.org/appengine/file" - "google.golang.org/appengine/log" - "google.golang.org/appengine/urlfetch" - "google.golang.org/cloud" - "google.golang.org/cloud/storage" -) - -// bucket is a local cache of the app's default bucket name. -var bucket string // or: var bucket = ".appspot.com" - -func init() { - http.HandleFunc("/", handler) -} - -// demo struct holds information needed to run the various demo functions. -type demo struct { - c context.Context - w http.ResponseWriter - ctx context.Context - // cleanUp is a list of filenames that need cleaning up at the end of the demo. - cleanUp []string - // failed indicates that one or more of the demo steps failed. - failed bool -} - -func (d *demo) errorf(format string, args ...interface{}) { - d.failed = true - log.Errorf(d.c, format, args...) -} - -// handler is the main demo entry point that calls the GCS operations. -func handler(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.NotFound(w, r) - return - } - c := appengine.NewContext(r) - if bucket == "" { - var err error - if bucket, err = file.DefaultBucketName(c); err != nil { - log.Errorf(c, "failed to get default GCS bucket name: %v", err) - return - } - } - hc := &http.Client{ - Transport: &oauth2.Transport{ - Source: google.AppEngineTokenSource(c, storage.ScopeFullControl), - // Note that the App Engine urlfetch service has a limit of 10MB uploads and - // 32MB downloads. 
- // See https://cloud.google.com/appengine/docs/go/urlfetch/#Go_Quotas_and_limits - // for more information. - Base: &urlfetch.Transport{Context: c}, - }, - } - ctx := cloud.NewContext(appengine.AppID(c), hc) - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - fmt.Fprintf(w, "Demo GCS Application running from Version: %v\n", appengine.VersionID(c)) - fmt.Fprintf(w, "Using bucket name: %v\n\n", bucket) - - d := &demo{ - c: c, - w: w, - ctx: ctx, - } - - n := "demo-testfile-go" - d.createFile(n) - d.readFile(n) - d.copyFile(n) - d.statFile(n) - d.createListFiles() - d.listBucket() - d.listBucketDirMode() - d.defaultACL() - d.putDefaultACLRule() - d.deleteDefaultACLRule() - d.bucketACL() - d.putBucketACLRule() - d.deleteBucketACLRule() - d.acl(n) - d.putACLRule(n) - d.deleteACLRule(n) - d.deleteFiles() - - if d.failed { - io.WriteString(w, "\nDemo failed.\n") - } else { - io.WriteString(w, "\nDemo succeeded.\n") - } -} - -// createFile creates a file in Google Cloud Storage. -func (d *demo) createFile(fileName string) { - fmt.Fprintf(d.w, "Creating file /%v/%v\n", bucket, fileName) - - wc := storage.NewWriter(d.ctx, bucket, fileName) - wc.ContentType = "text/plain" - wc.Metadata = map[string]string{ - "x-goog-meta-foo": "foo", - "x-goog-meta-bar": "bar", - } - d.cleanUp = append(d.cleanUp, fileName) - - if _, err := wc.Write([]byte("abcde\n")); err != nil { - d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) - return - } - if _, err := wc.Write([]byte(strings.Repeat("f", 1024*4) + "\n")); err != nil { - d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) - return - } - if err := wc.Close(); err != nil { - d.errorf("createFile: unable to close bucket %q, file %q: %v", bucket, fileName, err) - return - } -} - -// readFile reads the named file in Google Cloud Storage. -func (d *demo) readFile(fileName string) { - io.WriteString(d.w, "\nAbbreviated file content (first line and last 1K):\n") - - rc, err := storage.NewReader(d.ctx, bucket, fileName) - if err != nil { - d.errorf("readFile: unable to open file from bucket %q, file %q: %v", bucket, fileName, err) - return - } - defer rc.Close() - slurp, err := ioutil.ReadAll(rc) - if err != nil { - d.errorf("readFile: unable to read data from bucket %q, file %q: %v", bucket, fileName, err) - return - } - - fmt.Fprintf(d.w, "%s\n", bytes.SplitN(slurp, []byte("\n"), 2)[0]) - if len(slurp) > 1024 { - fmt.Fprintf(d.w, "...%s\n", slurp[len(slurp)-1024:]) - } else { - fmt.Fprintf(d.w, "%s\n", slurp) - } -} - -// copyFile copies a file in Google Cloud Storage. 
-func (d *demo) copyFile(fileName string) { - copyName := fileName + "-copy" - fmt.Fprintf(d.w, "Copying file /%v/%v to /%v/%v:\n", bucket, fileName, bucket, copyName) - - obj, err := storage.CopyObject(d.ctx, bucket, fileName, bucket, copyName, nil) - if err != nil { - d.errorf("copyFile: unable to copy /%v/%v to bucket %q, file %q: %v", bucket, fileName, bucket, copyName, err) - return - } - d.cleanUp = append(d.cleanUp, copyName) - - d.dumpStats(obj) -} - -func (d *demo) dumpStats(obj *storage.Object) { - fmt.Fprintf(d.w, "(filename: /%v/%v, ", obj.Bucket, obj.Name) - fmt.Fprintf(d.w, "ContentType: %q, ", obj.ContentType) - fmt.Fprintf(d.w, "ACL: %#v, ", obj.ACL) - fmt.Fprintf(d.w, "Owner: %v, ", obj.Owner) - fmt.Fprintf(d.w, "ContentEncoding: %q, ", obj.ContentEncoding) - fmt.Fprintf(d.w, "Size: %v, ", obj.Size) - fmt.Fprintf(d.w, "MD5: %q, ", obj.MD5) - fmt.Fprintf(d.w, "CRC32C: %q, ", obj.CRC32C) - fmt.Fprintf(d.w, "Metadata: %#v, ", obj.Metadata) - fmt.Fprintf(d.w, "MediaLink: %q, ", obj.MediaLink) - fmt.Fprintf(d.w, "StorageClass: %q, ", obj.StorageClass) - if !obj.Deleted.IsZero() { - fmt.Fprintf(d.w, "Deleted: %v, ", obj.Deleted) - } - fmt.Fprintf(d.w, "Updated: %v)\n", obj.Updated) -} - -// statFile reads the stats of the named file in Google Cloud Storage. -func (d *demo) statFile(fileName string) { - io.WriteString(d.w, "\nFile stat:\n") - - obj, err := storage.StatObject(d.ctx, bucket, fileName) - if err != nil { - d.errorf("statFile: unable to stat file from bucket %q, file %q: %v", bucket, fileName, err) - return - } - - d.dumpStats(obj) -} - -// createListFiles creates files that will be used by listBucket. -func (d *demo) createListFiles() { - io.WriteString(d.w, "\nCreating more files for listbucket...\n") - for _, n := range []string{"foo1", "foo2", "bar", "bar/1", "bar/2", "boo/"} { - d.createFile(n) - } -} - -// listBucket lists the contents of a bucket in Google Cloud Storage. -func (d *demo) listBucket() { - io.WriteString(d.w, "\nListbucket result:\n") - - query := &storage.Query{Prefix: "foo"} - for query != nil { - objs, err := storage.ListObjects(d.ctx, bucket, query) - if err != nil { - d.errorf("listBucket: unable to list bucket %q: %v", bucket, err) - return - } - query = objs.Next - - for _, obj := range objs.Results { - d.dumpStats(obj) - } - } -} - -func (d *demo) listDir(name, indent string) { - query := &storage.Query{Prefix: name, Delimiter: "/"} - for query != nil { - objs, err := storage.ListObjects(d.ctx, bucket, query) - if err != nil { - d.errorf("listBucketDirMode: unable to list bucket %q: %v", bucket, err) - return - } - query = objs.Next - - for _, obj := range objs.Results { - fmt.Fprint(d.w, indent) - d.dumpStats(obj) - } - for _, dir := range objs.Prefixes { - fmt.Fprintf(d.w, "%v(directory: /%v/%v)\n", indent, bucket, dir) - d.listDir(dir, indent+" ") - } - } -} - -// listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage. -func (d *demo) listBucketDirMode() { - io.WriteString(d.w, "\nListbucket directory mode result:\n") - d.listDir("b", "") -} - -// dumpDefaultACL prints out the default object ACL for this bucket. -func (d *demo) dumpDefaultACL() { - acl, err := storage.DefaultACL(d.ctx, bucket) - if err != nil { - d.errorf("defaultACL: unable to list default object ACL for bucket %q: %v", bucket, err) - return - } - for _, v := range acl { - fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) - } -} - -// defaultACL displays the default object ACL for this bucket. 
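// listBucket and listDir above both page through results by re-issuing
// ListObjects with the Query value handed back in objs.Next until it is nil.
// A condensed sketch of that pagination pattern (not in the original; it
// assumes the surrounding file's imports, and prefix is illustrative):
func listAll(ctx context.Context, bucket, prefix string) ([]*storage.Object, error) {
	var all []*storage.Object
	query := &storage.Query{Prefix: prefix}
	for query != nil {
		objs, err := storage.ListObjects(ctx, bucket, query)
		if err != nil {
			return nil, err
		}
		all = append(all, objs.Results...)
		query = objs.Next // nil once the last page has been consumed
	}
	return all, nil
}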
-func (d *demo) defaultACL() { - io.WriteString(d.w, "\nDefault object ACL:\n") - d.dumpDefaultACL() -} - -// putDefaultACLRule adds the "allUsers" default object ACL rule for this bucket. -func (d *demo) putDefaultACLRule() { - io.WriteString(d.w, "\nPut Default object ACL Rule:\n") - err := storage.PutDefaultACLRule(d.ctx, bucket, "allUsers", storage.RoleReader) - if err != nil { - d.errorf("putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v", bucket, err) - return - } - d.dumpDefaultACL() -} - -// deleteDefaultACLRule deletes the "allUsers" default object ACL rule for this bucket. -func (d *demo) deleteDefaultACLRule() { - io.WriteString(d.w, "\nDelete Default object ACL Rule:\n") - err := storage.DeleteDefaultACLRule(d.ctx, bucket, "allUsers") - if err != nil { - d.errorf("deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v", bucket, err) - return - } - d.dumpDefaultACL() -} - -// dumpBucketACL prints out the bucket ACL. -func (d *demo) dumpBucketACL() { - acl, err := storage.BucketACL(d.ctx, bucket) - if err != nil { - d.errorf("dumpBucketACL: unable to list bucket ACL for bucket %q: %v", bucket, err) - return - } - for _, v := range acl { - fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) - } -} - -// bucketACL displays the bucket ACL for this bucket. -func (d *demo) bucketACL() { - io.WriteString(d.w, "\nBucket ACL:\n") - d.dumpBucketACL() -} - -// putBucketACLRule adds the "allUsers" bucket ACL rule for this bucket. -func (d *demo) putBucketACLRule() { - io.WriteString(d.w, "\nPut Bucket ACL Rule:\n") - err := storage.PutBucketACLRule(d.ctx, bucket, "allUsers", storage.RoleReader) - if err != nil { - d.errorf("putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v", bucket, err) - return - } - d.dumpBucketACL() -} - -// deleteBucketACLRule deletes the "allUsers" bucket ACL rule for this bucket. -func (d *demo) deleteBucketACLRule() { - io.WriteString(d.w, "\nDelete Bucket ACL Rule:\n") - err := storage.DeleteBucketACLRule(d.ctx, bucket, "allUsers") - if err != nil { - d.errorf("deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v", bucket, err) - return - } - d.dumpBucketACL() -} - -// dumpACL prints out the ACL of the named file. -func (d *demo) dumpACL(fileName string) { - acl, err := storage.ACL(d.ctx, bucket, fileName) - if err != nil { - d.errorf("dumpACL: unable to list file ACL for bucket %q, file %q: %v", bucket, fileName, err) - return - } - for _, v := range acl { - fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) - } -} - -// acl displays the ACL for the named file. -func (d *demo) acl(fileName string) { - fmt.Fprintf(d.w, "\nACL for file %v:\n", fileName) - d.dumpACL(fileName) -} - -// putACLRule adds the "allUsers" ACL rule for the named file. -func (d *demo) putACLRule(fileName string) { - fmt.Fprintf(d.w, "\nPut ACL rule for file %v:\n", fileName) - err := storage.PutACLRule(d.ctx, bucket, fileName, "allUsers", storage.RoleReader) - if err != nil { - d.errorf("putACLRule: unable to save ACL rule for bucket %q, file %q: %v", bucket, fileName, err) - return - } - d.dumpACL(fileName) -} - -// deleteACLRule deletes the "allUsers" ACL rule for the named file.
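// Taken together, the ACL helpers above run a grant-then-revoke cycle at
// three scopes: default object ACLs, the bucket ACL, and per-object ACLs.
// A condensed sketch of the per-object case (not in the original; assumes
// the surrounding file's imports):
func togglePublicRead(ctx context.Context, bucket, object string) error {
	// Grant: any user may read this object.
	if err := storage.PutACLRule(ctx, bucket, object, "allUsers", storage.RoleReader); err != nil {
		return err
	}
	// Revoke the same rule again.
	return storage.DeleteACLRule(ctx, bucket, object, "allUsers")
}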
-func (d *demo) deleteACLRule(fileName string) { - fmt.Fprintf(d.w, "\nDelete ACL rule for file %v:\n", fileName) - err := storage.DeleteACLRule(d.ctx, bucket, fileName, "allUsers") - if err != nil { - d.errorf("deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v", bucket, fileName, err) - return - } - d.dumpACL(fileName) -} - -// deleteFiles deletes all the temporary files from a bucket created by this demo. -func (d *demo) deleteFiles() { - io.WriteString(d.w, "\nDeleting files...\n") - for _, v := range d.cleanUp { - fmt.Fprintf(d.w, "Deleting file %v\n", v) - if err := storage.DeleteObject(d.ctx, bucket, v); err != nil { - d.errorf("deleteFiles: unable to delete bucket %q, file %q: %v", bucket, v, err) - return - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appengine/app.yaml b/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appengine/app.yaml deleted file mode 100644 index e98d030876..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appengine/app.yaml +++ /dev/null @@ -1,8 +0,0 @@ -application: -version: v1 -runtime: go -api_version: go1 - -handlers: -- url: /.* - script: _go_app diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appenginevm/app.go b/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appenginevm/app.go deleted file mode 100644 index 47c7e927bb..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appenginevm/app.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package main is an example Managed VM app using the Google Cloud Storage API. -package main - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "google.golang.org/appengine" - "google.golang.org/appengine/file" - "google.golang.org/appengine/log" - "google.golang.org/cloud" - "google.golang.org/cloud/storage" -) - -// bucket is a local cache of the app's default bucket name. -var bucket string // or: var bucket = ".appspot.com" - -func main() { - http.HandleFunc("/", handler) - appengine.Main() -} - -// demo struct holds information needed to run the various demo functions. -type demo struct { - c context.Context - w http.ResponseWriter - ctx context.Context - // cleanUp is a list of filenames that need cleaning up at the end of the demo. - cleanUp []string - // failed indicates that one or more of the demo steps failed. - failed bool -} - -func (d *demo) errorf(format string, args ...interface{}) { - d.failed = true - log.Errorf(d.c, format, args...) -} - -// handler is the main demo entry point that calls the GCS operations.
-func handler(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.NotFound(w, r) - return - } - c := appengine.NewContext(r) - if bucket == "" { - var err error - if bucket, err = file.DefaultBucketName(c); err != nil { - log.Errorf(c, "failed to get default GCS bucket name: %v", err) - return - } - } - hc := &http.Client{ - Transport: &oauth2.Transport{ - Source: google.AppEngineTokenSource(c, storage.ScopeFullControl), - }, - } - ctx := cloud.NewContext(appengine.AppID(c), hc) - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - fmt.Fprintf(w, "Demo GCS Application running from Version: %v\n", appengine.VersionID(c)) - fmt.Fprintf(w, "Using bucket name: %v\n\n", bucket) - - d := &demo{ - c: c, - w: w, - ctx: ctx, - } - - n := "demo-testfile-go" - d.createFile(n) - d.readFile(n) - d.copyFile(n) - d.statFile(n) - d.createListFiles() - d.listBucket() - d.listBucketDirMode() - d.defaultACL() - d.putDefaultACLRule() - d.deleteDefaultACLRule() - d.bucketACL() - d.putBucketACLRule() - d.deleteBucketACLRule() - d.acl(n) - d.putACLRule(n) - d.deleteACLRule(n) - d.deleteFiles() - - if d.failed { - io.WriteString(w, "\nDemo failed.\n") - } else { - io.WriteString(w, "\nDemo succeeded.\n") - } -} - -// createFile creates a file in Google Cloud Storage. -func (d *demo) createFile(fileName string) { - fmt.Fprintf(d.w, "Creating file /%v/%v\n", bucket, fileName) - - wc := storage.NewWriter(d.ctx, bucket, fileName) - wc.ContentType = "text/plain" - wc.Metadata = map[string]string{ - "x-goog-meta-foo": "foo", - "x-goog-meta-bar": "bar", - } - d.cleanUp = append(d.cleanUp, fileName) - - if _, err := wc.Write([]byte("abcde\n")); err != nil { - d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) - return - } - if _, err := wc.Write([]byte(strings.Repeat("f", 1024*4) + "\n")); err != nil { - d.errorf("createFile: unable to write data to bucket %q, file %q: %v", bucket, fileName, err) - return - } - if err := wc.Close(); err != nil { - d.errorf("createFile: unable to close bucket %q, file %q: %v", bucket, fileName, err) - return - } -} - -// readFile reads the named file in Google Cloud Storage. -func (d *demo) readFile(fileName string) { - io.WriteString(d.w, "\nAbbreviated file content (first line and last 1K):\n") - - rc, err := storage.NewReader(d.ctx, bucket, fileName) - if err != nil { - d.errorf("readFile: unable to open file from bucket %q, file %q: %v", bucket, fileName, err) - return - } - defer rc.Close() - slurp, err := ioutil.ReadAll(rc) - if err != nil { - d.errorf("readFile: unable to read data from bucket %q, file %q: %v", bucket, fileName, err) - return - } - - fmt.Fprintf(d.w, "%s\n", bytes.SplitN(slurp, []byte("\n"), 2)[0]) - if len(slurp) > 1024 { - fmt.Fprintf(d.w, "...%s\n", slurp[len(slurp)-1024:]) - } else { - fmt.Fprintf(d.w, "%s\n", slurp) - } -} - -// copyFile copies a file in Google Cloud Storage. 
-func (d *demo) copyFile(fileName string) { - copyName := fileName + "-copy" - fmt.Fprintf(d.w, "Copying file /%v/%v to /%v/%v:\n", bucket, fileName, bucket, copyName) - - obj, err := storage.CopyObject(d.ctx, bucket, fileName, bucket, copyName, nil) - if err != nil { - d.errorf("copyFile: unable to copy /%v/%v to bucket %q, file %q: %v", bucket, fileName, bucket, copyName, err) - return - } - d.cleanUp = append(d.cleanUp, copyName) - - d.dumpStats(obj) -} - -func (d *demo) dumpStats(obj *storage.Object) { - fmt.Fprintf(d.w, "(filename: /%v/%v, ", obj.Bucket, obj.Name) - fmt.Fprintf(d.w, "ContentType: %q, ", obj.ContentType) - fmt.Fprintf(d.w, "ACL: %#v, ", obj.ACL) - fmt.Fprintf(d.w, "Owner: %v, ", obj.Owner) - fmt.Fprintf(d.w, "ContentEncoding: %q, ", obj.ContentEncoding) - fmt.Fprintf(d.w, "Size: %v, ", obj.Size) - fmt.Fprintf(d.w, "MD5: %q, ", obj.MD5) - fmt.Fprintf(d.w, "CRC32C: %q, ", obj.CRC32C) - fmt.Fprintf(d.w, "Metadata: %#v, ", obj.Metadata) - fmt.Fprintf(d.w, "MediaLink: %q, ", obj.MediaLink) - fmt.Fprintf(d.w, "StorageClass: %q, ", obj.StorageClass) - if !obj.Deleted.IsZero() { - fmt.Fprintf(d.w, "Deleted: %v, ", obj.Deleted) - } - fmt.Fprintf(d.w, "Updated: %v)\n", obj.Updated) -} - -// statFile reads the stats of the named file in Google Cloud Storage. -func (d *demo) statFile(fileName string) { - io.WriteString(d.w, "\nFile stat:\n") - - obj, err := storage.StatObject(d.ctx, bucket, fileName) - if err != nil { - d.errorf("statFile: unable to stat file from bucket %q, file %q: %v", bucket, fileName, err) - return - } - - d.dumpStats(obj) -} - -// createListFiles creates files that will be used by listBucket. -func (d *demo) createListFiles() { - io.WriteString(d.w, "\nCreating more files for listbucket...\n") - for _, n := range []string{"foo1", "foo2", "bar", "bar/1", "bar/2", "boo/"} { - d.createFile(n) - } -} - -// listBucket lists the contents of a bucket in Google Cloud Storage. -func (d *demo) listBucket() { - io.WriteString(d.w, "\nListbucket result:\n") - - query := &storage.Query{Prefix: "foo"} - for query != nil { - objs, err := storage.ListObjects(d.ctx, bucket, query) - if err != nil { - d.errorf("listBucket: unable to list bucket %q: %v", bucket, err) - return - } - query = objs.Next - - for _, obj := range objs.Results { - d.dumpStats(obj) - } - } -} - -func (d *demo) listDir(name, indent string) { - query := &storage.Query{Prefix: name, Delimiter: "/"} - for query != nil { - objs, err := storage.ListObjects(d.ctx, bucket, query) - if err != nil { - d.errorf("listBucketDirMode: unable to list bucket %q: %v", bucket, err) - return - } - query = objs.Next - - for _, obj := range objs.Results { - fmt.Fprint(d.w, indent) - d.dumpStats(obj) - } - for _, dir := range objs.Prefixes { - fmt.Fprintf(d.w, "%v(directory: /%v/%v)\n", indent, bucket, dir) - d.listDir(dir, indent+" ") - } - } -} - -// listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage. -func (d *demo) listBucketDirMode() { - io.WriteString(d.w, "\nListbucket directory mode result:\n") - d.listDir("b", "") -} - -// dumpDefaultACL prints out the default object ACL for this bucket. -func (d *demo) dumpDefaultACL() { - acl, err := storage.DefaultACL(d.ctx, bucket) - if err != nil { - d.errorf("defaultACL: unable to list default object ACL for bucket %q: %v", bucket, err) - return - } - for _, v := range acl { - fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) - } -} - -// defaultACL displays the default object ACL for this bucket. 
-func (d *demo) defaultACL() { - io.WriteString(d.w, "\nDefault object ACL:\n") - d.dumpDefaultACL() -} - -// putDefaultACLRule adds the "allUsers" default object ACL rule for this bucket. -func (d *demo) putDefaultACLRule() { - io.WriteString(d.w, "\nPut Default object ACL Rule:\n") - err := storage.PutDefaultACLRule(d.ctx, bucket, "allUsers", storage.RoleReader) - if err != nil { - d.errorf("putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v", bucket, err) - return - } - d.dumpDefaultACL() -} - -// deleteDefaultACLRule deletes the "allUsers" default object ACL rule for this bucket. -func (d *demo) deleteDefaultACLRule() { - io.WriteString(d.w, "\nDelete Default object ACL Rule:\n") - err := storage.DeleteDefaultACLRule(d.ctx, bucket, "allUsers") - if err != nil { - d.errorf("deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v", bucket, err) - return - } - d.dumpDefaultACL() -} - -// dumpBucketACL prints out the bucket ACL. -func (d *demo) dumpBucketACL() { - acl, err := storage.BucketACL(d.ctx, bucket) - if err != nil { - d.errorf("dumpBucketACL: unable to list bucket ACL for bucket %q: %v", bucket, err) - return - } - for _, v := range acl { - fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) - } -} - -// bucketACL displays the bucket ACL for this bucket. -func (d *demo) bucketACL() { - io.WriteString(d.w, "\nBucket ACL:\n") - d.dumpBucketACL() -} - -// putBucketACLRule adds the "allUsers" bucket ACL rule for this bucket. -func (d *demo) putBucketACLRule() { - io.WriteString(d.w, "\nPut Bucket ACL Rule:\n") - err := storage.PutBucketACLRule(d.ctx, bucket, "allUsers", storage.RoleReader) - if err != nil { - d.errorf("putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v", bucket, err) - return - } - d.dumpBucketACL() -} - -// deleteBucketACLRule deletes the "allUsers" bucket ACL rule for this bucket. -func (d *demo) deleteBucketACLRule() { - io.WriteString(d.w, "\nDelete Bucket ACL Rule:\n") - err := storage.DeleteBucketACLRule(d.ctx, bucket, "allUsers") - if err != nil { - d.errorf("deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v", bucket, err) - return - } - d.dumpBucketACL() -} - -// dumpACL prints out the ACL of the named file. -func (d *demo) dumpACL(fileName string) { - acl, err := storage.ACL(d.ctx, bucket, fileName) - if err != nil { - d.errorf("dumpACL: unable to list file ACL for bucket %q, file %q: %v", bucket, fileName, err) - return - } - for _, v := range acl { - fmt.Fprintf(d.w, "Entity: %q, Role: %q\n", v.Entity, v.Role) - } -} - -// acl displays the ACL for the named file. -func (d *demo) acl(fileName string) { - fmt.Fprintf(d.w, "\nACL for file %v:\n", fileName) - d.dumpACL(fileName) -} - -// putACLRule adds the "allUsers" ACL rule for the named file. -func (d *demo) putACLRule(fileName string) { - fmt.Fprintf(d.w, "\nPut ACL rule for file %v:\n", fileName) - err := storage.PutACLRule(d.ctx, bucket, fileName, "allUsers", storage.RoleReader) - if err != nil { - d.errorf("putACLRule: unable to save ACL rule for bucket %q, file %q: %v", bucket, fileName, err) - return - } - d.dumpACL(fileName) -} - -// deleteACLRule deletes the "allUsers" ACL rule for the named file.
-func (d *demo) deleteACLRule(fileName string) { - fmt.Fprintf(d.w, "\nDelete ACL rule for file %v:\n", fileName) - err := storage.DeleteACLRule(d.ctx, bucket, fileName, "allUsers") - if err != nil { - d.errorf("deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v", bucket, fileName, err) - return - } - d.dumpACL(fileName) -} - -// deleteFiles deletes all the temporary files from a bucket created by this demo. -func (d *demo) deleteFiles() { - io.WriteString(d.w, "\nDeleting files...\n") - for _, v := range d.cleanUp { - fmt.Fprintf(d.w, "Deleting file %v\n", v) - if err := storage.DeleteObject(d.ctx, bucket, v); err != nil { - d.errorf("deleteFiles: unable to delete bucket %q, file %q: %v", bucket, v, err) - return - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appenginevm/app.yaml b/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appenginevm/app.yaml deleted file mode 100644 index 6847fc8715..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/examples/storage/appenginevm/app.yaml +++ /dev/null @@ -1,10 +0,0 @@ -runtime: go -api_version: go1 -vm: true - -manual_scaling: - instances: 1 - -handlers: -- url: /.* - script: _go_app diff --git a/Godeps/_workspace/src/google.golang.org/cloud/key.json.enc b/Godeps/_workspace/src/google.golang.org/cloud/key.json.enc deleted file mode 100644 index 2f673a84b1..0000000000 Binary files a/Godeps/_workspace/src/google.golang.org/cloud/key.json.enc and /dev/null differ diff --git a/Godeps/_workspace/src/google.golang.org/cloud/logging/logging.go b/Godeps/_workspace/src/google.golang.org/cloud/logging/logging.go deleted file mode 100644 index 366fb84bcf..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/logging/logging.go +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package logging contains a Google Cloud Logging client. -// -// This package is experimental and subject to API changes. -package logging // import "google.golang.org/cloud/logging" - -import ( - "errors" - "io" - "log" - "sync" - "time" - - "golang.org/x/net/context" - api "google.golang.org/api/logging/v1beta3" - "google.golang.org/cloud" - "google.golang.org/cloud/internal/transport" -) - -// Scope is the OAuth2 scope necessary to use Google Cloud Logging. -const Scope = api.CloudPlatformScope - -// Level is the log level. -type Level int - -const ( - // Default means no assigned severity level. - Default Level = iota - Debug - Info - Warning - Error - Critical - Alert - Emergency - nLevel -) - -var levelName = [nLevel]string{ - Default: "", - Debug: "DEBUG", - Info: "INFO", - Warning: "WARNING", - Error: "ERROR", - Critical: "CRITICAL", - Alert: "ALERT", - Emergency: "EMERGENCY", -} - -func (v Level) String() string { - return levelName[v] -} - -// Client is a Google Cloud Logging client. -// It must be constructed via NewClient. 
-type Client struct { - svc *api.Service - logs *api.ProjectsLogsEntriesService - projID string - logName string - writer [nLevel]io.Writer - logger [nLevel]*log.Logger - - mu sync.Mutex - queued []*api.LogEntry - curFlush *flushCall // currently in-flight flush - flushTimer *time.Timer // nil before first use - timerActive bool // whether flushTimer is armed - inFlight int // number of log entries sent to API service but not yet ACKed - - // For testing: - timeNow func() time.Time // optional - - // ServiceName may be "appengine.googleapis.com", - // "compute.googleapis.com" or "custom.googleapis.com". - // - // The default is "custom.googleapis.com". - // - // The service name is only used by the API server to - // determine which of the labels are used to index the logs. - ServiceName string - - // CommonLabels are metadata labels that apply to all log - // entries in this request, so that you don't have to repeat - // them in each log entry's metadata.labels field. If any of - // the log entries contains a (key, value) with the same key - // that is in CommonLabels, then the entry's (key, value) - // overrides the one in CommonLabels. - CommonLabels map[string]string - - // BufferLimit is the maximum number of items to keep in memory - // before flushing. Zero means automatic. A value of 1 means to - // flush after each log entry. - // The default is currently 10,000. - BufferLimit int - - // FlushAfter optionally specifies a threshold count at which buffered - // log entries are flushed, even if the BufferInterval has not yet - // been reached. - // The default is currently 10. - FlushAfter int - - // BufferInterval is the maximum amount of time that an item - // should remain buffered in memory before being flushed to - // the logging service. - // The default is currently 1 second. - BufferInterval time.Duration - - // Overflow is a function which runs when the Log function - // overflows its configured buffer limit. If nil, the log - // entry is dropped. The return value from Overflow is - // returned by Log. - Overflow func(*Client, Entry) error -} - -func (c *Client) flushAfter() int { - if v := c.FlushAfter; v > 0 { - return v - } - return 10 -} - -func (c *Client) bufferInterval() time.Duration { - if v := c.BufferInterval; v > 0 { - return v - } - return time.Second -} - -func (c *Client) bufferLimit() int { - if v := c.BufferLimit; v > 0 { - return v - } - return 10000 -} - -func (c *Client) serviceName() string { - if v := c.ServiceName; v != "" { - return v - } - return "custom.googleapis.com" -} - -func (c *Client) now() time.Time { - if now := c.timeNow; now != nil { - return now() - } - return time.Now() -} - -// Writer returns an io.Writer for the provided log level. -// -// Each Write call on the returned Writer generates a log entry. -// -// This Writer accessor does not allocate, so callers do not need to -// cache. -func (c *Client) Writer(v Level) io.Writer { return c.writer[v] } - -// Logger returns a *log.Logger for the provided log level. -// -// A Logger for each Level is pre-allocated by NewClient with an empty -// prefix and no flags. This Logger accessor does not allocate. -// Callers wishing to use alternate flags (such as log.Lshortfile) may -// mutate the returned Logger with SetFlags. Such mutations affect all -// callers in the program. 
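// The three buffering knobs above interact: FlushAfter triggers an early
// flush once enough entries are queued, BufferInterval bounds how long an
// entry may sit in memory, and BufferLimit caps queued-plus-in-flight
// entries, with Overflow as the escape hatch. A hypothetical tuning sketch
// from a caller's point of view (values are illustrative; assumes the std
// "log" and "time" packages and this package imported as logging, and a
// *Client obtained from NewClient below):
func tune(c *logging.Client) {
	c.FlushAfter = 50                         // flush once 50 entries are queued...
	c.BufferInterval = 500 * time.Millisecond // ...or after half a second
	c.BufferLimit = 1000                      // beyond 1000 buffered entries, divert:
	c.Overflow = func(c *logging.Client, e logging.Entry) error {
		log.Printf("dropped cloud log entry: %v", e.Payload) // fall back to stderr
		return nil // nil makes Log report success even though the entry was dropped
	}
}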
-func (c *Client) Logger(v Level) *log.Logger { return c.logger[v] } - -type levelWriter struct { - level Level - c *Client -} - -func (w levelWriter) Write(p []byte) (n int, err error) { - return len(p), w.c.Log(Entry{ - Level: w.level, - Payload: string(p), - }) -} - -// Entry is a log entry. -type Entry struct { - // Time is the time of the entry. If the zero value, the current time is used. - Time time.Time - - // Level is log entry's severity level. - // The zero value means no assigned severity level. - Level Level - - // Payload must be either a string, []byte, or something that - // marshals via the encoding/json package to a JSON object - // (and not any other type of JSON value). - Payload interface{} - - // Labels optionally specifies key/value labels for the log entry. - // Depending on the Client's ServiceName, these are indexed differently - // by the Cloud Logging Service. - // See https://cloud.google.com/logging/docs/logs_index - // The Client.Log method takes ownership of this map. - Labels map[string]string - - // TODO: de-duping id -} - -func (c *Client) apiEntry(e Entry) (*api.LogEntry, error) { - t := e.Time - if t.IsZero() { - t = c.now() - } - - ent := &api.LogEntry{ - Metadata: &api.LogEntryMetadata{ - Timestamp: t.UTC().Format(time.RFC3339Nano), - ServiceName: c.serviceName(), - Severity: e.Level.String(), - Labels: e.Labels, - }, - } - switch p := e.Payload.(type) { - case string: - ent.TextPayload = p - case []byte: - ent.TextPayload = string(p) - default: - ent.StructPayload = api.LogEntryStructPayload(p) - } - return ent, nil -} - -// LogSync logs e synchronously without any buffering. -// This is mostly intended for debugging or critical errors. -func (c *Client) LogSync(e Entry) error { - ent, err := c.apiEntry(e) - if err != nil { - return err - } - _, err = c.logs.Write(c.projID, c.logName, &api.WriteLogEntriesRequest{ - CommonLabels: c.CommonLabels, - Entries: []*api.LogEntry{ent}, - }).Do() - return err -} - -var ErrOverflow = errors.New("logging: log entry overflowed buffer limits") - -// Log queues an entry to be sent to the logging service, subject to the -// Client's parameters. By default, the log will be flushed within -// one second. -// Log only returns an error if the entry is invalid or the queue is at -// capacity. If the queue is at capacity and the entry can't be added, -// Log returns either ErrOverflow when c.Overflow is nil, or the -// value returned by c.Overflow. -func (c *Client) Log(e Entry) error { - ent, err := c.apiEntry(e) - if err != nil { - return err - } - - c.mu.Lock() - buffered := len(c.queued) + c.inFlight - - if buffered >= c.bufferLimit() { - c.mu.Unlock() - if fn := c.Overflow; fn != nil { - return fn(c, e) - } - return ErrOverflow - } - defer c.mu.Unlock() - - c.queued = append(c.queued, ent) - if len(c.queued) >= c.flushAfter() { - c.scheduleFlushLocked(0) - return nil - } - c.scheduleFlushLocked(c.bufferInterval()) - return nil -} - -// c.mu must be held. -// -// d will be one of two values: either c.BufferInterval (or its -// default value) or 0. -func (c *Client) scheduleFlushLocked(d time.Duration) { - if c.inFlight > 0 { - // For now to keep things simple, only allow one HTTP - // request in flight at a time. - return - } - switch { - case c.flushTimer == nil: - // First flush. - c.timerActive = true - c.flushTimer = time.AfterFunc(d, c.timeoutFlush) - case c.timerActive && d == 0: - // Make it happen sooner. 
For example, this is the - case of transitioning from a 1 second flush after - the 1st item to an immediate flush after the 10th - item. - c.flushTimer.Reset(0) - case !c.timerActive: - c.timerActive = true - c.flushTimer.Reset(d) - default: - // else timer was already active, also at d > 0, - // so we don't touch it and let it fire as previously - // scheduled. - } -} - -// timeoutFlush runs in its own goroutine (from time.AfterFunc) and -// flushes c.queued. -func (c *Client) timeoutFlush() { - c.mu.Lock() - c.timerActive = false - c.mu.Unlock() - if err := c.Flush(); err != nil { - // schedule another try - // TODO: smarter back-off? - c.mu.Lock() - c.scheduleFlushLocked(5 * time.Second) - c.mu.Unlock() - } -} - -// Ping reports whether the client's connection to Google Cloud -// Logging and the authentication configuration are valid. -func (c *Client) Ping() error { - _, err := c.logs.Write(c.projID, c.logName, &api.WriteLogEntriesRequest{ - Entries: []*api.LogEntry{}, - }).Do() - return err -} - -// Flush flushes any buffered log entries. -func (c *Client) Flush() error { - var numFlush int - c.mu.Lock() - for { - // We're already flushing (or we just started flushing - // ourselves), so wait for it to finish. - if f := c.curFlush; f != nil { - wasEmpty := len(c.queued) == 0 - c.mu.Unlock() - <-f.donec // wait for it - numFlush++ - // Terminate whenever there's an error, we've - // already flushed twice (one that was already - // in-flight when flush was called, and then - // one we instigated), or the queue was empty - // when we released the lock (meaning this - // in-flight flush removes everything present - // when Flush was called, and we don't need to - // kick off a new flush for things arriving - // afterward) - if f.err != nil || numFlush == 2 || wasEmpty { - return f.err - } - // Otherwise, re-obtain the lock and loop, - // starting over with seeing if a flush is in - // progress, which might've been started by a - // different goroutine before acquiring this - // lock again. - c.mu.Lock() - continue - } - - // Terminal case: - if len(c.queued) == 0 { - c.mu.Unlock() - return nil - } - - c.startFlushLocked() - } -} - -// requires c.mu be held. -func (c *Client) startFlushLocked() { - if c.curFlush != nil { - panic("internal error: flush already in flight") - } - if len(c.queued) == 0 { - panic("internal error: no items queued") - } - logEntries := c.queued - c.inFlight = len(logEntries) - c.queued = nil - - flush := &flushCall{ - donec: make(chan struct{}), - } - c.curFlush = flush - go func() { - defer close(flush.donec) - _, err := c.logs.Write(c.projID, c.logName, &api.WriteLogEntriesRequest{ - CommonLabels: c.CommonLabels, - Entries: logEntries, - }).Do() - flush.err = err - c.mu.Lock() - defer c.mu.Unlock() - c.inFlight = 0 - c.curFlush = nil - if err != nil { - c.queued = append(c.queued, logEntries...) - } else if len(c.queued) > 0 { - c.scheduleFlushLocked(c.bufferInterval()) - } - }() - -} - -const prodAddr = "https://logging.googleapis.com/" - -const userAgent = "gcloud-golang-logging/20150922" - -// NewClient returns a new log client, logging to the named log in the -// provided project. -// -// The exported fields on the returned client may be modified before -// the client is used for logging. Once log entries are in flight, -// the fields must not be modified.
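// Note the scheduling contract above: at most one WriteLogEntries RPC is in
// flight at a time, and a failed flush re-queues its entries and retries
// after five seconds. Entries can therefore still be buffered when a
// program exits, so a caller should flush explicitly on shutdown; a minimal
// sketch (assumes the std "log" package and this package imported as logging):
func shutdownFlush(c *logging.Client) {
	// Block until everything buffered has been accepted by the API (or fail).
	if err := c.Flush(); err != nil {
		log.Printf("final log flush failed: %v", err)
	}
}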
-func NewClient(ctx context.Context, projectID, logName string, opts ...cloud.ClientOption) (*Client, error) { - httpClient, endpoint, err := transport.NewHTTPClient(ctx, append([]cloud.ClientOption{ - cloud.WithEndpoint(prodAddr), - cloud.WithScopes(api.CloudPlatformScope), - cloud.WithUserAgent(userAgent), - }, opts...)...) - if err != nil { - return nil, err - } - svc, err := api.New(httpClient) - if err != nil { - return nil, err - } - svc.BasePath = endpoint - c := &Client{ - svc: svc, - logs: api.NewProjectsLogsEntriesService(svc), - logName: logName, - projID: projectID, - } - for i := range c.writer { - level := Level(i) - c.writer[level] = levelWriter{level, c} - c.logger[level] = log.New(c.writer[level], "", 0) - } - return c, nil -} - -// flushCall is an in-flight or completed flush. -type flushCall struct { - donec chan struct{} // closed when response is in - err error // error is valid after wg is Done -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/logging/logging_test.go b/Godeps/_workspace/src/google.golang.org/cloud/logging/logging_test.go deleted file mode 100644 index f5ff9f7f55..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/logging/logging_test.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
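// A usage sketch for NewClient, mirroring how the tests below construct a
// client (the project and log names are illustrative; the tests override the
// endpoint and token source with the ClientOptions defined in option.go):
func newLogger(ctx context.Context) (*logging.Client, error) {
	c, err := logging.NewClient(ctx, "my-project", "my-log")
	if err != nil {
		return nil, err
	}
	c.Logger(logging.Info).Printf("logger ready") // buffered; flushed per the rules above
	return c, nil
}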
- -package logging - -import ( - "errors" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "sync" - "testing" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - - "google.golang.org/cloud" -) - -func TestLogPayload(t *testing.T) { - lt := newLogTest(t) - defer lt.ts.Close() - - tests := []struct { - name string - entry Entry - want string - }{ - { - name: "string", - entry: Entry{ - Time: time.Unix(0, 0), - Payload: "some log string", - }, - want: `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","timestamp":"1970-01-01T00:00:00Z"},"textPayload":"some log string"}]}`, - }, - { - name: "[]byte", - entry: Entry{ - Time: time.Unix(0, 0), - Payload: []byte("some log bytes"), - }, - want: `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","timestamp":"1970-01-01T00:00:00Z"},"textPayload":"some log bytes"}]}`, - }, - { - name: "struct", - entry: Entry{ - Time: time.Unix(0, 0), - Payload: struct { - Foo string `json:"foo"` - Bar int `json:"bar,omitempty"` - }{ - Foo: "foovalue", - }, - }, - want: `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","timestamp":"1970-01-01T00:00:00Z"},"structPayload":{"foo":"foovalue"}}]}`, - }, - { - name: "map[string]interface{}", - entry: Entry{ - Time: time.Unix(0, 0), - Payload: map[string]interface{}{ - "string": "foo", - "int": 42, - }, - }, - want: `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","timestamp":"1970-01-01T00:00:00Z"},"structPayload":{"int":42,"string":"foo"}}]}`, - }, - { - name: "map[string]interface{}", - entry: Entry{ - Time: time.Unix(0, 0), - Payload: customJSONObject{}, - }, - want: `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","timestamp":"1970-01-01T00:00:00Z"},"structPayload":{"custom":"json"}}]}`, - }, - } - for _, tt := range tests { - lt.startGetRequest() - if err := lt.c.LogSync(tt.entry); err != nil { - t.Errorf("%s: LogSync = %v", tt.name, err) - continue - } - got := lt.getRequest() - if got != tt.want { - t.Errorf("%s: mismatch\n got: %s\nwant: %s\n", tt.name, got, tt.want) - } - } -} - -func TestBufferInterval(t *testing.T) { - lt := newLogTest(t) - defer lt.ts.Close() - - lt.c.CommonLabels = map[string]string{ - "common1": "one", - "common2": "two", - } - lt.c.BufferInterval = 1 * time.Millisecond // immediately, basically. 
- lt.c.FlushAfter = 100 // but we'll only send 1 - - lt.startGetRequest() - lt.c.Logger(Debug).Printf("log line 1") - got := lt.getRequest() - want := `{"commonLabels":{"common1":"one","common2":"two"},"entries":[{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:01Z"},"textPayload":"log line 1\n"}]}` - if got != want { - t.Errorf(" got: %s\nwant: %s\n", got, want) - } -} - -func TestFlushAfter(t *testing.T) { - lt := newLogTest(t) - defer lt.ts.Close() - - lt.c.CommonLabels = map[string]string{ - "common1": "one", - "common2": "two", - } - lt.c.BufferInterval = getRequestTimeout * 2 - lt.c.FlushAfter = 2 - - lt.c.Logger(Debug).Printf("log line 1") - lt.startGetRequest() - lt.c.Logger(Debug).Printf("log line 2") - got := lt.getRequest() - want := `{"commonLabels":{"common1":"one","common2":"two"},"entries":[{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:01Z"},"textPayload":"log line 1\n"},{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:02Z"},"textPayload":"log line 2\n"}]}` - if got != want { - t.Errorf(" got: %s\nwant: %s\n", got, want) - } -} - -func TestFlush(t *testing.T) { - lt := newLogTest(t) - defer lt.ts.Close() - lt.c.BufferInterval = getRequestTimeout * 2 - lt.c.FlushAfter = 100 // but we'll only send 1, requiring a Flush - - lt.c.Logger(Debug).Printf("log line 1") - lt.startGetRequest() - if err := lt.c.Flush(); err != nil { - t.Fatal(err) - } - got := lt.getRequest() - want := `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:01Z"},"textPayload":"log line 1\n"}]}` - if got != want { - t.Errorf(" got: %s\nwant: %s\n", got, want) - } -} - -func TestOverflow(t *testing.T) { - lt := newLogTest(t) - defer lt.ts.Close() - - lt.c.FlushAfter = 1 - lt.c.BufferLimit = 5 - lt.c.BufferInterval = 1 * time.Millisecond // immediately, basically. 
- - someErr := errors.New("some specific error value") - lt.c.Overflow = func(c *Client, e Entry) error { - return someErr - } - - unblock := make(chan bool, 1) - inHandler := make(chan bool, 1) - lt.handlerc <- http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - inHandler <- true - <-unblock - ioutil.ReadAll(r.Body) - io.WriteString(w, "{}") // WriteLogEntriesResponse - }) - - lt.c.Logger(Debug).Printf("log line 1") - <-inHandler - lt.c.Logger(Debug).Printf("log line 2") - lt.c.Logger(Debug).Printf("log line 3") - lt.c.Logger(Debug).Printf("log line 4") - lt.c.Logger(Debug).Printf("log line 5") - - queued, inFlight := lt.c.stats() - if want := 4; queued != want { - t.Errorf("queued = %d; want %d", queued, want) - } - if want := 1; inFlight != want { - t.Errorf("inFlight = %d; want %d", inFlight, want) - } - - if err := lt.c.Log(Entry{Payload: "to overflow"}); err != someErr { - t.Errorf("Log(overflow Log entry) = %v; want someErr", err) - } - lt.startGetRequest() - unblock <- true - got := lt.getRequest() - want := `{"entries":[{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:02Z"},"textPayload":"log line 2\n"},{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:03Z"},"textPayload":"log line 3\n"},{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:04Z"},"textPayload":"log line 4\n"},{"metadata":{"serviceName":"custom.googleapis.com","severity":"DEBUG","timestamp":"1970-01-01T00:00:05Z"},"textPayload":"log line 5\n"}]}` - if got != want { - t.Errorf(" got: %s\nwant: %s\n", got, want) - } - if err := lt.c.Flush(); err != nil { - t.Fatal(err) - } - queued, inFlight = lt.c.stats() - if want := 0; queued != want { - t.Errorf("queued = %d; want %d", queued, want) - } - if want := 0; inFlight != want { - t.Errorf("inFlight = %d; want %d", inFlight, want) - } -} - -func (c *Client) stats() (queued, inFlight int) { - c.mu.Lock() - defer c.mu.Unlock() - return len(c.queued), c.inFlight -} - -type customJSONObject struct{} - -func (customJSONObject) MarshalJSON() ([]byte, error) { - return []byte(`{"custom":"json"}`), nil -} - -type logTest struct { - t *testing.T - ts *httptest.Server - c *Client - handlerc chan<- http.Handler - - bodyc chan string -} - -func newLogTest(t *testing.T) *logTest { - handlerc := make(chan http.Handler, 1) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - select { - case h := <-handlerc: - h.ServeHTTP(w, r) - default: - slurp, _ := ioutil.ReadAll(r.Body) - t.Errorf("Unexpected HTTP request received: %s", slurp) - w.WriteHeader(500) - io.WriteString(w, "unexpected HTTP request") - } - })) - c, err := NewClient(context.Background(), "PROJ-ID", "LOG-NAME", - cloud.WithEndpoint(ts.URL), - cloud.WithTokenSource(dummyTokenSource{}), // prevent DefaultTokenSource - ) - if err != nil { - t.Fatal(err) - } - var clock struct { - sync.Mutex - now time.Time - } - c.timeNow = func() time.Time { - clock.Lock() - defer clock.Unlock() - if clock.now.IsZero() { - clock.now = time.Unix(0, 0) - } - clock.now = clock.now.Add(1 * time.Second) - return clock.now - } - return &logTest{ - t: t, - ts: ts, - c: c, - handlerc: handlerc, - } -} - -func (lt *logTest) startGetRequest() { - bodyc := make(chan string, 1) - lt.bodyc = bodyc - lt.handlerc <- http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - slurp, err := ioutil.ReadAll(r.Body) - if err != nil { - bodyc <- "ERROR: " + 
err.Error() - } else { - bodyc <- string(slurp) - } - io.WriteString(w, "{}") // a complete WriteLogEntriesResponse JSON struct - }) -} - -const getRequestTimeout = 5 * time.Second - -func (lt *logTest) getRequest() string { - if lt.bodyc == nil { - lt.t.Fatalf("getRequest called without previous startGetRequest") - } - select { - case v := <-lt.bodyc: - return strings.TrimSpace(v) - case <-time.After(getRequestTimeout): - lt.t.Fatalf("timeout waiting for request") - panic("unreachable") - } -} - -// dummyTokenSource returns fake oauth2 tokens for local testing. -type dummyTokenSource struct{} - -func (dummyTokenSource) Token() (*oauth2.Token, error) { - return new(oauth2.Token), nil -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/option.go b/Godeps/_workspace/src/google.golang.org/cloud/option.go deleted file mode 100644 index d4a5aea294..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/option.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cloud - -import ( - "net/http" - - "golang.org/x/oauth2" - "google.golang.org/cloud/internal/opts" - "google.golang.org/grpc" -) - -// ClientOption is used when constructing clients for each cloud service. -type ClientOption interface { - // Resolve configures the given DialOpts for this option. - Resolve(*opts.DialOpt) -} - -// WithTokenSource returns a ClientOption that specifies an OAuth2 token -// source to be used as the basis for authentication. -func WithTokenSource(s oauth2.TokenSource) ClientOption { - return withTokenSource{s} -} - -type withTokenSource struct{ ts oauth2.TokenSource } - -func (w withTokenSource) Resolve(o *opts.DialOpt) { - o.TokenSource = w.ts -} - -// WithEndpoint returns a ClientOption that overrides the default endpoint -// to be used for a service. -func WithEndpoint(url string) ClientOption { - return withEndpoint(url) -} - -type withEndpoint string - -func (w withEndpoint) Resolve(o *opts.DialOpt) { - o.Endpoint = string(w) -} - -// WithScopes returns a ClientOption that overrides the default OAuth2 scopes -// to be used for a service. -func WithScopes(scope ...string) ClientOption { - return withScopes(scope) -} - -type withScopes []string - -func (w withScopes) Resolve(o *opts.DialOpt) { - o.Scopes = []string(w) -} - -// WithUserAgent returns a ClientOption that sets the User-Agent. -func WithUserAgent(ua string) ClientOption { - return withUA(ua) -} - -type withUA string - -func (w withUA) Resolve(o *opts.DialOpt) { o.UserAgent = string(w) } - -// WithBaseHTTP returns a ClientOption that specifies the HTTP client to -// use as the basis of communications. This option may only be used with -// services that support HTTP as their communication transport.
-func WithBaseHTTP(client *http.Client) ClientOption { - return withBaseHTTP{client} -} - -type withBaseHTTP struct{ client *http.Client } - -func (w withBaseHTTP) Resolve(o *opts.DialOpt) { - o.HTTPClient = w.client -} - -// WithBaseGRPC returns a ClientOption that specifies the GRPC client -// connection to use as the basis of communications. This option may only be -// used with services that support gRPC as their communication transport. -func WithBaseGRPC(client *grpc.ClientConn) ClientOption { - return withBaseGRPC{client} -} - -type withBaseGRPC struct{ client *grpc.ClientConn } - -func (w withBaseGRPC) Resolve(o *opts.DialOpt) { - o.GRPCClient = w.client -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/pubsub/example_test.go b/Godeps/_workspace/src/google.golang.org/cloud/pubsub/example_test.go deleted file mode 100644 index 939ca18231..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/pubsub/example_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pubsub_test - -import ( - "io/ioutil" - "log" - "testing" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "google.golang.org/cloud" - "google.golang.org/cloud/pubsub" -) - -// TODO(jbd): Remove after Go 1.4. -// Related to https://codereview.appspot.com/107320046 -func TestA(t *testing.T) {} - -func Example_auth() context.Context { - // Initialize an authorized context with Google Developers Console - // JSON key. Read the google package examples to learn more about - // different authorization flows you can use. - // http://godoc.org/golang.org/x/oauth2/google - jsonKey, err := ioutil.ReadFile("/path/to/json/keyfile.json") - if err != nil { - log.Fatal(err) - } - conf, err := google.JWTConfigFromJSON( - jsonKey, - pubsub.ScopeCloudPlatform, - pubsub.ScopePubSub, - ) - if err != nil { - log.Fatal(err) - } - ctx := cloud.NewContext("project-id", conf.Client(oauth2.NoContext)) - // See the other samples to learn how to use the context. - return ctx -} - -func ExamplePublish() { - ctx := Example_auth() - - msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{ - Data: []byte("hello world"), - }) - if err != nil { - log.Fatal(err) - } - log.Printf("Published a message with a message id: %s\n", msgIDs[0]) -} - -func ExamplePull() { - ctx := Example_auth() - - // E.g.
c.CreateSub("sub1", "topic1", time.Duration(0), "") - msgs, err := pubsub.Pull(ctx, "sub1", 1) - if err != nil { - log.Fatal(err) - } - log.Printf("New message arrived: %v\n", msgs[0]) - if err := pubsub.Ack(ctx, "sub1", msgs[0].AckID); err != nil { - log.Fatal(err) - } - log.Println("Acknowledged message") -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/pubsub/integration_test.go b/Godeps/_workspace/src/google.golang.org/cloud/pubsub/integration_test.go deleted file mode 100644 index 9ce68af9d6..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/pubsub/integration_test.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build integration - -package pubsub - -import ( - "fmt" - "testing" - "time" - - "google.golang.org/cloud/internal/testutil" -) - -func TestAll(t *testing.T) { - ctx := testutil.Context(ScopePubSub, ScopeCloudPlatform) - now := time.Now() - topic := fmt.Sprintf("topic-%d", now.Unix()) - subscription := fmt.Sprintf("subscription-%d", now.Unix()) - - if err := CreateTopic(ctx, topic); err != nil { - t.Errorf("CreateTopic error: %v", err) - } - - if err := CreateSub(ctx, subscription, topic, time.Duration(0), ""); err != nil { - t.Errorf("CreateSub error: %v", err) - } - - exists, err := TopicExists(ctx, topic) - if err != nil { - t.Fatalf("TopicExists error: %v", err) - } - if !exists { - t.Errorf("topic %s should exist, but it doesn't", topic) - } - - exists, err = SubExists(ctx, subscription) - if err != nil { - t.Fatalf("SubExists error: %v", err) - } - if !exists { - t.Errorf("subscription %s should exist, but it doesn't", subscription) - } - - max := 10 - msgs := make([]*Message, max) - expectedMsgs := make(map[string]bool, max) - for i := 0; i < max; i++ { - text := fmt.Sprintf("a message with an index %d", i) - attrs := make(map[string]string) - attrs["foo"] = "bar" - msgs[i] = &Message{ - Data: []byte(text), - Attributes: attrs, - } - expectedMsgs[text] = false - } - - ids, err := Publish(ctx, topic, msgs...) 
- if err != nil { - t.Fatalf("Publish (1) error: %v", err) - } - - if len(ids) != max { - t.Errorf("unexpected number of message IDs received; %d, want %d", len(ids), max) - } - - expectedIDs := make(map[string]bool, max) - for _, id := range ids { - expectedIDs[id] = false - } - - received, err := PullWait(ctx, subscription, max) - if err != nil { - t.Fatalf("PullWait error: %v", err) - } - - if len(received) != max { - t.Errorf("unexpected number of messages received; %d, want %d", len(received), max) - } - - for _, msg := range received { - expectedMsgs[string(msg.Data)] = true - expectedIDs[msg.ID] = true - if msg.Attributes["foo"] != "bar" { - t.Errorf("message attribute foo is expected to be 'bar', found '%s'", msg.Attributes["foo"]) - } - } - - for msg, found := range expectedMsgs { - if !found { - t.Errorf("message '%s' should be received", msg) - } - } - - for id, found := range expectedIDs { - if !found { - t.Errorf("message with the message id '%s' should be received", id) - } - } - - // base64 test - data := "=@~" - msg := &Message{ - Data: []byte(data), - } - _, err = Publish(ctx, topic, msg) - if err != nil { - t.Fatalf("Publish (2) error: %v", err) - } - - received, err = PullWait(ctx, subscription, 1) - if err != nil { - t.Fatalf("PullWait error: %v", err) - } - if len(received) != 1 { - t.Fatalf("unexpected number of messages received; %d, want %d", len(received), 1) - } - if string(received[0].Data) != data { - t.Errorf("unexpexted message received; %s, want %s", string(received[0].Data), data) - } - - err = DeleteSub(ctx, subscription) - if err != nil { - t.Errorf("DeleteSub error: %v", err) - } - - err = DeleteTopic(ctx, topic) - if err != nil { - t.Errorf("DeleteTopic error: %v", err) - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/pubsub/pubsub.go b/Godeps/_workspace/src/google.golang.org/cloud/pubsub/pubsub.go deleted file mode 100644 index 23d4ce01bc..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/pubsub/pubsub.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pubsub contains a Google Cloud Pub/Sub client. -// -// This package is experimental and may make backwards-incompatible changes. -// -// More information about Google Cloud Pub/Sub is available at -// https://cloud.google.com/pubsub/docs -package pubsub // import "google.golang.org/cloud/pubsub" - -import ( - "encoding/base64" - "errors" - "fmt" - "net/http" - "time" - - "google.golang.org/api/googleapi" - raw "google.golang.org/api/pubsub/v1beta2" - "google.golang.org/cloud" - "google.golang.org/cloud/internal" - "google.golang.org/cloud/internal/transport" - - "golang.org/x/net/context" -) - -const ( - // ScopePubSub grants permissions to view and manage Pub/Sub - // topics and subscriptions. - ScopePubSub = "https://www.googleapis.com/auth/pubsub" - - // ScopeCloudPlatform grants permissions to view and manage your data - // across Google Cloud Platform services. 
- ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" -) - -const prodAddr = "https://pubsub.googleapis.com/" -const userAgent = "gcloud-golang-pubsub/20151008" - -// batchLimit is maximun size of a single batch. -const batchLimit = 1000 - -// Message represents a Pub/Sub message. -type Message struct { - // ID identifies this message. - ID string - - // AckID is the identifier to acknowledge this message. - AckID string - - // Data is the actual data in the message. - Data []byte - - // Attributes represents the key-value pairs the current message - // is labelled with. - Attributes map[string]string -} - -// Client is a Google Pub/Sub client, which may be used to perform Pub/Sub operations with a project. -// Note: Some operations are not yet available via Client, and must be performed via the legacy standalone functions. -// It must be constructed via NewClient. -type Client struct { - projectID string - s *raw.Service -} - -// NewClient create a new PubSub client. -func NewClient(ctx context.Context, projectID string, opts ...cloud.ClientOption) (*Client, error) { - o := []cloud.ClientOption{ - cloud.WithEndpoint(prodAddr), - cloud.WithScopes(raw.PubsubScope, raw.CloudPlatformScope), - cloud.WithUserAgent(userAgent), - } - o = append(o, opts...) - httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...) - if err != nil { - return nil, fmt.Errorf("dialing: %v", err) - } - s, err := raw.New(httpClient) - if err != nil { - return nil, err - } - - s.BasePath = endpoint - c := &Client{ - projectID: projectID, - s: s, - } - - return c, nil -} - -// TODO(jbd): Add subscription and topic listing. - -// CreateSub creates a Pub/Sub subscription on the backend. -// A subscription should subscribe to an existing topic. -// -// The messages that haven't acknowledged will be pushed back to the -// subscription again when the default acknowledgement deadline is -// reached. You can override the default deadline by providing a -// non-zero deadline. Deadline must not be specified to -// precision greater than one second. -// -// As new messages are being queued on the subscription, you -// may recieve push notifications regarding to the new arrivals. -// To receive notifications of new messages in the queue, -// specify an endpoint callback URL. -// If endpoint is an empty string the backend will not notify the -// client of new messages. -// -// If the subscription already exists an error will be returned. -func CreateSub(ctx context.Context, name string, topic string, deadline time.Duration, endpoint string) error { - sub := &raw.Subscription{ - Topic: fullTopicName(internal.ProjID(ctx), topic), - } - if int64(deadline) > 0 { - if !isSec(deadline) { - return errors.New("pubsub: deadline must not be specified to precision greater than one second") - } - sub.AckDeadlineSeconds = int64(deadline / time.Second) - } - if endpoint != "" { - sub.PushConfig = &raw.PushConfig{PushEndpoint: endpoint} - } - _, err := rawService(ctx).Projects.Subscriptions.Create(fullSubName(internal.ProjID(ctx), name), sub).Do() - return err -} - -// DeleteSub deletes the subscription. -func DeleteSub(ctx context.Context, name string) error { - _, err := rawService(ctx).Projects.Subscriptions.Delete(fullSubName(internal.ProjID(ctx), name)).Do() - return err -} - -// ModifyAckDeadline modifies the acknowledgement deadline -// for the messages retrieved from the specified subscription. -// Deadline must not be specified to precision greater than one second. 
-func ModifyAckDeadline(ctx context.Context, sub string, id string, deadline time.Duration) error {
-	if !isSec(deadline) {
-		return errors.New("pubsub: deadline must not be specified to precision greater than one second")
-	}
-	_, err := rawService(ctx).Projects.Subscriptions.ModifyAckDeadline(fullSubName(internal.ProjID(ctx), sub), &raw.ModifyAckDeadlineRequest{
-		AckDeadlineSeconds: int64(deadline / time.Second),
-		AckId:              id,
-	}).Do()
-	return err
-}
-
-// ModifyPushEndpoint modifies the URL endpoint that handles push
-// notifications coming from the Pub/Sub backend for the specified
-// subscription.
-func ModifyPushEndpoint(ctx context.Context, sub, endpoint string) error {
-	_, err := rawService(ctx).Projects.Subscriptions.ModifyPushConfig(fullSubName(internal.ProjID(ctx), sub), &raw.ModifyPushConfigRequest{
-		PushConfig: &raw.PushConfig{
-			PushEndpoint: endpoint,
-		},
-	}).Do()
-	return err
-}
-
-// SubExists returns true if the subscription exists.
-func SubExists(ctx context.Context, name string) (bool, error) {
-	_, err := rawService(ctx).Projects.Subscriptions.Get(fullSubName(internal.ProjID(ctx), name)).Do()
-	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
-		return false, nil
-	}
-	if err != nil {
-		return false, err
-	}
-	return true, nil
-}
-
-// Ack acknowledges one or more Pub/Sub messages on the
-// specified subscription.
-func Ack(ctx context.Context, sub string, id ...string) error {
-	for idx, ackID := range id {
-		if ackID == "" {
-			return fmt.Errorf("pubsub: empty ackID detected at index %d", idx)
-		}
-	}
-	_, err := rawService(ctx).Projects.Subscriptions.Acknowledge(fullSubName(internal.ProjID(ctx), sub), &raw.AcknowledgeRequest{
-		AckIds: id,
-	}).Do()
-	return err
-}
-
-func toMessage(resp *raw.ReceivedMessage) (*Message, error) {
-	if resp.Message == nil {
-		return &Message{AckID: resp.AckId}, nil
-	}
-	data, err := base64.StdEncoding.DecodeString(resp.Message.Data)
-	if err != nil {
-		return nil, err
-	}
-	return &Message{
-		AckID:      resp.AckId,
-		Data:       data,
-		Attributes: resp.Message.Attributes,
-		ID:         resp.Message.MessageId,
-	}, nil
-}
-
-// Pull pulls messages from the subscription. It returns up to n
-// messages; n may not be larger than the batch limit of 1000.
-func Pull(ctx context.Context, sub string, n int) ([]*Message, error) {
-	return pull(ctx, sub, n, true)
-}
-
-// PullWait pulls messages from the subscription. If there are not
-// enough messages left in the subscription queue, it blocks until
-// at least n messages arrive or a timeout occurs; n may not be
-// larger than the batch limit of 1000.
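// Editorial sketch, not part of the original file: a pull/ack cycle built on
// the helpers above. Assumes the context, fmt, time, and
// google.golang.org/cloud/pubsub imports; the subscription name and the
// 10-message batch size are hypothetical.
func drainOnce(ctx context.Context, sub string) error {
	ok, err := pubsub.SubExists(ctx, sub)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("subscription %q does not exist", sub)
	}
	// Pull returns immediately, with up to 10 messages.
	msgs, err := pubsub.Pull(ctx, sub, 10)
	if err != nil {
		return err
	}
	for _, m := range msgs {
		// Buy more processing time, then acknowledge. Both calls take
		// the AckID, not the message ID.
		if err := pubsub.ModifyAckDeadline(ctx, sub, m.AckID, time.Minute); err != nil {
			return err
		}
		if err := pubsub.Ack(ctx, sub, m.AckID); err != nil {
			return err
		}
	}
	return nil
}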
-func PullWait(ctx context.Context, sub string, n int) ([]*Message, error) {
-	return pull(ctx, sub, n, false)
-}
-
-func pull(ctx context.Context, sub string, n int, retImmediately bool) ([]*Message, error) {
-	if n < 1 || n > batchLimit {
-		return nil, fmt.Errorf("pubsub: number of messages to pull must be between 1 and %d, but %d was given", batchLimit, n)
-	}
-	resp, err := rawService(ctx).Projects.Subscriptions.Pull(fullSubName(internal.ProjID(ctx), sub), &raw.PullRequest{
-		ReturnImmediately: retImmediately,
-		MaxMessages:       int64(n),
-	}).Do()
-	if err != nil {
-		return nil, err
-	}
-	msgs := make([]*Message, len(resp.ReceivedMessages))
-	for i := 0; i < len(resp.ReceivedMessages); i++ {
-		msg, err := toMessage(resp.ReceivedMessages[i])
-		if err != nil {
-			return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, PullResponse: %+v", i, resp.ReceivedMessages[i])
-		}
-		msgs[i] = msg
-	}
-	return msgs, nil
-}
-
-// CreateTopic creates a new topic with the specified name on the backend.
-// It will return an error if the topic already exists.
-func CreateTopic(ctx context.Context, name string) error {
-	_, err := rawService(ctx).Projects.Topics.Create(fullTopicName(internal.ProjID(ctx), name), &raw.Topic{}).Do()
-	return err
-}
-
-// DeleteTopic deletes the specified topic.
-func DeleteTopic(ctx context.Context, name string) error {
-	_, err := rawService(ctx).Projects.Topics.Delete(fullTopicName(internal.ProjID(ctx), name)).Do()
-	return err
-}
-
-// TopicExists returns true if a topic exists with the specified name.
-func TopicExists(ctx context.Context, name string) (bool, error) {
-	_, err := rawService(ctx).Projects.Topics.Get(fullTopicName(internal.ProjID(ctx), name)).Do()
-	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
-		return false, nil
-	}
-	if err != nil {
-		return false, err
-	}
-	return true, nil
-}
-
-// Publish publishes messages to the topic's subscribers. It returns
-// message IDs upon success.
-func Publish(ctx context.Context, topic string, msgs ...*Message) ([]string, error) {
-	var rawMsgs []*raw.PubsubMessage
-	if len(msgs) == 0 {
-		return nil, errors.New("pubsub: no messages to publish")
-	}
-	if len(msgs) > batchLimit {
-		return nil, fmt.Errorf("pubsub: %d messages given, but maximum batch size is %d", len(msgs), batchLimit)
-	}
-	rawMsgs = make([]*raw.PubsubMessage, len(msgs))
-	for i, msg := range msgs {
-		rawMsgs[i] = &raw.PubsubMessage{
-			Data:       base64.StdEncoding.EncodeToString(msg.Data),
-			Attributes: msg.Attributes,
-		}
-	}
-	resp, err := rawService(ctx).Projects.Topics.Publish(fullTopicName(internal.ProjID(ctx), topic), &raw.PublishRequest{
-		Messages: rawMsgs,
-	}).Do()
-	if err != nil {
-		return nil, err
-	}
-	return resp.MessageIds, nil
-}
-
-// fullSubName returns the fully qualified name for a subscription.
-// E.g. projects/project-id/subscriptions/subscription-name.
-func fullSubName(proj, name string) string {
-	return fmt.Sprintf("projects/%s/subscriptions/%s", proj, name)
-}
-
-// fullTopicName returns the fully qualified name for a topic.
-// E.g. projects/project-id/topics/topic-name.
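// Editorial sketch, not part of the original file: a publish/pull round trip
// mirroring the integration test at the top of this diff. Topic and
// subscription names are hypothetical; ctx is assumed to come from
// cloud.NewContext with an authorized client, and the log, context, and
// pubsub imports are assumed.
func roundTrip(ctx context.Context) error {
	if err := pubsub.CreateTopic(ctx, "sketch-topic"); err != nil {
		return err
	}
	// A zero deadline keeps the backend's default ack deadline.
	if err := pubsub.CreateSub(ctx, "sketch-sub", "sketch-topic", 0, ""); err != nil {
		return err
	}
	ids, err := pubsub.Publish(ctx, "sketch-topic", &pubsub.Message{
		Data:       []byte("hello"),
		Attributes: map[string]string{"origin": "sketch"},
	})
	if err != nil {
		return err
	}
	log.Printf("published message IDs: %v", ids)
	msgs, err := pubsub.PullWait(ctx, "sketch-sub", 1)
	if err != nil {
		return err
	}
	return pubsub.Ack(ctx, "sketch-sub", msgs[0].AckID)
}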
-func fullTopicName(proj, name string) string {
-	return fmt.Sprintf("projects/%s/topics/%s", proj, name)
-}
-
-func isSec(dur time.Duration) bool {
-	return dur%time.Second == 0
-}
-
-func rawService(ctx context.Context) *raw.Service {
-	return internal.Service(ctx, "pubsub", func(hc *http.Client) interface{} {
-		svc, _ := raw.New(hc)
-		return svc
-	}).(*raw.Service)
-}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/pubsub/pubsub_test.go b/Godeps/_workspace/src/google.golang.org/cloud/pubsub/pubsub_test.go
deleted file mode 100644
index df73f11f2b..0000000000
--- a/Godeps/_workspace/src/google.golang.org/cloud/pubsub/pubsub_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pubsub
-
-import (
-	"net/http"
-	"strings"
-	"testing"
-	"time"
-
-	"google.golang.org/cloud"
-)
-
-func TestIsSec(t *testing.T) {
-	tests := map[time.Duration]bool{
-		time.Second:                    true,
-		5 * time.Second:                true,
-		time.Hour:                      true,
-		time.Millisecond:               false,
-		time.Second + time.Microsecond: false,
-	}
-	for dur, expected := range tests {
-		if isSec(dur) != expected {
-			t.Errorf("isSec(%v) = %v, want %v", dur, !expected, expected)
-		}
-	}
-}
-
-func TestEmptyAckID(t *testing.T) {
-	ctx := cloud.NewContext("project-id", &http.Client{})
-	id := []string{"test", ""}
-	err := Ack(ctx, "sub", id...)
-
-	if err == nil || !strings.Contains(err.Error(), "index 1") {
-		t.Errorf("Ack should report an error indicating the id is empty. Got: %v", err)
-	}
-}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/acl.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/acl.go
deleted file mode 100644
index 71c5800a86..0000000000
--- a/Godeps/_workspace/src/google.golang.org/cloud/storage/acl.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package storage
-
-import (
-	"fmt"
-
-	"golang.org/x/net/context"
-	raw "google.golang.org/api/storage/v1"
-)
-
-// ACLRole is the access permission for the entity.
-type ACLRole string
-
-const (
-	RoleOwner  ACLRole = "OWNER"
-	RoleReader ACLRole = "READER"
-)
-
-// ACLEntity is an entity holding an ACL permission.
-//
-// It could be in the form of:
-// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
-// "domain-<domain>" and "project-team-<projectId>".
-//
-// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
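// Editorial sketch, not part of the original file: applying a default object
// ACL with the helpers defined just below. The bucket name and domain entity
// are hypothetical; ctx is assumed to be an authorized cloud.NewContext, and
// the log and storage imports are assumed.
func shareByDefault(ctx context.Context, bucket string) error {
	// New objects in the bucket become readable by the hypothetical domain.
	entity := storage.ACLEntity("domain-example.com")
	if err := storage.PutDefaultACLRule(ctx, bucket, entity, storage.RoleReader); err != nil {
		return err
	}
	rules, err := storage.DefaultACL(ctx, bucket)
	if err != nil {
		return err
	}
	for _, r := range rules {
		log.Printf("default ACL: entity=%q role=%q", r.Entity, r.Role)
	}
	return nil
}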
-type ACLEntity string
-
-const (
-	AllUsers              ACLEntity = "allUsers"
-	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
-)
-
-// ACLRule represents an access control list rule entry for a Google Cloud Storage object or bucket.
-// A bucket is a Google Cloud Storage container whose name is globally unique and contains zero or
-// more objects. An object is a blob of data that is stored in a bucket.
-type ACLRule struct {
-	// Entity identifies the entity holding the current rule's permissions.
-	Entity ACLEntity
-
-	// Role is the access permission for the entity.
-	Role ACLRole
-}
-
-// DefaultACL returns the default object ACL entries for the named bucket.
-func DefaultACL(ctx context.Context, bucket string) ([]ACLRule, error) {
-	acls, err := rawService(ctx).DefaultObjectAccessControls.List(bucket).Context(ctx).Do()
-	if err != nil {
-		return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", bucket, err)
-	}
-	r := make([]ACLRule, 0, len(acls.Items))
-	for _, v := range acls.Items {
-		if m, ok := v.(map[string]interface{}); ok {
-			entity, ok1 := m["entity"].(string)
-			role, ok2 := m["role"].(string)
-			if ok1 && ok2 {
-				r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)})
-			}
-		}
-	}
-	return r, nil
-}
-
-// PutDefaultACLRule saves the named default object ACL entity with the provided role for the named bucket.
-func PutDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error {
-	acl := &raw.ObjectAccessControl{
-		Bucket: bucket,
-		Entity: string(entity),
-		Role:   string(role),
-	}
-	_, err := rawService(ctx).DefaultObjectAccessControls.Update(bucket, string(entity), acl).Context(ctx).Do()
-	if err != nil {
-		return fmt.Errorf("storage: error updating default ACL rule for bucket %q, entity %q: %v", bucket, entity, err)
-	}
-	return nil
-}
-
-// DeleteDefaultACLRule deletes the named default ACL entity for the named bucket.
-func DeleteDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity) error {
-	err := rawService(ctx).DefaultObjectAccessControls.Delete(bucket, string(entity)).Context(ctx).Do()
-	if err != nil {
-		return fmt.Errorf("storage: error deleting default ACL rule for bucket %q, entity %q: %v", bucket, entity, err)
-	}
-	return nil
-}
-
-// BucketACL returns the ACL entries for the named bucket.
-func BucketACL(ctx context.Context, bucket string) ([]ACLRule, error) {
-	acls, err := rawService(ctx).BucketAccessControls.List(bucket).Context(ctx).Do()
-	if err != nil {
-		return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", bucket, err)
-	}
-	r := make([]ACLRule, len(acls.Items))
-	for i, v := range acls.Items {
-		r[i].Entity = ACLEntity(v.Entity)
-		r[i].Role = ACLRole(v.Role)
-	}
-	return r, nil
-}
-
-// PutBucketACLRule saves the named ACL entity with the provided role for the named bucket.
-func PutBucketACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error {
-	acl := &raw.BucketAccessControl{
-		Bucket: bucket,
-		Entity: string(entity),
-		Role:   string(role),
-	}
-	_, err := rawService(ctx).BucketAccessControls.Update(bucket, string(entity), acl).Context(ctx).Do()
-	if err != nil {
-		return fmt.Errorf("storage: error updating bucket ACL rule for bucket %q, entity %q: %v", bucket, entity, err)
-	}
-	return nil
-}
-
-// DeleteBucketACLRule deletes the named ACL entity for the named bucket.
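// Editorial sketch, not part of the original file: granting and then revoking
// a bucket-level ACL entry, as the deleted integration test below does with
// "user-jbd@google.com". The user entity here is hypothetical; the log and
// storage imports are assumed.
func grantThenRevoke(ctx context.Context, bucket string) error {
	entity := storage.ACLEntity("user-someone@example.com")
	if err := storage.PutBucketACLRule(ctx, bucket, entity, storage.RoleReader); err != nil {
		return err
	}
	rules, err := storage.BucketACL(ctx, bucket)
	if err != nil {
		return err
	}
	log.Printf("bucket now has %d ACL rules", len(rules))
	return storage.DeleteBucketACLRule(ctx, bucket, entity)
}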
-func DeleteBucketACLRule(ctx context.Context, bucket string, entity ACLEntity) error { - err := rawService(ctx).BucketAccessControls.Delete(bucket, string(entity)).Context(ctx).Do() - if err != nil { - return fmt.Errorf("storage: error deleting bucket ACL rule for bucket %q, entity %q: %v", bucket, entity, err) - } - return nil -} - -// ACL returns the ACL entries for the named object. -func ACL(ctx context.Context, bucket, object string) ([]ACLRule, error) { - acls, err := rawService(ctx).ObjectAccessControls.List(bucket, object).Context(ctx).Do() - if err != nil { - return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", bucket, object, err) - } - r := make([]ACLRule, 0, len(acls.Items)) - for _, v := range acls.Items { - if m, ok := v.(map[string]interface{}); ok { - entity, ok1 := m["entity"].(string) - role, ok2 := m["role"].(string) - if ok1 && ok2 { - r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)}) - } - } - } - return r, nil -} - -// PutACLRule saves the named ACL entity with the provided role for the named object. -func PutACLRule(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole) error { - acl := &raw.ObjectAccessControl{ - Bucket: bucket, - Entity: string(entity), - Role: string(role), - } - _, err := rawService(ctx).ObjectAccessControls.Update(bucket, object, string(entity), acl).Context(ctx).Do() - if err != nil { - return fmt.Errorf("storage: error updating object ACL rule for bucket %q, file %q, entity %q: %v", bucket, object, entity, err) - } - return nil -} - -// DeleteACLRule deletes the named ACL entity for the named object. -func DeleteACLRule(ctx context.Context, bucket, object string, entity ACLEntity) error { - err := rawService(ctx).ObjectAccessControls.Delete(bucket, object, string(entity)).Context(ctx).Do() - if err != nil { - return fmt.Errorf("storage: error deleting object ACL rule for bucket %q, file %q, entity %q: %v", bucket, object, entity, err) - } - return nil -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/example_test.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/example_test.go deleted file mode 100644 index 38c543021c..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/storage/example_test.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage_test - -import ( - "io/ioutil" - "log" - "testing" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "google.golang.org/cloud" - "google.golang.org/cloud/storage" -) - -// TODO(jbd): Remove after Go 1.4. -// Related to https://codereview.appspot.com/107320046 -func TestA(t *testing.T) {} - -func Example_auth() context.Context { - // Initialize an authorized context with Google Developers Console - // JSON key. Read the google package examples to learn more about - // different authorization flows you can use. 
- // http://godoc.org/golang.org/x/oauth2/google - jsonKey, err := ioutil.ReadFile("/path/to/json/keyfile.json") - if err != nil { - log.Fatal(err) - } - conf, err := google.JWTConfigFromJSON( - jsonKey, - storage.ScopeFullControl, - ) - if err != nil { - log.Fatal(err) - } - ctx := cloud.NewContext("project-id", conf.Client(oauth2.NoContext)) - // Use the context (see other examples) - return ctx -} - -func ExampleListObjects() { - ctx := Example_auth() - - var query *storage.Query - for { - // If you are using this package on App Engine Managed VMs runtime, - // you can init a bucket client with your app's default bucket name. - // See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName. - objects, err := storage.ListObjects(ctx, "bucketname", query) - if err != nil { - log.Fatal(err) - } - for _, obj := range objects.Results { - log.Printf("object name: %s, size: %v", obj.Name, obj.Size) - } - // if there are more results, objects.Next - // will be non-nil. - query = objects.Next - if query == nil { - break - } - } - - log.Println("paginated through all object items in the bucket you specified.") -} - -func ExampleNewReader() { - ctx := Example_auth() - - rc, err := storage.NewReader(ctx, "bucketname", "filename1") - if err != nil { - log.Fatal(err) - } - slurp, err := ioutil.ReadAll(rc) - rc.Close() - if err != nil { - log.Fatal(err) - } - - log.Println("file contents:", slurp) -} - -func ExampleNewWriter() { - ctx := Example_auth() - - wc := storage.NewWriter(ctx, "bucketname", "filename1") - wc.ContentType = "text/plain" - wc.ACL = []storage.ACLRule{{storage.AllUsers, storage.RoleReader}} - if _, err := wc.Write([]byte("hello world")); err != nil { - log.Fatal(err) - } - if err := wc.Close(); err != nil { - log.Fatal(err) - } - log.Println("updated object:", wc.Object()) -} - -func ExampleCopyObject() { - ctx := Example_auth() - - o, err := storage.CopyObject(ctx, "bucketname", "file1", "another-bucketname", "file2", nil) - if err != nil { - log.Fatal(err) - } - log.Println("copied file:", o) -} - -func ExampleDeleteObject() { - // To delete multiple objects in a bucket, first ListObjects then delete them. - ctx := Example_auth() - - // If you are using this package on App Engine Managed VMs runtime, - // you can init a bucket client with your app's default bucket name. - // See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName. - const bucket = "bucketname" - - var query *storage.Query // Set up query as desired. - for { - objects, err := storage.ListObjects(ctx, bucket, query) - if err != nil { - log.Fatal(err) - } - for _, obj := range objects.Results { - log.Printf("deleting object name: %q, size: %v", obj.Name, obj.Size) - if err := storage.DeleteObject(ctx, bucket, obj.Name); err != nil { - log.Fatalf("unable to delete %q: %v", obj.Name, err) - } - } - // if there are more results, objects.Next will be non-nil. - query = objects.Next - if query == nil { - break - } - } - - log.Println("deleted all object items in the bucket you specified.") -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/integration_test.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/integration_test.go deleted file mode 100644 index 9c691964f6..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/storage/integration_test.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build integration - -package storage - -import ( - "bytes" - "crypto/md5" - "fmt" - "io" - "io/ioutil" - "math/rand" - "os" - "testing" - "time" - - "google.golang.org/cloud/internal" - "google.golang.org/cloud/internal/testutil" -) - -var ( - bucket string - contents = make(map[string][]byte) - objects = []string{"obj1", "obj2", "obj/with/slashes"} - aclObjects = []string{"acl1", "acl2"} - copyObj = "copy-object" -) - -const envBucket = "GCLOUD_TESTS_GOLANG_PROJECT_ID" - -func TestObjects(t *testing.T) { - ctx := testutil.Context(ScopeFullControl) - bucket = os.Getenv(envBucket) - - // Cleanup. - cleanup(t, "obj") - - const defaultType = "text/plain" - - // Test Writer. - for _, obj := range objects { - t.Logf("Writing %v", obj) - wc := NewWriter(ctx, bucket, obj) - wc.ContentType = defaultType - c := randomContents() - if _, err := wc.Write(c); err != nil { - t.Errorf("Write for %v failed with %v", obj, err) - } - if err := wc.Close(); err != nil { - t.Errorf("Close for %v failed with %v", obj, err) - } - contents[obj] = c - } - - // Test Reader. - for _, obj := range objects { - t.Logf("Creating a reader to read %v", obj) - rc, err := NewReader(ctx, bucket, obj) - if err != nil { - t.Errorf("Can't create a reader for %v, errored with %v", obj, err) - } - slurp, err := ioutil.ReadAll(rc) - if err != nil { - t.Errorf("Can't ReadAll object %v, errored with %v", obj, err) - } - if got, want := slurp, contents[obj]; !bytes.Equal(got, want) { - t.Errorf("Contents (%v) = %q; want %q", obj, got, want) - } - rc.Close() - - // Test SignedURL - opts := &SignedURLOptions{ - GoogleAccessID: "xxx@clientid", - PrivateKey: dummyKey("rsa"), - Method: "GET", - MD5: []byte("202cb962ac59075b964b07152d234b70"), - Expires: time.Date(2020, time.October, 2, 10, 0, 0, 0, time.UTC), - ContentType: "application/json", - Headers: []string{"x-header1", "x-header2"}, - } - u, err := SignedURL(bucket, obj, opts) - if err != nil { - t.Fatalf("SignedURL(%q, %q) errored with %v", bucket, obj, err) - } - hc := internal.HTTPClient(ctx) - res, err := hc.Get(u) - if err != nil { - t.Fatalf("Can't get URL %q: %v", u, err) - } - slurp, err = ioutil.ReadAll(res.Body) - if err != nil { - t.Fatalf("Can't ReadAll signed object %v, errored with %v", obj, err) - } - if got, want := slurp, contents[obj]; !bytes.Equal(got, want) { - t.Errorf("Contents (%v) = %q; want %q", obj, got, want) - } - res.Body.Close() - } - - // Test NotFound. - _, err := NewReader(ctx, bucket, "obj-not-exists") - if err != ErrObjectNotExist { - t.Errorf("Object should not exist, err found to be %v", err) - } - - name := objects[0] - - // Test StatObject. - o, err := StatObject(ctx, bucket, name) - if err != nil { - t.Error(err) - } - if got, want := o.Name, name; got != want { - t.Errorf("Name (%v) = %q; want %q", name, got, want) - } - if got, want := o.ContentType, defaultType; got != want { - t.Errorf("ContentType (%v) = %q; want %q", name, got, want) - } - - // Test object copy. 
-	copy, err := CopyObject(ctx, bucket, name, bucket, copyObj, nil)
-	if err != nil {
-		t.Errorf("CopyObject failed with %v", err)
-	}
-	if copy.Name != copyObj {
-		t.Errorf("Copy object's name = %q; want %q", copy.Name, copyObj)
-	}
-	if copy.Bucket != bucket {
-		t.Errorf("Copy object's bucket = %q; want %q", copy.Bucket, bucket)
-	}
-
-	// Test UpdateAttrs.
-	updated, err := UpdateAttrs(ctx, bucket, name, ObjectAttrs{
-		ContentType: "text/html",
-		ACL:         []ACLRule{{Entity: "domain-google.com", Role: RoleReader}},
-	})
-	if err != nil {
-		t.Errorf("UpdateAttrs failed with %v", err)
-	}
-	if want := "text/html"; updated.ContentType != want {
-		t.Errorf("updated.ContentType == %q; want %q", updated.ContentType, want)
-	}
-
-	// Test checksums.
-	checksumCases := []struct {
-		name     string
-		contents [][]byte
-		size     int64
-		md5      string
-		crc32c   uint32
-	}{
-		{
-			name:     "checksum-object",
-			contents: [][]byte{[]byte("hello"), []byte("world")},
-			size:     10,
-			md5:      "fc5e038d38a57032085441e7fe7010b0",
-			crc32c:   1456190592,
-		},
-		{
-			name:     "zero-object",
-			contents: [][]byte{},
-			size:     0,
-			md5:      "d41d8cd98f00b204e9800998ecf8427e",
-			crc32c:   0,
-		},
-	}
-	for _, c := range checksumCases {
-		wc := NewWriter(ctx, bucket, c.name)
-		for _, data := range c.contents {
-			if _, err := wc.Write(data); err != nil {
-				t.Errorf("Write(%q) failed with %q", data, err)
-			}
-		}
-		if err = wc.Close(); err != nil {
-			t.Errorf("%q: close failed with %q", c.name, err)
-		}
-		obj := wc.Object()
-		if got, want := obj.Size, c.size; got != want {
-			t.Errorf("Object (%q) Size = %v; want %v", c.name, got, want)
-		}
-		if got, want := fmt.Sprintf("%x", obj.MD5), c.md5; got != want {
-			t.Errorf("Object (%q) MD5 = %q; want %q", c.name, got, want)
-		}
-		if got, want := obj.CRC32C, c.crc32c; got != want {
-			t.Errorf("Object (%q) CRC32C = %v; want %v", c.name, got, want)
-		}
-	}
-
-	// Test public ACL.
-	publicObj := objects[0]
-	if err = PutACLRule(ctx, bucket, publicObj, AllUsers, RoleReader); err != nil {
-		t.Errorf("PutACLRule failed with %v", err)
-	}
-	publicCtx := testutil.NoAuthContext()
-	rc, err := NewReader(publicCtx, bucket, publicObj)
-	if err != nil {
-		t.Error(err)
-	}
-	slurp, err := ioutil.ReadAll(rc)
-	if err != nil {
-		t.Errorf("ReadAll failed with %v", err)
-	}
-	if string(slurp) != string(contents[publicObj]) {
-		t.Errorf("Public object's content is expected to be %s, found %s", contents[publicObj], slurp)
-	}
-	rc.Close()
-
-	// Test writer error handling.
-	wc := NewWriter(publicCtx, bucket, publicObj)
-	if _, err := wc.Write([]byte("hello")); err != nil {
-		t.Errorf("Write unexpectedly failed with %v", err)
	}
-	if err = wc.Close(); err == nil {
-		t.Error("Close expected an error, found none")
-	}
-
-	// Test DeleteObject.
-	// The other objects will be deleted during the initial cleanup.
-	// This test exists so that we still cover deletion even when there
-	// are no objects left in the bucket to clean.
- if err := DeleteObject(ctx, bucket, copyObj); err != nil { - t.Errorf("Deletion of %v failed with %v", copyObj, err) - } - _, err = StatObject(ctx, bucket, copyObj) - if err != ErrObjectNotExist { - t.Errorf("Copy is expected to be deleted, stat errored with %v", err) - } -} - -func TestACL(t *testing.T) { - ctx := testutil.Context(ScopeFullControl) - cleanup(t, "acl") - entity := ACLEntity("domain-google.com") - if err := PutDefaultACLRule(ctx, bucket, entity, RoleReader); err != nil { - t.Errorf("Can't put default ACL rule for the bucket, errored with %v", err) - } - for _, obj := range aclObjects { - t.Logf("Writing %v", obj) - wc := NewWriter(ctx, bucket, obj) - c := randomContents() - if _, err := wc.Write(c); err != nil { - t.Errorf("Write for %v failed with %v", obj, err) - } - if err := wc.Close(); err != nil { - t.Errorf("Close for %v failed with %v", obj, err) - } - } - name := aclObjects[0] - acl, err := ACL(ctx, bucket, name) - if err != nil { - t.Errorf("Can't retrieve ACL of %v", name) - } - aclFound := false - for _, rule := range acl { - if rule.Entity == entity && rule.Role == RoleReader { - aclFound = true - } - } - if !aclFound { - t.Error("Expected to find an ACL rule for google.com domain users, but not found") - } - if err := DeleteACLRule(ctx, bucket, name, entity); err != nil { - t.Errorf("Can't delete the ACL rule for the entity: %v", entity) - } - - if err := PutBucketACLRule(ctx, bucket, "user-jbd@google.com", RoleReader); err != nil { - t.Errorf("Error while putting bucket ACL rule: %v", err) - } - bACL, err := BucketACL(ctx, bucket) - if err != nil { - t.Errorf("Error while getting the ACL of the bucket: %v", err) - } - bACLFound := false - for _, rule := range bACL { - if rule.Entity == "user-jbd@google.com" && rule.Role == RoleReader { - bACLFound = true - } - } - if !bACLFound { - t.Error("Expected to find an ACL rule for jbd@google.com user, but not found") - } - if err := DeleteBucketACLRule(ctx, bucket, "user-jbd@google.com"); err != nil { - t.Errorf("Error while deleting bucket ACL rule: %v", err) - } -} - -func cleanup(t *testing.T, prefix string) { - ctx := testutil.Context(ScopeFullControl) - var q *Query = &Query{ - Prefix: prefix, - } - for { - o, err := ListObjects(ctx, bucket, q) - if err != nil { - t.Fatalf("Cleanup List for bucket %v failed with error: %v", bucket, err) - } - for _, obj := range o.Results { - t.Logf("Cleanup deletion of %v", obj.Name) - if err = DeleteObject(ctx, bucket, obj.Name); err != nil { - t.Fatalf("Cleanup Delete for object %v failed with %v", obj.Name, err) - } - } - if o.Next == nil { - break - } - q = o.Next - } -} - -func randomContents() []byte { - h := md5.New() - io.WriteString(h, fmt.Sprintf("hello world%d", rand.Intn(100000))) - return h.Sum(nil) -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/storage.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/storage.go deleted file mode 100644 index 8aa70ff429..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/storage/storage.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package storage contains a Google Cloud Storage client. -// -// This package is experimental and may make backwards-incompatible changes. -package storage // import "google.golang.org/cloud/storage" - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/base64" - "encoding/pem" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "time" - - "google.golang.org/cloud/internal" - - "golang.org/x/net/context" - "google.golang.org/api/googleapi" - raw "google.golang.org/api/storage/v1" -) - -var ( - ErrBucketNotExist = errors.New("storage: bucket doesn't exist") - ErrObjectNotExist = errors.New("storage: object doesn't exist") -) - -const ( - // ScopeFullControl grants permissions to manage your - // data and permissions in Google Cloud Storage. - ScopeFullControl = raw.DevstorageFullControlScope - - // ScopeReadOnly grants permissions to - // view your data in Google Cloud Storage. - ScopeReadOnly = raw.DevstorageReadOnlyScope - - // ScopeReadWrite grants permissions to manage your - // data in Google Cloud Storage. - ScopeReadWrite = raw.DevstorageReadWriteScope -) - -// TODO(jbd): Add storage.buckets.list. -// TODO(jbd): Add storage.buckets.insert. -// TODO(jbd): Add storage.buckets.update. -// TODO(jbd): Add storage.buckets.delete. - -// TODO(jbd): Add storage.objects.watch. - -// BucketInfo returns the metadata for the specified bucket. -func BucketInfo(ctx context.Context, name string) (*Bucket, error) { - resp, err := rawService(ctx).Buckets.Get(name).Projection("full").Context(ctx).Do() - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrBucketNotExist - } - if err != nil { - return nil, err - } - return newBucket(resp), nil -} - -// ListObjects lists objects from the bucket. You can specify a query -// to filter the results. If q is nil, no filtering is applied. -func ListObjects(ctx context.Context, bucket string, q *Query) (*Objects, error) { - c := rawService(ctx).Objects.List(bucket) - c.Projection("full") - if q != nil { - c.Delimiter(q.Delimiter) - c.Prefix(q.Prefix) - c.Versions(q.Versions) - c.PageToken(q.Cursor) - if q.MaxResults > 0 { - c.MaxResults(int64(q.MaxResults)) - } - } - resp, err := c.Context(ctx).Do() - if err != nil { - return nil, err - } - objects := &Objects{ - Results: make([]*Object, len(resp.Items)), - Prefixes: make([]string, len(resp.Prefixes)), - } - for i, item := range resp.Items { - objects.Results[i] = newObject(item) - } - for i, prefix := range resp.Prefixes { - objects.Prefixes[i] = prefix - } - if resp.NextPageToken != "" { - next := Query{} - if q != nil { - // keep the other filtering - // criteria if there is a query - next = *q - } - next.Cursor = resp.NextPageToken - objects.Next = &next - } - return objects, nil -} - -// SignedURLOptions allows you to restrict the access to the signed URL. -type SignedURLOptions struct { - // GoogleAccessID represents the authorizer of the signed URL generation. 
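// Editorial sketch, not part of the original file: counting objects by paging
// through ListObjects with the Query type above. The bucket name and the
// 100-item page size are hypothetical; ctx is assumed to be an authorized
// cloud.NewContext, and the storage import is assumed.
func countObjects(ctx context.Context, bucket string) (int, error) {
	// BucketInfo returns ErrBucketNotExist if the bucket is missing.
	if _, err := storage.BucketInfo(ctx, bucket); err != nil {
		return 0, err
	}
	total := 0
	q := &storage.Query{MaxResults: 100}
	for {
		objs, err := storage.ListObjects(ctx, bucket, q)
		if err != nil {
			return 0, err
		}
		total += len(objs.Results)
		// Next is nil once the last page has been consumed.
		if objs.Next == nil {
			return total, nil
		}
		q = objs.Next
	}
}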
-	// It is typically the Google service account client email address from
-	// the Google Developers Console in the form of "xxx@developer.gserviceaccount.com".
-	// Required.
-	GoogleAccessID string
-
-	// PrivateKey is the Google service account private key. It is obtainable
-	// from the Google Developers Console.
-	// At https://console.developers.google.com/project//apiui/credential,
-	// create a service account client ID or reuse one of your existing service account
-	// credentials. Click on the "Generate new P12 key" to generate and download
-	// a new private key. Once you download the P12 file, use the following command
-	// to convert it into a PEM file.
-	//
-	//    $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
-	//
-	// Provide the contents of the PEM file as a byte slice.
-	// Required.
-	PrivateKey []byte
-
-	// Method is the HTTP method to be used with the signed URL.
-	// Signed URLs can be used with GET, HEAD, PUT, and DELETE requests.
-	// Required.
-	Method string
-
-	// Expires is the expiration time on the signed URL. It must be
-	// a datetime in the future.
-	// Required.
-	Expires time.Time
-
-	// ContentType is the content type header the client must provide
-	// to use the generated signed URL.
-	// Optional.
-	ContentType string
-
-	// Headers is a list of extension headers the client must provide
-	// in order to use the generated signed URL.
-	// Optional.
-	Headers []string
-
-	// MD5 is the base64 encoded MD5 checksum of the file.
-	// If provided, the client should provide the exact value on the request
-	// header in order to use the signed URL.
-	// Optional.
-	MD5 []byte
-}
-
-// SignedURL returns a URL for the specified object. Signed URLs allow
-// users access to a restricted resource for a limited time without having a
-// Google account or signing in. For more information about signed
-// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
-func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
-	if opts == nil {
-		return "", errors.New("storage: missing required SignedURLOptions")
-	}
-	if opts.GoogleAccessID == "" || opts.PrivateKey == nil {
-		return "", errors.New("storage: missing required credentials to generate a signed URL")
-	}
-	if opts.Method == "" {
-		return "", errors.New("storage: missing required method option")
-	}
-	if opts.Expires.IsZero() {
-		return "", errors.New("storage: missing required expires option")
-	}
-	key, err := parseKey(opts.PrivateKey)
-	if err != nil {
-		return "", err
-	}
-	h := sha256.New()
-	fmt.Fprintf(h, "%s\n", opts.Method)
-	fmt.Fprintf(h, "%s\n", opts.MD5)
-	fmt.Fprintf(h, "%s\n", opts.ContentType)
-	fmt.Fprintf(h, "%d\n", opts.Expires.Unix())
-	fmt.Fprintf(h, "%s", strings.Join(opts.Headers, "\n"))
-	fmt.Fprintf(h, "/%s/%s", bucket, name)
-	b, err := rsa.SignPKCS1v15(
-		rand.Reader,
-		key,
-		crypto.SHA256,
-		h.Sum(nil),
-	)
-	if err != nil {
-		return "", err
-	}
-	encoded := base64.StdEncoding.EncodeToString(b)
-	u := &url.URL{
-		Scheme: "https",
-		Host:   "storage.googleapis.com",
-		Path:   fmt.Sprintf("/%s/%s", bucket, name),
-	}
-	q := u.Query()
-	q.Set("GoogleAccessId", opts.GoogleAccessID)
-	q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix()))
-	q.Set("Signature", encoded)
-	u.RawQuery = q.Encode()
-	return u.String(), nil
-}
-
-// StatObject returns meta information about the specified object.
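// Editorial sketch, not part of the original file: producing a time-limited
// GET URL with SignedURL above. The key path, service account address, and
// 15-minute window are hypothetical; the PEM file is read the same way the
// deleted tests read their dummy keys, and the ioutil, time, and storage
// imports are assumed.
func shareForFifteenMinutes(bucket, object string) (string, error) {
	pemKey, err := ioutil.ReadFile("/path/to/key.pem")
	if err != nil {
		return "", err
	}
	return storage.SignedURL(bucket, object, &storage.SignedURLOptions{
		GoogleAccessID: "xxx@developer.gserviceaccount.com",
		PrivateKey:     pemKey,
		Method:         "GET",
		Expires:        time.Now().Add(15 * time.Minute),
	})
}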
-func StatObject(ctx context.Context, bucket, name string) (*Object, error) { - o, err := rawService(ctx).Objects.Get(bucket, name).Projection("full").Context(ctx).Do() - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(o), nil -} - -// UpdateAttrs updates an object with the provided attributes. -// All zero-value attributes are ignored. -func UpdateAttrs(ctx context.Context, bucket, name string, attrs ObjectAttrs) (*Object, error) { - o, err := rawService(ctx).Objects.Patch(bucket, name, attrs.toRawObject(bucket)).Projection("full").Context(ctx).Do() - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(o), nil -} - -// DeleteObject deletes the single specified object. -func DeleteObject(ctx context.Context, bucket, name string) error { - return rawService(ctx).Objects.Delete(bucket, name).Context(ctx).Do() -} - -// CopyObject copies the source object to the destination. -// The copied object's attributes are overwritten by attrs if non-nil. -func CopyObject(ctx context.Context, srcBucket, srcName string, destBucket, destName string, attrs *ObjectAttrs) (*Object, error) { - if srcBucket == "" || destBucket == "" { - return nil, errors.New("storage: srcBucket and destBucket must both be non-empty") - } - if srcName == "" || destName == "" { - return nil, errors.New("storage: srcName and destName must be non-empty") - } - var rawObject *raw.Object - if attrs != nil { - attrs.Name = destName - if attrs.ContentType == "" { - return nil, errors.New("storage: attrs.ContentType must be non-empty") - } - rawObject = attrs.toRawObject(destBucket) - } - o, err := rawService(ctx).Objects.Copy( - srcBucket, srcName, destBucket, destName, rawObject).Projection("full").Context(ctx).Do() - if err != nil { - return nil, err - } - return newObject(o), nil -} - -// NewReader creates a new io.ReadCloser to read the contents -// of the object. -func NewReader(ctx context.Context, bucket, name string) (io.ReadCloser, error) { - hc := internal.HTTPClient(ctx) - u := &url.URL{ - Scheme: "https", - Host: "storage.googleapis.com", - Path: fmt.Sprintf("/%s/%s", bucket, name), - } - res, err := hc.Get(u.String()) - if err != nil { - return nil, err - } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return nil, ErrObjectNotExist - } - if res.StatusCode < 200 || res.StatusCode > 299 { - res.Body.Close() - return res.Body, fmt.Errorf("storage: can't read object %v/%v, status code: %v", bucket, name, res.Status) - } - return res.Body, nil -} - -// NewWriter returns a storage Writer that writes to the GCS object -// identified by the specified name. -// If such an object doesn't exist, it creates one. -// Attributes can be set on the object by modifying the returned Writer's -// ObjectAttrs field before the first call to Write. The name parameter to this -// function is ignored if the Name field of the ObjectAttrs field is set to a -// non-empty string. -// -// It is the caller's responsibility to call Close when writing is done. -// -// The object is not available and any previous object with the same -// name is not replaced on Cloud Storage until Close is called. 
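// Editorial sketch, not part of the original file: a write-then-read round
// trip through NewWriter and NewReader above. The bucket and object names are
// hypothetical; ctx is assumed to be an authorized cloud.NewContext, and the
// ioutil and storage imports are assumed.
func writeThenRead(ctx context.Context, bucket string) ([]byte, error) {
	wc := storage.NewWriter(ctx, bucket, "sketch.txt")
	wc.ContentType = "text/plain"
	if _, err := wc.Write([]byte("hello world")); err != nil {
		return nil, err
	}
	// The object only becomes visible once Close returns nil.
	if err := wc.Close(); err != nil {
		return nil, err
	}
	rc, err := storage.NewReader(ctx, bucket, "sketch.txt")
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}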
-func NewWriter(ctx context.Context, bucket, name string) *Writer {
-	return &Writer{
-		ctx:    ctx,
-		bucket: bucket,
-		name:   name,
-		donec:  make(chan struct{}),
-	}
-}
-
-func rawService(ctx context.Context) *raw.Service {
-	return internal.Service(ctx, "storage", func(hc *http.Client) interface{} {
-		svc, _ := raw.New(hc)
-		return svc
-	}).(*raw.Service)
-}
-
-// parseKey converts the binary contents of a private key file
-// to an *rsa.PrivateKey. It detects whether the private key is in a
-// PEM container or not. If so, it extracts the private key
-// from the PEM container before conversion. It only supports PEM
-// containers with no passphrase.
-func parseKey(key []byte) (*rsa.PrivateKey, error) {
-	if block, _ := pem.Decode(key); block != nil {
-		key = block.Bytes
-	}
-	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
-	if err != nil {
-		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
-		if err != nil {
-			return nil, err
-		}
-	}
-	parsed, ok := parsedKey.(*rsa.PrivateKey)
-	if !ok {
-		return nil, errors.New("storage: private key is invalid")
-	}
-	return parsed, nil
-}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/storage_test.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/storage_test.go
deleted file mode 100644
index 4d239e5c72..0000000000
--- a/Godeps/_workspace/src/google.golang.org/cloud/storage/storage_test.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package storage
-
-import (
-	"fmt"
-	"io/ioutil"
-	"log"
-	"strings"
-	"testing"
-	"time"
-
-	"golang.org/x/net/context"
-)
-
-func TestSignedURL(t *testing.T) {
-	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
-	url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{
-		GoogleAccessID: "xxx@clientid",
-		PrivateKey:     dummyKey("rsa"),
-		Method:         "GET",
-		MD5:            []byte("202cb962ac59075b964b07152d234b70"),
-		Expires:        expires,
-		ContentType:    "application/json",
-		Headers:        []string{"x-header1", "x-header2"},
-	})
-	if err != nil {
-		t.Error(err)
-	}
-	want := "https://storage.googleapis.com/bucket-name/object-name?"
+ - "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + - "ITqNWQHr7ayIj%2B0Ds5%2FzUT2cWMQQouuFmu6L11Zd3kfNKvm3sjyGIzO" + - "gZsSUoter1SxP7BcrCzgqIZ9fQmgQnuIpqqLL4kcGmTbKsQS6hTknpJM%2F" + - "2lS4NY6UH1VXBgm2Tce28kz8rnmqG6svcGvtWuOgJsETeSIl1R9nAEIDCEq" + - "ZJzoOiru%2BODkHHkpoFjHWAwHugFHX%2B9EX4SxaytiN3oEy48HpYGWV0I" + - "h8NvU1hmeWzcLr41GnTADeCn7Eg%2Fb5H2GCNO70Cz%2Bw2fn%2BofLCUeR" + - "YQd%2FhES8oocv5kpHZkstc8s8uz3aKMsMauzZ9MOmGy%2F6VULBgIVvi6a" + - "AwEBIYOw%3D%3D" - if url != want { - t.Fatalf("Unexpected signed URL; found %v", url) - } -} - -func TestSignedURL_PEMPrivateKey(t *testing.T) { - expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") - url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ - GoogleAccessID: "xxx@clientid", - PrivateKey: dummyKey("pem"), - Method: "GET", - MD5: []byte("202cb962ac59075b964b07152d234b70"), - Expires: expires, - ContentType: "application/json", - Headers: []string{"x-header1", "x-header2"}, - }) - if err != nil { - t.Error(err) - } - want := "https://storage.googleapis.com/bucket-name/object-name?" + - "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + - "B7XkS4dfmVDoe%2FoDeXZkWlYmg8u2kI0SizTrzL5%2B9RmKnb5j7Kf34DZ" + - "JL8Hcjr1MdPFLNg2QV4lEH86Gqgqt%2Fv3jFOTRl4wlzcRU%2FvV5c5HU8M" + - "qW0FZ0IDbqod2RdsMONLEO6yQWV2HWFrMLKl2yMFlWCJ47et%2BFaHe6v4Z" + - "EBc0%3D" - if url != want { - t.Fatalf("Unexpected signed URL; found %v", url) - } -} - -func TestSignedURL_MissingOptions(t *testing.T) { - pk := dummyKey("rsa") - var tests = []struct { - opts *SignedURLOptions - errMsg string - }{ - { - &SignedURLOptions{}, - "missing required credentials", - }, - { - &SignedURLOptions{GoogleAccessID: "access_id"}, - "missing required credentials", - }, - { - &SignedURLOptions{ - GoogleAccessID: "access_id", - PrivateKey: pk, - }, - "missing required method", - }, - { - &SignedURLOptions{ - GoogleAccessID: "access_id", - PrivateKey: pk, - Method: "PUT", - }, - "missing required expires", - }, - } - for _, test := range tests { - _, err := SignedURL("bucket", "name", test.opts) - if !strings.Contains(err.Error(), test.errMsg) { - t.Errorf("expected err: %v, found: %v", test.errMsg, err) - } - } -} - -func dummyKey(kind string) []byte { - slurp, err := ioutil.ReadFile(fmt.Sprintf("./testdata/dummy_%s", kind)) - if err != nil { - log.Fatal(err) - } - return slurp -} - -func TestCopyObjectMissingFields(t *testing.T) { - var tests = []struct { - srcBucket, srcName, destBucket, destName string - errMsg string - }{ - { - "mybucket", "", "mybucket", "destname", - "srcName and destName must be non-empty", - }, - { - "mybucket", "srcname", "mybucket", "", - "srcName and destName must be non-empty", - }, - { - "", "srcfile", "mybucket", "destname", - "srcBucket and destBucket must both be non-empty", - }, - { - "mybucket", "srcfile", "", "destname", - "srcBucket and destBucket must both be non-empty", - }, - } - for i, test := range tests { - _, err := CopyObject(context.TODO(), test.srcBucket, test.srcName, test.destBucket, test.destName, nil) - if !strings.Contains(err.Error(), test.errMsg) { - t.Errorf("CopyObject test #%v: err = %v, want %v", i, err, test.errMsg) - } - } -} - -func TestObjectNames(t *testing.T) { - // Naming requirements: https://cloud.google.com/storage/docs/bucket-naming - const maxLegalLength = 1024 - - type testT struct { - name, want string - } - tests := []testT{ - // Embedded characters important in URLs. - {"foo % bar", "foo%20%25%20bar"}, - {"foo ? 
bar", "foo%20%3F%20bar"}, - {"foo / bar", "foo%20/%20bar"}, - {"foo %?/ bar", "foo%20%25%3F/%20bar"}, - - // Non-Roman scripts - {"타코", "%ED%83%80%EC%BD%94"}, - {"δΈ–η•Œ", "%E4%B8%96%E7%95%8C"}, - - // Longest legal name - {strings.Repeat("a", maxLegalLength), strings.Repeat("a", maxLegalLength)}, - - // Line terminators besides CR and LF: https://en.wikipedia.org/wiki/Newline#Unicode - {"foo \u000b bar", "foo%20%0B%20bar"}, - {"foo \u000c bar", "foo%20%0C%20bar"}, - {"foo \u0085 bar", "foo%20%C2%85%20bar"}, - {"foo \u2028 bar", "foo%20%E2%80%A8%20bar"}, - {"foo \u2029 bar", "foo%20%E2%80%A9%20bar"}, - - // Null byte. - {"foo \u0000 bar", "foo%20%00%20bar"}, - - // Non-control characters that are discouraged, but not forbidden, according to the documentation. - {"foo # bar", "foo%20%23%20bar"}, - {"foo []*? bar", "foo%20%5B%5D%2A%3F%20bar"}, - - // Angstrom symbol singleton and normalized forms: http://unicode.org/reports/tr15/ - {"foo \u212b bar", "foo%20%E2%84%AB%20bar"}, - {"foo \u0041\u030a bar", "foo%20A%CC%8A%20bar"}, - {"foo \u00c5 bar", "foo%20%C3%85%20bar"}, - - // Hangul separating jamo: http://www.unicode.org/versions/Unicode7.0.0/ch18.pdf (Table 18-10) - {"foo \u3131\u314f bar", "foo%20%E3%84%B1%E3%85%8F%20bar"}, - {"foo \u1100\u1161 bar", "foo%20%E1%84%80%E1%85%A1%20bar"}, - {"foo \uac00 bar", "foo%20%EA%B0%80%20bar"}, - } - - // C0 control characters not forbidden by the docs. - var runes []rune - for r := rune(0x01); r <= rune(0x1f); r++ { - if r != '\u000a' && r != '\u000d' { - runes = append(runes, r) - } - } - tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%01%02%03%04%05%06%07%08%09%0B%0C%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20bar"}) - - // C1 control characters, plus DEL. - runes = nil - for r := rune(0x7f); r <= rune(0x9f); r++ { - runes = append(runes, r) - } - tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%20bar"}) - - opts := &SignedURLOptions{ - GoogleAccessID: "xxx@clientid", - PrivateKey: dummyKey("rsa"), - Method: "GET", - MD5: []byte("202cb962ac59075b964b07152d234b70"), - Expires: time.Date(2002, time.October, 2, 10, 0, 0, 0, time.UTC), - ContentType: "application/json", - Headers: []string{"x-header1", "x-header2"}, - } - - for _, test := range tests { - g, err := SignedURL("bucket-name", test.name, opts) - if err != nil { - t.Errorf("SignedURL(%q) err=%v, want nil", test.name, err) - } - if w := "/bucket-name/" + test.want; !strings.Contains(g, w) { - t.Errorf("SignedURL(%q)=%q, want substring %q", test.name, g, w) - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/testdata/dummy_pem b/Godeps/_workspace/src/google.golang.org/cloud/storage/testdata/dummy_pem deleted file mode 100644 index 3428d4497c..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/storage/testdata/dummy_pem +++ /dev/null @@ -1,39 +0,0 @@ -Bag Attributes - friendlyName: privatekey - localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 -Key Attributes: ------BEGIN RSA PRIVATE KEY----- -MIICXQIBAAKBgQCtCWMoJ2Bok2QoGFyU7A6IlGprO9QfUTT0jNrLkIbM5OWNIuDx -64+PEaTS5g5m+2Hz/lmd5jJKanAH4dY9LZzsaYAPq1K17Gcmg1hEisYeKsgOcjYY -kwRkV+natCTsC+tfWmS0voRh0jA1rI1J4MikceoHtgWdEuoHrrptRVpWKwIDAQAB -AoGAKp3uQvx3vSnX+BwP6Um+RpsvHpwMoW3xue1bEdnVqW8SrlERz+NxZw40ZxDs 
-KSbuuBZD4iTI7BUM5JQVnNm4FQY1YrPlWZLyI73Bj8RKTXrPdJheM/0r7xjiIXbQ -7w4cUSM9rVugnI/rxF2kPIQTGYI+EG/6+P+k6VvgPmC0T/ECQQDUPskiS18WaY+i -Koalbrb3GakaBoHrC1b4ln4CAv7fq7H4WvFvqi/2rxLhHYq31iwxYy8s7J7Sba1+ -5vwJ2TxZAkEA0LVfs3Q2VWZ+cM3bv0aYTalMXg6wT+LoNvk9HnOb0zQYajF3qm4G -ZFdfEqvOkje0zQ4fcihARKyda/VY84UGIwJBAIZa0FvjNmgrnn7bSKzEbxHwrnkJ -EYjGfuGR8mY3mzvfpiM+/oLfSslvfhX+62cALq18yco4ZzlxsFgaxAU//NECQDcS -NN94YcHlGqYPW9W7/gI4EwOaoqFhwV6II71+SfbP/0U+KlJZV+xwNZEKrqZcdqPI -/zkzL8ovNha/laokRrsCQQCyoPHGcBWj+VFbNoyQnX4tghc6rOY7n4pmpgQvU825 -TAM9vnYtSkKK/V56kEDNBO5LwiRsir95IUNclqqMKR1C ------END RSA PRIVATE KEY----- -Bag Attributes - friendlyName: privatekey - localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 -subject=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com -issuer=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com ------BEGIN CERTIFICATE----- -MIICXTCCAcagAwIBAgIIHxTMQUVJRZ0wDQYJKoZIhvcNAQEFBQAwVDFSMFAGA1UE -AxNJMTA3OTQzMjM1MDY1OS1udm9nMHZtbjlzNnBxcjNrcjR2MmF2YmM3bmtob2Ex -MS5hcHBzLmdvb2dsZXVzZXJjb250ZW50LmNvbTAeFw0xNDExMjQxODAwMDRaFw0y -NDExMjExODAwMDRaMFQxUjBQBgNVBAMTSTEwNzk0MzIzNTA2NTktbnZvZzB2bW45 -czZwcXIza3I0djJhdmJjN25raG9hMTEuYXBwcy5nb29nbGV1c2VyY29udGVudC5j -b20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAK0JYygnYGiTZCgYXJTsDoiU -ams71B9RNPSM2suQhszk5Y0i4PHrj48RpNLmDmb7YfP+WZ3mMkpqcAfh1j0tnOxp -gA+rUrXsZyaDWESKxh4qyA5yNhiTBGRX6dq0JOwL619aZLS+hGHSMDWsjUngyKRx -6ge2BZ0S6geuum1FWlYrAgMBAAGjODA2MAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/ -BAQDAgeAMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMCMA0GCSqGSIb3DQEBBQUAA4GB -ACVvKkZkomHq3uffOQwdZ4VJYuxrvDGnZu/ExW9WngO2teEsjxABL41TNnRYHN5T -lMC19poFA2tR/DySDLJ2XNs/hSvyQUL6HHCncVdR4Srpie88j48peY1MZSMP51Jv -qagbbP5K5DSEu02/zZaV0kaCvLEN0KAtj/noDuOOnQU2 ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/testdata/dummy_rsa b/Godeps/_workspace/src/google.golang.org/cloud/storage/testdata/dummy_rsa deleted file mode 100644 index 4ce6678dbd..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/storage/testdata/dummy_rsa +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE -DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY -fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK -1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr -k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9 -/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt -3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn -2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3 -nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK -6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf -5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e -DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1 -M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g -z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y -1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK -J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U -f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx -QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA -cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr -Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw -5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg 
-KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84 -OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd -mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ -5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg== ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/types.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/types.go deleted file mode 100644 index 060deb6ad7..0000000000 --- a/Godeps/_workspace/src/google.golang.org/cloud/storage/types.go +++ /dev/null @@ -1,417 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "encoding/base64" - "io" - "sync" - "time" - - "golang.org/x/net/context" - raw "google.golang.org/api/storage/v1" -) - -// Bucket represents a Google Cloud Storage bucket. -type Bucket struct { - // Name is the name of the bucket. - Name string - - // ACL is the list of access control rules on the bucket. - ACL []ACLRule - - // DefaultObjectACL is the list of access controls to - // apply to new objects when no object ACL is provided. - DefaultObjectACL []ACLRule - - // Location is the location of the bucket. It defaults to "US". - Location string - - // Metageneration is the metadata generation of the bucket. - // Read-only. - Metageneration int64 - - // StorageClass is the storage class of the bucket. This defines - // how objects in the bucket are stored and determines the SLA - // and the cost of storage. Typical values are "STANDARD" and - // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD". - StorageClass string - - // Created is the creation time of the bucket. - // Read-only. - Created time.Time -} - -func newBucket(b *raw.Bucket) *Bucket { - if b == nil { - return nil - } - bucket := &Bucket{ - Name: b.Name, - Location: b.Location, - Metageneration: b.Metageneration, - StorageClass: b.StorageClass, - Created: convertTime(b.TimeCreated), - } - acl := make([]ACLRule, len(b.Acl)) - for i, rule := range b.Acl { - acl[i] = ACLRule{ - Entity: ACLEntity(rule.Entity), - Role: ACLRole(rule.Role), - } - } - bucket.ACL = acl - objACL := make([]ACLRule, len(b.DefaultObjectAcl)) - for i, rule := range b.DefaultObjectAcl { - objACL[i] = ACLRule{ - Entity: ACLEntity(rule.Entity), - Role: ACLRole(rule.Role), - } - } - bucket.DefaultObjectACL = objACL - return bucket -} - -// ObjectAttrs is the user-editable object attributes. -type ObjectAttrs struct { - // Name is the name of the object. - Name string - - // ContentType is the MIME type of the object's content. - // Optional. - ContentType string - - // ContentLanguage is the optional RFC 1766 Content-Language of - // the object's content sent in response headers. - ContentLanguage string - - // ContentEncoding is the optional Content-Encoding of the object - // sent it the response headers. - ContentEncoding string - - // CacheControl is the optional Cache-Control header of the object - // sent in the response headers. 
-	CacheControl string
-
-	// ContentDisposition is the optional Content-Disposition header of the object
-	// sent in the response headers.
-	ContentDisposition string
-
-	// ACL is the list of access control rules for the object.
-	// Optional. If nil or empty, existing ACL rules are preserved.
-	ACL []ACLRule
-
-	// Metadata represents user-provided metadata, in key/value pairs.
-	// It can be nil if the current metadata values need to be preserved.
-	Metadata map[string]string
-}
-
-func (o ObjectAttrs) toRawObject(bucket string) *raw.Object {
-	var acl []*raw.ObjectAccessControl
-	if len(o.ACL) > 0 {
-		acl = make([]*raw.ObjectAccessControl, len(o.ACL))
-		for i, rule := range o.ACL {
-			acl[i] = &raw.ObjectAccessControl{
-				Entity: string(rule.Entity),
-				Role:   string(rule.Role),
-			}
-		}
-	}
-	return &raw.Object{
-		Bucket:             bucket,
-		Name:               o.Name,
-		ContentType:        o.ContentType,
-		ContentEncoding:    o.ContentEncoding,
-		ContentLanguage:    o.ContentLanguage,
-		CacheControl:       o.CacheControl,
-		ContentDisposition: o.ContentDisposition,
-		Acl:                acl,
-		Metadata:           o.Metadata,
-	}
-}
-
-// Object represents a Google Cloud Storage (GCS) object.
-type Object struct {
-	// Bucket is the name of the bucket containing this GCS object.
-	Bucket string
-
-	// Name is the name of the object within the bucket.
-	Name string
-
-	// ContentType is the MIME type of the object's content.
-	ContentType string
-
-	// ContentLanguage is the content language of the object's content.
-	ContentLanguage string
-
-	// CacheControl is the Cache-Control header to be sent in the response
-	// headers when serving the object data.
-	CacheControl string
-
-	// ACL is the list of access control rules for the object.
-	ACL []ACLRule
-
-	// Owner is the owner of the object.
-	//
-	// If non-zero, it is in the form of "user-<userId>".
-	Owner string
-
-	// Size is the length of the object's content.
-	Size int64
-
-	// ContentEncoding is the encoding of the object's content.
-	ContentEncoding string
-
-	// MD5 is the MD5 hash of the object's content.
-	MD5 []byte
-
-	// CRC32C is the CRC32 checksum of the object's content using
-	// the Castagnoli93 polynomial.
-	CRC32C uint32
-
-	// MediaLink is a URL to the object's content.
-	MediaLink string
-
-	// Metadata represents user-provided metadata, in key/value pairs.
-	// It can be nil if no metadata is provided.
-	Metadata map[string]string
-
-	// Generation is the generation number of the object's content.
-	Generation int64
-
-	// MetaGeneration is the version of the metadata for this
-	// object at this generation. This field is used for preconditions
-	// and for detecting changes in metadata. A metageneration number
-	// is only meaningful in the context of a particular generation
-	// of a particular object.
-	MetaGeneration int64
-
-	// StorageClass is the storage class of the bucket.
-	// This value defines how objects in the bucket are stored and
-	// determines the SLA and the cost of storage. Typical values are
-	// "STANDARD" and "DURABLE_REDUCED_AVAILABILITY".
-	// It defaults to "STANDARD".
-	StorageClass string
-
-	// Deleted is the time the object was deleted.
-	// If not deleted, it is the zero value.
-	Deleted time.Time
-
-	// Updated is the creation or modification time of the object.
-	// For buckets with versioning enabled, changing an object's
-	// metadata does not change this property.
-	Updated time.Time
-}
-
-// convertTime converts a time in RFC3339 format to time.Time.
-// If any error occurs in parsing, the zero-value time.Time is silently returned.
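// Editorial sketch, not part of the original file: ObjectAttrs carries only
// the fields to change when passed to UpdateAttrs (defined in storage.go
// above); zero-valued fields, a nil ACL, and nil Metadata leave the existing
// values in place, per the field docs. Names are hypothetical, and the log
// and storage imports are assumed.
func retype(ctx context.Context, bucket, object string) error {
	// Only ContentType is updated; the object's ACL and metadata are kept.
	updated, err := storage.UpdateAttrs(ctx, bucket, object, storage.ObjectAttrs{
		ContentType: "text/html",
	})
	if err != nil {
		return err
	}
	log.Printf("object %q now has type %q", updated.Name, updated.ContentType)
	return nil
}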
-// convertTime converts a time in RFC3339 format to time.Time.
-// If any error occurs in parsing, the zero-value time.Time is silently returned.
-func convertTime(t string) time.Time {
-	var r time.Time
-	if t != "" {
-		r, _ = time.Parse(time.RFC3339, t)
-	}
-	return r
-}
-
-func newObject(o *raw.Object) *Object {
-	if o == nil {
-		return nil
-	}
-	acl := make([]ACLRule, len(o.Acl))
-	for i, rule := range o.Acl {
-		acl[i] = ACLRule{
-			Entity: ACLEntity(rule.Entity),
-			Role:   ACLRole(rule.Role),
-		}
-	}
-	owner := ""
-	if o.Owner != nil {
-		owner = o.Owner.Entity
-	}
-	md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
-	var crc32c uint32
-	d, err := base64.StdEncoding.DecodeString(o.Crc32c)
-	if err == nil && len(d) == 4 {
-		crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3])
-	}
-	return &Object{
-		Bucket:          o.Bucket,
-		Name:            o.Name,
-		ContentType:     o.ContentType,
-		ContentLanguage: o.ContentLanguage,
-		CacheControl:    o.CacheControl,
-		ACL:             acl,
-		Owner:           owner,
-		ContentEncoding: o.ContentEncoding,
-		Size:            int64(o.Size),
-		MD5:             md5,
-		CRC32C:          crc32c,
-		MediaLink:       o.MediaLink,
-		Metadata:        o.Metadata,
-		Generation:      o.Generation,
-		MetaGeneration:  o.Metageneration,
-		StorageClass:    o.StorageClass,
-		Deleted:         convertTime(o.TimeDeleted),
-		Updated:         convertTime(o.Updated),
-	}
-}
-
-// Query represents a query to filter objects from a bucket.
-type Query struct {
-	// Delimiter returns results in a directory-like fashion.
-	// Results will contain only objects whose names, aside from the
-	// prefix, do not contain delimiter. Objects whose names,
-	// aside from the prefix, contain delimiter will have their name,
-	// truncated after the delimiter, returned in prefixes.
-	// Duplicate prefixes are omitted.
-	// Optional.
-	Delimiter string
-
-	// Prefix is the prefix filter to query objects
-	// whose names begin with this prefix.
-	// Optional.
-	Prefix string
-
-	// Versions indicates whether multiple versions of the same
-	// object will be included in the results.
-	Versions bool
-
-	// Cursor is a previously-returned page token
-	// representing part of the larger set of results to view.
-	// Optional.
-	Cursor string
-
-	// MaxResults is the maximum number of items plus prefixes
-	// to return. As duplicate prefixes are omitted,
-	// fewer total results may be returned than requested.
-	// The default page limit is used if it is negative or zero.
-	MaxResults int
-}
-
-// Objects represents a list of objects returned from
-// a bucket look-up request and a query to retrieve more
-// objects from the next pages.
-type Objects struct {
-	// Results represent a list of object results.
-	Results []*Object
-
-	// Next is the continuation query to retrieve more
-	// results with the same filtering criteria. If there
-	// are no more results to retrieve, it is nil.
-	Next *Query
-
-	// Prefixes represents prefixes of objects
-	// matching-but-not-listed up to and including
-	// the requested delimiter.
-	Prefixes []string
-}
-
-// contentTyper implements ContentTyper to enable an
-// io.ReadCloser to specify its MIME type.
-type contentTyper struct {
-	io.Reader
-	t string
-}
-
-func (c *contentTyper) ContentType() string {
-	return c.t
-}
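Reviewer note: the Objects type above defines a continuation protocol rather than an iterator: each response carries a Next query, and a nil Next marks the final page. A hedged sketch of the intended loop, assuming the package's ListObjects helper (defined elsewhere in the deleted package, not in this file) and placeholder bucket/prefix values:

package storageexample // hypothetical package for this sketch

import (
	"golang.org/x/net/context"

	"google.golang.org/cloud/storage"
)

// listAll pages through all objects under prefix by re-issuing the
// returned Next query until it is nil, per the Objects docs above.
func listAll(ctx context.Context, bucket, prefix string) ([]*storage.Object, error) {
	var all []*storage.Object
	q := &storage.Query{Prefix: prefix}
	for q != nil {
		objs, err := storage.ListObjects(ctx, bucket, q)
		if err != nil {
			return nil, err
		}
		all = append(all, objs.Results...)
		q = objs.Next // nil once the final page has been returned
	}
	return all, nil
}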
-// A Writer writes a Cloud Storage object.
-type Writer struct {
-	// ObjectAttrs are optional attributes to set on the object. Any attributes
-	// must be initialized before the first Write call. Nil or zero-valued
-	// attributes are ignored.
-	ObjectAttrs
-
-	ctx    context.Context
-	bucket string
-	name   string
-
-	once sync.Once
-
-	opened bool
-	r      io.Reader
-	pw     *io.PipeWriter
-
-	donec chan struct{} // closed after err and obj are set.
-	err   error
-	obj   *Object
-}
-
-func (w *Writer) open() {
-	attrs := w.ObjectAttrs
-	// Always set the name, otherwise the backend
-	// rejects the request and responds with an HTTP 400.
-	if attrs.Name == "" {
-		attrs.Name = w.name
-	}
-	pr, pw := io.Pipe()
-	w.r = &contentTyper{pr, attrs.ContentType}
-	w.pw = pw
-	w.opened = true
-
-	go func() {
-		resp, err := rawService(w.ctx).Objects.Insert(
-			w.bucket, attrs.toRawObject(w.bucket)).Media(w.r).Projection("full").Context(w.ctx).Do()
-		w.err = err
-		if err == nil {
-			w.obj = newObject(resp)
-		} else {
-			pr.CloseWithError(w.err)
-		}
-		close(w.donec)
-	}()
-}
-
-// Write appends to w.
-func (w *Writer) Write(p []byte) (n int, err error) {
-	if w.err != nil {
-		return 0, w.err
-	}
-	if !w.opened {
-		w.open()
-	}
-	return w.pw.Write(p)
-}
-
-// Close completes the write operation and flushes any buffered data.
-// If Close doesn't return an error, metadata about the written object
-// can be retrieved by calling Object.
-func (w *Writer) Close() error {
-	if !w.opened {
-		w.open()
-	}
-	if err := w.pw.Close(); err != nil {
-		return err
-	}
-	<-w.donec
-	return w.err
-}
-
-// CloseWithError aborts the write operation with the provided error.
-// CloseWithError always returns nil.
-func (w *Writer) CloseWithError(err error) error {
-	if !w.opened {
-		return nil
-	}
-	return w.pw.CloseWithError(err)
-}
-
-// Object returns metadata about a successfully-written object.
-// It's only valid to call it after Close returns nil.
-func (w *Writer) Object() *Object {
-	return w.obj
-}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/storage/types_test.go b/Godeps/_workspace/src/google.golang.org/cloud/storage/types_test.go
deleted file mode 100644
index 02b71314c7..0000000000
--- a/Godeps/_workspace/src/google.golang.org/cloud/storage/types_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package storage
-
-import (
-	"fmt"
-	"net/http"
-	"testing"
-
-	"google.golang.org/cloud"
-)
-
-type fakeTransport struct{}
-
-func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) {
-	return nil, fmt.Errorf("error handling request")
-}
-
-func TestErrorOnObjectsInsertCall(t *testing.T) {
-	ctx := cloud.NewContext("project-id", &http.Client{
-		Transport: &fakeTransport{}})
-	wc := NewWriter(ctx, "bucketname", "filename1")
-	wc.ContentType = "text/plain"
-	if _, err := wc.Write([]byte("hello world")); err == nil {
-		t.Errorf("expected error on write, got nil")
-	}
-	if err := wc.Close(); err == nil {
-		t.Errorf("expected error on close, got nil")
-	}
-}
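Reviewer note: with both files removed by this diff, the Writer contract they defined is worth recording: attributes are set on the embedded ObjectAttrs before the first Write, Close flushes and surfaces the upload error, and Object is only meaningful after Close returns nil. A sketch of that lifecycle, reusing the NewWriter call and names from the deleted test above (the surrounding package name is hypothetical):

package storageexample // hypothetical package for this sketch

import (
	"golang.org/x/net/context"

	"google.golang.org/cloud/storage"
)

// put uploads data and returns the resulting object metadata,
// following the Write -> Close -> Object ordering that the
// deleted Writer documentation requires.
func put(ctx context.Context, data []byte) (*storage.Object, error) {
	wc := storage.NewWriter(ctx, "bucketname", "filename1") // names from the deleted test
	wc.ContentType = "text/plain"
	if _, err := wc.Write(data); err != nil {
		wc.CloseWithError(err) // abort the upload; documented to always return nil
		return nil, err
	}
	if err := wc.Close(); err != nil {
		return nil, err
	}
	return wc.Object(), nil
}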