Updating Godeps (engine-api and go-connections)

Signed-off-by: Nishant Totla <nishanttotla@gmail.com>
Nishant Totla 2016-02-23 16:06:30 -08:00
parent d4cfba1844
commit 7376fd156b
106 changed files with 7985 additions and 105 deletions

Godeps/Godeps.json (generated)

@@ -2,6 +2,10 @@
   "ImportPath": "github.com/docker/swarm",
   "GoVersion": "go1.5",
   "Deps": [
+    {
+      "ImportPath": "github.com/Microsoft/go-winio",
+      "Rev": "c40bf24f405ab3cc8e1383542d474e813332de6d"
+    },
     {
       "ImportPath": "github.com/Sirupsen/logrus",
       "Comment": "v0.8.7-53-g446d1c1",
@@ -66,20 +70,30 @@
       "Comment": "v1.4.1-10581-gb65fd8e",
       "Rev": "b65fd8e879545e8c9b859ea9b6b825ac50c79e46"
     },
+    {
+      "ImportPath": "github.com/docker/engine-api/client",
+      "Comment": "v0.2.2-99-gadfa32f",
+      "Rev": "adfa32fb6ccad724d73a16d53ce3defa46df27e5"
+    },
     {
       "ImportPath": "github.com/docker/engine-api/types",
-      "Comment": "v0.2.2-27-gbdbab71",
-      "Rev": "bdbab71ec21209ef56dffdbe42c9d21843c30862"
+      "Comment": "v0.2.2-99-gadfa32f",
+      "Rev": "adfa32fb6ccad724d73a16d53ce3defa46df27e5"
     },
     {
       "ImportPath": "github.com/docker/go-connections/nat",
-      "Comment": "v0.1.2",
-      "Rev": "4e42727957c146776e5de9cec8c39e4059ed9f20"
+      "Comment": "v0.2.0",
+      "Rev": "34b5052da6b11e27f5f2e357b38b571ddddd3928"
     },
+    {
+      "ImportPath": "github.com/docker/go-connections/sockets",
+      "Comment": "v0.2.0",
+      "Rev": "34b5052da6b11e27f5f2e357b38b571ddddd3928"
+    },
     {
       "ImportPath": "github.com/docker/go-connections/tlsconfig",
-      "Comment": "v0.1.2",
-      "Rev": "4e42727957c146776e5de9cec8c39e4059ed9f20"
+      "Comment": "v0.2.0",
+      "Rev": "34b5052da6b11e27f5f2e357b38b571ddddd3928"
     },
     {
       "ImportPath": "github.com/docker/go-units",
@@ -161,6 +175,11 @@
       "Comment": "v0.0.2-27-g657252e",
       "Rev": "657252ec6fc27975cd86da9f708acd6f4bb1f9ac"
     },
+    {
+      "ImportPath": "github.com/opencontainers/runc/libcontainer/user",
+      "Comment": "v0.0.8-45-g2c31154",
+      "Rev": "2c3115481ee1782ad687a9e0b4834f89533c2acf"
+    },
     {
       "ImportPath": "github.com/pborman/uuid",
       "Rev": "dee7705ef7b324f27ceb85a121c61f2c2e8ce988"
@@ -203,6 +222,10 @@
       "ImportPath": "golang.org/x/net/context",
       "Rev": "4fd4a9fed55e5bdee4a89d6406c2eabe38b60300"
     },
+    {
+      "ImportPath": "golang.org/x/net/proxy",
+      "Rev": "4fd4a9fed55e5bdee4a89d6406c2eabe38b60300"
+    },
     {
       "ImportPath": "golang.org/x/sys/unix",
       "Rev": "833a04a10549a95dc34458c195cbad61bbb6cb4d"


@@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Microsoft
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -0,0 +1,15 @@
# go-winio
This repository contains utilities for efficiently performing Win32 IO operations in
Go. Currently, this is focused on accessing named pipes and other file handles, and
for using named pipes as a net transport.
This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
newer operating systems. This is similar to the implementation of network sockets in Go's net
package.
Please see the LICENSE file for licensing information.
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
for another named pipe implementation.
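
Purely as an illustration of the named-pipe transport described above (this example is not part of the commit, and the vendored subset used here may not include the pipe code at all): a client can dial a pipe and treat the result as an ordinary net.Conn. The winio.DialPipe(path, *time.Duration) signature reflects the present-day go-winio API and is an assumption for this 2016 revision; the pipe name is a placeholder.

// +build windows

package main

import (
	"fmt"
	"log"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	timeout := 2 * time.Second
	// Assumed signature: DialPipe(path string, timeout *time.Duration) (net.Conn, error).
	conn, err := winio.DialPipe(`\\.\pipe\docker_engine`, &timeout) // placeholder pipe name
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fmt.Fprintln(conn, "hello over a named pipe") // usable like any other net.Conn
}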


@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -0,0 +1,342 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tar implements access to tar archives.
// It aims to cover most of the variations, including those produced
// by GNU and BSD tars.
//
// References:
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
// http://www.gnu.org/software/tar/manual/html_node/Standard.html
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
package tar
import (
"bytes"
"errors"
"fmt"
"os"
"path"
"time"
)
const (
blockSize = 512
// Types
TypeReg = '0' // regular file
TypeRegA = '\x00' // regular file
TypeLink = '1' // hard link
TypeSymlink = '2' // symbolic link
TypeChar = '3' // character device node
TypeBlock = '4' // block device node
TypeDir = '5' // directory
TypeFifo = '6' // fifo node
TypeCont = '7' // reserved
TypeXHeader = 'x' // extended header
TypeXGlobalHeader = 'g' // global extended header
TypeGNULongName = 'L' // Next file has a long name
TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
TypeGNUSparse = 'S' // sparse file
)
// A Header represents a single header in a tar archive.
// Some fields may not be populated.
type Header struct {
Name string // name of header file entry
Mode int64 // permission and mode bits
Uid int // user id of owner
Gid int // group id of owner
Size int64 // length in bytes
ModTime time.Time // modified time
Typeflag byte // type of header entry
Linkname string // target name of link
Uname string // user name of owner
Gname string // group name of owner
Devmajor int64 // major number of character or block device
Devminor int64 // minor number of character or block device
AccessTime time.Time // access time
ChangeTime time.Time // status change time
Xattrs map[string]string
Winheaders map[string]string
}
// File name constants from the tar spec.
const (
fileNameSize = 100 // Maximum number of bytes in a standard tar name.
fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
)
// FileInfo returns an os.FileInfo for the Header.
func (h *Header) FileInfo() os.FileInfo {
return headerFileInfo{h}
}
// headerFileInfo implements os.FileInfo.
type headerFileInfo struct {
h *Header
}
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
func (fi headerFileInfo) Sys() interface{} { return fi.h }
// Name returns the base name of the file.
func (fi headerFileInfo) Name() string {
if fi.IsDir() {
return path.Base(path.Clean(fi.h.Name))
}
return path.Base(fi.h.Name)
}
// Mode returns the permission and mode bits for the headerFileInfo.
func (fi headerFileInfo) Mode() (mode os.FileMode) {
// Set file permission bits.
mode = os.FileMode(fi.h.Mode).Perm()
// Set setuid, setgid and sticky bits.
if fi.h.Mode&c_ISUID != 0 {
// setuid
mode |= os.ModeSetuid
}
if fi.h.Mode&c_ISGID != 0 {
// setgid
mode |= os.ModeSetgid
}
if fi.h.Mode&c_ISVTX != 0 {
// sticky
mode |= os.ModeSticky
}
// Set file mode bits.
// clear perm, setuid, setgid and sticky bits.
m := os.FileMode(fi.h.Mode) &^ 07777
if m == c_ISDIR {
// directory
mode |= os.ModeDir
}
if m == c_ISFIFO {
// named pipe (FIFO)
mode |= os.ModeNamedPipe
}
if m == c_ISLNK {
// symbolic link
mode |= os.ModeSymlink
}
if m == c_ISBLK {
// device file
mode |= os.ModeDevice
}
if m == c_ISCHR {
// Unix character device
mode |= os.ModeDevice
mode |= os.ModeCharDevice
}
if m == c_ISSOCK {
// Unix domain socket
mode |= os.ModeSocket
}
switch fi.h.Typeflag {
case TypeSymlink:
// symbolic link
mode |= os.ModeSymlink
case TypeChar:
// character device node
mode |= os.ModeDevice
mode |= os.ModeCharDevice
case TypeBlock:
// block device node
mode |= os.ModeDevice
case TypeDir:
// directory
mode |= os.ModeDir
case TypeFifo:
// fifo node
mode |= os.ModeNamedPipe
}
return mode
}
// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, h *Header) error
// Mode constants from the tar spec.
const (
c_ISUID = 04000 // Set uid
c_ISGID = 02000 // Set gid
c_ISVTX = 01000 // Save text (sticky bit)
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
c_ISLNK = 0120000 // Symbolic link
c_ISBLK = 060000 // Block special file
c_ISCHR = 020000 // Character special file
c_ISSOCK = 0140000 // Socket
)
// Keywords for the PAX Extended Header
const (
paxAtime = "atime"
paxCharset = "charset"
paxComment = "comment"
paxCtime = "ctime" // please note that ctime is not a valid pax header.
paxGid = "gid"
paxGname = "gname"
paxLinkpath = "linkpath"
paxMtime = "mtime"
paxPath = "path"
paxSize = "size"
paxUid = "uid"
paxUname = "uname"
paxXattr = "SCHILY.xattr."
paxWindows = "MSWINDOWS."
paxNone = ""
)
// FileInfoHeader creates a partially-populated Header from fi.
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
// Because os.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
// of the returned header to provide the full path name of the file.
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
if fi == nil {
return nil, errors.New("tar: FileInfo is nil")
}
fm := fi.Mode()
h := &Header{
Name: fi.Name(),
ModTime: fi.ModTime(),
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
}
switch {
case fm.IsRegular():
h.Mode |= c_ISREG
h.Typeflag = TypeReg
h.Size = fi.Size()
case fi.IsDir():
h.Typeflag = TypeDir
h.Mode |= c_ISDIR
h.Name += "/"
case fm&os.ModeSymlink != 0:
h.Typeflag = TypeSymlink
h.Mode |= c_ISLNK
h.Linkname = link
case fm&os.ModeDevice != 0:
if fm&os.ModeCharDevice != 0 {
h.Mode |= c_ISCHR
h.Typeflag = TypeChar
} else {
h.Mode |= c_ISBLK
h.Typeflag = TypeBlock
}
case fm&os.ModeNamedPipe != 0:
h.Typeflag = TypeFifo
h.Mode |= c_ISFIFO
case fm&os.ModeSocket != 0:
h.Mode |= c_ISSOCK
default:
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
}
if fm&os.ModeSetuid != 0 {
h.Mode |= c_ISUID
}
if fm&os.ModeSetgid != 0 {
h.Mode |= c_ISGID
}
if fm&os.ModeSticky != 0 {
h.Mode |= c_ISVTX
}
// If possible, populate additional fields from OS-specific
// FileInfo fields.
if sys, ok := fi.Sys().(*Header); ok {
// This FileInfo came from a Header (not the OS). Use the
// original Header to populate all remaining fields.
h.Uid = sys.Uid
h.Gid = sys.Gid
h.Uname = sys.Uname
h.Gname = sys.Gname
h.AccessTime = sys.AccessTime
h.ChangeTime = sys.ChangeTime
if sys.Xattrs != nil {
h.Xattrs = make(map[string]string)
for k, v := range sys.Xattrs {
h.Xattrs[k] = v
}
}
if sys.Typeflag == TypeLink {
// hard link
h.Typeflag = TypeLink
h.Size = 0
h.Linkname = sys.Linkname
}
}
if sysStat != nil {
return h, sysStat(fi, h)
}
return h, nil
}
var zeroBlock = make([]byte, blockSize)
// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
// We compute and return both.
func checksum(header []byte) (unsigned int64, signed int64) {
for i := 0; i < len(header); i++ {
if i == 148 {
// The chksum field (header[148:156]) is special: it should be treated as space bytes.
unsigned += ' ' * 8
signed += ' ' * 8
i += 7
continue
}
unsigned += int64(header[i])
signed += int64(int8(header[i]))
}
return
}
type slicer []byte
func (sp *slicer) next(n int) (b []byte) {
s := *sp
b, *sp = s[0:n], s[n:]
return
}
func isASCII(s string) bool {
for _, c := range s {
if c >= 0x80 {
return false
}
}
return true
}
func toASCII(s string) string {
if isASCII(s) {
return s
}
var buf bytes.Buffer
for _, c := range s {
if c < 0x80 {
buf.WriteByte(byte(c))
}
}
return buf.String()
}
// isHeaderOnlyType checks if the given type flag is of the type that has no
// data section even if a size is specified.
func isHeaderOnlyType(flag byte) bool {
switch flag {
case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
return true
default:
return false
}
}
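
To make the Header/FileInfo mapping above concrete, here is a small sketch (not part of the vendored file) that builds a Header from an os.FileInfo and reads the translated mode bits back. It imports the standard library's archive/tar, which this vendored copy mirrors apart from the added Winheaders field; the file name is a placeholder.

package main

import (
	"archive/tar"
	"fmt"
	"log"
	"os"
)

func main() {
	fi, err := os.Stat("example.txt") // any existing file; placeholder name
	if err != nil {
		log.Fatal(err)
	}

	// FileInfoHeader copies size, mode, and mtime; Name is only the base name.
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		log.Fatal(err)
	}

	// Header.FileInfo round-trips through the c_IS* constants and the
	// Typeflag handling shown in Mode() above.
	fmt.Println(hdr.Name, hdr.Size, hdr.FileInfo().Mode())
}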


@@ -0,0 +1,996 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
// TODO(dsymonds):
// - pax extensions
import (
"bytes"
"errors"
"io"
"io/ioutil"
"math"
"os"
"strconv"
"strings"
"time"
)
var (
ErrHeader = errors.New("archive/tar: invalid tar header")
)
const maxNanoSecondIntSize = 9
// A Reader provides sequential access to the contents of a tar archive.
// A tar archive consists of a sequence of files.
// The Next method advances to the next file in the archive (including the first),
// and then it can be treated as an io.Reader to access the file's data.
type Reader struct {
r io.Reader
err error
pad int64 // amount of padding (ignored) after current file entry
curr numBytesReader // reader for current file entry
hdrBuff [blockSize]byte // buffer to use in readHeader
}
type parser struct {
err error // Last error seen
}
// A numBytesReader is an io.Reader with a numBytes method, returning the number
// of bytes remaining in the underlying encoded data.
type numBytesReader interface {
io.Reader
numBytes() int64
}
// A regFileReader is a numBytesReader for reading file data from a tar archive.
type regFileReader struct {
r io.Reader // underlying reader
nb int64 // number of unread bytes for current file entry
}
// A sparseFileReader is a numBytesReader for reading sparse file data from a
// tar archive.
type sparseFileReader struct {
rfr numBytesReader // Reads the sparse-encoded file data
sp []sparseEntry // The sparse map for the file
pos int64 // Keeps track of file position
total int64 // Total size of the file
}
// A sparseEntry holds a single entry in a sparse file's sparse map.
//
// Sparse files are represented using a series of sparseEntrys.
// Despite the name, a sparseEntry represents an actual data fragment that
// references data found in the underlying archive stream. All regions not
// covered by a sparseEntry are logically filled with zeros.
//
// For example, if the underlying raw file contains the 10-byte data:
// var compactData = "abcdefgh"
//
// And the sparse map has the following entries:
// var sp = []sparseEntry{
// {offset: 2, numBytes: 5} // Data fragment for [2..7]
// {offset: 18, numBytes: 3} // Data fragment for [18..21]
// }
//
// Then the content of the resulting sparse file with a "real" size of 25 is:
// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
type sparseEntry struct {
offset int64 // Starting position of the fragment
numBytes int64 // Length of the fragment
}
// Keywords for GNU sparse files in a PAX extended header
const (
paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
paxGNUSparseOffset = "GNU.sparse.offset"
paxGNUSparseNumBytes = "GNU.sparse.numbytes"
paxGNUSparseMap = "GNU.sparse.map"
paxGNUSparseName = "GNU.sparse.name"
paxGNUSparseMajor = "GNU.sparse.major"
paxGNUSparseMinor = "GNU.sparse.minor"
paxGNUSparseSize = "GNU.sparse.size"
paxGNUSparseRealSize = "GNU.sparse.realsize"
)
// Keywords for old GNU sparse headers
const (
oldGNUSparseMainHeaderOffset = 386
oldGNUSparseMainHeaderIsExtendedOffset = 482
oldGNUSparseMainHeaderNumEntries = 4
oldGNUSparseExtendedHeaderIsExtendedOffset = 504
oldGNUSparseExtendedHeaderNumEntries = 21
oldGNUSparseOffsetSize = 12
oldGNUSparseNumBytesSize = 12
)
// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
// Next advances to the next entry in the tar archive.
//
// io.EOF is returned at the end of the input.
func (tr *Reader) Next() (*Header, error) {
if tr.err != nil {
return nil, tr.err
}
var hdr *Header
var extHdrs map[string]string
// Externally, Next iterates through the tar archive as if it is a series of
// files. Internally, the tar format often uses fake "files" to add meta
// data that describes the next file. These meta data "files" should not
// normally be visible to the outside. As such, this loop iterates through
// one or more "header files" until it finds a "normal file".
loop:
for {
tr.err = tr.skipUnread()
if tr.err != nil {
return nil, tr.err
}
hdr = tr.readHeader()
if tr.err != nil {
return nil, tr.err
}
// Check for PAX/GNU special headers and files.
switch hdr.Typeflag {
case TypeXHeader:
extHdrs, tr.err = parsePAX(tr)
if tr.err != nil {
return nil, tr.err
}
continue loop // This is a meta header affecting the next header
case TypeGNULongName, TypeGNULongLink:
var realname []byte
realname, tr.err = ioutil.ReadAll(tr)
if tr.err != nil {
return nil, tr.err
}
// Convert GNU extensions to use PAX headers.
if extHdrs == nil {
extHdrs = make(map[string]string)
}
var p parser
switch hdr.Typeflag {
case TypeGNULongName:
extHdrs[paxPath] = p.parseString(realname)
case TypeGNULongLink:
extHdrs[paxLinkpath] = p.parseString(realname)
}
if p.err != nil {
tr.err = p.err
return nil, tr.err
}
continue loop // This is a meta header affecting the next header
default:
mergePAX(hdr, extHdrs)
// Check for a PAX format sparse file
sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
if err != nil {
tr.err = err
return nil, err
}
if sp != nil {
// Current file is a PAX format GNU sparse file.
// Set the current file reader to a sparse file reader.
tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
if tr.err != nil {
return nil, tr.err
}
}
break loop // This is a file, so stop
}
}
return hdr, nil
}
// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
// be treated as a regular file.
func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
var sparseFormat string
// Check for sparse format indicators
major, majorOk := headers[paxGNUSparseMajor]
minor, minorOk := headers[paxGNUSparseMinor]
sparseName, sparseNameOk := headers[paxGNUSparseName]
_, sparseMapOk := headers[paxGNUSparseMap]
sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
// Identify which, if any, sparse format applies from which PAX headers are set
if majorOk && minorOk {
sparseFormat = major + "." + minor
} else if sparseNameOk && sparseMapOk {
sparseFormat = "0.1"
} else if sparseSizeOk {
sparseFormat = "0.0"
} else {
// Not a PAX format GNU sparse file.
return nil, nil
}
// Check for unknown sparse format
if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
return nil, nil
}
// Update hdr from GNU sparse PAX headers
if sparseNameOk {
hdr.Name = sparseName
}
if sparseSizeOk {
realSize, err := strconv.ParseInt(sparseSize, 10, 0)
if err != nil {
return nil, ErrHeader
}
hdr.Size = realSize
} else if sparseRealSizeOk {
realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
if err != nil {
return nil, ErrHeader
}
hdr.Size = realSize
}
// Set up the sparse map, according to the particular sparse format in use
var sp []sparseEntry
var err error
switch sparseFormat {
case "0.0", "0.1":
sp, err = readGNUSparseMap0x1(headers)
case "1.0":
sp, err = readGNUSparseMap1x0(tr.curr)
}
return sp, err
}
// mergePAX merges well known headers according to PAX standard.
// In general headers with the same name as those found
// in the header struct overwrite those found in the header
// struct with higher precision or longer values. Esp. useful
// for name and linkname fields.
func mergePAX(hdr *Header, headers map[string]string) error {
for k, v := range headers {
switch k {
case paxPath:
hdr.Name = v
case paxLinkpath:
hdr.Linkname = v
case paxGname:
hdr.Gname = v
case paxUname:
hdr.Uname = v
case paxUid:
uid, err := strconv.ParseInt(v, 10, 0)
if err != nil {
return err
}
hdr.Uid = int(uid)
case paxGid:
gid, err := strconv.ParseInt(v, 10, 0)
if err != nil {
return err
}
hdr.Gid = int(gid)
case paxAtime:
t, err := parsePAXTime(v)
if err != nil {
return err
}
hdr.AccessTime = t
case paxMtime:
t, err := parsePAXTime(v)
if err != nil {
return err
}
hdr.ModTime = t
case paxCtime:
t, err := parsePAXTime(v)
if err != nil {
return err
}
hdr.ChangeTime = t
case paxSize:
size, err := strconv.ParseInt(v, 10, 0)
if err != nil {
return err
}
hdr.Size = int64(size)
default:
if strings.HasPrefix(k, paxXattr) {
if hdr.Xattrs == nil {
hdr.Xattrs = make(map[string]string)
}
hdr.Xattrs[k[len(paxXattr):]] = v
} else if strings.HasPrefix(k, paxWindows) {
if hdr.Winheaders == nil {
hdr.Winheaders = make(map[string]string)
}
hdr.Winheaders[k[len(paxWindows):]] = v
}
}
}
return nil
}
// parsePAXTime takes a string of the form %d.%d as described in
// the PAX specification.
func parsePAXTime(t string) (time.Time, error) {
buf := []byte(t)
pos := bytes.IndexByte(buf, '.')
var seconds, nanoseconds int64
var err error
if pos == -1 {
seconds, err = strconv.ParseInt(t, 10, 0)
if err != nil {
return time.Time{}, err
}
} else {
seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
if err != nil {
return time.Time{}, err
}
nano_buf := string(buf[pos+1:])
// Pad as needed before converting to a decimal.
// For example .030 -> .030000000 -> 30000000 nanoseconds
if len(nano_buf) < maxNanoSecondIntSize {
// Right pad
nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
} else if len(nano_buf) > maxNanoSecondIntSize {
// Right truncate
nano_buf = nano_buf[:maxNanoSecondIntSize]
}
nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
if err != nil {
return time.Time{}, err
}
}
ts := time.Unix(seconds, nanoseconds)
return ts, nil
}
// parsePAX parses PAX headers.
// If an extended header (type 'x') is invalid, ErrHeader is returned
func parsePAX(r io.Reader) (map[string]string, error) {
buf, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
sbuf := string(buf)
// For GNU PAX sparse format 0.0 support.
// This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
var sparseMap bytes.Buffer
headers := make(map[string]string)
// Each record is constructed as
// "%d %s=%s\n", length, keyword, value
for len(sbuf) > 0 {
key, value, residual, err := parsePAXRecord(sbuf)
if err != nil {
return nil, ErrHeader
}
sbuf = residual
keyStr := string(key)
if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
// GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
sparseMap.WriteString(value)
sparseMap.Write([]byte{','})
} else {
// Normal key. Set the value in the headers map.
headers[keyStr] = string(value)
}
}
if sparseMap.Len() != 0 {
// Add sparse info to headers, chopping off the extra comma
sparseMap.Truncate(sparseMap.Len() - 1)
headers[paxGNUSparseMap] = sparseMap.String()
}
return headers, nil
}
// parsePAXRecord parses the input PAX record string into a key-value pair.
// If parsing is successful, it will slice off the currently read record and
// return the remainder as r.
//
// A PAX record is of the following form:
// "%d %s=%s\n" % (size, key, value)
func parsePAXRecord(s string) (k, v, r string, err error) {
// The size field ends at the first space.
sp := strings.IndexByte(s, ' ')
if sp == -1 {
return "", "", s, ErrHeader
}
// Parse the first token as a decimal integer.
n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
if perr != nil || n < 5 || int64(len(s)) < n {
return "", "", s, ErrHeader
}
// Extract everything between the space and the final newline.
rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
if nl != "\n" {
return "", "", s, ErrHeader
}
// The first equals separates the key from the value.
eq := strings.IndexByte(rec, '=')
if eq == -1 {
return "", "", s, ErrHeader
}
return rec[:eq], rec[eq+1:], rem, nil
}
// parseString parses bytes as a NUL-terminated C-style string.
// If a NUL byte is not found then the whole slice is returned as a string.
func (*parser) parseString(b []byte) string {
n := 0
for n < len(b) && b[n] != 0 {
n++
}
return string(b[0:n])
}
// parseNumeric parses the input as being encoded in either base-256 or octal.
// This function may return negative numbers.
// If parsing fails or an integer overflow occurs, err will be set.
func (p *parser) parseNumeric(b []byte) int64 {
// Check for base-256 (binary) format first.
// If the first bit is set, then all following bits constitute a two's
// complement encoded number in big-endian byte order.
if len(b) > 0 && b[0]&0x80 != 0 {
// Handling negative numbers relies on the following identity:
// -a-1 == ^a
//
// If the number is negative, we use an inversion mask to invert the
// data bytes and treat the value as an unsigned number.
var inv byte // 0x00 if positive or zero, 0xff if negative
if b[0]&0x40 != 0 {
inv = 0xff
}
var x uint64
for i, c := range b {
c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
if i == 0 {
c &= 0x7f // Ignore signal bit in first byte
}
if (x >> 56) > 0 {
p.err = ErrHeader // Integer overflow
return 0
}
x = x<<8 | uint64(c)
}
if (x >> 63) > 0 {
p.err = ErrHeader // Integer overflow
return 0
}
if inv == 0xff {
return ^int64(x)
}
return int64(x)
}
// Normal case is base-8 (octal) format.
return p.parseOctal(b)
}
func (p *parser) parseOctal(b []byte) int64 {
// Because unused fields are filled with NULs, we need
// to skip leading NULs. Fields may also be padded with
// spaces or NULs.
// So we remove leading and trailing NULs and spaces to
// be sure.
b = bytes.Trim(b, " \x00")
if len(b) == 0 {
return 0
}
x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
if perr != nil {
p.err = ErrHeader
}
return int64(x)
}
// skipUnread skips any unread bytes in the existing file entry, as well as any
// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is
// encountered in the data portion; it is okay to hit io.EOF in the padding.
//
// Note that this function still works properly even when sparse files are being
// used since numBytes returns the bytes remaining in the underlying io.Reader.
func (tr *Reader) skipUnread() error {
dataSkip := tr.numBytes() // Number of data bytes to skip
totalSkip := dataSkip + tr.pad // Total number of bytes to skip
tr.curr, tr.pad = nil, 0
// If possible, Seek to the last byte before the end of the data section.
// Do this because Seek is often lazy about reporting errors; this will mask
// the fact that the tar stream may be truncated. We can rely on the
// io.CopyN done shortly afterwards to trigger any IO errors.
var seekSkipped int64 // Number of bytes skipped via Seek
if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 {
// Not all io.Seeker can actually Seek. For example, os.Stdin implements
// io.Seeker, but calling Seek always returns an error and performs
// no action. Thus, we try an innocent seek to the current position
// to see if Seek is really supported.
pos1, err := sr.Seek(0, os.SEEK_CUR)
if err == nil {
// Seek seems supported, so perform the real Seek.
pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR)
if err != nil {
tr.err = err
return tr.err
}
seekSkipped = pos2 - pos1
}
}
var copySkipped int64 // Number of bytes skipped via CopyN
copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip {
tr.err = io.ErrUnexpectedEOF
}
return tr.err
}
func (tr *Reader) verifyChecksum(header []byte) bool {
if tr.err != nil {
return false
}
var p parser
given := p.parseOctal(header[148:156])
unsigned, signed := checksum(header)
return p.err == nil && (given == unsigned || given == signed)
}
// readHeader reads the next block header and assumes that the underlying reader
// is already aligned to a block boundary.
//
// The err will be set to io.EOF only when one of the following occurs:
// * Exactly 0 bytes are read and EOF is hit.
// * Exactly 1 block of zeros is read and EOF is hit.
// * At least 2 blocks of zeros are read.
func (tr *Reader) readHeader() *Header {
header := tr.hdrBuff[:]
copy(header, zeroBlock)
if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
return nil // io.EOF is okay here
}
// Two blocks of zero bytes marks the end of the archive.
if bytes.Equal(header, zeroBlock[0:blockSize]) {
if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
return nil // io.EOF is okay here
}
if bytes.Equal(header, zeroBlock[0:blockSize]) {
tr.err = io.EOF
} else {
tr.err = ErrHeader // zero block and then non-zero block
}
return nil
}
if !tr.verifyChecksum(header) {
tr.err = ErrHeader
return nil
}
// Unpack
var p parser
hdr := new(Header)
s := slicer(header)
hdr.Name = p.parseString(s.next(100))
hdr.Mode = p.parseNumeric(s.next(8))
hdr.Uid = int(p.parseNumeric(s.next(8)))
hdr.Gid = int(p.parseNumeric(s.next(8)))
hdr.Size = p.parseNumeric(s.next(12))
hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0)
s.next(8) // chksum
hdr.Typeflag = s.next(1)[0]
hdr.Linkname = p.parseString(s.next(100))
// The remainder of the header depends on the value of magic.
// The original (v7) version of tar had no explicit magic field,
// so its magic bytes, like the rest of the block, are NULs.
magic := string(s.next(8)) // contains version field as well.
var format string
switch {
case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
if string(header[508:512]) == "tar\x00" {
format = "star"
} else {
format = "posix"
}
case magic == "ustar \x00": // old GNU tar
format = "gnu"
}
switch format {
case "posix", "gnu", "star":
hdr.Uname = p.parseString(s.next(32))
hdr.Gname = p.parseString(s.next(32))
devmajor := s.next(8)
devminor := s.next(8)
if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
hdr.Devmajor = p.parseNumeric(devmajor)
hdr.Devminor = p.parseNumeric(devminor)
}
var prefix string
switch format {
case "posix", "gnu":
prefix = p.parseString(s.next(155))
case "star":
prefix = p.parseString(s.next(131))
hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0)
hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0)
}
if len(prefix) > 0 {
hdr.Name = prefix + "/" + hdr.Name
}
}
if p.err != nil {
tr.err = p.err
return nil
}
nb := hdr.Size
if isHeaderOnlyType(hdr.Typeflag) {
nb = 0
}
if nb < 0 {
tr.err = ErrHeader
return nil
}
// Set the current file reader.
tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
tr.curr = &regFileReader{r: tr.r, nb: nb}
// Check for old GNU sparse format entry.
if hdr.Typeflag == TypeGNUSparse {
// Get the real size of the file.
hdr.Size = p.parseNumeric(header[483:495])
if p.err != nil {
tr.err = p.err
return nil
}
// Read the sparse map.
sp := tr.readOldGNUSparseMap(header)
if tr.err != nil {
return nil
}
// Current file is a GNU sparse file. Update the current file reader.
tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
if tr.err != nil {
return nil
}
}
return hdr
}
// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
// then one or more extension headers are used to store the rest of the sparse map.
func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
var p parser
isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
spCap := oldGNUSparseMainHeaderNumEntries
if isExtended {
spCap += oldGNUSparseExtendedHeaderNumEntries
}
sp := make([]sparseEntry, 0, spCap)
s := slicer(header[oldGNUSparseMainHeaderOffset:])
// Read the four entries from the main tar header
for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
if p.err != nil {
tr.err = p.err
return nil
}
if offset == 0 && numBytes == 0 {
break
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
for isExtended {
// There are more entries. Read an extension header and parse its entries.
sparseHeader := make([]byte, blockSize)
if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
return nil
}
isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
s = slicer(sparseHeader)
for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
if p.err != nil {
tr.err = p.err
return nil
}
if offset == 0 && numBytes == 0 {
break
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
}
return sp
}
// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
// version 1.0. The format of the sparse map consists of a series of
// newline-terminated numeric fields. The first field is the number of entries
// and is always present. Following this are the entries, consisting of two
// fields (offset, numBytes). This function must stop reading at the end
// boundary of the block containing the last newline.
//
// Note that the GNU manual says that numeric values should be encoded in octal
// format. However, the GNU tar utility itself outputs these values in decimal.
// As such, this library treats values as being encoded in decimal.
func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
var cntNewline int64
var buf bytes.Buffer
var blk = make([]byte, blockSize)
// feedTokens copies data in numBlock chunks from r into buf until there are
// at least cnt newlines in buf. It will not read more blocks than needed.
var feedTokens = func(cnt int64) error {
for cntNewline < cnt {
if _, err := io.ReadFull(r, blk); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return err
}
buf.Write(blk)
for _, c := range blk {
if c == '\n' {
cntNewline++
}
}
}
return nil
}
// nextToken gets the next token delimited by a newline. This assumes that
// at least one newline exists in the buffer.
var nextToken = func() string {
cntNewline--
tok, _ := buf.ReadString('\n')
return tok[:len(tok)-1] // Cut off newline
}
// Parse for the number of entries.
// Use integer overflow resistant math to check this.
if err := feedTokens(1); err != nil {
return nil, err
}
numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
return nil, ErrHeader
}
// Parse for all member entries.
// numEntries is trusted after this since a potential attacker must have
// committed resources proportional to what this library used.
if err := feedTokens(2 * numEntries); err != nil {
return nil, err
}
sp := make([]sparseEntry, 0, numEntries)
for i := int64(0); i < numEntries; i++ {
offset, err := strconv.ParseInt(nextToken(), 10, 64)
if err != nil {
return nil, ErrHeader
}
numBytes, err := strconv.ParseInt(nextToken(), 10, 64)
if err != nil {
return nil, ErrHeader
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
return sp, nil
}
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
// version 0.1. The sparse map is stored in the PAX headers.
func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
// Get number of entries.
// Use integer overflow resistant math to check this.
numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
return nil, ErrHeader
}
// There should be two numbers in sparseMap for each entry.
sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
if int64(len(sparseMap)) != 2*numEntries {
return nil, ErrHeader
}
// Loop through the entries in the sparse map.
// numEntries is trusted now.
sp := make([]sparseEntry, 0, numEntries)
for i := int64(0); i < numEntries; i++ {
offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
if err != nil {
return nil, ErrHeader
}
numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
if err != nil {
return nil, ErrHeader
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
return sp, nil
}
// numBytes returns the number of bytes left to read in the current file's entry
// in the tar archive, or 0 if there is no current file.
func (tr *Reader) numBytes() int64 {
if tr.curr == nil {
// No current file, so no bytes
return 0
}
return tr.curr.numBytes()
}
// Read reads from the current entry in the tar archive.
// It returns 0, io.EOF when it reaches the end of that entry,
// until Next is called to advance to the next entry.
//
// Calling Read on special types like TypeLink, TypeSymLink, TypeChar,
// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
// the Header.Size claims.
func (tr *Reader) Read(b []byte) (n int, err error) {
if tr.err != nil {
return 0, tr.err
}
if tr.curr == nil {
return 0, io.EOF
}
n, err = tr.curr.Read(b)
if err != nil && err != io.EOF {
tr.err = err
}
return
}
func (rfr *regFileReader) Read(b []byte) (n int, err error) {
if rfr.nb == 0 {
// file consumed
return 0, io.EOF
}
if int64(len(b)) > rfr.nb {
b = b[0:rfr.nb]
}
n, err = rfr.r.Read(b)
rfr.nb -= int64(n)
if err == io.EOF && rfr.nb > 0 {
err = io.ErrUnexpectedEOF
}
return
}
// numBytes returns the number of bytes left to read in the file's data in the tar archive.
func (rfr *regFileReader) numBytes() int64 {
return rfr.nb
}
// newSparseFileReader creates a new sparseFileReader, but validates all of the
// sparse entries before doing so.
func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
if total < 0 {
return nil, ErrHeader // Total size cannot be negative
}
// Validate all sparse entries. These are the same checks as performed by
// the BSD tar utility.
for i, s := range sp {
switch {
case s.offset < 0 || s.numBytes < 0:
return nil, ErrHeader // Negative values are never okay
case s.offset > math.MaxInt64-s.numBytes:
return nil, ErrHeader // Integer overflow with large length
case s.offset+s.numBytes > total:
return nil, ErrHeader // Region extends beyond the "real" size
case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
return nil, ErrHeader // Regions can't overlap and must be in order
}
}
return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
}
// readHole reads a sparse hole ending at endOffset.
func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
n64 := endOffset - sfr.pos
if n64 > int64(len(b)) {
n64 = int64(len(b))
}
n := int(n64)
for i := 0; i < n; i++ {
b[i] = 0
}
sfr.pos += n64
return n
}
// Read reads the sparse file data in expanded form.
func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
// Skip past all empty fragments.
for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
sfr.sp = sfr.sp[1:]
}
// If there are no more fragments, then it is possible that there
// is one last sparse hole.
if len(sfr.sp) == 0 {
// This behavior matches the BSD tar utility.
// However, GNU tar stops returning data even if sfr.total is unmet.
if sfr.pos < sfr.total {
return sfr.readHole(b, sfr.total), nil
}
return 0, io.EOF
}
// In front of a data fragment, so read a hole.
if sfr.pos < sfr.sp[0].offset {
return sfr.readHole(b, sfr.sp[0].offset), nil
}
// In a data fragment, so read from it.
// This math is overflow free since we verify that offset and numBytes can
// be safely added when creating the sparseFileReader.
endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
bytesLeft := endPos - sfr.pos // Bytes left in fragment
if int64(len(b)) > bytesLeft {
b = b[:bytesLeft]
}
n, err = sfr.rfr.Read(b)
sfr.pos += int64(n)
if err == io.EOF {
if sfr.pos < endPos {
err = io.ErrUnexpectedEOF // There was supposed to be more data
} else if sfr.pos < sfr.total {
err = nil // There is still an implicit sparse hole at the end
}
}
if sfr.pos == endPos {
sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
}
return n, err
}
// numBytes returns the number of bytes left to read in the sparse file's
// sparse-encoded data in the tar archive.
func (sfr *sparseFileReader) numBytes() int64 {
return sfr.rfr.numBytes()
}
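
As a usage note (not part of the vendored file): the Reader above is driven by alternating Next and Read calls, and both PAX/GNU meta headers and sparse maps are resolved inside Next, so callers only ever see fully expanded file data. A minimal sketch against the standard archive/tar API, which this file mirrors; the archive path is a placeholder.

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
)

func main() {
	f, err := os.Open("archive.tar") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // end of archive
		}
		if err != nil {
			log.Fatal(err)
		}
		// tr now yields the (sparse-expanded, if applicable) contents of hdr.
		n, err := io.Copy(ioutil.Discard, tr)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: %d bytes\n", hdr.Name, n)
	}
}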


@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux dragonfly openbsd solaris
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atim.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctim.Unix())
}


@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd netbsd
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atimespec.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctimespec.Unix())
}


@@ -0,0 +1,32 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
package tar
import (
"os"
"syscall"
)
func init() {
sysStat = statUnix
}
func statUnix(fi os.FileInfo, h *Header) error {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
h.Uid = int(sys.Uid)
h.Gid = int(sys.Gid)
// TODO(bradfitz): populate username & group. os/user
// doesn't cache LookupId lookups, and lacks group
// lookup functions.
h.AccessTime = statAtime(sys)
h.ChangeTime = statCtime(sys)
// TODO(bradfitz): major/minor device numbers?
return nil
}


@@ -0,0 +1,419 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
// TODO(dsymonds):
// - catch more errors (no first header, etc.)
import (
"bytes"
"errors"
"fmt"
"io"
"path"
"sort"
"strconv"
"strings"
"time"
)
var (
ErrWriteTooLong = errors.New("archive/tar: write too long")
ErrFieldTooLong = errors.New("archive/tar: header field too long")
ErrWriteAfterClose = errors.New("archive/tar: write after close")
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
)
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
// A tar archive consists of a sequence of files.
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
// writing at most hdr.Size bytes in total.
type Writer struct {
w io.Writer
err error
nb int64 // number of unwritten bytes for current file entry
pad int64 // amount of padding to write after current file entry
closed bool
usedBinary bool // whether the binary numeric field extension was used
preferPax bool // use pax header instead of binary numeric header
hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
}
type formatter struct {
err error // Last error seen
}
// NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
// Flush finishes writing the current file (optional).
func (tw *Writer) Flush() error {
if tw.nb > 0 {
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
return tw.err
}
n := tw.nb + tw.pad
for n > 0 && tw.err == nil {
nr := n
if nr > blockSize {
nr = blockSize
}
var nw int
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
n -= int64(nw)
}
tw.nb = 0
tw.pad = 0
return tw.err
}
// Write s into b, terminating it with a NUL if there is room.
func (f *formatter) formatString(b []byte, s string) {
if len(s) > len(b) {
f.err = ErrFieldTooLong
return
}
ascii := toASCII(s)
copy(b, ascii)
if len(ascii) < len(b) {
b[len(ascii)] = 0
}
}
// Encode x as an octal ASCII string and write it into b with leading zeros.
func (f *formatter) formatOctal(b []byte, x int64) {
s := strconv.FormatInt(x, 8)
// leading zeros, but leave room for a NUL.
for len(s)+1 < len(b) {
s = "0" + s
}
f.formatString(b, s)
}
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
// encoding. Unlike octal encoding, base-256 encoding does not require that the
// string ends with a NUL character. Thus, all n bytes are available for output.
//
// If operating in binary mode, this assumes strict GNU binary mode; which means
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
// equivalent to the sign bit in two's complement form.
func fitsInBase256(n int, x int64) bool {
var binBits = uint(n-1) * 8
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
}
// Write x into b, as binary (GNUtar/star extension).
func (f *formatter) formatNumeric(b []byte, x int64) {
if fitsInBase256(len(b), x) {
for i := len(b) - 1; i >= 0; i-- {
b[i] = byte(x)
x >>= 8
}
b[0] |= 0x80 // Highest bit indicates binary format
return
}
f.formatOctal(b, 0) // Last resort, just write zero
f.err = ErrFieldTooLong
}
var (
minTime = time.Unix(0, 0)
// There is room for 11 octal digits (33 bits) of mtime.
maxTime = minTime.Add((1<<33 - 1) * time.Second)
)
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
func (tw *Writer) WriteHeader(hdr *Header) error {
return tw.writeHeader(hdr, true)
}
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
// allowPax is false when this method is called internally by writePAXHeader,
// which lets it suppress writing a second pax header.
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
if tw.closed {
return ErrWriteAfterClose
}
if tw.err == nil {
tw.Flush()
}
if tw.err != nil {
return tw.err
}
// a map to hold pax header records, if any are needed
paxHeaders := make(map[string]string)
// TODO(shanemhansen): we might want to use PAX headers for
// subsecond time resolution, but for now let's just capture
// too long fields or non ascii characters
var f formatter
var header []byte
// We need to select which scratch buffer to use carefully,
// since this method is called recursively to write PAX headers.
// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
// already being used by the non-recursive call, so we must use paxHdrBuff.
header = tw.hdrBuff[:]
if !allowPax {
header = tw.paxHdrBuff[:]
}
copy(header, zeroBlock)
s := slicer(header)
// Wrappers around formatter that automatically sets paxHeaders if the
// argument extends beyond the capacity of the input byte slice.
var formatString = func(b []byte, s string, paxKeyword string) {
needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
if needsPaxHeader {
paxHeaders[paxKeyword] = s
return
}
f.formatString(b, s)
}
var formatNumeric = func(b []byte, x int64, paxKeyword string) {
// Try octal first.
s := strconv.FormatInt(x, 8)
if len(s) < len(b) {
f.formatOctal(b, x)
return
}
// If it is too long for octal, and PAX is preferred, use a PAX header.
if paxKeyword != paxNone && tw.preferPax {
f.formatOctal(b, 0)
s := strconv.FormatInt(x, 10)
paxHeaders[paxKeyword] = s
return
}
tw.usedBinary = true
f.formatNumeric(b, x)
}
// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
pathHeaderBytes := s.next(fileNameSize)
formatString(pathHeaderBytes, hdr.Name, paxPath)
// Handle out of range ModTime carefully.
var modTime int64
if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
modTime = hdr.ModTime.Unix()
}
f.formatOctal(s.next(8), hdr.Mode) // 100:108
formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
formatNumeric(s.next(12), modTime, paxNone) // 136:148 --- consider using pax for finer granularity
s.next(8) // chksum (148:156)
s.next(1)[0] = hdr.Typeflag // 156:157
formatString(s.next(100), hdr.Linkname, paxLinkpath)
copy(s.next(8), []byte("ustar\x0000")) // 257:265
formatString(s.next(32), hdr.Uname, paxUname) // 265:297
formatString(s.next(32), hdr.Gname, paxGname) // 297:329
formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
prefixHeaderBytes := s.next(155)
formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
if tw.usedBinary {
copy(header[257:265], []byte("ustar \x00"))
}
_, paxPathUsed := paxHeaders[paxPath]
// try to use a ustar header when only the name is too long
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
prefix, suffix, ok := splitUSTARPath(hdr.Name)
if ok {
// Since we can encode in USTAR format, disable PAX header.
delete(paxHeaders, paxPath)
// Update the path fields
formatString(pathHeaderBytes, suffix, paxNone)
formatString(prefixHeaderBytes, prefix, paxNone)
}
}
// The chksum field is terminated by a NUL and a space.
// This is different from the other octal fields.
chksum, _ := checksum(header)
f.formatOctal(header[148:155], chksum) // Never fails
header[155] = ' '
// Check if there were any formatting errors.
if f.err != nil {
tw.err = f.err
return tw.err
}
if allowPax {
for k, v := range hdr.Xattrs {
paxHeaders[paxXattr+k] = v
}
for k, v := range hdr.Winheaders {
paxHeaders[paxWindows+k] = v
}
}
if len(paxHeaders) > 0 {
if !allowPax {
return errInvalidHeader
}
if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
return err
}
}
tw.nb = int64(hdr.Size)
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
_, tw.err = tw.w.Write(header)
return tw.err
}
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
length := len(name)
if length <= fileNameSize || !isASCII(name) {
return "", "", false
} else if length > fileNamePrefixSize+1 {
length = fileNamePrefixSize + 1
} else if name[length-1] == '/' {
length--
}
i := strings.LastIndex(name[:length], "/")
nlen := len(name) - i - 1 // nlen is length of suffix
plen := i // plen is length of prefix
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
return "", "", false
}
return name[:i], name[i+1:], true
}
// writePaxHeader writes an extended pax header to the
// archive.
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
// Prepare extended header
ext := new(Header)
ext.Typeflag = TypeXHeader
// Setting ModTime is required for reader parsing to
// succeed, and seems harmless enough.
ext.ModTime = hdr.ModTime
// The spec asks that we namespace our pseudo files
// with the current pid. However, this results in differing outputs
// for identical inputs. As such, the constant 0 is now used instead.
// golang.org/issue/12358
dir, file := path.Split(hdr.Name)
fullName := path.Join(dir, "PaxHeaders.0", file)
ascii := toASCII(fullName)
if len(ascii) > 100 {
ascii = ascii[:100]
}
ext.Name = ascii
// Construct the body
var buf bytes.Buffer
// Keys are sorted before writing to body to allow deterministic output.
var keys []string
for k := range paxHeaders {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
}
ext.Size = int64(len(buf.Bytes()))
if err := tw.writeHeader(ext, false); err != nil {
return err
}
if _, err := tw.Write(buf.Bytes()); err != nil {
return err
}
if err := tw.Flush(); err != nil {
return err
}
return nil
}
// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
func formatPAXRecord(k, v string) string {
const padding = 3 // Extra padding for ' ', '=', and '\n'
size := len(k) + len(v) + padding
size += len(strconv.Itoa(size))
record := fmt.Sprintf("%d %s=%s\n", size, k, v)
// Final adjustment if adding size field increased the record size.
if len(record) != size {
size = len(record)
record = fmt.Sprintf("%d %s=%s\n", size, k, v)
}
return record
}
// Write writes to the current entry in the tar archive.
// Write returns the error ErrWriteTooLong if more than
// hdr.Size bytes are written after WriteHeader.
func (tw *Writer) Write(b []byte) (n int, err error) {
if tw.closed {
err = ErrWriteAfterClose
return
}
overwrite := false
if int64(len(b)) > tw.nb {
b = b[0:tw.nb]
overwrite = true
}
n, err = tw.w.Write(b)
tw.nb -= int64(n)
if err == nil && overwrite {
err = ErrWriteTooLong
return
}
tw.err = err
return
}
// Close closes the tar archive, flushing any unwritten
// data to the underlying writer.
func (tw *Writer) Close() error {
if tw.err != nil || tw.closed {
return tw.err
}
tw.Flush()
tw.closed = true
if tw.err != nil {
return tw.err
}
// trailer: two zero blocks
for i := 0; i < 2; i++ {
_, tw.err = tw.w.Write(zeroBlock)
if tw.err != nil {
break
}
}
return tw.err
}
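
For completeness (again, not part of the vendored file): the write path is the mirror image, WriteHeader then Write for each entry, with PAX records emitted automatically by writeHeader above whenever a name, xattr, or numeric field does not fit the USTAR layout. A short sketch using the standard archive/tar API; the output path and entry name are placeholders.

package main

import (
	"archive/tar"
	"log"
	"os"
)

func main() {
	out, err := os.Create("out.tar") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	tw := tar.NewWriter(out)
	data := []byte("hello tar\n")

	// WriteHeader starts the entry; Write may supply at most hdr.Size bytes.
	hdr := &tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(data))}
	if err := tw.WriteHeader(hdr); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(data); err != nil {
		log.Fatal(err)
	}

	// Close flushes padding and writes the two trailing zero blocks.
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
}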


@@ -0,0 +1,241 @@
package winio
import (
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"syscall"
"unicode/utf16"
)
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
const (
BackupData = uint32(iota + 1)
BackupEaData
BackupSecurity
BackupAlternateData
BackupLink
BackupPropertyData
BackupObjectId
BackupReparseData
BackupSparseBlock
BackupTxfsData
StreamSparseAttributes = uint32(8)
)
// BackupHeader represents a backup stream of a file.
type BackupHeader struct {
Id uint32 // The backup stream ID
Attributes uint32 // Stream attributes
Size int64 // The size of the stream in bytes
Name string // The name of the stream (for BackupAlternateData only).
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
}
type win32StreamId struct {
StreamId uint32
Attributes uint32
Size uint64
NameSize uint32
}
// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
// of BackupHeader values.
type BackupStreamReader struct {
r io.Reader
bytesLeft int64
}
// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
return &BackupStreamReader{r, 0}
}
// Next returns the next backup stream and prepares for calls to Write(). It skips the remainder of the current stream if
// it was not completely read.
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
if r.bytesLeft > 0 {
if _, err := io.Copy(ioutil.Discard, r); err != nil {
return nil, err
}
}
var wsi win32StreamId
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
return nil, err
}
hdr := &BackupHeader{
Id: wsi.StreamId,
Attributes: wsi.Attributes,
Size: int64(wsi.Size),
}
if wsi.NameSize != 0 {
name := make([]uint16, int(wsi.NameSize/2))
if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
return nil, err
}
hdr.Name = syscall.UTF16ToString(name)
}
if wsi.StreamId == BackupSparseBlock {
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
return nil, err
}
hdr.Size -= 8
}
r.bytesLeft = hdr.Size
return hdr, nil
}
// Read reads from the current backup stream.
func (r *BackupStreamReader) Read(b []byte) (int, error) {
if r.bytesLeft == 0 {
return 0, io.EOF
}
if int64(len(b)) > r.bytesLeft {
b = b[:r.bytesLeft]
}
n, err := r.r.Read(b)
r.bytesLeft -= int64(n)
if err == io.EOF {
err = io.ErrUnexpectedEOF
} else if r.bytesLeft == 0 && err == nil {
err = io.EOF
}
return n, err
}
// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
type BackupStreamWriter struct {
w io.Writer
bytesLeft int64
}
// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
return &BackupStreamWriter{w, 0}
}
// WriteHeader writes the next backup stream header and prepares for calls to Write().
func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
if w.bytesLeft != 0 {
return fmt.Errorf("missing %d bytes", w.bytesLeft)
}
name := utf16.Encode([]rune(hdr.Name))
wsi := win32StreamId{
StreamId: hdr.Id,
Attributes: hdr.Attributes,
Size: uint64(hdr.Size),
NameSize: uint32(len(name) * 2),
}
if hdr.Id == BackupSparseBlock {
// Include space for the int64 block offset
wsi.Size += 8
}
if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
return err
}
if len(name) != 0 {
if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
return err
}
}
if hdr.Id == BackupSparseBlock {
if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
return err
}
}
w.bytesLeft = hdr.Size
return nil
}
// Write writes to the current backup stream.
func (w *BackupStreamWriter) Write(b []byte) (int, error) {
if w.bytesLeft < int64(len(b)) {
return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
}
n, err := w.w.Write(b)
w.bytesLeft -= int64(n)
return n, err
}
// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
type BackupFileReader struct {
f *os.File
includeSecurity bool
ctx uintptr
}
// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
// Read will attempt to read the security descriptor of the file.
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
r := &BackupFileReader{f, includeSecurity, 0}
runtime.SetFinalizer(r, func(r *BackupFileReader) { r.Close() })
return r
}
// Read reads a backup stream from the file by calling the Win32 API BackupRead().
func (r *BackupFileReader) Read(b []byte) (int, error) {
var bytesRead uint32
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
if err != nil {
return 0, &os.PathError{"BackupRead", r.f.Name(), err}
}
if bytesRead == 0 {
return 0, io.EOF
}
return int(bytesRead), nil
}
// Close frees Win32 resources associated with the BackupFileReader. It does not close
// the underlying file.
func (r *BackupFileReader) Close() error {
if r.ctx != 0 {
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
r.ctx = 0
}
return nil
}
// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
type BackupFileWriter struct {
f *os.File
includeSecurity bool
ctx uintptr
}
// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
// Write() will attempt to restore the security descriptor from the stream.
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
w := &BackupFileWriter{f, includeSecurity, 0}
runtime.SetFinalizer(w, func(w *BackupFileWriter) { w.Close() })
return w
}
// Write restores a portion of the file using the provided backup stream.
func (w *BackupFileWriter) Write(b []byte) (int, error) {
var bytesWritten uint32
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
if err != nil {
return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
}
if int(bytesWritten) != len(b) {
return int(bytesWritten), errors.New("not all bytes could be written")
}
return len(b), nil
}
// Close frees Win32 resources associated with the BackupFileWriter. It does not
// close the underlying file.
func (w *BackupFileWriter) Close() error {
if w.ctx != 0 {
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
w.ctx = 0
}
return nil
}
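A minimal usage sketch for the reader side of this API, assuming a Windows build; the file path is a placeholder, and files the caller cannot otherwise open typically also require SeBackupPrivilege.
package main
import (
    "fmt"
    "io"
    "os"
    "github.com/Microsoft/go-winio"
)
func main() {
    f, err := os.Open(`C:\temp\example.txt`) // placeholder path
    if err != nil {
        panic(err)
    }
    defer f.Close()
    // Wrap the handle in a BackupFileReader, then parse the raw BackupRead
    // stream into individual BackupHeader records.
    bfr := winio.NewBackupFileReader(f, false)
    defer bfr.Close()
    bsr := winio.NewBackupStreamReader(bfr)
    for {
        hdr, err := bsr.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            panic(err)
        }
        fmt.Printf("stream id=%d name=%q size=%d\n", hdr.Id, hdr.Name, hdr.Size)
    }
}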

View File

@ -0,0 +1,362 @@
package backuptar
import (
"errors"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
)
const (
c_ISUID = 04000 // Set uid
c_ISGID = 02000 // Set gid
c_ISVTX = 01000 // Save text (sticky bit)
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
c_ISLNK = 0120000 // Symbolic link
c_ISBLK = 060000 // Block special file
c_ISCHR = 020000 // Character special file
c_ISSOCK = 0140000 // Socket
)
const (
hdrFileAttributes = "fileattr"
hdrAccessTime = "accesstime"
hdrChangeTime = "changetime"
hdrCreateTime = "createtime"
hdrWriteTime = "writetime"
hdrSecurityDescriptor = "sd"
hdrMountPoint = "mountpoint"
)
func writeZeroes(w io.Writer, count int64) error {
buf := make([]byte, 8192)
c := len(buf)
for i := int64(0); i < count; i += int64(c) {
if int64(c) > count-i {
c = int(count - i)
}
_, err := w.Write(buf[:c])
if err != nil {
return err
}
}
return nil
}
func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
curOffset := int64(0)
for {
bhdr, err := br.Next()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
return err
}
if bhdr.Id != winio.BackupSparseBlock {
return fmt.Errorf("unexpected stream %d", bhdr.Id)
}
// archive/tar does not support writing sparse files
// so just write zeroes to catch up to the current offset.
err = writeZeroes(t, bhdr.Offset-curOffset)
if err != nil {
return err
}
if bhdr.Size == 0 {
break
}
n, err := io.Copy(t, br)
if err != nil {
return err
}
curOffset = bhdr.Offset + n
}
return nil
}
func win32TimeFromTar(key string, hdrs map[string]string, unixTime time.Time) syscall.Filetime {
if s, ok := hdrs[key]; ok {
n, err := strconv.ParseUint(s, 10, 64)
if err == nil {
return syscall.Filetime{uint32(n & 0xffffffff), uint32(n >> 32)}
}
}
return syscall.NsecToFiletime(unixTime.UnixNano())
}
func win32TimeToTar(ft syscall.Filetime) (string, time.Time) {
return fmt.Sprintf("%d", uint64(ft.LowDateTime)+(uint64(ft.HighDateTime)<<32)), time.Unix(0, ft.Nanoseconds())
}
// Writes a file to a tar writer using data from a Win32 backup stream.
//
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
//
// The additional Win32 metadata is:
//
// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
//
// MSWINDOWS.accesstime: The last access time, as a Filetime expressed as a 64-bit decimal value.
//
// MSWINDOWS.createtime: The creation time, as a Filetime expressed as a 64-bit decimal value.
//
// MSWINDOWS.changetime: The change time, as a Filetime expressed as a 64-bit decimal value.
//
// MSWINDOWS.writetime: The last write time, as a Filetime expressed as a 64-bit decimal value.
//
// MSWINDOWS.sd: The Win32 security descriptor, in SDDL (string) format
//
// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
name = filepath.ToSlash(name)
hdr := &tar.Header{
Name: name,
Size: size,
Typeflag: tar.TypeReg,
Winheaders: make(map[string]string),
}
hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
hdr.Winheaders[hdrAccessTime], hdr.AccessTime = win32TimeToTar(fileInfo.LastAccessTime)
hdr.Winheaders[hdrChangeTime], hdr.ChangeTime = win32TimeToTar(fileInfo.ChangeTime)
hdr.Winheaders[hdrCreateTime], _ = win32TimeToTar(fileInfo.CreationTime)
hdr.Winheaders[hdrWriteTime], hdr.ModTime = win32TimeToTar(fileInfo.LastWriteTime)
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
hdr.Mode |= c_ISDIR
hdr.Size = 0
hdr.Typeflag = tar.TypeDir
}
br := winio.NewBackupStreamReader(r)
var dataHdr *winio.BackupHeader
for dataHdr == nil {
bhdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupData:
hdr.Mode |= c_ISREG
dataHdr = bhdr
case winio.BackupSecurity:
sd, err := ioutil.ReadAll(br)
if err != nil {
return err
}
sddl, err := winio.SecurityDescriptorToSddl(sd)
if err != nil {
return err
}
hdr.Winheaders[hdrSecurityDescriptor] = sddl
case winio.BackupReparseData:
hdr.Mode |= c_ISLNK
hdr.Typeflag = tar.TypeSymlink
reparseBuffer, err := ioutil.ReadAll(br)
if err != nil {
return err
}
rp, err := winio.DecodeReparsePoint(reparseBuffer)
if err != nil {
return err
}
if rp.IsMountPoint {
hdr.Winheaders[hdrMountPoint] = "1"
}
hdr.Linkname = rp.Target
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
default:
return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
}
}
err := t.WriteHeader(hdr)
if err != nil {
return err
}
if dataHdr != nil {
// A data stream was found. Copy the data.
if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
if size != dataHdr.Size {
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
}
_, err = io.Copy(t, br)
if err != nil {
return err
}
} else {
err = copySparse(t, br)
if err != nil {
return err
}
}
}
// Look for streams after the data stream. The only ones we handle are alternate data streams.
// Other streams may have metadata that could be serialized, but the tar header has already
// been written. In practice, this means that we don't get EA or TXF metadata.
for {
bhdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupAlternateData:
altName := bhdr.Name
if strings.HasSuffix(altName, ":$DATA") {
altName = altName[:len(altName)-len(":$DATA")]
}
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
hdr = &tar.Header{
Name: name + altName,
Mode: hdr.Mode,
Typeflag: tar.TypeReg,
Size: bhdr.Size,
ModTime: hdr.ModTime,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
err = t.WriteHeader(hdr)
if err != nil {
return err
}
_, err = io.Copy(t, br)
if err != nil {
return err
}
} else {
// Unsupported for now, since the size of the alternate stream is not present
// in the backup stream until after the data has been read.
return errors.New("tar of sparse alternate data streams is unsupported")
}
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
default:
return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
}
}
return nil
}
// Retrieves basic Win32 file information from a tar header, using the additional metadata written by
// WriteTarFileFromBackupStream.
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
name = hdr.Name
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
size = hdr.Size
}
fileInfo = &winio.FileBasicInfo{
LastAccessTime: win32TimeFromTar(hdrAccessTime, hdr.Winheaders, hdr.AccessTime),
LastWriteTime: win32TimeFromTar(hdrWriteTime, hdr.Winheaders, hdr.ModTime),
ChangeTime: win32TimeFromTar(hdrChangeTime, hdr.Winheaders, hdr.ChangeTime),
CreationTime: win32TimeFromTar(hdrCreateTime, hdr.Winheaders, hdr.ModTime),
}
if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
attr, err := strconv.ParseUint(attrStr, 10, 32)
if err != nil {
return "", 0, nil, err
}
fileInfo.FileAttributes = uintptr(attr)
} else {
if hdr.Typeflag == tar.TypeDir {
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
}
}
return
}
// Writes a Win32 backup stream from the current tar file. Since this function may process multiple
// tar file entries in order to collect all the alternate data streams for the file, it returns the next
// tar file entry that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
bw := winio.NewBackupStreamWriter(w)
if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
sd, err := winio.SddlToSecurityDescriptor(sddl)
if err != nil {
return nil, err
}
bhdr := winio.BackupHeader{
Id: winio.BackupSecurity,
Size: int64(len(sd)),
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(sd)
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeSymlink {
_, isMountPoint := hdr.Winheaders[hdrMountPoint]
rp := winio.ReparsePoint{
Target: hdr.Linkname,
IsMountPoint: isMountPoint,
}
reparse := winio.EncodeReparsePoint(&rp)
bhdr := winio.BackupHeader{
Id: winio.BackupReparseData,
Size: int64(len(reparse)),
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(reparse)
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
bhdr := winio.BackupHeader{
Id: winio.BackupData,
Size: hdr.Size,
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.Copy(bw, t)
if err != nil {
return nil, err
}
}
// Copy all the alternate data streams and return the next non-ADS header.
for {
ahdr, err := t.Next()
if err != nil {
return nil, err
}
if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
return ahdr, nil
}
bhdr := winio.BackupHeader{
Id: winio.BackupAlternateData,
Size: ahdr.Size,
Name: ahdr.Name[len(hdr.Name)+1:] + ":$DATA",
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.Copy(bw, t)
if err != nil {
return nil, err
}
}
}
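A sketch of how these pieces compose when tarring a single file, assuming a Windows build and that the forked archive/tar package keeps the standard NewWriter constructor; the paths and entry name are placeholders.
package main
import (
    "os"
    "github.com/Microsoft/go-winio"
    "github.com/Microsoft/go-winio/archive/tar"
    "github.com/Microsoft/go-winio/backuptar"
)
func main() {
    f, err := os.Open(`C:\temp\example.txt`) // placeholder
    if err != nil {
        panic(err)
    }
    defer f.Close()
    bi, err := winio.GetFileBasicInfo(f)
    if err != nil {
        panic(err)
    }
    fi, err := f.Stat()
    if err != nil {
        panic(err)
    }
    out, err := os.Create(`C:\temp\example.tar`) // placeholder
    if err != nil {
        panic(err)
    }
    defer out.Close()
    t := tar.NewWriter(out)
    // Read the file as a Win32 backup stream (including its security descriptor)
    // and convert it into a tar entry carrying the MSWINDOWS pax headers above.
    br := winio.NewBackupFileReader(f, true)
    defer br.Close()
    if err := backuptar.WriteTarFileFromBackupStream(t, br, "example.txt", fi.Size(), bi); err != nil {
        panic(err)
    }
    if err := t.Close(); err != nil {
        panic(err)
    }
}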

View File

@ -0,0 +1,216 @@
package winio
import (
"errors"
"io"
"runtime"
"sync"
"syscall"
"time"
)
//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
const (
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
)
var (
ErrFileClosed = errors.New("file has already been closed")
ErrTimeout = &timeoutError{}
)
type timeoutError struct{}
func (e *timeoutError) Error() string { return "i/o timeout" }
func (e *timeoutError) Timeout() bool { return true }
func (e *timeoutError) Temporary() bool { return true }
var ioInitOnce sync.Once
var ioCompletionPort syscall.Handle
// ioResult contains the result of an asynchronous IO operation
type ioResult struct {
bytes uint32
err error
}
// ioOperation represents an outstanding asynchronous Win32 IO
type ioOperation struct {
o syscall.Overlapped
ch chan ioResult
}
func initIo() {
h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
if err != nil {
panic(err)
}
ioCompletionPort = h
go ioCompletionProcessor(h)
}
// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
// It takes ownership of this handle and will close it if it is garbage collected.
type win32File struct {
handle syscall.Handle
wg sync.WaitGroup
closing bool
readDeadline time.Time
writeDeadline time.Time
}
// makeWin32File makes a new win32File from an existing file handle
func makeWin32File(h syscall.Handle) (*win32File, error) {
f := &win32File{handle: h}
ioInitOnce.Do(initIo)
_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
if err != nil {
return nil, err
}
err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
if err != nil {
return nil, err
}
runtime.SetFinalizer(f, (*win32File).closeHandle)
return f, nil
}
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
return makeWin32File(h)
}
// closeHandle closes the resources associated with a Win32 handle
func (f *win32File) closeHandle() {
if !f.closing {
// cancel all IO and wait for it to complete
f.closing = true
cancelIoEx(f.handle, nil)
f.wg.Wait()
// at this point, no new IO can start
syscall.Close(f.handle)
f.handle = 0
}
}
// Close closes a win32File.
func (f *win32File) Close() error {
f.closeHandle()
runtime.SetFinalizer(f, nil)
return nil
}
// prepareIo prepares for a new IO operation
func (f *win32File) prepareIo() (*ioOperation, error) {
f.wg.Add(1)
if f.closing {
return nil, ErrFileClosed
}
c := &ioOperation{}
c.ch = make(chan ioResult)
return c, nil
}
// ioCompletionProcessor processes completed async IOs forever
func ioCompletionProcessor(h syscall.Handle) {
for {
var bytes uint32
var key uintptr
var op *ioOperation
err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
if op == nil {
panic(err)
}
op.ch <- ioResult{bytes, err}
}
}
// asyncIo processes the return value from ReadFile or WriteFile, blocking until
// the operation has actually completed.
func (f *win32File) asyncIo(c *ioOperation, deadline time.Time, bytes uint32, err error) (int, error) {
if err != syscall.ERROR_IO_PENDING {
f.wg.Done()
return int(bytes), err
} else {
var r ioResult
wait := true
timedout := false
if f.closing {
cancelIoEx(f.handle, &c.o)
} else if !deadline.IsZero() {
now := time.Now()
if !deadline.After(now) {
timedout = true
} else {
timeout := time.After(deadline.Sub(now))
select {
case r = <-c.ch:
wait = false
case <-timeout:
timedout = true
}
}
}
if timedout {
cancelIoEx(f.handle, &c.o)
}
if wait {
r = <-c.ch
}
err = r.err
if err == syscall.ERROR_OPERATION_ABORTED {
if f.closing {
err = ErrFileClosed
} else if timedout {
err = ErrTimeout
}
}
f.wg.Done()
return int(r.bytes), err
}
}
// Read reads from a file handle.
func (f *win32File) Read(b []byte) (int, error) {
c, err := f.prepareIo()
if err != nil {
return 0, err
}
var bytes uint32
err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
n, err := f.asyncIo(c, f.readDeadline, bytes, err)
// Handle EOF conditions.
if err == nil && n == 0 && len(b) != 0 {
return 0, io.EOF
} else if err == syscall.ERROR_BROKEN_PIPE {
return 0, io.EOF
} else {
return n, err
}
}
// Write writes to a file handle.
func (f *win32File) Write(b []byte) (int, error) {
c, err := f.prepareIo()
if err != nil {
return 0, err
}
var bytes uint32
err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
return f.asyncIo(c, f.writeDeadline, bytes, err)
}
func (f *win32File) SetReadDeadline(t time.Time) error {
f.readDeadline = t
return nil
}
func (f *win32File) SetWriteDeadline(t time.Time) error {
f.writeDeadline = t
return nil
}

View File

@ -0,0 +1,30 @@
package winio
import (
"os"
"syscall"
"unsafe"
)
//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
FileAttributes uintptr // includes padding
}
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
bi := &FileBasicInfo{}
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), 0, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return nil, &os.PathError{"GetFileInformationByHandleEx", f.Name(), err}
}
return bi, nil
}
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
if err := setFileInformationByHandle(syscall.Handle(f.Fd()), 0, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return &os.PathError{"SetFileInformationByHandle", f.Name(), err}
}
return nil
}
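A small sketch of reading and writing the basic info block, assuming a Windows build; the path is a placeholder and the read-only attribute flip is only illustrative.
package main
import (
    "os"
    "syscall"
    "github.com/Microsoft/go-winio"
)
func main() {
    f, err := os.OpenFile(`C:\temp\example.txt`, os.O_RDWR, 0) // placeholder path
    if err != nil {
        panic(err)
    }
    defer f.Close()
    bi, err := winio.GetFileBasicInfo(f)
    if err != nil {
        panic(err)
    }
    // Set the read-only attribute and write the info block back in one call.
    bi.FileAttributes |= syscall.FILE_ATTRIBUTE_READONLY
    if err := winio.SetFileBasicInfo(f, bi); err != nil {
        panic(err)
    }
}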

View File

@ -0,0 +1,280 @@
package winio
import (
"errors"
"net"
"os"
"syscall"
"time"
"unsafe"
)
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
//sys createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW
type securityAttributes struct {
Length uint32
SecurityDescriptor *byte
InheritHandle uint32
}
const (
cERROR_PIPE_BUSY = syscall.Errno(231)
cERROR_PIPE_CONNECTED = syscall.Errno(535)
cERROR_SEM_TIMEOUT = syscall.Errno(121)
cPIPE_ACCESS_DUPLEX = 0x3
cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
cPIPE_REJECT_REMOTE_CLIENTS = 0x8
cPIPE_UNLIMITED_INSTANCES = 255
cNMPWAIT_USE_DEFAULT_WAIT = 0
cNMPWAIT_NOWAIT = 1
)
var (
// This error should match net.errClosing since docker takes a dependency on its text
ErrPipeListenerClosed = errors.New("use of closed network connection")
)
type win32Pipe struct {
*win32File
path string
}
type pipeAddress string
func (f *win32Pipe) LocalAddr() net.Addr {
return pipeAddress(f.path)
}
func (f *win32Pipe) RemoteAddr() net.Addr {
return pipeAddress(f.path)
}
func (f *win32Pipe) SetDeadline(t time.Time) error {
f.SetReadDeadline(t)
f.SetWriteDeadline(t)
return nil
}
func (s pipeAddress) Network() string {
return "pipe"
}
func (s pipeAddress) String() string {
return string(s)
}
func makeWin32Pipe(h syscall.Handle, path string) (*win32Pipe, error) {
f, err := makeWin32File(h)
if err != nil {
return nil, err
}
return &win32Pipe{f, path}, nil
}
// DialPipe connects to a named pipe by path, timing out if the connection
// takes longer than the specified duration. If timeout is nil, then the timeout
// is the default timeout established by the pipe server.
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
var absTimeout time.Time
if timeout != nil {
absTimeout = time.Now().Add(*timeout)
}
var err error
var h syscall.Handle
for {
h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err != cERROR_PIPE_BUSY {
break
}
now := time.Now()
var ms uint32
if absTimeout.IsZero() {
ms = cNMPWAIT_USE_DEFAULT_WAIT
} else if now.After(absTimeout) {
ms = cNMPWAIT_NOWAIT
} else {
ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000)
}
err = waitNamedPipe(path, ms)
if err != nil {
if err == cERROR_SEM_TIMEOUT {
return nil, ErrTimeout
}
break
}
}
if err != nil {
return nil, &os.PathError{"open", path, err}
}
p, err := makeWin32Pipe(h, path)
if err != nil {
syscall.Close(h)
return nil, err
}
return p, nil
}
type acceptResponse struct {
p *win32Pipe
err error
}
type win32PipeListener struct {
firstHandle syscall.Handle
path string
securityDescriptor []byte
acceptCh chan (chan acceptResponse)
closeCh chan int
doneCh chan int
}
func makeServerPipeHandle(path string, securityDescriptor []byte, first bool) (syscall.Handle, error) {
var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
if first {
flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
}
var sa securityAttributes
sa.Length = uint32(unsafe.Sizeof(sa))
if securityDescriptor != nil {
sa.SecurityDescriptor = &securityDescriptor[0]
}
h, err := createNamedPipe(path, flags, cPIPE_REJECT_REMOTE_CLIENTS, cPIPE_UNLIMITED_INSTANCES, 4096, 4096, 0, &sa)
if err != nil {
return 0, &os.PathError{"open", path, err}
}
return h, nil
}
func (l *win32PipeListener) makeServerPipe() (*win32Pipe, error) {
h, err := makeServerPipeHandle(l.path, l.securityDescriptor, false)
if err != nil {
return nil, err
}
p, err := makeWin32Pipe(h, l.path)
if err != nil {
syscall.Close(h)
return nil, err
}
return p, nil
}
func (l *win32PipeListener) listenerRoutine() {
closed := false
for !closed {
select {
case <-l.closeCh:
closed = true
case responseCh := <-l.acceptCh:
p, err := l.makeServerPipe()
if err == nil {
// Wait for the client to connect.
ch := make(chan error)
go func() {
ch <- connectPipe(p)
}()
select {
case err = <-ch:
if err != nil {
p.Close()
p = nil
}
case <-l.closeCh:
// Abort the connect request by closing the handle.
p.Close()
p = nil
err = <-ch
if err == nil || err == ErrFileClosed {
err = ErrPipeListenerClosed
}
closed = true
}
}
responseCh <- acceptResponse{p, err}
}
}
syscall.Close(l.firstHandle)
l.firstHandle = 0
// Notify Close() and Accept() callers that the handle has been closed.
close(l.doneCh)
}
func ListenPipe(path, sddl string) (net.Listener, error) {
var (
sd []byte
err error
)
if sddl != "" {
sd, err = SddlToSecurityDescriptor(sddl)
if err != nil {
return nil, err
}
}
h, err := makeServerPipeHandle(path, sd, true)
if err != nil {
return nil, err
}
// Immediately open and then close a client handle so that the named pipe is
// created but not currently accepting connections.
h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err != nil {
syscall.Close(h)
return nil, err
}
syscall.Close(h2)
l := &win32PipeListener{
firstHandle: h,
path: path,
securityDescriptor: sd,
acceptCh: make(chan (chan acceptResponse)),
closeCh: make(chan int),
doneCh: make(chan int),
}
go l.listenerRoutine()
return l, nil
}
func connectPipe(p *win32Pipe) error {
c, err := p.prepareIo()
if err != nil {
return err
}
err = connectNamedPipe(p.handle, &c.o)
_, err = p.asyncIo(c, time.Time{}, 0, err)
if err != nil && err != cERROR_PIPE_CONNECTED {
return err
}
return nil
}
func (l *win32PipeListener) Accept() (net.Conn, error) {
ch := make(chan acceptResponse)
select {
case l.acceptCh <- ch:
response := <-ch
return response.p, response.err
case <-l.doneCh:
return nil, ErrPipeListenerClosed
}
}
func (l *win32PipeListener) Close() error {
select {
case l.closeCh <- 1:
<-l.doneCh
case <-l.doneCh:
}
return nil
}
func (l *win32PipeListener) Addr() net.Addr {
return pipeAddress(l.path)
}
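A minimal round trip over a named pipe, assuming a Windows build; the pipe name is a placeholder and the empty SDDL string leaves the default security descriptor in place.
package main
import (
    "fmt"
    "time"
    "github.com/Microsoft/go-winio"
)
func main() {
    l, err := winio.ListenPipe(`\\.\pipe\winio-example`, "") // placeholder name
    if err != nil {
        panic(err)
    }
    defer l.Close()
    go func() {
        c, err := l.Accept()
        if err != nil {
            return
        }
        defer c.Close()
        fmt.Fprintln(c, "hello from the server")
    }()
    timeout := 2 * time.Second
    c, err := winio.DialPipe(`\\.\pipe\winio-example`, &timeout)
    if err != nil {
        panic(err)
    }
    defer c.Close()
    // Reads honor the deadline machinery in win32File; a late server yields ErrTimeout.
    c.SetReadDeadline(time.Now().Add(time.Second))
    buf := make([]byte, 64)
    n, err := c.Read(buf)
    if err != nil {
        panic(err)
    }
    fmt.Printf("read %q\n", buf[:n])
}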

View File

@ -0,0 +1,147 @@
package winio
import (
"bytes"
"encoding/binary"
"fmt"
"runtime"
"syscall"
"unicode/utf16"
)
//sys adjustTokenPrivileges(token syscall.Handle, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (err error) = advapi32.AdjustTokenPrivileges
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
//sys revertToSelf() (err error) = advapi32.RevertToSelf
//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *syscall.Handle) (err error) = advapi32.OpenThreadToken
//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
const (
SE_PRIVILEGE_ENABLED = 2
SeBackupPrivilege = "SeBackupPrivilege"
SeRestorePrivilege = "SeRestorePrivilege"
)
const (
securityAnonymous = iota
securityIdentification
securityImpersonation
securityDelegation
)
type PrivilegeError struct {
privileges []uint64
}
func (e *PrivilegeError) Error() string {
s := ""
if len(e.privileges) > 1 {
s = "Could not enable privileges "
} else {
s = "Could not enable privilege "
}
for i, p := range e.privileges {
if i != 0 {
s += ", "
}
s += `"`
s += getPrivilegeName(p)
s += `"`
}
return s
}
func RunWithPrivilege(name string, fn func() error) error {
return RunWithPrivileges([]string{name}, fn)
}
func RunWithPrivileges(names []string, fn func() error) error {
var privileges []uint64
for _, name := range names {
p := uint64(0)
err := lookupPrivilegeValue("", name, &p)
if err != nil {
return err
}
privileges = append(privileges, p)
}
runtime.LockOSThread()
defer runtime.UnlockOSThread()
token, err := newThreadToken()
if err != nil {
return err
}
defer releaseThreadToken(token)
err = adjustPrivileges(token, privileges)
if err != nil {
return err
}
return fn()
}
func adjustPrivileges(token syscall.Handle, privileges []uint64) error {
var b bytes.Buffer
binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
for _, p := range privileges {
binary.Write(&b, binary.LittleEndian, p)
binary.Write(&b, binary.LittleEndian, uint32(SE_PRIVILEGE_ENABLED))
}
prevState := make([]byte, b.Len())
reqSize := uint32(0)
if err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize); err != nil {
return err
}
if int(binary.LittleEndian.Uint32(prevState[0:4])) < len(privileges) {
return &PrivilegeError{privileges}
}
return nil
}
func getPrivilegeName(luid uint64) string {
var nameBuffer [256]uint16
bufSize := uint32(len(nameBuffer))
err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
if err != nil {
return fmt.Sprintf("<unknown privilege %d>", luid)
}
var displayNameBuffer [256]uint16
displayBufSize := uint32(len(displayNameBuffer))
var langId uint32
err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langId)
if err != nil {
return fmt.Sprintf("<unknown privilege %s>", utf16.Decode(nameBuffer[:bufSize]))
}
return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
}
func newThreadToken() (syscall.Handle, error) {
err := impersonateSelf(securityImpersonation)
if err != nil {
return 0, err
}
var token syscall.Handle
err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
if err != nil {
rerr := revertToSelf()
if rerr != nil {
panic(rerr)
}
return 0, err
}
return token, nil
}
func releaseThreadToken(h syscall.Handle) {
err := revertToSelf()
if err != nil {
panic(err)
}
syscall.Close(h)
}
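A short sketch of enabling SeBackupPrivilege around a callback, assuming the calling token is allowed to hold that privilege (for example, an elevated Administrator).
package main
import (
    "fmt"
    "github.com/Microsoft/go-winio"
)
func main() {
    err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
        // The privilege is enabled only on this OS thread and only for the
        // duration of the callback; it is dropped when fn returns.
        fmt.Println("SeBackupPrivilege is enabled here")
        return nil
    })
    if err != nil {
        fmt.Println("privileged operation failed:", err)
    }
}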

View File

@ -0,0 +1,124 @@
package winio
import (
"bytes"
"encoding/binary"
"fmt"
"strings"
"unicode/utf16"
"unsafe"
)
const (
reparseTagMountPoint = 0xA0000003
reparseTagSymlink = 0xA000000C
)
type reparseDataBuffer struct {
ReparseTag uint32
ReparseDataLength uint16
Reserved uint16
SubstituteNameOffset uint16
SubstituteNameLength uint16
PrintNameOffset uint16
PrintNameLength uint16
}
// ReparsePoint describes a Win32 symlink or mount point.
type ReparsePoint struct {
Target string
IsMountPoint bool
}
// UnsupportedReparsePointError is returned when trying to decode a reparse point that is
// neither a symlink nor a mount point.
type UnsupportedReparsePointError struct {
Tag uint32
}
func (e *UnsupportedReparsePointError) Error() string {
return fmt.Sprintf("unsupported reparse point %x", e.Tag)
}
// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
// or a mount point.
func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
isMountPoint := false
tag := binary.LittleEndian.Uint32(b[0:4])
switch tag {
case reparseTagMountPoint:
isMountPoint = true
case reparseTagSymlink:
default:
return nil, &UnsupportedReparsePointError{tag}
}
nameOffset := 16 + binary.LittleEndian.Uint16(b[12:14])
if !isMountPoint {
nameOffset += 4
}
nameLength := binary.LittleEndian.Uint16(b[14:16])
name := make([]uint16, nameLength/2)
err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
if err != nil {
return nil, err
}
return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
}
func isDriveLetter(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
// mount point.
func EncodeReparsePoint(rp *ReparsePoint) []byte {
// Generate an NT path and determine if this is a relative path.
var ntTarget string
relative := false
if strings.HasPrefix(rp.Target, `\\?\`) {
ntTarget = rp.Target
} else if strings.HasPrefix(rp.Target, `\\`) {
ntTarget = `\??\UNC\` + rp.Target[2:]
} else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
ntTarget = `\??\` + rp.Target
} else {
ntTarget = rp.Target
relative = true
}
// The paths must be NUL-terminated even though they are counted strings.
target16 := utf16.Encode([]rune(rp.Target + "\x00"))
ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))
size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
size += len(ntTarget16)*2 + len(target16)*2
tag := uint32(reparseTagMountPoint)
if !rp.IsMountPoint {
tag = reparseTagSymlink
size += 4 // Add room for symlink flags
}
data := reparseDataBuffer{
ReparseTag: tag,
ReparseDataLength: uint16(size),
SubstituteNameOffset: 0,
SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
PrintNameOffset: uint16(len(ntTarget16) * 2),
PrintNameLength: uint16((len(target16) - 1) * 2),
}
var b bytes.Buffer
binary.Write(&b, binary.LittleEndian, &data)
if !rp.IsMountPoint {
flags := uint32(0)
if relative {
flags |= 1
}
binary.Write(&b, binary.LittleEndian, flags)
}
binary.Write(&b, binary.LittleEndian, ntTarget16)
binary.Write(&b, binary.LittleEndian, target16)
return b.Bytes()
}
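A quick round trip through the encoder and decoder; the target path is a placeholder and no reparse point is created on disk.
package main
import (
    "fmt"
    "github.com/Microsoft/go-winio"
)
func main() {
    rp := winio.ReparsePoint{Target: `C:\Some\Target`, IsMountPoint: false}
    buf := winio.EncodeReparsePoint(&rp)
    // Decoding the encoded buffer recovers the original target and kind.
    decoded, err := winio.DecodeReparsePoint(buf)
    if err != nil {
        panic(err)
    }
    fmt.Printf("target=%q mountpoint=%v\n", decoded.Target, decoded.IsMountPoint)
}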

View File

@ -0,0 +1,96 @@
package winio
import (
"syscall"
"unsafe"
)
//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
//sys localFree(mem uintptr) = LocalFree
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
const (
cERROR_NONE_MAPPED = syscall.Errno(1332)
)
type AccountLookupError struct {
Name string
Err error
}
func (e *AccountLookupError) Error() string {
if e.Name == "" {
return "lookup account: empty account name specified"
}
var s string
switch e.Err {
case cERROR_NONE_MAPPED:
s = "not found"
default:
s = e.Err.Error()
}
return "lookup account " + e.Name + ": " + s
}
type SddlConversionError struct {
Sddl string
Err error
}
func (e *SddlConversionError) Error() string {
return "convert " + e.Sddl + ": " + e.Err.Error()
}
// LookupSidByName looks up the SID of an account by name
func LookupSidByName(name string) (sid string, err error) {
if name == "" {
return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
}
var sidSize, sidNameUse, refDomainSize uint32
err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
return "", &AccountLookupError{name, err}
}
sidBuffer := make([]byte, sidSize)
refDomainBuffer := make([]uint16, refDomainSize)
err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
if err != nil {
return "", &AccountLookupError{name, err}
}
var strBuffer *uint16
err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
if err != nil {
return "", &AccountLookupError{name, err}
}
sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
localFree(uintptr(unsafe.Pointer(strBuffer)))
return sid, nil
}
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
var sdBuffer uintptr
err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
if err != nil {
return nil, &SddlConversionError{sddl, err}
}
defer localFree(sdBuffer)
sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
return sd, nil
}
func SecurityDescriptorToSddl(sd []byte) (string, error) {
var sddl *uint16
// The returned string length seems to include an arbitrary number of terminating NULs.
// Don't use it.
err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
if err != nil {
return "", err
}
defer localFree(uintptr(unsafe.Pointer(sddl)))
return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
}
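A brief sketch of the SID and SDDL helpers, assuming a Windows build; the account name is locale-dependent and the SDDL string is an illustrative descriptor granting generic-all access to Everyone.
package main
import (
    "fmt"
    "github.com/Microsoft/go-winio"
)
func main() {
    sid, err := winio.LookupSidByName("Everyone")
    if err != nil {
        panic(err)
    }
    fmt.Println("Everyone =", sid) // typically S-1-1-0 on an English-locale system
    // Convert an SDDL string to a self-relative security descriptor and back.
    sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;WD)")
    if err != nil {
        panic(err)
    }
    sddl, err := winio.SecurityDescriptorToSddl(sd)
    if err != nil {
        panic(err)
    }
    fmt.Println("round-tripped SDDL:", sddl)
}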

View File

@ -0,0 +1,3 @@
package winio
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go

View File

@ -0,0 +1,457 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package winio
import "unsafe"
import "syscall"
var _ unsafe.Pointer
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
modadvapi32 = syscall.NewLazyDLL("advapi32.dll")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
procCreateFileW = modkernel32.NewProc("CreateFileW")
procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
procLocalFree = modkernel32.NewProc("LocalFree")
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
procBackupRead = modkernel32.NewProc("BackupRead")
procBackupWrite = modkernel32.NewProc("BackupWrite")
)
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
newport = syscall.Handle(r0)
if newport == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
}
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}
func _createFile(name *uint16, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func waitNamedPipe(name string, timeout uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _waitNamedPipe(_p0, timeout)
}
func _waitNamedPipe(name *uint16, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(accountName)
if err != nil {
return
}
return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
}
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func convertSidToStringSid(sid *byte, str **uint16) (err error) {
r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(str)
if err != nil {
return
}
return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
}
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func localFree(mem uintptr) {
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
return
}
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
len = uint32(r0)
return
}
func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func adjustTokenPrivileges(token syscall.Handle, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (err error) {
var _p0 uint32
if releaseAll {
_p0 = 1
} else {
_p0 = 0
}
r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func impersonateSelf(level uint32) (err error) {
r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func revertToSelf() (err error) {
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *syscall.Handle) (err error) {
var _p0 uint32
if openAsSelf {
_p0 = 1
} else {
_p0 = 0
}
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getCurrentThread() (h syscall.Handle) {
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
h = syscall.Handle(r0)
return
}
func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
var _p1 *uint16
_p1, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _lookupPrivilegeValue(_p0, _p1, luid)
}
func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
return _lookupPrivilegeName(_p0, luid, buffer, size)
}
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
}
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
var _p0 *byte
if len(b) > 0 {
_p0 = &b[0]
}
var _p1 uint32
if abort {
_p1 = 1
} else {
_p1 = 0
}
var _p2 uint32
if processSecurity {
_p2 = 1
} else {
_p2 = 0
}
r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
var _p0 *byte
if len(b) > 0 {
_p0 = &b[0]
}
var _p1 uint32
if abort {
_p1 = 1
} else {
_p1 = 0
}
var _p2 uint32
if processSecurity {
_p2 = 1
} else {
_p2 = 0
}
r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}

View File

@ -0,0 +1,131 @@
package client
import (
"fmt"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/docker/engine-api/client/transport"
"github.com/docker/go-connections/tlsconfig"
)
// Client is the API client that performs all operations
// against a docker server.
type Client struct {
// proto holds the client protocol i.e. unix.
proto string
// addr holds the client address.
addr string
// basePath holds the path to prepend to the requests.
basePath string
// transport is the interface to send requests with; it implements transport.Client.
transport transport.Client
// version of the server to talk to.
version string
// custom http headers configured by users.
customHTTPHeaders map[string]string
}
// NewEnvClient initializes a new API client based on environment variables.
// Use DOCKER_HOST to set the url to the docker server.
// Use DOCKER_API_VERSION to set the version of the API to reach; leave empty for latest.
// Use DOCKER_CERT_PATH to load the tls certificates from.
// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
func NewEnvClient() (*Client, error) {
var client *http.Client
if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
options := tlsconfig.Options{
CAFile: filepath.Join(dockerCertPath, "ca.pem"),
CertFile: filepath.Join(dockerCertPath, "cert.pem"),
KeyFile: filepath.Join(dockerCertPath, "key.pem"),
InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return nil, err
}
client = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsc,
},
}
}
host := os.Getenv("DOCKER_HOST")
if host == "" {
host = DefaultDockerHost
}
return NewClient(host, os.Getenv("DOCKER_API_VERSION"), client, nil)
}
// NewClient initializes a new API client for the given host and API version.
// It won't send any version information if the version number is empty.
// It uses the given http client as transport.
// It also initializes the custom http headers to add to each request.
func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
proto, addr, basePath, err := ParseHost(host)
if err != nil {
return nil, err
}
transport, err := transport.NewTransportWithHTTP(proto, addr, client)
if err != nil {
return nil, err
}
return &Client{
proto: proto,
addr: addr,
basePath: basePath,
transport: transport,
version: version,
customHTTPHeaders: httpHeaders,
}, nil
}
// getAPIPath returns the versioned request path to call the api.
// It appends the query parameters to the path if they are not empty.
func (cli *Client) getAPIPath(p string, query url.Values) string {
var apiPath string
if cli.version != "" {
v := strings.TrimPrefix(cli.version, "v")
apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p)
} else {
apiPath = fmt.Sprintf("%s%s", cli.basePath, p)
}
if len(query) > 0 {
apiPath += "?" + query.Encode()
}
return apiPath
}
// ClientVersion returns the version string associated with this
// instance of the Client. Note that this value can be changed
// via the DOCKER_API_VERSION env var.
func (cli *Client) ClientVersion() string {
return cli.version
}
// ParseHost verifies that the given host string is valid.
func ParseHost(host string) (string, string, string, error) {
protoAddrParts := strings.SplitN(host, "://", 2)
if len(protoAddrParts) == 1 {
return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
}
var basePath string
proto, addr := protoAddrParts[0], protoAddrParts[1]
if proto == "tcp" {
parsed, err := url.Parse("tcp://" + addr)
if err != nil {
return "", "", "", err
}
addr = parsed.Host
basePath = parsed.Path
}
return proto, addr, basePath, nil
}
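A minimal sketch of constructing a client from the environment, as described above; DOCKER_HOST and the related variables are read from the process environment.
package main
import (
    "fmt"
    "github.com/docker/engine-api/client"
)
func main() {
    cli, err := client.NewEnvClient()
    if err != nil {
        panic(err)
    }
    // Empty unless DOCKER_API_VERSION was set; requests are then unversioned.
    fmt.Println("configured API version:", cli.ClientVersion())
}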

View File

@ -0,0 +1,6 @@
// +build windows darwin
package client
// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
const DefaultDockerHost = "tcp://127.0.0.1:2375"

View File

@ -0,0 +1,6 @@
// +build linux freebsd solaris
package client
// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
const DefaultDockerHost = "unix:///var/run/docker.sock"

View File

@ -0,0 +1,33 @@
package client
import (
"net/url"
"github.com/docker/engine-api/types"
)
// ContainerAttach attaches a connection to a container in the server.
// It returns a types.HijackedResponse with the hijacked connection
// and a reader to get output. It's up to the caller to close
// the hijacked connection by calling types.HijackedResponse.Close.
func (cli *Client) ContainerAttach(options types.ContainerAttachOptions) (types.HijackedResponse, error) {
query := url.Values{}
if options.Stream {
query.Set("stream", "1")
}
if options.Stdin {
query.Set("stdin", "1")
}
if options.Stdout {
query.Set("stdout", "1")
}
if options.Stderr {
query.Set("stderr", "1")
}
if options.DetachKeys != "" {
query.Set("detachKeys", options.DetachKeys)
}
headers := map[string][]string{"Content-Type": {"text/plain"}}
return cli.postHijacked("/containers/"+options.ContainerID+"/attach", query, nil, headers)
}
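A hedged sketch of attaching to a running container's output; the container ID is a placeholder, and the Reader field on the hijacked response is an assumption not shown in this diff.
package main
import (
    "io"
    "os"
    "github.com/docker/engine-api/client"
    "github.com/docker/engine-api/types"
)
func main() {
    cli, err := client.NewEnvClient()
    if err != nil {
        panic(err)
    }
    resp, err := cli.ContainerAttach(types.ContainerAttachOptions{
        ContainerID: "my-container", // placeholder
        Stream:      true,
        Stdout:      true,
        Stderr:      true,
    })
    if err != nil {
        panic(err)
    }
    defer resp.Close()
    // Stream the raw attach output; the Reader field name is assumed here.
    io.Copy(os.Stdout, resp.Reader)
}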

View File

@ -0,0 +1,34 @@
package client
import (
"encoding/json"
"net/url"
"github.com/docker/engine-api/types"
)
// ContainerCommit applies changes into a container and creates a new tagged image.
func (cli *Client) ContainerCommit(options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) {
query := url.Values{}
query.Set("container", options.ContainerID)
query.Set("repo", options.RepositoryName)
query.Set("tag", options.Tag)
query.Set("comment", options.Comment)
query.Set("author", options.Author)
for _, change := range options.Changes {
query.Add("changes", change)
}
if !options.Pause {
query.Set("pause", "0")
}
var response types.ContainerCommitResponse
resp, err := cli.post("/commit", query, options.Config, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}
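A sketch of committing a container to a tagged image; the container ID and repository name are placeholders, and the ID field on the response is an assumption not shown in this diff.
package main
import (
    "fmt"
    "github.com/docker/engine-api/client"
    "github.com/docker/engine-api/types"
)
func main() {
    cli, err := client.NewEnvClient()
    if err != nil {
        panic(err)
    }
    resp, err := cli.ContainerCommit(types.ContainerCommitOptions{
        ContainerID:    "my-container", // placeholder
        RepositoryName: "example/snapshot",
        Tag:            "latest",
        Comment:        "checkpoint before upgrade",
        Author:         "ops@example.com",
        Pause:          true,
    })
    if err != nil {
        panic(err)
    }
    fmt.Println("created image", resp.ID) // field name assumed
}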

View File

@ -0,0 +1,97 @@
package client
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path/filepath"
"strings"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
)
// ContainerStatPath returns Stat information about a path inside the container filesystem.
func (cli *Client) ContainerStatPath(containerID, path string) (types.ContainerPathStat, error) {
query := url.Values{}
query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
urlStr := fmt.Sprintf("/containers/%s/archive", containerID)
response, err := cli.head(urlStr, query, nil)
if err != nil {
return types.ContainerPathStat{}, err
}
defer ensureReaderClosed(response)
return getContainerPathStatFromHeader(response.header)
}
// CopyToContainer copies content into the container filesystem.
func (cli *Client) CopyToContainer(ctx context.Context, options types.CopyToContainerOptions) error {
query := url.Values{}
query.Set("path", filepath.ToSlash(options.Path)) // Normalize the paths used in the API.
// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
if !options.AllowOverwriteDirWithFile {
query.Set("noOverwriteDirNonDir", "true")
}
path := fmt.Sprintf("/containers/%s/archive", options.ContainerID)
response, err := cli.putRawWithContext(ctx, path, query, options.Content, nil)
if err != nil {
return err
}
defer ensureReaderClosed(response)
if response.statusCode != http.StatusOK {
return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
}
return nil
}
// CopyFromContainer gets the content from the container and returns it as a Reader
// so it can be manipulated on the host. It's up to the caller to close the reader.
func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
query := make(url.Values, 1)
query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
apiPath := fmt.Sprintf("/containers/%s/archive", containerID)
response, err := cli.getWithContext(ctx, apiPath, query, nil)
if err != nil {
return nil, types.ContainerPathStat{}, err
}
if response.statusCode != http.StatusOK {
return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
}
// In order to get the copy behavior right, we need to know information
// about both the source and the destination. The response headers include
// stat info about the source that we can use in deciding exactly how to
// copy it locally. Along with the stat info about the local destination,
// we have everything we need to handle the multiple possibilities there
// can be when copying a file/dir from one location to another file/dir.
stat, err := getContainerPathStatFromHeader(response.header)
if err != nil {
return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
}
return response.body, stat, err
}
func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
var stat types.ContainerPathStat
encodedStat := header.Get("X-Docker-Container-Path-Stat")
statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
err := json.NewDecoder(statDecoder).Decode(&stat)
if err != nil {
err = fmt.Errorf("unable to decode container path stat header: %s", err)
}
return stat, err
}
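A minimal sketch of a CopyFromContainer caller, assuming a caller-side package; it drains the returned tar stream and prints the stat decoded from the X-Docker-Container-Path-Stat header. The helper and parameter names are hypothetical.

package clientexample

import (
	"fmt"
	"io"
	"io/ioutil"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
)

// dumpPathFromContainer copies a path out of a container and reports how many
// bytes of tar content were read, plus the decoded stat information.
func dumpPathFromContainer(cli client.APIClient, containerID, srcPath string) error {
	content, stat, err := cli.CopyFromContainer(context.Background(), containerID, srcPath)
	if err != nil {
		return err
	}
	defer content.Close()
	n, err := io.Copy(ioutil.Discard, content) // the stream is a tar archive
	if err != nil {
		return err
	}
	fmt.Printf("copied %d bytes, stat: %+v\n", n, stat)
	return nil
}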

View File

@ -0,0 +1,45 @@
package client
import (
"encoding/json"
"net/url"
"strings"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/container"
"github.com/docker/engine-api/types/network"
)
type configWrapper struct {
*container.Config
HostConfig *container.HostConfig
NetworkingConfig *network.NetworkingConfig
}
// ContainerCreate creates a new container based on the given configuration.
// It can be associated with a name, but it's not mandatory.
func (cli *Client) ContainerCreate(config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) {
var response types.ContainerCreateResponse
query := url.Values{}
if containerName != "" {
query.Set("name", containerName)
}
body := configWrapper{
Config: config,
HostConfig: hostConfig,
NetworkingConfig: networkingConfig,
}
serverResp, err := cli.post("/containers/create", query, body, nil)
if err != nil {
if serverResp != nil && serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
return response, imageNotFoundError{config.Image}
}
return response, err
}
err = json.NewDecoder(serverResp.body).Decode(&response)
ensureReaderClosed(serverResp)
return response, err
}
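A minimal create-and-start sketch, assuming a caller-side package: the empty HostConfig and NetworkingConfig are placeholders, and the ID field on the create response is assumed from the engine-api types package.

package clientexample

import (
	"fmt"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types/container"
	"github.com/docker/engine-api/types/network"
)

// createAndStart creates a container from the given image and starts it.
// (Illustrative helper; real callers usually populate HostConfig and
// NetworkingConfig.)
func createAndStart(cli client.APIClient, image, name string) (string, error) {
	resp, err := cli.ContainerCreate(
		&container.Config{Image: image},
		&container.HostConfig{},
		&network.NetworkingConfig{},
		name,
	)
	if err != nil {
		if client.IsErrImageNotFound(err) {
			return "", fmt.Errorf("image %q is not present on the daemon", image)
		}
		return "", err
	}
	// resp.ID is the identifier assigned by the daemon (field name assumed
	// from the engine-api types package).
	if err := cli.ContainerStart(resp.ID); err != nil {
		return "", err
	}
	return resp.ID, nil
}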

View File

@ -0,0 +1,22 @@
package client
import (
"encoding/json"
"net/url"
"github.com/docker/engine-api/types"
)
// ContainerDiff shows differences in a container filesystem since it was started.
func (cli *Client) ContainerDiff(containerID string) ([]types.ContainerChange, error) {
var changes []types.ContainerChange
serverResp, err := cli.get("/containers/"+containerID+"/changes", url.Values{}, nil)
if err != nil {
return changes, err
}
err = json.NewDecoder(serverResp.body).Decode(&changes)
ensureReaderClosed(serverResp)
return changes, err
}

View File

@ -0,0 +1,48 @@
package client
import (
"encoding/json"
"github.com/docker/engine-api/types"
)
// ContainerExecCreate creates a new exec configuration to run an exec process.
func (cli *Client) ContainerExecCreate(config types.ExecConfig) (types.ContainerExecCreateResponse, error) {
var response types.ContainerExecCreateResponse
resp, err := cli.post("/containers/"+config.Container+"/exec", nil, config, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}
// ContainerExecStart starts an exec process already created in the docker host.
func (cli *Client) ContainerExecStart(execID string, config types.ExecStartCheck) error {
resp, err := cli.post("/exec/"+execID+"/start", nil, config, nil)
ensureReaderClosed(resp)
return err
}
// ContainerExecAttach attaches a connection to an exec process in the server.
// It returns a types.HijackedResponse with the hijacked connection
// and a reader to get output. It's up to the caller to close
// the hijacked connection by calling types.HijackedResponse.Close.
func (cli *Client) ContainerExecAttach(execID string, config types.ExecConfig) (types.HijackedResponse, error) {
headers := map[string][]string{"Content-Type": {"application/json"}}
return cli.postHijacked("/exec/"+execID+"/start", nil, config, headers)
}
// ContainerExecInspect returns information about a specific exec process on the docker host.
func (cli *Client) ContainerExecInspect(execID string) (types.ContainerExecInspect, error) {
var response types.ContainerExecInspect
resp, err := cli.get("/exec/"+execID+"/json", nil, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}
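A minimal exec round-trip sketch, assuming a caller-side package: create an exec instance, start it, then inspect it. The Cmd field of ExecConfig and the ID field of the create response are assumed from the engine-api types package; the helper name is hypothetical.

package clientexample

import (
	"fmt"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
)

// runExec creates an exec instance in a running container, starts it, and
// inspects the result.
func runExec(cli client.APIClient, containerID string, cmd []string) error {
	created, err := cli.ContainerExecCreate(types.ExecConfig{
		Container: containerID,
		Cmd:       cmd, // field name assumed from the engine-api types package
	})
	if err != nil {
		return err
	}
	if err := cli.ContainerExecStart(created.ID, types.ExecStartCheck{}); err != nil {
		return err
	}
	inspect, err := cli.ContainerExecInspect(created.ID)
	if err != nil {
		return err
	}
	fmt.Printf("exec state: %+v\n", inspect)
	return nil
}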

View File

@ -0,0 +1,20 @@
package client
import (
"io"
"net/url"
"golang.org/x/net/context"
)
// ContainerExport retrieves the raw contents of a container
// and returns them as an io.ReadCloser. It's up to the caller
// to close the stream.
func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
serverResp, err := cli.getWithContext(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
if err != nil {
return nil, err
}
return serverResp.body, nil
}

View File

@ -0,0 +1,64 @@
package client
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
"github.com/docker/engine-api/types"
)
// ContainerInspect returns the container information.
func (cli *Client) ContainerInspect(containerID string) (types.ContainerJSON, error) {
serverResp, err := cli.get("/containers/"+containerID+"/json", nil, nil)
if err != nil {
if serverResp.statusCode == http.StatusNotFound {
return types.ContainerJSON{}, containerNotFoundError{containerID}
}
return types.ContainerJSON{}, err
}
var response types.ContainerJSON
err = json.NewDecoder(serverResp.body).Decode(&response)
ensureReaderClosed(serverResp)
return response, err
}
// ContainerInspectWithRaw returns the container information and its raw representation.
func (cli *Client) ContainerInspectWithRaw(containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
query := url.Values{}
if getSize {
query.Set("size", "1")
}
serverResp, err := cli.get("/containers/"+containerID+"/json", query, nil)
if err != nil {
if serverResp.statusCode == http.StatusNotFound {
return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
}
return types.ContainerJSON{}, nil, err
}
defer ensureReaderClosed(serverResp)
body, err := ioutil.ReadAll(serverResp.body)
if err != nil {
return types.ContainerJSON{}, nil, err
}
var response types.ContainerJSON
rdr := bytes.NewReader(body)
err = json.NewDecoder(rdr).Decode(&response)
return response, body, err
}
func (cli *Client) containerInspectWithResponse(containerID string, query url.Values) (types.ContainerJSON, *serverResponse, error) {
serverResp, err := cli.get("/containers/"+containerID+"/json", nil, nil)
if err != nil {
return types.ContainerJSON{}, serverResp, err
}
var response types.ContainerJSON
err = json.NewDecoder(serverResp.body).Decode(&response)
return response, serverResp, err
}

View File

@ -0,0 +1,13 @@
package client
import "net/url"
// ContainerKill terminates the container process but does not remove the container from the docker host.
func (cli *Client) ContainerKill(containerID, signal string) error {
query := url.Values{}
query.Set("signal", signal)
resp, err := cli.post("/containers/"+containerID+"/kill", query, nil, nil)
ensureReaderClosed(resp)
return err
}

View File

@ -0,0 +1,54 @@
package client
import (
"encoding/json"
"net/url"
"strconv"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
)
// ContainerList returns the list of containers in the docker host.
func (cli *Client) ContainerList(options types.ContainerListOptions) ([]types.Container, error) {
query := url.Values{}
if options.All {
query.Set("all", "1")
}
if options.Limit != -1 {
query.Set("limit", strconv.Itoa(options.Limit))
}
if options.Since != "" {
query.Set("since", options.Since)
}
if options.Before != "" {
query.Set("before", options.Before)
}
if options.Size {
query.Set("size", "1")
}
if options.Filter.Len() > 0 {
filterJSON, err := filters.ToParam(options.Filter)
if err != nil {
return nil, err
}
query.Set("filters", filterJSON)
}
resp, err := cli.get("/containers/json", query, nil)
if err != nil {
return nil, err
}
var containers []types.Container
err = json.NewDecoder(resp.body).Decode(&containers)
ensureReaderClosed(resp)
return containers, err
}
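A small listing sketch, assuming a caller-side package: it lists every container and uses -1 as the "do not send a limit" sentinel that ContainerList checks above. The ID and Image field names on types.Container are assumptions.

package clientexample

import (
	"fmt"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
)

// printContainers lists all containers on the host, including stopped ones.
func printContainers(cli client.APIClient) error {
	containers, err := cli.ContainerList(types.ContainerListOptions{
		All:   true,
		Limit: -1, // -1 means "do not send a limit parameter"
	})
	if err != nil {
		return err
	}
	for _, c := range containers {
		// ID and Image are assumed field names on types.Container.
		fmt.Printf("%s\t%s\n", c.ID, c.Image)
	}
	return nil
}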

View File

@ -0,0 +1,48 @@
package client
import (
"io"
"net/url"
"time"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
timetypes "github.com/docker/engine-api/types/time"
)
// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
// It's up to the caller to close the stream.
func (cli *Client) ContainerLogs(ctx context.Context, options types.ContainerLogsOptions) (io.ReadCloser, error) {
query := url.Values{}
if options.ShowStdout {
query.Set("stdout", "1")
}
if options.ShowStderr {
query.Set("stderr", "1")
}
if options.Since != "" {
ts, err := timetypes.GetTimestamp(options.Since, time.Now())
if err != nil {
return nil, err
}
query.Set("since", ts)
}
if options.Timestamps {
query.Set("timestamps", "1")
}
if options.Follow {
query.Set("follow", "1")
}
query.Set("tail", options.Tail)
resp, err := cli.getWithContext(ctx, "/containers/"+options.ContainerID+"/logs", query, nil)
if err != nil {
return nil, err
}
return resp.body, nil
}
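A log-following sketch, assuming a caller-side package: it streams a container's stdout/stderr to the local stdout until the context is cancelled or the stream ends. The "all" tail value follows the usual daemon convention and is an assumption here.

package clientexample

import (
	"io"
	"os"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
)

// tailLogs follows a container's output and copies it to stdout.
func tailLogs(ctx context.Context, cli client.APIClient, containerID string) error {
	logs, err := cli.ContainerLogs(ctx, types.ContainerLogsOptions{
		ContainerID: containerID,
		ShowStdout:  true,
		ShowStderr:  true,
		Follow:      true,
		Tail:        "all", // assumed conventional value for "no tail limit"
	})
	if err != nil {
		return err
	}
	defer logs.Close()
	_, err = io.Copy(os.Stdout, logs)
	return err
}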

View File

@ -0,0 +1,8 @@
package client
// ContainerPause pauses the main process of a given container without terminating it.
func (cli *Client) ContainerPause(containerID string) error {
resp, err := cli.post("/containers/"+containerID+"/pause", nil, nil, nil)
ensureReaderClosed(resp)
return err
}

View File

@ -0,0 +1,26 @@
package client
import (
"net/url"
"github.com/docker/engine-api/types"
)
// ContainerRemove kills and removes a container from the docker host.
func (cli *Client) ContainerRemove(options types.ContainerRemoveOptions) error {
query := url.Values{}
if options.RemoveVolumes {
query.Set("v", "1")
}
if options.RemoveLinks {
query.Set("link", "1")
}
if options.Force {
query.Set("force", "1")
}
resp, err := cli.delete("/containers/"+options.ContainerID, query, nil)
ensureReaderClosed(resp)
return err
}

View File

@ -0,0 +1,12 @@
package client
import "net/url"
// ContainerRename changes the name of a given container.
func (cli *Client) ContainerRename(containerID, newContainerName string) error {
query := url.Values{}
query.Set("name", newContainerName)
resp, err := cli.post("/containers/"+containerID+"/rename", query, nil, nil)
ensureReaderClosed(resp)
return err
}

View File

@ -0,0 +1,28 @@
package client
import (
"net/url"
"strconv"
"github.com/docker/engine-api/types"
)
// ContainerResize changes the size of the tty for a container.
func (cli *Client) ContainerResize(options types.ResizeOptions) error {
return cli.resize("/containers/"+options.ID, options.Height, options.Width)
}
// ContainerExecResize changes the size of the tty for an exec process running inside a container.
func (cli *Client) ContainerExecResize(options types.ResizeOptions) error {
return cli.resize("/exec/"+options.ID, options.Height, options.Width)
}
func (cli *Client) resize(basePath string, height, width int) error {
query := url.Values{}
query.Set("h", strconv.Itoa(height))
query.Set("w", strconv.Itoa(width))
resp, err := cli.post(basePath+"/resize", query, nil, nil)
ensureReaderClosed(resp)
return err
}

View File

@ -0,0 +1,17 @@
package client
import (
"net/url"
"strconv"
)
// ContainerRestart stops and starts a container again.
// It makes the daemon wait for the container to be up again for
// a specific amount of time, given the timeout.
func (cli *Client) ContainerRestart(containerID string, timeout int) error {
query := url.Values{}
query.Set("t", strconv.Itoa(timeout))
resp, err := cli.post("/containers/"+containerID+"/restart", query, nil, nil)
ensureReaderClosed(resp)
return err
}

View File

@ -0,0 +1,8 @@
package client
// ContainerStart sends a request to the docker daemon to start a container.
func (cli *Client) ContainerStart(containerID string) error {
resp, err := cli.post("/containers/"+containerID+"/start", nil, nil, nil)
ensureReaderClosed(resp)
return err
}

View File

@ -0,0 +1,24 @@
package client
import (
"io"
"net/url"
"golang.org/x/net/context"
)
// ContainerStats returns near realtime stats for a given container.
// It's up to the caller to close the io.ReadCloser returned.
func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) {
query := url.Values{}
query.Set("stream", "0")
if stream {
query.Set("stream", "1")
}
resp, err := cli.getWithContext(ctx, "/containers/"+containerID+"/stats", query, nil)
if err != nil {
return nil, err
}
return resp.body, err
}

View File

@ -0,0 +1,16 @@
package client
import (
"net/url"
"strconv"
)
// ContainerStop stops a container without terminating the process.
// The process is blocked until the container stops or the timeout expires.
func (cli *Client) ContainerStop(containerID string, timeout int) error {
query := url.Values{}
query.Set("t", strconv.Itoa(timeout))
resp, err := cli.post("/containers/"+containerID+"/stop", query, nil, nil)
ensureReaderClosed(resp)
return err
}

View File

@ -0,0 +1,27 @@
package client
import (
"encoding/json"
"net/url"
"strings"
"github.com/docker/engine-api/types"
)
// ContainerTop shows process information from within a container.
func (cli *Client) ContainerTop(containerID string, arguments []string) (types.ContainerProcessList, error) {
var response types.ContainerProcessList
query := url.Values{}
if len(arguments) > 0 {
query.Set("ps_args", strings.Join(arguments, " "))
}
resp, err := cli.get("/containers/"+containerID+"/top", query, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}

View File

@ -0,0 +1,8 @@
package client
// ContainerUnpause resumes the process execution within a container
func (cli *Client) ContainerUnpause(containerID string) error {
resp, err := cli.post("/containers/"+containerID+"/unpause", nil, nil, nil)
ensureReaderClosed(resp)
return err
}

View File

@ -0,0 +1,12 @@
package client
import (
"github.com/docker/engine-api/types/container"
)
// ContainerUpdate updates resources of a container
func (cli *Client) ContainerUpdate(containerID string, updateConfig container.UpdateConfig) error {
resp, err := cli.post("/containers/"+containerID+"/update", nil, updateConfig, nil)
ensureReaderClosed(resp)
return err
}

View File

@ -0,0 +1,26 @@
package client
import (
"encoding/json"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
)
// ContainerWait pauses execution until a container exits.
// It returns the status code with which the container exited.
func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int, error) {
resp, err := cli.postWithContext(ctx, "/containers/"+containerID+"/wait", nil, nil, nil)
if err != nil {
return -1, err
}
defer ensureReaderClosed(resp)
var res types.ContainerWaitResponse
if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
return -1, err
}
return res.StatusCode, nil
}
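A short wait sketch, assuming a caller-side package; the helper name is hypothetical. It blocks until the container exits and surfaces a non-zero exit status as an error.

package clientexample

import (
	"fmt"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
)

// waitForExit blocks until the container exits and returns an error when the
// exit status is non-zero.
func waitForExit(ctx context.Context, cli client.APIClient, containerID string) error {
	status, err := cli.ContainerWait(ctx, containerID)
	if err != nil {
		return err
	}
	if status != 0 {
		return fmt.Errorf("container %s exited with status %d", containerID, status)
	}
	return nil
}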

View File

@ -0,0 +1,94 @@
package client
import (
"errors"
"fmt"
)
// ErrConnectionFailed is an error raised when the connection between the client and the server fails.
var ErrConnectionFailed = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
// imageNotFoundError implements an error returned when an image is not in the docker host.
type imageNotFoundError struct {
imageID string
}
// Error returns a string representation of an imageNotFoundError
func (i imageNotFoundError) Error() string {
return fmt.Sprintf("Error: No such image: %s", i.imageID)
}
// IsErrImageNotFound returns true if the error is caused
// when an image is not found in the docker host.
func IsErrImageNotFound(err error) bool {
_, ok := err.(imageNotFoundError)
return ok
}
// containerNotFoundError implements an error returned when a container is not in the docker host.
type containerNotFoundError struct {
containerID string
}
// Error returns a string representation of a containerNotFoundError
func (e containerNotFoundError) Error() string {
return fmt.Sprintf("Error: No such container: %s", e.containerID)
}
// IsErrContainerNotFound returns true if the error is caused
// when a container is not found in the docker host.
func IsErrContainerNotFound(err error) bool {
_, ok := err.(containerNotFoundError)
return ok
}
// networkNotFoundError implements an error returned when a network is not in the docker host.
type networkNotFoundError struct {
networkID string
}
// Error returns a string representation of a networkNotFoundError
func (e networkNotFoundError) Error() string {
return fmt.Sprintf("Error: No such network: %s", e.networkID)
}
// IsErrNetworkNotFound returns true if the error is caused
// when a network is not found in the docker host.
func IsErrNetworkNotFound(err error) bool {
_, ok := err.(networkNotFoundError)
return ok
}
// volumeNotFoundError implements an error returned when a volume is not in the docker host.
type volumeNotFoundError struct {
volumeID string
}
// Error returns a string representation of a volumeNotFoundError
func (e volumeNotFoundError) Error() string {
return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
}
// IsErrVolumeNotFound returns true if the error is caused
// when a volume is not found in the docker host.
func IsErrVolumeNotFound(err error) bool {
_, ok := err.(volumeNotFoundError)
return ok
}
// unauthorizedError represents an authorization error in a remote registry.
type unauthorizedError struct {
cause error
}
// Error returns a string representation of an unauthorizedError
func (u unauthorizedError) Error() string {
return u.cause.Error()
}
// IsErrUnauthorized returns true if the error is caused
// when the remote registry authentication fails.
func IsErrUnauthorized(err error) bool {
_, ok := err.(unauthorizedError)
return ok
}
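A small sketch of how the typed helpers above are meant to be used, assuming a caller-side package: distinguish a missing container from other inspect failures.

package clientexample

import (
	"fmt"

	"github.com/docker/engine-api/client"
)

// describeContainer inspects a container and treats "not found" as a normal,
// reportable condition rather than a hard failure.
func describeContainer(cli client.APIClient, containerID string) error {
	info, err := cli.ContainerInspect(containerID)
	if err != nil {
		if client.IsErrContainerNotFound(err) {
			fmt.Printf("no container named %s on this host\n", containerID)
			return nil
		}
		return err
	}
	fmt.Printf("%+v\n", info)
	return nil
}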

View File

@ -0,0 +1,48 @@
package client
import (
"io"
"net/url"
"time"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
timetypes "github.com/docker/engine-api/types/time"
)
// Events returns a stream of events in the daemon in a ReadCloser.
// It's up to the caller to close the stream.
func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) {
query := url.Values{}
ref := time.Now()
if options.Since != "" {
ts, err := timetypes.GetTimestamp(options.Since, ref)
if err != nil {
return nil, err
}
query.Set("since", ts)
}
if options.Until != "" {
ts, err := timetypes.GetTimestamp(options.Until, ref)
if err != nil {
return nil, err
}
query.Set("until", ts)
}
if options.Filters.Len() > 0 {
filterJSON, err := filters.ToParam(options.Filters)
if err != nil {
return nil, err
}
query.Set("filters", filterJSON)
}
serverResponse, err := cli.getWithContext(ctx, "/events", query, nil)
if err != nil {
return nil, err
}
return serverResponse.body, nil
}

View File

@ -0,0 +1,173 @@
package client
import (
"crypto/tls"
"errors"
"fmt"
"net"
"net/http/httputil"
"net/url"
"strings"
"time"
"github.com/docker/engine-api/types"
"github.com/docker/go-connections/sockets"
)
// tlsClientCon holds tls information and a dialed connection.
type tlsClientCon struct {
*tls.Conn
rawConn net.Conn
}
func (c *tlsClientCon) CloseWrite() error {
// Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
// on its underlying connection.
if conn, ok := c.rawConn.(types.CloseWriter); ok {
return conn.CloseWrite()
}
return nil
}
// postHijacked sends a POST request and hijacks the connection.
func (cli *Client) postHijacked(path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
bodyEncoded, err := encodeData(body)
if err != nil {
return types.HijackedResponse{}, err
}
req, err := cli.newRequest("POST", path, query, bodyEncoded, headers)
if err != nil {
return types.HijackedResponse{}, err
}
req.Host = cli.addr
req.Header.Set("Connection", "Upgrade")
req.Header.Set("Upgrade", "tcp")
conn, err := dial(cli.proto, cli.addr, cli.transport.TLSConfig())
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
}
return types.HijackedResponse{}, err
}
// When we set up a TCP connection for hijack, there could be long periods
// of inactivity (a long running command with no output) that in certain
// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
// state. Setting TCP KeepAlive on the socket connection will prohibit
// ECONNTIMEOUT unless the socket connection truly is broken
if tcpConn, ok := conn.(*net.TCPConn); ok {
tcpConn.SetKeepAlive(true)
tcpConn.SetKeepAlivePeriod(30 * time.Second)
}
clientconn := httputil.NewClientConn(conn, nil)
defer clientconn.Close()
// Server hijacks the connection, error 'connection closed' expected
clientconn.Do(req)
rwc, br := clientconn.Hijack()
return types.HijackedResponse{Conn: rwc, Reader: br}, nil
}
func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
return tlsDialWithDialer(new(net.Dialer), network, addr, config)
}
// We need to copy Go's implementation of tls.Dial (pkg/crypto/tls/tls.go) in
// order to return our custom tlsClientCon struct which holds both the tls.Conn
// object _and_ its underlying raw connection. The rationale for this is that
// we need to be able to close the write end of the connection when attaching,
// which tls.Conn does not provide.
func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
// We want the Timeout and Deadline values from dialer to cover the
// whole process: TCP connection and TLS handshake. This means that we
// also need to start our own timers now.
timeout := dialer.Timeout
if !dialer.Deadline.IsZero() {
deadlineTimeout := dialer.Deadline.Sub(time.Now())
if timeout == 0 || deadlineTimeout < timeout {
timeout = deadlineTimeout
}
}
var errChannel chan error
if timeout != 0 {
errChannel = make(chan error, 2)
time.AfterFunc(timeout, func() {
errChannel <- errors.New("")
})
}
proxyDialer, err := sockets.DialerFromEnvironment(dialer)
if err != nil {
return nil, err
}
rawConn, err := proxyDialer.Dial(network, addr)
if err != nil {
return nil, err
}
// When we set up a TCP connection for hijack, there could be long periods
// of inactivity (a long running command with no output) that in certain
// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
// state. Setting TCP KeepAlive on the socket connection will prohibit
// ECONNTIMEOUT unless the socket connection truly is broken
if tcpConn, ok := rawConn.(*net.TCPConn); ok {
tcpConn.SetKeepAlive(true)
tcpConn.SetKeepAlivePeriod(30 * time.Second)
}
colonPos := strings.LastIndex(addr, ":")
if colonPos == -1 {
colonPos = len(addr)
}
hostname := addr[:colonPos]
// If no ServerName is set, infer the ServerName
// from the hostname we're connecting to.
if config.ServerName == "" {
// Make a copy to avoid polluting argument or default.
c := *config
c.ServerName = hostname
config = &c
}
conn := tls.Client(rawConn, config)
if timeout == 0 {
err = conn.Handshake()
} else {
go func() {
errChannel <- conn.Handshake()
}()
err = <-errChannel
}
if err != nil {
rawConn.Close()
return nil, err
}
// This is where Docker differs from the standard crypto/tls package: return a
// wrapper which holds both the TLS and raw connections.
return &tlsClientCon{conn, rawConn}, nil
}
func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
if tlsConfig != nil && proto != "unix" && proto != "npipe" {
// Note that this isn't the standard library's tls.Dial function
return tlsDial(proto, addr, tlsConfig)
}
if proto == "npipe" {
return sockets.DialPipe(addr, 32*time.Second)
}
return net.Dial(proto, addr)
}

View File

@ -0,0 +1,129 @@
package client
import (
"encoding/base64"
"encoding/json"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/container"
)
var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
// ImageBuild sends a request to the daemon to build images.
// The Body in the response implements an io.ReadCloser and it's up to the caller to
// close it.
func (cli *Client) ImageBuild(ctx context.Context, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
query, err := imageBuildOptionsToQuery(options)
if err != nil {
return types.ImageBuildResponse{}, err
}
headers := http.Header(make(map[string][]string))
buf, err := json.Marshal(options.AuthConfigs)
if err != nil {
return types.ImageBuildResponse{}, err
}
headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
headers.Set("Content-Type", "application/tar")
serverResp, err := cli.postRaw(ctx, "/build", query, options.Context, headers)
if err != nil {
return types.ImageBuildResponse{}, err
}
osType := getDockerOS(serverResp.header.Get("Server"))
return types.ImageBuildResponse{
Body: serverResp.body,
OSType: osType,
}, nil
}
func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
query := url.Values{
"t": options.Tags,
}
if options.SuppressOutput {
query.Set("q", "1")
}
if options.RemoteContext != "" {
query.Set("remote", options.RemoteContext)
}
if options.NoCache {
query.Set("nocache", "1")
}
if options.Remove {
query.Set("rm", "1")
} else {
query.Set("rm", "0")
}
if options.ForceRemove {
query.Set("forcerm", "1")
}
if options.PullParent {
query.Set("pull", "1")
}
if !container.Isolation.IsDefault(options.Isolation) {
query.Set("isolation", string(options.Isolation))
}
query.Set("cpusetcpus", options.CPUSetCPUs)
query.Set("cpusetmems", options.CPUSetMems)
query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
query.Set("memory", strconv.FormatInt(options.Memory, 10))
query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
query.Set("cgroupparent", options.CgroupParent)
query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
query.Set("dockerfile", options.Dockerfile)
ulimitsJSON, err := json.Marshal(options.Ulimits)
if err != nil {
return query, err
}
query.Set("ulimits", string(ulimitsJSON))
buildArgsJSON, err := json.Marshal(options.BuildArgs)
if err != nil {
return query, err
}
query.Set("buildargs", string(buildArgsJSON))
return query, nil
}
func getDockerOS(serverHeader string) string {
var osType string
matches := headerRegexp.FindStringSubmatch(serverHeader)
if len(matches) > 0 {
osType = matches[1]
}
return osType
}
// convertKVStringsToMap converts ["key=value"] to {"key":"value"}
func convertKVStringsToMap(values []string) map[string]string {
result := make(map[string]string, len(values))
for _, value := range values {
kv := strings.SplitN(value, "=", 2)
if len(kv) == 1 {
result[kv[0]] = ""
} else {
result[kv[0]] = kv[1]
}
}
return result
}
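A minimal build sketch, assuming a caller-side package: ImageBuild expects the build context as a tar stream, so the Dockerfile contents are wrapped into an in-memory archive before the call. The tag and helper names are hypothetical.

package clientexample

import (
	"archive/tar"
	"bytes"
	"io"
	"os"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
)

// buildFromDockerfile wraps a Dockerfile into a one-file tar build context and
// streams the daemon's build output to stdout.
func buildFromDockerfile(cli client.APIClient, dockerfile, tag string) error {
	// ImageBuild expects the build context as a tar stream.
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	hdr := &tar.Header{Name: "Dockerfile", Mode: 0600, Size: int64(len(dockerfile))}
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	if _, err := tw.Write([]byte(dockerfile)); err != nil {
		return err
	}
	if err := tw.Close(); err != nil {
		return err
	}

	resp, err := cli.ImageBuild(context.Background(), types.ImageBuildOptions{
		Context:    buf,
		Dockerfile: "Dockerfile",
		Tags:       []string{tag},
		Remove:     true,
	})
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// The body is a stream of JSON build messages; just echo it here.
	_, err = io.Copy(os.Stdout, resp.Body)
	return err
}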

View File

@ -0,0 +1,28 @@
package client
import (
"io"
"net/url"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
)
// ImageCreate creates a new image based on the parent options.
// It returns the JSON content in the response body.
func (cli *Client) ImageCreate(ctx context.Context, options types.ImageCreateOptions) (io.ReadCloser, error) {
query := url.Values{}
query.Set("fromImage", options.Parent)
query.Set("tag", options.Tag)
resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
if err != nil {
return nil, err
}
return resp.body, nil
}
func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) {
headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
return cli.postWithContext(ctx, "/images/create", query, nil, headers)
}

View File

@ -0,0 +1,21 @@
package client
import (
"encoding/json"
"net/url"
"github.com/docker/engine-api/types"
)
// ImageHistory returns the changes in an image in history format.
func (cli *Client) ImageHistory(imageID string) ([]types.ImageHistory, error) {
var history []types.ImageHistory
serverResp, err := cli.get("/images/"+imageID+"/history", url.Values{}, nil)
if err != nil {
return history, err
}
err = json.NewDecoder(serverResp.body).Decode(&history)
ensureReaderClosed(serverResp)
return history, err
}

View File

@ -0,0 +1,29 @@
package client
import (
"io"
"net/url"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
)
// ImageImport creates a new image based on the source options.
// It returns the JSON content in the response body.
func (cli *Client) ImageImport(ctx context.Context, options types.ImageImportOptions) (io.ReadCloser, error) {
query := url.Values{}
query.Set("fromSrc", options.SourceName)
query.Set("repo", options.RepositoryName)
query.Set("tag", options.Tag)
query.Set("message", options.Message)
for _, change := range options.Changes {
query.Add("changes", change)
}
resp, err := cli.postRaw(ctx, "/images/create", query, options.Source, nil)
if err != nil {
return nil, err
}
return resp.body, nil
}

View File

@ -0,0 +1,37 @@
package client
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
"github.com/docker/engine-api/types"
)
// ImageInspectWithRaw returns the image information and its raw representation.
func (cli *Client) ImageInspectWithRaw(imageID string, getSize bool) (types.ImageInspect, []byte, error) {
query := url.Values{}
if getSize {
query.Set("size", "1")
}
serverResp, err := cli.get("/images/"+imageID+"/json", query, nil)
if err != nil {
if serverResp.statusCode == http.StatusNotFound {
return types.ImageInspect{}, nil, imageNotFoundError{imageID}
}
return types.ImageInspect{}, nil, err
}
defer ensureReaderClosed(serverResp)
body, err := ioutil.ReadAll(serverResp.body)
if err != nil {
return types.ImageInspect{}, nil, err
}
var response types.ImageInspect
rdr := bytes.NewReader(body)
err = json.NewDecoder(rdr).Decode(&response)
return response, body, err
}

View File

@ -0,0 +1,39 @@
package client
import (
"encoding/json"
"net/url"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
)
// ImageList returns a list of images in the docker host.
func (cli *Client) ImageList(options types.ImageListOptions) ([]types.Image, error) {
var images []types.Image
query := url.Values{}
if options.Filters.Len() > 0 {
filterJSON, err := filters.ToParam(options.Filters)
if err != nil {
return images, err
}
query.Set("filters", filterJSON)
}
if options.MatchName != "" {
// FIXME rename this parameter, to not be confused with the filters flag
query.Set("filter", options.MatchName)
}
if options.All {
query.Set("all", "1")
}
serverResp, err := cli.get("/images/json", query, nil)
if err != nil {
return images, err
}
err = json.NewDecoder(serverResp.body).Decode(&images)
ensureReaderClosed(serverResp)
return images, err
}

View File

@ -0,0 +1,30 @@
package client
import (
"io"
"net/url"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
)
// ImageLoad loads an image in the docker host from the client host.
// It's up to the caller to close the io.ReadCloser returned by
// this function.
func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {
v := url.Values{}
v.Set("quiet", "0")
if quiet {
v.Set("quiet", "1")
}
headers := map[string][]string{"Content-Type": {"application/x-tar"}}
resp, err := cli.postRaw(ctx, "/images/load", v, input, headers)
if err != nil {
return types.ImageLoadResponse{}, err
}
return types.ImageLoadResponse{
Body: resp.body,
JSON: resp.header.Get("Content-Type") == "application/json",
}, nil
}

View File

@ -0,0 +1,36 @@
package client
import (
"io"
"net/http"
"net/url"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
)
// ImagePull requests the docker host to pull an image from a remote registry.
// It executes the privilege function if the operation is unauthorized
// and then tries one more time.
// It's up to the caller to handle the io.ReadCloser and close it properly.
func (cli *Client) ImagePull(ctx context.Context, options types.ImagePullOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) {
query := url.Values{}
query.Set("fromImage", options.ImageID)
if options.Tag != "" {
query.Set("tag", options.Tag)
}
resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
if resp.statusCode == http.StatusUnauthorized {
newAuthHeader, privilegeErr := privilegeFunc()
if privilegeErr != nil {
return nil, privilegeErr
}
resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
}
if err != nil {
return nil, err
}
return resp.body, nil
}
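A minimal pull sketch, assuming a caller-side package: the privilege function here simply declines to supply credentials on a 401 rather than retrying with new ones. Image, tag, and helper names are hypothetical.

package clientexample

import (
	"io"
	"os"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
)

// pullImage pulls an image and streams the progress output to stdout.
func pullImage(cli client.APIClient, image, tag string) error {
	noAuthRetry := func() (string, error) {
		return "", nil // no credentials to offer when the registry returns 401
	}
	body, err := cli.ImagePull(context.Background(), types.ImagePullOptions{
		ImageID: image,
		Tag:     tag,
	}, noAuthRetry)
	if err != nil {
		return err
	}
	defer body.Close()
	_, err = io.Copy(os.Stdout, body)
	return err
}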

View File

@ -0,0 +1,38 @@
package client
import (
"io"
"net/http"
"net/url"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
)
// ImagePush requests the docker host to push an image to a remote registry.
// It executes the privilege function if the operation is unauthorized
// and then tries one more time.
// It's up to the caller to handle the io.ReadCloser and close it properly.
func (cli *Client) ImagePush(ctx context.Context, options types.ImagePushOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) {
query := url.Values{}
query.Set("tag", options.Tag)
resp, err := cli.tryImagePush(ctx, options.ImageID, query, options.RegistryAuth)
if resp.statusCode == http.StatusUnauthorized {
newAuthHeader, privilegeErr := privilegeFunc()
if privilegeErr != nil {
return nil, privilegeErr
}
resp, err = cli.tryImagePush(ctx, options.ImageID, query, newAuthHeader)
}
if err != nil {
return nil, err
}
return resp.body, nil
}
func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*serverResponse, error) {
headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
return cli.postWithContext(ctx, "/images/"+imageID+"/push", query, nil, headers)
}

View File

@ -0,0 +1,30 @@
package client
import (
"encoding/json"
"net/url"
"github.com/docker/engine-api/types"
)
// ImageRemove removes an image from the docker host.
func (cli *Client) ImageRemove(options types.ImageRemoveOptions) ([]types.ImageDelete, error) {
query := url.Values{}
if options.Force {
query.Set("force", "1")
}
if !options.PruneChildren {
query.Set("noprune", "1")
}
resp, err := cli.delete("/images/"+options.ImageID, query, nil)
if err != nil {
return nil, err
}
var dels []types.ImageDelete
err = json.NewDecoder(resp.body).Decode(&dels)
ensureReaderClosed(resp)
return dels, err
}

View File

@ -0,0 +1,22 @@
package client
import (
"io"
"net/url"
"golang.org/x/net/context"
)
// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
// It's up to the caller to store the images and close the stream.
func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
query := url.Values{
"names": imageIDs,
}
resp, err := cli.getWithContext(ctx, "/images/get", query, nil)
if err != nil {
return nil, err
}
return resp.body, nil
}

View File

@ -0,0 +1,39 @@
package client
import (
"encoding/json"
"net/http"
"net/url"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/registry"
)
// ImageSearch makes the docker host search for a term in a remote registry.
// The list of results is not sorted in any fashion.
func (cli *Client) ImageSearch(options types.ImageSearchOptions, privilegeFunc RequestPrivilegeFunc) ([]registry.SearchResult, error) {
var results []registry.SearchResult
query := url.Values{}
query.Set("term", options.Term)
resp, err := cli.tryImageSearch(query, options.RegistryAuth)
if resp.statusCode == http.StatusUnauthorized {
newAuthHeader, privilegeErr := privilegeFunc()
if privilegeErr != nil {
return results, privilegeErr
}
resp, err = cli.tryImageSearch(query, newAuthHeader)
}
if err != nil {
return results, err
}
err = json.NewDecoder(resp.body).Decode(&results)
ensureReaderClosed(resp)
return results, err
}
func (cli *Client) tryImageSearch(query url.Values, registryAuth string) (*serverResponse, error) {
headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
return cli.get("/images/search", query, headers)
}

View File

@ -0,0 +1,21 @@
package client
import (
"net/url"
"github.com/docker/engine-api/types"
)
// ImageTag tags an image in the docker host
func (cli *Client) ImageTag(options types.ImageTagOptions) error {
query := url.Values{}
query.Set("repo", options.RepositoryName)
query.Set("tag", options.Tag)
if options.Force {
query.Set("force", "1")
}
resp, err := cli.post("/images/"+options.ImageID+"/tag", query, nil, nil)
ensureReaderClosed(resp)
return err
}

View File

@ -0,0 +1,25 @@
package client
import (
"encoding/json"
"fmt"
"net/url"
"github.com/docker/engine-api/types"
)
// Info returns information about the docker server.
func (cli *Client) Info() (types.Info, error) {
var info types.Info
serverResp, err := cli.get("/info", url.Values{}, nil)
if err != nil {
return info, err
}
defer ensureReaderClosed(serverResp)
if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil {
return info, fmt.Errorf("Error reading remote info: %v", err)
}
return info, nil
}

View File

@ -0,0 +1,78 @@
package client
import (
"io"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/container"
"github.com/docker/engine-api/types/filters"
"github.com/docker/engine-api/types/network"
"github.com/docker/engine-api/types/registry"
)
// APIClient is an interface that clients that talk with a docker server must implement.
type APIClient interface {
ClientVersion() string
ContainerAttach(options types.ContainerAttachOptions) (types.HijackedResponse, error)
ContainerCommit(options types.ContainerCommitOptions) (types.ContainerCommitResponse, error)
ContainerCreate(config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error)
ContainerDiff(containerID string) ([]types.ContainerChange, error)
ContainerExecAttach(execID string, config types.ExecConfig) (types.HijackedResponse, error)
ContainerExecCreate(config types.ExecConfig) (types.ContainerExecCreateResponse, error)
ContainerExecInspect(execID string) (types.ContainerExecInspect, error)
ContainerExecResize(options types.ResizeOptions) error
ContainerExecStart(execID string, config types.ExecStartCheck) error
ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error)
ContainerInspect(containerID string) (types.ContainerJSON, error)
ContainerInspectWithRaw(containerID string, getSize bool) (types.ContainerJSON, []byte, error)
ContainerKill(containerID, signal string) error
ContainerList(options types.ContainerListOptions) ([]types.Container, error)
ContainerLogs(ctx context.Context, options types.ContainerLogsOptions) (io.ReadCloser, error)
ContainerPause(containerID string) error
ContainerRemove(options types.ContainerRemoveOptions) error
ContainerRename(containerID, newContainerName string) error
ContainerResize(options types.ResizeOptions) error
ContainerRestart(containerID string, timeout int) error
ContainerStatPath(containerID, path string) (types.ContainerPathStat, error)
ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error)
ContainerStart(containerID string) error
ContainerStop(containerID string, timeout int) error
ContainerTop(containerID string, arguments []string) (types.ContainerProcessList, error)
ContainerUnpause(containerID string) error
ContainerUpdate(containerID string, updateConfig container.UpdateConfig) error
ContainerWait(ctx context.Context, containerID string) (int, error)
CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
CopyToContainer(ctx context.Context, options types.CopyToContainerOptions) error
Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error)
ImageBuild(ctx context.Context, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
ImageCreate(ctx context.Context, options types.ImageCreateOptions) (io.ReadCloser, error)
ImageHistory(imageID string) ([]types.ImageHistory, error)
ImageImport(ctx context.Context, options types.ImageImportOptions) (io.ReadCloser, error)
ImageInspectWithRaw(imageID string, getSize bool) (types.ImageInspect, []byte, error)
ImageList(options types.ImageListOptions) ([]types.Image, error)
ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error)
ImagePull(ctx context.Context, options types.ImagePullOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error)
ImagePush(ctx context.Context, options types.ImagePushOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error)
ImageRemove(options types.ImageRemoveOptions) ([]types.ImageDelete, error)
ImageSearch(options types.ImageSearchOptions, privilegeFunc RequestPrivilegeFunc) ([]registry.SearchResult, error)
ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error)
ImageTag(options types.ImageTagOptions) error
Info() (types.Info, error)
NetworkConnect(networkID, containerID string, config *network.EndpointSettings) error
NetworkCreate(options types.NetworkCreate) (types.NetworkCreateResponse, error)
NetworkDisconnect(networkID, containerID string, force bool) error
NetworkInspect(networkID string) (types.NetworkResource, error)
NetworkList(options types.NetworkListOptions) ([]types.NetworkResource, error)
NetworkRemove(networkID string) error
RegistryLogin(auth types.AuthConfig) (types.AuthResponse, error)
ServerVersion() (types.Version, error)
VolumeCreate(options types.VolumeCreateRequest) (types.Volume, error)
VolumeInspect(volumeID string) (types.Volume, error)
VolumeList(filter filters.Args) (types.VolumesListResponse, error)
VolumeRemove(volumeID string) error
}
// Ensure that Client always implements APIClient.
var _ APIClient = &Client{}

View File

@ -0,0 +1,27 @@
package client
import (
"encoding/json"
"net/http"
"net/url"
"github.com/docker/engine-api/types"
)
// RegistryLogin authenticates the docker server with a given docker registry.
// It returns unauthorizedError when the authentication fails.
func (cli *Client) RegistryLogin(auth types.AuthConfig) (types.AuthResponse, error) {
resp, err := cli.post("/auth", url.Values{}, auth, nil)
if resp != nil && resp.statusCode == http.StatusUnauthorized {
return types.AuthResponse{}, unauthorizedError{err}
}
if err != nil {
return types.AuthResponse{}, err
}
var response types.AuthResponse
err = json.NewDecoder(resp.body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}

View File

@ -0,0 +1,86 @@
package client
import (
"encoding/json"
"net/http"
"net/url"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
"github.com/docker/engine-api/types/network"
)
// NetworkCreate creates a new network in the docker host.
func (cli *Client) NetworkCreate(options types.NetworkCreate) (types.NetworkCreateResponse, error) {
var response types.NetworkCreateResponse
serverResp, err := cli.post("/networks/create", nil, options, nil)
if err != nil {
return response, err
}
err = json.NewDecoder(serverResp.body).Decode(&response)
ensureReaderClosed(serverResp)
return response, err
}
// NetworkRemove removes an existing network from the docker host.
func (cli *Client) NetworkRemove(networkID string) error {
resp, err := cli.delete("/networks/"+networkID, nil, nil)
ensureReaderClosed(resp)
return err
}
// NetworkConnect connects a container to an existing network in the docker host.
func (cli *Client) NetworkConnect(networkID, containerID string, config *network.EndpointSettings) error {
nc := types.NetworkConnect{
Container: containerID,
EndpointConfig: config,
}
resp, err := cli.post("/networks/"+networkID+"/connect", nil, nc, nil)
ensureReaderClosed(resp)
return err
}
// NetworkDisconnect disconnects a container from an existing network in the docker host.
func (cli *Client) NetworkDisconnect(networkID, containerID string, force bool) error {
nd := types.NetworkDisconnect{Container: containerID, Force: force}
resp, err := cli.post("/networks/"+networkID+"/disconnect", nil, nd, nil)
ensureReaderClosed(resp)
return err
}
// NetworkList returns the list of networks configured in the docker host.
func (cli *Client) NetworkList(options types.NetworkListOptions) ([]types.NetworkResource, error) {
query := url.Values{}
if options.Filters.Len() > 0 {
filterJSON, err := filters.ToParam(options.Filters)
if err != nil {
return nil, err
}
query.Set("filters", filterJSON)
}
var networkResources []types.NetworkResource
resp, err := cli.get("/networks", query, nil)
if err != nil {
return networkResources, err
}
err = json.NewDecoder(resp.body).Decode(&networkResources)
ensureReaderClosed(resp)
return networkResources, err
}
// NetworkInspect returns the information for a specific network configured in the docker host.
func (cli *Client) NetworkInspect(networkID string) (types.NetworkResource, error) {
var networkResource types.NetworkResource
resp, err := cli.get("/networks/"+networkID, nil, nil)
if err != nil {
if resp.statusCode == http.StatusNotFound {
return networkResource, networkNotFoundError{networkID}
}
return networkResource, err
}
err = json.NewDecoder(resp.body).Decode(&networkResource)
ensureReaderClosed(resp)
return networkResource, err
}

View File

@ -0,0 +1,9 @@
package client
// RequestPrivilegeFunc is a function interface that
// clients can supply to retry operations after
// getting an authorization error.
// This function returns the registry authentication
// header value in base 64 format, or an error
// if the privilege request fails.
type RequestPrivilegeFunc func() (string, error)
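A sketch of one possible RequestPrivilegeFunc, assuming a caller-side package: it base64-encodes a JSON credential blob for the X-Registry-Auth header. The lowercase JSON keys mirror the usual registry auth convention and are an assumption, not a documented contract.

package clientexample

import (
	"encoding/base64"
	"encoding/json"

	"github.com/docker/engine-api/client"
)

// basicAuthPrivilegeFunc returns a RequestPrivilegeFunc that produces a fresh
// X-Registry-Auth header value when a pull, push or search gets a 401.
func basicAuthPrivilegeFunc(username, password string) client.RequestPrivilegeFunc {
	return func() (string, error) {
		// The exact JSON field names expected by the registry auth header are
		// assumed here.
		buf, err := json.Marshal(map[string]string{
			"username": username,
			"password": password,
		})
		if err != nil {
			return "", err
		}
		return base64.URLEncoding.EncodeToString(buf), nil
	}
}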

View File

@ -0,0 +1,195 @@
package client
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/docker/engine-api/client/transport/cancellable"
"golang.org/x/net/context"
)
// serverResponse is a wrapper for http API responses.
type serverResponse struct {
body io.ReadCloser
header http.Header
statusCode int
}
// head sends an http request to the docker API using the method HEAD.
func (cli *Client) head(path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
return cli.sendRequest(context.Background(), "HEAD", path, query, nil, headers)
}
// get sends an http request to the docker API using the method GET.
func (cli *Client) get(path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
return cli.getWithContext(context.Background(), path, query, headers)
}
// getWithContext sends an http request to the docker API using the method GET with a specific go context.
func (cli *Client) getWithContext(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
return cli.sendRequest(ctx, "GET", path, query, nil, headers)
}
// post sends an http request to the docker API using the method POST.
func (cli *Client) post(path string, query url.Values, body interface{}, headers map[string][]string) (*serverResponse, error) {
return cli.postWithContext(context.Background(), path, query, body, headers)
}
// postWithContext sends an http request to the docker API using the method POST with a specific go context.
func (cli *Client) postWithContext(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (*serverResponse, error) {
return cli.sendRequest(ctx, "POST", path, query, body, headers)
}
// postRaw sends the raw input to the docker API using the method POST with a specific go context.
func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
return cli.sendClientRequest(ctx, "POST", path, query, body, headers)
}
// put sends an http request to the docker API using the method PUT.
func (cli *Client) put(path string, query url.Values, body interface{}, headers map[string][]string) (*serverResponse, error) {
return cli.sendRequest(context.Background(), "PUT", path, query, body, headers)
}
// putRaw sends the raw input to the docker API using the method PUT.
func (cli *Client) putRaw(path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
return cli.putRawWithContext(context.Background(), path, query, body, headers)
}
// putRawWithContext sends the raw input to the docker API using the method PUT with a specific go context.
func (cli *Client) putRawWithContext(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
return cli.sendClientRequest(ctx, "PUT", path, query, body, headers)
}
// delete sends an http request to the docker API using the method DELETE.
func (cli *Client) delete(path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
return cli.sendRequest(context.Background(), "DELETE", path, query, nil, headers)
}
func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body interface{}, headers map[string][]string) (*serverResponse, error) {
params, err := encodeData(body)
if err != nil {
return nil, err
}
if body != nil {
if headers == nil {
headers = make(map[string][]string)
}
headers["Content-Type"] = []string{"application/json"}
}
return cli.sendClientRequest(ctx, method, path, query, params, headers)
}
func (cli *Client) sendClientRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
serverResp := &serverResponse{
body: nil,
statusCode: -1,
}
expectedPayload := (method == "POST" || method == "PUT")
if expectedPayload && body == nil {
body = bytes.NewReader([]byte{})
}
req, err := cli.newRequest(method, path, query, body, headers)
if err != nil {
return serverResp, err
}
req.URL.Host = cli.addr
req.URL.Scheme = cli.transport.Scheme()
if expectedPayload && req.Header.Get("Content-Type") == "" {
req.Header.Set("Content-Type", "text/plain")
}
resp, err := cancellable.Do(ctx, cli.transport, req)
if resp != nil {
serverResp.statusCode = resp.StatusCode
}
if err != nil {
if isTimeout(err) || strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
return serverResp, ErrConnectionFailed
}
if !cli.transport.Secure() && strings.Contains(err.Error(), "malformed HTTP response") {
return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
}
if cli.transport.Secure() && strings.Contains(err.Error(), "remote error: bad certificate") {
return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err)
}
return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err)
}
if serverResp.statusCode < 200 || serverResp.statusCode >= 400 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return serverResp, err
}
if len(body) == 0 {
return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL)
}
return serverResp, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body))
}
serverResp.body = resp.Body
serverResp.header = resp.Header
return serverResp, nil
}
func (cli *Client) newRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*http.Request, error) {
apiPath := cli.getAPIPath(path, query)
req, err := http.NewRequest(method, apiPath, body)
if err != nil {
return nil, err
}
// Add CLI Config's HTTP Headers BEFORE we set the Docker headers
// so that the user can't override our headers
for k, v := range cli.customHTTPHeaders {
req.Header.Set(k, v)
}
if headers != nil {
for k, v := range headers {
req.Header[k] = v
}
}
return req, nil
}
func encodeData(data interface{}) (*bytes.Buffer, error) {
params := bytes.NewBuffer(nil)
if data != nil {
if err := json.NewEncoder(params).Encode(data); err != nil {
return nil, err
}
}
return params, nil
}
func ensureReaderClosed(response *serverResponse) {
if response != nil && response.body != nil {
response.body.Close()
}
}
func isTimeout(err error) bool {
type timeout interface {
Timeout() bool
}
e := err
switch urlErr := err.(type) {
case *url.Error:
e = urlErr.Err
}
t, ok := e.(timeout)
return ok && t.Timeout()
}

View File

@ -0,0 +1,23 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.5
package cancellable
import (
"net/http"
"github.com/docker/engine-api/client/transport"
)
func canceler(client transport.Sender, req *http.Request) func() {
// TODO(djd): Respect any existing value of req.Cancel.
ch := make(chan struct{})
req.Cancel = ch
return func() {
close(ch)
}
}

View File

@ -0,0 +1,27 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.5
package cancellable
import (
"net/http"
"github.com/docker/engine-api/client/transport"
)
type requestCanceler interface {
CancelRequest(*http.Request)
}
func canceler(client transport.Sender, req *http.Request) func() {
rc, ok := client.(requestCanceler)
if !ok {
return func() {}
}
return func() {
rc.CancelRequest(req)
}
}

View File

@ -0,0 +1,113 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cancellable provides helper functions to cancel HTTP requests.
package cancellable
import (
"io"
"net/http"
"github.com/docker/engine-api/client/transport"
"golang.org/x/net/context"
)
func nop() {}
var (
testHookContextDoneBeforeHeaders = nop
testHookDoReturned = nop
testHookDidBodyClose = nop
)
// Do sends an HTTP request with the provided transport.Sender and returns an HTTP response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
//
// FORK INFORMATION:
//
// This function deviates from the upstream version in golang.org/x/net/context/ctxhttp by
// taking a Sender interface rather than a *http.Client directly. That allows us to use
// this function with mocked clients and hijacked connections.
func Do(ctx context.Context, client transport.Sender, req *http.Request) (*http.Response, error) {
if client == nil {
client = http.DefaultClient
}
// Request cancelation changed in Go 1.5, see canceler.go and canceler_go14.go.
cancel := canceler(client, req)
type responseAndError struct {
resp *http.Response
err error
}
result := make(chan responseAndError, 1)
go func() {
resp, err := client.Do(req)
testHookDoReturned()
result <- responseAndError{resp, err}
}()
var resp *http.Response
select {
case <-ctx.Done():
testHookContextDoneBeforeHeaders()
cancel()
// Clean up after the goroutine calling client.Do:
go func() {
if r := <-result; r.resp != nil && r.resp.Body != nil {
testHookDidBodyClose()
r.resp.Body.Close()
}
}()
return nil, ctx.Err()
case r := <-result:
var err error
resp, err = r.resp, r.err
if err != nil {
return resp, err
}
}
c := make(chan struct{})
go func() {
select {
case <-ctx.Done():
cancel()
case <-c:
// The response's Body is closed.
}
}()
resp.Body = &notifyingReader{resp.Body, c}
return resp, nil
}
// notifyingReader is an io.ReadCloser that closes the notify channel after
// Close is called or a Read fails on the underlying ReadCloser.
type notifyingReader struct {
io.ReadCloser
notify chan<- struct{}
}
func (r *notifyingReader) Read(p []byte) (int, error) {
n, err := r.ReadCloser.Read(p)
if err != nil && r.notify != nil {
close(r.notify)
r.notify = nil
}
return n, err
}
func (r *notifyingReader) Close() error {
err := r.ReadCloser.Close()
if r.notify != nil {
close(r.notify)
r.notify = nil
}
return err
}
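A sketch of driving cancellable.Do with a context-bound timeout, assuming the package is importable as github.com/docker/engine-api/client/transport/cancellable; a plain *http.Client satisfies transport.Sender because it has a matching Do method:

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "time"

    "github.com/docker/engine-api/client/transport/cancellable"
    "golang.org/x/net/context"
)

func main() {
    // Cancel the request if it takes longer than two seconds.
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    req, err := http.NewRequest("GET", "http://example.com/", nil)
    if err != nil {
        panic(err)
    }

    // *http.Client implements transport.Sender (it has a Do method).
    resp, err := cancellable.Do(ctx, http.DefaultClient, req)
    if err != nil {
        fmt.Println("request failed or was canceled:", err)
        return
    }
    defer resp.Body.Close()

    body, _ := ioutil.ReadAll(resp.Body)
    fmt.Println(len(body), "bytes read")
}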

View File

@ -0,0 +1,47 @@
package transport
import (
"crypto/tls"
"net/http"
)
// Sender is an interface that clients must implement
// to be able to send requests to a remote connection.
type Sender interface {
// Do sends request to a remote endpoint.
Do(*http.Request) (*http.Response, error)
}
// Client is an interface that abstracts all remote connections.
type Client interface {
Sender
// Secure tells whether the connection is secure or not.
Secure() bool
// Scheme returns the connection protocol the client uses.
Scheme() string
// TLSConfig returns any TLS configuration the client uses.
TLSConfig() *tls.Config
}
// tlsInfo returns information about the TLS configuration.
type tlsInfo struct {
tlsConfig *tls.Config
}
// TLSConfig returns the TLS configuration.
func (t *tlsInfo) TLSConfig() *tls.Config {
return t.tlsConfig
}
// Scheme returns protocol scheme to use.
func (t *tlsInfo) Scheme() string {
if t.tlsConfig != nil {
return "https"
}
return "http"
}
// Secure returns true if there is a TLS configuration.
func (t *tlsInfo) Secure() bool {
return t.tlsConfig != nil
}
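For illustration only (not part of this vendor drop), any value with a matching Do method satisfies Sender, so a hypothetical function adapter in the spirit of http.HandlerFunc can stand in for a real client:

package transport

import "net/http"

// senderFunc is a hypothetical adapter that lets a plain function
// satisfy the Sender interface.
type senderFunc func(*http.Request) (*http.Response, error)

// Do sends the request by calling the wrapped function.
func (f senderFunc) Do(req *http.Request) (*http.Response, error) {
    return f(req)
}

// Compile-time check that senderFunc really implements Sender.
var _ Sender = senderFunc(nil)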

View File

@ -0,0 +1,37 @@
// +build test
package transport
import (
"bytes"
"crypto/tls"
"io/ioutil"
"net/http"
)
type mockClient struct {
*tlsInfo
do func(*http.Request) (*http.Response, error)
}
// NewMockClient returns a mocked client that runs the supplied function as its `Do` call.
func NewMockClient(tlsConfig *tls.Config, doer func(*http.Request) (*http.Response, error)) Client {
return mockClient{
tlsInfo: &tlsInfo{tlsConfig},
do: doer,
}
}
// Do executes the supplied function for the mock.
func (m mockClient) Do(req *http.Request) (*http.Response, error) {
return m.do(req)
}
// ErrorMock returns a handler function that always responds with the given
// status code and message body. It can be passed to NewMockClient.
func ErrorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) {
return func(req *http.Request) (*http.Response, error) {
return &http.Response{
StatusCode: statusCode,
Body: ioutil.NopCloser(bytes.NewReader([]byte(message))),
}, nil
}
}
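A sketch of combining NewMockClient and ErrorMock in a test, assuming the `test` build tag is enabled and the package is importable as github.com/docker/engine-api/client/transport:

package transport_test

import (
    "net/http"
    "testing"

    "github.com/docker/engine-api/client/transport"
)

func TestErrorMock(t *testing.T) {
    // Build a client whose Do call always returns a 500 with a fixed body.
    client := transport.NewMockClient(nil, transport.ErrorMock(http.StatusInternalServerError, "boom"))

    req, err := http.NewRequest("GET", "http://docker.test/_ping", nil)
    if err != nil {
        t.Fatal(err)
    }

    resp, err := client.Do(req)
    if err != nil {
        t.Fatal(err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusInternalServerError {
        t.Fatalf("expected 500, got %d", resp.StatusCode)
    }
}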

View File

@ -0,0 +1,57 @@
// Package transport provides functions to send requests to remote endpoints.
package transport
import (
"fmt"
"net/http"
"github.com/docker/go-connections/sockets"
)
// apiTransport holds information about the http transport to connect with the API.
type apiTransport struct {
*http.Client
*tlsInfo
transport *http.Transport
}
// NewTransportWithHTTP creates a new transport based on the provided proto, address and http client.
// It uses Docker's default http transport configuration if the client is nil.
// It does not modify the client's transport if it's not nil.
func NewTransportWithHTTP(proto, addr string, client *http.Client) (Client, error) {
var transport *http.Transport
if client != nil {
tr, ok := client.Transport.(*http.Transport)
if !ok {
return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport)
}
transport = tr
} else {
transport = defaultTransport(proto, addr)
client = &http.Client{
Transport: transport,
}
}
return &apiTransport{
Client: client,
tlsInfo: &tlsInfo{transport.TLSClientConfig},
transport: transport,
}, nil
}
// CancelRequest stops a request execution.
func (a *apiTransport) CancelRequest(req *http.Request) {
a.transport.CancelRequest(req)
}
// defaultTransport creates a new http.Transport with Docker's
// default transport configuration.
func defaultTransport(proto, addr string) *http.Transport {
tr := new(http.Transport)
sockets.ConfigureTransport(tr, proto, addr)
return tr
}
var _ Client = &apiTransport{}
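A hedged sketch of building a transport for a Unix socket; the socket path is the conventional Docker location and the request host is arbitrary, since the unix dialer ignores it:

package main

import (
    "fmt"
    "net/http"

    "github.com/docker/engine-api/client/transport"
)

func main() {
    // Passing nil lets the package build its own http.Client with
    // Docker's default transport configuration.
    t, err := transport.NewTransportWithHTTP("unix", "/var/run/docker.sock", nil)
    if err != nil {
        panic(err)
    }

    fmt.Println("scheme:", t.Scheme(), "secure:", t.Secure())

    // The returned value is also a Sender, so it can issue requests directly.
    req, err := http.NewRequest("GET", t.Scheme()+"://docker/_ping", nil)
    if err != nil {
        panic(err)
    }
    if resp, err := t.Do(req); err == nil {
        resp.Body.Close()
    }
}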

View File

@ -0,0 +1,20 @@
package client
import (
"encoding/json"
"github.com/docker/engine-api/types"
)
// ServerVersion returns information of the docker client and server host.
func (cli *Client) ServerVersion() (types.Version, error) {
resp, err := cli.get("/version", nil, nil)
if err != nil {
return types.Version{}, err
}
var server types.Version
err = json.NewDecoder(resp.body).Decode(&server)
ensureReaderClosed(resp)
return server, err
}
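Assuming an already-constructed engine-api client (the constructor is not shown in this diff) and that types.Version exposes a Version field, a small sketch of using ServerVersion:

package example

import (
    "fmt"

    "github.com/docker/engine-api/client"
)

// printServerVersion is a hypothetical helper; cli is assumed to be an
// already-constructed engine-api client.
func printServerVersion(cli *client.Client) error {
    v, err := cli.ServerVersion()
    if err != nil {
        return err
    }
    // The Version field of types.Version carries the daemon version string.
    fmt.Println("daemon version:", v.Version)
    return nil
}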

View File

@ -0,0 +1,66 @@
package client
import (
"encoding/json"
"net/http"
"net/url"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
)
// VolumeList returns the volumes configured in the docker host.
func (cli *Client) VolumeList(filter filters.Args) (types.VolumesListResponse, error) {
var volumes types.VolumesListResponse
query := url.Values{}
if filter.Len() > 0 {
filterJSON, err := filters.ToParam(filter)
if err != nil {
return volumes, err
}
query.Set("filters", filterJSON)
}
resp, err := cli.get("/volumes", query, nil)
if err != nil {
return volumes, err
}
err = json.NewDecoder(resp.body).Decode(&volumes)
ensureReaderClosed(resp)
return volumes, err
}
// VolumeInspect returns the information about a specific volume in the docker host.
func (cli *Client) VolumeInspect(volumeID string) (types.Volume, error) {
var volume types.Volume
resp, err := cli.get("/volumes/"+volumeID, nil, nil)
if err != nil {
if resp.statusCode == http.StatusNotFound {
return volume, volumeNotFoundError{volumeID}
}
return volume, err
}
err = json.NewDecoder(resp.body).Decode(&volume)
ensureReaderClosed(resp)
return volume, err
}
// VolumeCreate creates a volume in the docker host.
func (cli *Client) VolumeCreate(options types.VolumeCreateRequest) (types.Volume, error) {
var volume types.Volume
resp, err := cli.post("/volumes/create", nil, options, nil)
if err != nil {
return volume, err
}
err = json.NewDecoder(resp.body).Decode(&volume)
ensureReaderClosed(resp)
return volume, err
}
// VolumeRemove removes a volume from the docker host.
func (cli *Client) VolumeRemove(volumeID string) error {
resp, err := cli.delete("/volumes/"+volumeID, nil, nil)
ensureReaderClosed(resp)
return err
}
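A sketch of the create/inspect/remove round trip these methods allow, again assuming an already-constructed client; the volume name and driver are illustrative, and the VolumeCreateRequest and Volume field names are assumed from engine-api's types package:

package example

import (
    "fmt"

    "github.com/docker/engine-api/client"
    "github.com/docker/engine-api/types"
)

// volumeRoundTrip creates, inspects and removes a named volume.
func volumeRoundTrip(cli *client.Client) error {
    created, err := cli.VolumeCreate(types.VolumeCreateRequest{
        Name:   "example-data",
        Driver: "local",
    })
    if err != nil {
        return err
    }

    inspected, err := cli.VolumeInspect(created.Name)
    if err != nil {
        return err
    }
    fmt.Println("mountpoint:", inspected.Mountpoint)

    return cli.VolumeRemove(created.Name)
}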

View File

@ -4,8 +4,7 @@ package types
type AuthConfig struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth"`
Email string `json:"email"`
Auth string `json:"auth,omitempty"`
ServerAddress string `json:"serveraddress,omitempty"`
RegistryToken string `json:"registrytoken,omitempty"`
}

View File

@ -127,7 +127,7 @@ type ImageBuildOptions struct {
Remove bool
ForceRemove bool
PullParent bool
IsolationLevel container.IsolationLevel
Isolation container.Isolation
CPUSetCPUs string
CPUSetMems string
CPUShares int64

View File

@ -24,12 +24,12 @@ type Config struct {
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
Env []string // List of environment variable to set in the container
Cmd *strslice.StrSlice // Command to run when starting the container
Cmd strslice.StrSlice // Command to run when starting the container
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{} // List of volumes (mounts) used for the container
WorkingDir string // Current directory (PWD) in the command will be launched
Entrypoint *strslice.StrSlice // Entrypoint to run when starting the container
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
NetworkDisabled bool `json:",omitempty"` // Is network disabled
MacAddress string `json:",omitempty"` // Mac Address of the container
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile

View File

@ -12,13 +12,13 @@ import (
// NetworkMode represents the container network stack.
type NetworkMode string
// IsolationLevel represents the isolation level of a container. The supported
// Isolation represents the isolation technology of a container. The supported
// values are platform specific
type IsolationLevel string
type Isolation string
// IsDefault indicates the default isolation level of a container. On Linux this
// IsDefault indicates the default isolation technology of a container. On Linux this
// is the native driver. On Windows, this is a Windows Server Container.
func (i IsolationLevel) IsDefault() bool {
func (i Isolation) IsDefault() bool {
return strings.ToLower(string(i)) == "default" || string(i) == ""
}
@ -213,8 +213,8 @@ type HostConfig struct {
VolumesFrom []string // List of volumes to take from other container
// Applicable to UNIX platforms
CapAdd *strslice.StrSlice // List of kernel capabilities to add to the container
CapDrop *strslice.StrSlice // List of kernel capabilities to remove from the container
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
DNS []string `json:"Dns"` // List of DNS server to lookup
DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
@ -234,7 +234,7 @@ type HostConfig struct {
// Applicable to Windows
ConsoleSize [2]int // Initial console size
Isolation IsolationLevel // Isolation level of the container (eg default, hyperv)
Isolation Isolation // Isolation technology of the container (eg default, hyperv)
// Contains container's resources (cgroups, ulimits)
Resources

View File

@ -4,8 +4,8 @@ package container
import "strings"
// IsValid indicates is an isolation level is valid
func (i IsolationLevel) IsValid() bool {
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault()
}
@ -72,12 +72,6 @@ func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
}
// IsPreDefinedNetwork indicates if a network is predefined by the daemon
func IsPreDefinedNetwork(network string) bool {
n := NetworkMode(network)
return n.IsBridge() || n.IsHost() || n.IsNone()
}
//UserDefined indicates user-created network
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {

View File

@ -21,17 +21,17 @@ func (n NetworkMode) IsUserDefined() bool {
}
// IsHyperV indicates the use of a Hyper-V partition for isolation
func (i IsolationLevel) IsHyperV() bool {
func (i Isolation) IsHyperV() bool {
return strings.ToLower(string(i)) == "hyperv"
}
// IsProcess indicates the use of process isolation
func (i IsolationLevel) IsProcess() bool {
func (i Isolation) IsProcess() bool {
return strings.ToLower(string(i)) == "process"
}
// IsValid indicates is an isolation level is valid
func (i IsolationLevel) IsValid() bool {
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
}
@ -49,11 +49,6 @@ func (n NetworkMode) NetworkName() string {
return ""
}
// IsPreDefinedNetwork indicates if a network is predefined by the daemon
func IsPreDefinedNetwork(network string) bool {
return false
}
// ValidateNetMode ensures that the various combinations of requested
// network settings are valid.
func ValidateNetMode(c *Config, hc *HostConfig) error {
@ -70,10 +65,10 @@ func ValidateNetMode(c *Config, hc *HostConfig) error {
return nil
}
// ValidateIsolationLevel performs platform specific validation of the
// isolation level in the hostconfig structure. Windows supports 'default' (or
// ValidateIsolation performs platform specific validation of the
// isolation technology in the hostconfig structure. Windows supports 'default' (or
// blank), 'process', or 'hyperv'.
func ValidateIsolationLevel(hc *HostConfig) error {
func ValidateIsolation(hc *HostConfig) error {
// We may not be passed a host config, such as in the case of docker commit
if hc == nil {
return nil

View File

@ -7,23 +7,15 @@ import (
// StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options.
type StrSlice struct {
parts []string
}
type StrSlice []string
// MarshalJSON Marshals (or serializes) the StrSlice into the json format.
// This method is needed to implement json.Marshaller.
func (e *StrSlice) MarshalJSON() ([]byte, error) {
if e == nil {
return []byte{}, nil
}
return json.Marshal(e.Slice())
}
// UnmarshalJSON decodes the byte slice whether it's a string or an array of strings.
// This method is needed to implement json.Unmarshaler.
// UnmarshalJSON decodes the byte slice whether it's a string or an array of
// strings. This method is needed to implement json.Unmarshaler.
func (e *StrSlice) UnmarshalJSON(b []byte) error {
if len(b) == 0 {
// With no input, we preserve the existing value by returning nil and
// leaving the target alone. This allows defining default values for
// the type.
return nil
}
@ -36,36 +28,16 @@ func (e *StrSlice) UnmarshalJSON(b []byte) error {
p = append(p, s)
}
e.parts = p
*e = p
return nil
}
// Len returns the number of parts of the StrSlice.
func (e *StrSlice) Len() int {
if e == nil {
return 0
}
return len(e.parts)
}
// Slice gets the parts of the StrSlice as a Slice of string.
func (e *StrSlice) Slice() []string {
if e == nil {
return nil
}
return e.parts
}
// ToString gets space separated string of all the parts.
func (e *StrSlice) ToString() string {
s := e.Slice()
if s == nil {
return ""
}
return strings.Join(s, " ")
// String gets space separated string of all the parts.
func (e StrSlice) String() string {
return strings.Join([]string(e), " ")
}
// New creates an StrSlice based on the specified parts (as strings).
func New(parts ...string) *StrSlice {
return &StrSlice{parts}
func New(parts ...string) StrSlice {
return StrSlice(parts)
}
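Since StrSlice is now a plain []string with a custom UnmarshalJSON, both a JSON string and a JSON array decode into it. A sketch, assuming the package lives at github.com/docker/engine-api/types/strslice:

package example

import (
    "encoding/json"
    "fmt"

    "github.com/docker/engine-api/types/strslice"
)

// decodeBoth shows that a JSON string and a JSON array both decode
// into a StrSlice value.
func decodeBoth() error {
    var fromString, fromArray strslice.StrSlice

    // A plain string becomes a one-element slice holding the whole string.
    if err := json.Unmarshal([]byte(`"echo hello"`), &fromString); err != nil {
        return err
    }
    // An array becomes a slice with one element per item.
    if err := json.Unmarshal([]byte(`["echo", "hello"]`), &fromArray); err != nil {
        return err
    }

    fmt.Println(fromString.String()) // echo hello
    fmt.Println(len(fromArray))      // 2
    return nil
}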

View File

@ -148,6 +148,7 @@ type Container struct {
NetworkMode string `json:",omitempty"`
}
NetworkSettings *SummaryNetworkSettings
Mounts []MountPoint
}
// CopyConfig contains request body of Remote API:
@ -217,6 +218,7 @@ type Info struct {
SystemTime string
ExecutionDriver string
LoggingDriver string
CgroupDriver string
NEventsListener int
KernelVersion string
OperatingSystem string
@ -238,8 +240,8 @@ type Info struct {
ClusterAdvertise string
}
// PluginsInfo is temp struct holds Plugins name
// registered with docker daemon. It used by Info struct
// PluginsInfo is a temp struct holding Plugins name
// registered with docker daemon. It is used by Info struct
type PluginsInfo struct {
// List of Volume plugins registered
Volume []string
@ -387,6 +389,7 @@ type NetworkResource struct {
ID string `json:"Id"`
Scope string
Driver string
EnableIPv6 bool
IPAM network.IPAM
Internal bool
Containers map[string]EndpointResource
@ -407,6 +410,7 @@ type NetworkCreate struct {
Name string
CheckDuplicate bool
Driver string
EnableIPv6 bool
IPAM network.IPAM
Internal bool
Options map[string]string

View File

@ -0,0 +1,89 @@
package sockets
import (
"errors"
"net"
"sync"
)
var errClosed = errors.New("use of closed network connection")
// InmemSocket implements net.Listener using in-memory only connections.
type InmemSocket struct {
chConn chan net.Conn
chClose chan struct{}
addr string
mu sync.Mutex
}
// dummyAddr is used to satisfy net.Addr for the in-mem socket
// it is just stored as a string and returns the string for all calls
type dummyAddr string
// NewInmemSocket creates an in-memory only net.Listener
// The addr argument can be any string, but is used to satisfy the `Addr()` part
// of the net.Listener interface
func NewInmemSocket(addr string, bufSize int) *InmemSocket {
return &InmemSocket{
chConn: make(chan net.Conn, bufSize),
chClose: make(chan struct{}),
addr: addr,
}
}
// Addr returns the socket's addr string to satisfy net.Listener
func (s *InmemSocket) Addr() net.Addr {
return dummyAddr(s.addr)
}
// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
func (s *InmemSocket) Accept() (net.Conn, error) {
select {
case conn := <-s.chConn:
return conn, nil
case <-s.chClose:
return nil, errClosed
}
}
// Close closes the listener. It will be unavailable for use once closed.
func (s *InmemSocket) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
select {
case <-s.chClose:
default:
close(s.chClose)
}
return nil
}
// Dial is used to establish a connection with the in-mem server
func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) {
srvConn, clientConn := net.Pipe()
select {
case s.chConn <- srvConn:
case <-s.chClose:
return nil, errClosed
}
return clientConn, nil
}
// Network returns the addr string, satisfies net.Addr
func (a dummyAddr) Network() string {
return string(a)
}
// String returns the string form
func (a dummyAddr) String() string {
return string(a)
}
// timeoutError is used when there is a timeout with a connection
// this implements the net.Error interface
type timeoutError struct{}
func (e *timeoutError) Error() string { return "i/o timeout" }
func (e *timeoutError) Timeout() bool { return true }
func (e *timeoutError) Temporary() bool { return true }
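A sketch of serving HTTP over the in-memory listener and dialing back through the same socket; the address string and URL host are arbitrary because the custom Dial ignores them:

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"

    "github.com/docker/go-connections/sockets"
)

func main() {
    // Listener and dialer share the same in-memory socket.
    inmem := sockets.NewInmemSocket("inmem://test", 1)
    defer inmem.Close()

    go http.Serve(inmem, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprint(w, "pong")
    }))

    client := &http.Client{
        Transport: &http.Transport{Dial: inmem.Dial},
    }

    resp, err := client.Get("http://test/ping")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    body, _ := ioutil.ReadAll(resp.Body)
    fmt.Println(string(body)) // pong
}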

View File

@ -0,0 +1,51 @@
package sockets
import (
"net"
"net/url"
"os"
"strings"
"golang.org/x/net/proxy"
)
// GetProxyEnv allows access to the uppercase and the lowercase forms of
// proxy-related variables. See the Go specification for details on these
// variables. https://golang.org/pkg/net/http/
func GetProxyEnv(key string) string {
proxyValue := os.Getenv(strings.ToUpper(key))
if proxyValue == "" {
return os.Getenv(strings.ToLower(key))
}
return proxyValue
}
// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a
// proxy.Dialer which will route the connections through the proxy using the
// given dialer.
func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) {
allProxy := GetProxyEnv("all_proxy")
if len(allProxy) == 0 {
return direct, nil
}
proxyURL, err := url.Parse(allProxy)
if err != nil {
return direct, err
}
proxyFromURL, err := proxy.FromURL(proxyURL, direct)
if err != nil {
return direct, err
}
noProxy := GetProxyEnv("no_proxy")
if len(noProxy) == 0 {
return proxyFromURL, nil
}
perHost := proxy.NewPerHost(proxyFromURL, direct)
perHost.AddFromString(noProxy)
return perHost, nil
}
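A sketch of wiring the environment-aware dialer into a plain net.Dialer; whether a proxy is used depends on ALL_PROXY/all_proxy in the process environment, and the dial target is a placeholder:

package main

import (
    "fmt"
    "net"
    "time"

    "github.com/docker/go-connections/sockets"
)

func main() {
    direct := &net.Dialer{Timeout: 30 * time.Second}

    // Falls back to the direct dialer when ALL_PROXY/all_proxy is unset.
    dialer, err := sockets.DialerFromEnvironment(direct)
    if err != nil {
        panic(err)
    }

    conn, err := dialer.Dial("tcp", "example.com:80")
    if err != nil {
        fmt.Println("dial failed:", err)
        return
    }
    conn.Close()
}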

View File

@ -0,0 +1,42 @@
// Package sockets provides helper functions to create and configure Unix or TCP sockets.
package sockets
import (
"net"
"net/http"
"time"
)
// Why 32? See https://github.com/docker/docker/pull/8035.
const defaultTimeout = 32 * time.Second
// ConfigureTransport configures the specified Transport according to the
// specified proto and addr.
// If the proto is unix (using a unix socket to communicate) or npipe, the
// compression is disabled.
func ConfigureTransport(tr *http.Transport, proto, addr string) error {
switch proto {
case "unix":
// No need for compression in local communications.
tr.DisableCompression = true
tr.Dial = func(_, _ string) (net.Conn, error) {
return net.DialTimeout(proto, addr, defaultTimeout)
}
case "npipe":
// No need for compression in local communications.
tr.DisableCompression = true
tr.Dial = func(_, _ string) (net.Conn, error) {
return DialPipe(addr, defaultTimeout)
}
default:
tr.Proxy = http.ProxyFromEnvironment
dialer, err := DialerFromEnvironment(&net.Dialer{
Timeout: defaultTimeout,
})
if err != nil {
return err
}
tr.Dial = dialer.Dial
}
return nil
}
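A sketch configuring one transport for a TCP endpoint and another for a Unix socket; both addresses are placeholders:

package main

import (
    "net/http"

    "github.com/docker/go-connections/sockets"
)

func main() {
    // TCP: proxy settings are honoured and a proxy-aware dialer is installed.
    tcpTr := &http.Transport{}
    if err := sockets.ConfigureTransport(tcpTr, "tcp", "127.0.0.1:2375"); err != nil {
        panic(err)
    }

    // Unix socket: compression is disabled and the dialer targets the socket path.
    unixTr := &http.Transport{}
    if err := sockets.ConfigureTransport(unixTr, "unix", "/var/run/docker.sock"); err != nil {
        panic(err)
    }

    _ = &http.Client{Transport: tcpTr}
    _ = &http.Client{Transport: unixTr}
}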

View File

@ -0,0 +1,15 @@
// +build !windows
package sockets
import (
"net"
"syscall"
"time"
)
// DialPipe connects to a Windows named pipe.
// This is not supported on other OSes.
func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
return nil, syscall.EAFNOSUPPORT
}

View File

@ -0,0 +1,13 @@
package sockets
import (
"net"
"time"
"github.com/Microsoft/go-winio"
)
// DialPipe connects to a Windows named pipe.
func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
return winio.DialPipe(addr, &timeout)
}

View File

@ -0,0 +1,22 @@
// Package sockets provides helper functions to create and configure Unix or TCP sockets.
package sockets
import (
"crypto/tls"
"net"
)
// NewTCPSocket creates a TCP socket listener with the specified address
// and the specified tls configuration. If tlsConfig is set, it will encapsulate the
// TCP listener inside a TLS one.
func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
l, err := net.Listen("tcp", addr)
if err != nil {
return nil, err
}
if tlsConfig != nil {
tlsConfig.NextProtos = []string{"http/1.1"}
l = tls.NewListener(l, tlsConfig)
}
return l, nil
}
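A minimal sketch of serving plain HTTP on a listener created this way; passing nil skips the TLS wrapping and the bind address is illustrative:

package main

import (
    "fmt"
    "net/http"

    "github.com/docker/go-connections/sockets"
)

func main() {
    // A nil tlsConfig yields an unencrypted listener; pass a *tls.Config to wrap it in TLS.
    l, err := sockets.NewTCPSocket("127.0.0.1:8080", nil)
    if err != nil {
        panic(err)
    }
    defer l.Close()

    fmt.Println("listening on", l.Addr())
    http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprint(w, "ok")
    }))
}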

View File

@ -0,0 +1,80 @@
// +build linux freebsd solaris
package sockets
import (
"fmt"
"net"
"os"
"strconv"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/opencontainers/runc/libcontainer/user"
)
// NewUnixSocket creates a unix socket with the specified path and group.
func NewUnixSocket(path, group string) (net.Listener, error) {
if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
return nil, err
}
mask := syscall.Umask(0777)
defer syscall.Umask(mask)
l, err := net.Listen("unix", path)
if err != nil {
return nil, err
}
if err := setSocketGroup(path, group); err != nil {
l.Close()
return nil, err
}
if err := os.Chmod(path, 0660); err != nil {
l.Close()
return nil, err
}
return l, nil
}
func setSocketGroup(path, group string) error {
if group == "" {
return nil
}
if err := changeGroup(path, group); err != nil {
if group != "docker" {
return err
}
logrus.Debugf("Warning: could not change group %s to docker: %v", path, err)
}
return nil
}
func changeGroup(path string, nameOrGid string) error {
gid, err := lookupGidByName(nameOrGid)
if err != nil {
return err
}
logrus.Debugf("%s group found. gid: %d", nameOrGid, gid)
return os.Chown(path, 0, gid)
}
func lookupGidByName(nameOrGid string) (int, error) {
groupFile, err := user.GetGroupPath()
if err != nil {
return -1, err
}
groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool {
return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid
})
if err != nil {
return -1, err
}
if groups != nil && len(groups) > 0 {
return groups[0].Gid, nil
}
gid, err := strconv.Atoi(nameOrGid)
if err == nil {
logrus.Warnf("Could not find GID %d", gid)
return gid, nil
}
return -1, fmt.Errorf("Group %s not found", nameOrGid)
}
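A sketch of creating a group-owned Unix socket and serving on it; the socket path and group name are examples, and the call needs enough privileges to chown the socket:

// +build linux freebsd solaris

package main

import (
    "fmt"
    "net/http"

    "github.com/docker/go-connections/sockets"
)

func main() {
    // The socket is created with mode 0660 and chowned to the given group
    // when that group exists; both values here are illustrative.
    l, err := sockets.NewUnixSocket("/tmp/example.sock", "docker")
    if err != nil {
        panic(err)
    }
    defer l.Close()

    fmt.Println("listening on", l.Addr())
    http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprint(w, "ok")
    }))
}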

View File

@ -41,12 +41,6 @@ var acceptedCBCCiphers = []uint16{
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
}
// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
var clientCipherSuites = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}
// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls
// options struct but wants to use a commonly accepted set of TLS cipher suites, with
// known weak algorithms removed.
@ -91,7 +85,7 @@ func certPool(caFile string) (*x509.CertPool, error) {
func Client(options Options) (*tls.Config, error) {
tlsConfig := ClientDefault
tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
if !options.InsecureSkipVerify {
if !options.InsecureSkipVerify && options.CAFile != "" {
CAs, err := certPool(options.CAFile)
if err != nil {
return nil, err
@ -99,7 +93,7 @@ func Client(options Options) (*tls.Config, error) {
tlsConfig.RootCAs = CAs
}
if options.CertFile != "" && options.KeyFile != "" {
if options.CertFile != "" || options.KeyFile != "" {
tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
if err != nil {
return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err)
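Based on the Options fields referenced in the hunk above (CAFile, CertFile, KeyFile; the full struct is not part of this excerpt), a hedged sketch of building a client TLS configuration; the file paths are placeholders:

package main

import (
    "fmt"

    "github.com/docker/go-connections/tlsconfig"
)

func main() {
    // File paths are placeholders; CAFile, CertFile and KeyFile are the
    // Options fields exercised by the hunk above.
    cfg, err := tlsconfig.Client(tlsconfig.Options{
        CAFile:   "/etc/docker/ca.pem",
        CertFile: "/etc/docker/cert.pem",
        KeyFile:  "/etc/docker/key.pem",
    })
    if err != nil {
        panic(err)
    }
    fmt.Println("min TLS version:", cfg.MinVersion)
}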

View File

@ -0,0 +1,17 @@
// +build go1.5
// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
//
package tlsconfig
import (
"crypto/tls"
)
// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
var clientCipherSuites = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}

View File

@ -0,0 +1,15 @@
// +build !go1.5
// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
//
package tlsconfig
import (
"crypto/tls"
)
// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
var clientCipherSuites = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}

View File

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,17 @@
runc
Copyright 2012-2015 Docker, Inc.
This product includes software developed at Docker, Inc. (http://www.docker.com).
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see http://www.bis.doc.gov
See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.

View File

@ -0,0 +1,2 @@
Tianon Gravi <admwiggin@gmail.com> (@tianon)
Aleksa Sarai <cyphar@cyphar.com> (@cyphar)

View File

@ -0,0 +1,108 @@
package user
import (
"errors"
"fmt"
"syscall"
)
var (
// The current operating system does not provide the required data for user lookups.
ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data")
)
func lookupUser(filter func(u User) bool) (User, error) {
// Get operating system-specific passwd reader-closer.
passwd, err := GetPasswd()
if err != nil {
return User{}, err
}
defer passwd.Close()
// Get the users.
users, err := ParsePasswdFilter(passwd, filter)
if err != nil {
return User{}, err
}
// No user entries found.
if len(users) == 0 {
return User{}, fmt.Errorf("no matching entries in passwd file")
}
// Assume the first entry is the "correct" one.
return users[0], nil
}
// CurrentUser looks up the current user by their user id in /etc/passwd. If the
// user cannot be found (or there is no /etc/passwd file on the filesystem),
// then CurrentUser returns an error.
func CurrentUser() (User, error) {
return LookupUid(syscall.Getuid())
}
// LookupUser looks up a user by their username in /etc/passwd. If the user
// cannot be found (or there is no /etc/passwd file on the filesystem), then
// LookupUser returns an error.
func LookupUser(username string) (User, error) {
return lookupUser(func(u User) bool {
return u.Name == username
})
}
// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
// be found (or there is no /etc/passwd file on the filesystem), then LookupId
// returns an error.
func LookupUid(uid int) (User, error) {
return lookupUser(func(u User) bool {
return u.Uid == uid
})
}
func lookupGroup(filter func(g Group) bool) (Group, error) {
// Get operating system-specific group reader-closer.
group, err := GetGroup()
if err != nil {
return Group{}, err
}
defer group.Close()
// Get the groups.
groups, err := ParseGroupFilter(group, filter)
if err != nil {
return Group{}, err
}
// No group entries found.
if len(groups) == 0 {
return Group{}, fmt.Errorf("no matching entries in group file")
}
// Assume the first entry is the "correct" one.
return groups[0], nil
}
// CurrentGroup looks up the current user's group by their primary group id's
// entry in /etc/passwd. If the group cannot be found (or there is no
// /etc/group file on the filesystem), then CurrentGroup returns an error.
func CurrentGroup() (Group, error) {
return LookupGid(syscall.Getgid())
}
// LookupGroup looks up a group by its name in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGroup
// returns an error.
func LookupGroup(groupname string) (Group, error) {
return lookupGroup(func(g Group) bool {
return g.Name == groupname
})
}
// LookupGid looks up a group by its group id in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGid
// returns an error.
func LookupGid(gid int) (Group, error) {
return lookupGroup(func(g Group) bool {
return g.Gid == gid
})
}
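A sketch exercising the lookup helpers on a Unix system; they read /etc/passwd and /etc/group, and the output format is illustrative:

package main

import (
    "fmt"

    "github.com/opencontainers/runc/libcontainer/user"
)

func main() {
    // Current user and their primary group, resolved from /etc/passwd and /etc/group.
    u, err := user.CurrentUser()
    if err != nil {
        panic(err)
    }
    g, err := user.LookupGid(u.Gid)
    if err != nil {
        panic(err)
    }
    fmt.Printf("uid=%d(%s) gid=%d(%s)\n", u.Uid, u.Name, g.Gid, g.Name)
}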

View File

@ -0,0 +1,30 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package user
import (
"io"
"os"
)
// Unix-specific path to the passwd and group formatted files.
const (
unixPasswdPath = "/etc/passwd"
unixGroupPath = "/etc/group"
)
func GetPasswdPath() (string, error) {
return unixPasswdPath, nil
}
func GetPasswd() (io.ReadCloser, error) {
return os.Open(unixPasswdPath)
}
func GetGroupPath() (string, error) {
return unixGroupPath, nil
}
func GetGroup() (io.ReadCloser, error) {
return os.Open(unixGroupPath)
}

Some files were not shown because too many files have changed in this diff.