mirror of https://github.com/docker/docs.git
commit ee71beb8a8

@@ -0,0 +1,38 @@
{
    "ImportPath": "github.com/docker/vetinari",
    "GoVersion": "go1.4.1",
    "Packages": [
        "./cmd/vetinari-server"
    ],
    "Deps": [
        {
            "ImportPath": "code.google.com/p/go-sqlite/go1/sqlite3",
            "Comment": "null-50",
            "Rev": "6e75c20f8fc4b936bccab88336915333ae165754"
        },
        {
            "ImportPath": "github.com/agl/ed25519",
            "Rev": "d2b94fd789ea21d12fac1a4443dd3a3f79cda72c"
        },
        {
            "ImportPath": "github.com/endophage/go-tuf",
            "Rev": "a06029e9b42bff41f0277e5ceb482ad00299210a"
        },
        {
            "ImportPath": "github.com/gorilla/context",
            "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a"
        },
        {
            "ImportPath": "github.com/gorilla/mux",
            "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf"
        },
        {
            "ImportPath": "github.com/tent/canonical-json-go",
            "Rev": "96e4ba3a7613a1216cbd1badca4efe382adea337"
        },
        {
            "ImportPath": "golang.org/x/net/context",
            "Rev": "1dfe7915deaf3f80b962c163b918868d8a6d8974"
        }
    ]
}

@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
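
The Godeps tree in this commit is produced by the godep workflow that the Readme points to. For orientation, the usual commands (standard godep usage; shown as a sketch, not part of this commit) are:

    godep save ./...       # vendor current dependencies and write Godeps.json
    godep restore          # check out the pinned revisions into GOPATH
    godep go build ./...   # build against the vendored packages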

@@ -0,0 +1,2 @@
/pkg
/bin

Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/backup.go (new file, generated, vendored; 92 lines)
@@ -0,0 +1,92 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sqlite3

/*
#include "sqlite3.h"
*/
import "C"

import (
    "io"
    "runtime"
)

// Backup is a handle to an online backup operation between two databases.
// [http://www.sqlite.org/c3ref/backup.html]
type Backup struct {
    src  *Conn
    dst  *Conn
    bkup *C.sqlite3_backup
}

// newBackup initializes an online backup operation from src.srcName to
// dst.dstName.
func newBackup(src *Conn, srcName string, dst *Conn, dstName string) (*Backup, error) {
    srcName += "\x00"
    dstName += "\x00"

    bkup := C.sqlite3_backup_init(dst.db, cStr(dstName), src.db, cStr(srcName))
    if bkup == nil {
        return nil, libErr(C.sqlite3_errcode(dst.db), dst.db)
    }

    b := &Backup{src, dst, bkup}
    runtime.SetFinalizer(b, (*Backup).Close)
    return b, nil
}

// Close releases all resources associated with the backup operation. It is safe
// to call this method prior to backup completion to abort the operation.
// [http://www.sqlite.org/c3ref/backup_finish.html#sqlite3backupfinish]
func (b *Backup) Close() error {
    if bkup := b.bkup; bkup != nil {
        b.bkup = nil
        runtime.SetFinalizer(b, nil)
        if rc := C.sqlite3_backup_finish(bkup); rc != OK {
            return libErr(rc, b.dst.db)
        }
    }
    return nil
}

// Conn returns the source and destination connections that are used by this
// backup operation. The destination connection must not be used until the
// backup operation is closed.
func (b *Backup) Conn() (src, dst *Conn) {
    return b.src, b.dst
}

// Step copies up to n pages to the destination database. If n is negative, all
// remaining pages are copied. io.EOF is returned upon successful backup
// completion.
// [http://www.sqlite.org/c3ref/backup_finish.html#sqlite3backupstep]
func (b *Backup) Step(n int) error {
    if b.bkup == nil {
        return ErrBadBackup
    }
    if rc := C.sqlite3_backup_step(b.bkup, C.int(n)); rc != OK {
        // Do not close automatically since that clears the progress info
        if rc == DONE {
            return io.EOF
        }
        return libErr(rc, b.dst.db)
    }
    return nil
}

// Progress returns the number of pages that still need to be backed up and the
// total number of pages in the source database. The values are updated after
// each call to Step and are reset to 0 after the backup is closed. The total
// number of pages may change if the source database is modified during the
// backup operation.
// [http://www.sqlite.org/c3ref/backup_finish.html#sqlite3backupremaining]
func (b *Backup) Progress() (remaining, total int) {
    if b.bkup != nil {
        remaining = int(C.sqlite3_backup_remaining(b.bkup))
        total = int(C.sqlite3_backup_pagecount(b.bkup))
    }
    return
}
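
The Step/Progress pair above maps directly onto sqlite3_backup_step and its progress queries. A minimal usage sketch (it assumes a public Conn.Backup method wrapping the unexported newBackup constructor; that entry-point name is an assumption, not shown in this diff):

    src, _ := sqlite3.Open("live.db")
    dst, _ := sqlite3.Open("copy.db")

    b, err := src.Backup("main", dst, "main") // assumed public wrapper around newBackup
    if err != nil {
        // handle error
    }
    defer b.Close()

    for {
        err = b.Step(64) // copy up to 64 pages per call
        if err == io.EOF {
            break // backup finished successfully
        }
        if err != nil {
            // handle error and abort
            break
        }
        remaining, total := b.Progress()
        fmt.Printf("%d/%d pages copied\n", total-remaining, total)
    }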

Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/codec.go (new file, generated, vendored; 214 lines)
@@ -0,0 +1,214 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sqlite3

/*
#include "sqlite3.h"
#include "lib/codec.h"
*/
import "C"

import (
    "bytes"
    "sync"
    "unsafe"
)

// CodecFunc is a codec initialization function registered for a specific key
// prefix via RegisterCodec. It is called when a key with a matching prefix is
// specified for an attached database. It returns the Codec implementation that
// should be used to encode and decode all database and journal pages. Returning
// (nil, nil) disables the codec.
type CodecFunc func(ctx *CodecCtx, key []byte) (Codec, *Error)

// CodecCtx describes the database to which a codec is being attached.
type CodecCtx struct {
    Path     string // Full path to the database file
    Name     string // Database name as it is known to SQLite (e.g. "main")
    PageSize int    // Current page size in bytes
    Reserve  int    // Current number of bytes reserved in each page
    Fixed    bool   // True if the PageSize and Reserve values cannot be changed
}

// newCodecCtx converts the C CodecCtx struct into its Go representation.
func newCodecCtx(ctx *C.CodecCtx) *CodecCtx {
    return &CodecCtx{
        Path:     C.GoString(ctx.zPath),
        Name:     C.GoString(ctx.zName),
        PageSize: int(ctx.nBuf),
        Reserve:  int(ctx.nRes),
        Fixed:    ctx.fixed != 0,
    }
}

// Codec is the interface used to encode/decode database and journal pages as
// they are written to and read from the disk.
//
// The op value passed to Encode and Decode methods identifies the operation
// being performed. It is undocumented and changed meanings over time since the
// codec API was first introduced in 2004. It is believed to be a bitmask of the
// following values:
//
//	1 = journal page, not set for WAL, always set when decoding
//	2 = disk I/O, always set
//	4 = encode
//
// In the current implementation, op is always 3 when decoding, 6 when encoding
// for the database file or the WAL, and 7 when encoding for the journal. Search
// lib/sqlite3.c for "CODEC1" and "CODEC2" for more information.
type Codec interface {
    // Reserve returns the number of bytes that should be reserved for the codec
    // at the end of each page. The upper limit is 255 (32 if the page size is
    // 512). Returning -1 leaves the current value unchanged.
    Reserve() int

    // Resize is called when the codec is first attached to the pager and for
    // all subsequent page size changes. It can be used to allocate the encode
    // buffer.
    Resize(pageSize, reserve int)

    // Encode returns an encoded copy of a page. The data outside of the reserve
    // area in the original page must not be modified. The codec can either copy
    // this data into a buffer for encoding or return the original page without
    // making any changes. Bytes 16 through 23 of page 1 cannot be encoded. Any
    // non-nil error will be interpreted by SQLite as a NOMEM condition. This is
    // a limitation of the underlying C API.
    Encode(page []byte, pageNum uint32, op int) ([]byte, *Error)

    // Decode decodes the page in-place, but it may use the encode buffer as
    // scratch space. Bytes 16 through 23 of page 1 must be left at their
    // original values. Any non-nil error will be interpreted by SQLite as a
    // NOMEM condition. This is a limitation of the underlying C API.
    Decode(page []byte, pageNum uint32, op int) *Error

    // Key returns the original key that was used to initialize the codec. Some
    // implementations may be better off returning nil or a fake value. Search
    // lib/sqlite3.c for "sqlite3CodecGetKey" to see how the key is used.
    Key() []byte

    // Free releases codec resources when the pager is destroyed or when the
    // codec attachment fails.
    Free()
}

// Codec registry and state reference maps.
var (
    codecReg   map[string]CodecFunc
    codecState map[*codec]struct{}
    codecMu    sync.Mutex
)

// RegisterCodec adds a new codec to the internal registry. Function f will be
// called when a key in the format "<name>:<...>" is provided to an attached
// database.
func RegisterCodec(name string, f CodecFunc) {
    codecMu.Lock()
    defer codecMu.Unlock()
    if f == nil {
        delete(codecReg, name)
        return
    }
    if codecReg == nil {
        codecReg = make(map[string]CodecFunc, 8)
    }
    codecReg[name] = f
}

// getCodec returns the CodecFunc for the given key.
func getCodec(key []byte) CodecFunc {
    i := bytes.IndexByte(key, ':')
    if i == -1 {
        i = len(key)
    }
    codecMu.Lock()
    defer codecMu.Unlock()
    if codecReg == nil {
        return nil
    }
    return codecReg[bstr(key[:i])]
}

// codec is a wrapper around the actual Codec interface. It keeps track of the
// current page size in order to convert page pointers into byte slices.
type codec struct {
    Codec
    pageSize C.int
}

//export go_codec_init
func go_codec_init(ctx *C.CodecCtx, pCodec *unsafe.Pointer, pzErrMsg **C.char) C.int {
    cf := getCodec(goBytes(ctx.pKey, ctx.nKey))
    if cf == nil {
        *pzErrMsg = C.CString("codec not found")
        return ERROR
    }
    ci, err := cf(newCodecCtx(ctx), C.GoBytes(ctx.pKey, ctx.nKey))
    if err != nil && err.rc != OK {
        if ci != nil {
            ci.Free()
        }
        if err.msg != "" {
            *pzErrMsg = C.CString(err.msg)
        }
        return C.int(err.rc)
    }
    if ci != nil {
        cs := &codec{ci, ctx.nBuf}
        *pCodec = unsafe.Pointer(cs)
        codecMu.Lock()
        defer codecMu.Unlock()
        if codecState == nil {
            codecState = make(map[*codec]struct{}, 8)
        }
        codecState[cs] = struct{}{}
    }
    return OK
}

//export go_codec_reserve
func go_codec_reserve(pCodec unsafe.Pointer) C.int {
    return C.int((*codec)(pCodec).Reserve())
}

//export go_codec_resize
func go_codec_resize(pCodec unsafe.Pointer, nBuf, nRes C.int) {
    cs := (*codec)(pCodec)
    cs.pageSize = nBuf
    cs.Resize(int(nBuf), int(nRes))
}

//export go_codec_exec
func go_codec_exec(pCodec, pData unsafe.Pointer, pgno uint32, op C.int) unsafe.Pointer {
    cs := (*codec)(pCodec)
    page := goBytes(pData, cs.pageSize)
    var err *Error
    if op&4 == 0 {
        err = cs.Decode(page, pgno, int(op))
    } else {
        page, err = cs.Encode(page, pgno, int(op))
    }
    if err == nil {
        return cBytes(page)
    }
    return nil // Can't do anything with the error at the moment
}

//export go_codec_get_key
func go_codec_get_key(pCodec unsafe.Pointer, pKey *unsafe.Pointer, nKey *C.int) {
    if key := (*codec)(pCodec).Key(); len(key) > 0 {
        *pKey = cBytes(key)
        *nKey = C.int(len(key))
    }
}

//export go_codec_free
func go_codec_free(pCodec unsafe.Pointer) {
    cs := (*codec)(pCodec)
    codecMu.Lock()
    delete(codecState, cs)
    codecMu.Unlock()
    cs.Free()
    cs.Codec = nil
}
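
Before the concrete implementations that follow, it may help to see the smallest possible Codec against the interface above. This pass-through sketch encrypts nothing and reserves no page space (illustrative only, not part of the commit):

    // nopCodec satisfies Codec but leaves every page untouched.
    type nopCodec struct{ key []byte }

    func newNopCodec(ctx *CodecCtx, key []byte) (Codec, *Error) {
        return &nopCodec{key}, nil
    }

    func (c *nopCodec) Reserve() int                 { return -1 } // keep the current reserve value
    func (c *nopCodec) Resize(pageSize, reserve int) {}            // no encode buffer needed
    func (c *nopCodec) Key() []byte                  { return c.key }
    func (c *nopCodec) Free()                        { c.key = nil }

    // Encode returns the page unchanged; SQLite writes it out as-is.
    func (c *nopCodec) Encode(page []byte, pageNum uint32, op int) ([]byte, *Error) {
        return page, nil
    }

    // Decode is a no-op; a real codec would decrypt the page in place.
    func (c *nopCodec) Decode(page []byte, pageNum uint32, op int) *Error {
        return nil
    }

    // RegisterCodec("nop", newNopCodec) would make it reachable via keys
    // of the form "nop:<...>".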

Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/codec/aes-hmac.go (new file, generated, vendored; 183 lines)
@@ -0,0 +1,183 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package codec

import (
    "crypto/aes"
    "crypto/cipher"
    "crypto/hmac"
    "crypto/sha1"
    "crypto/sha256"
    "hash"

    . "code.google.com/p/go-sqlite/go1/sqlite3"
)

type aesHmac struct {
    key  []byte  // Key provided to newAesHmac with the master key removed
    buf  []byte  // Page encryption buffer
    hdr  [4]byte // Header included in each HMAC calculation (page number)
    tLen int     // Tag length in bytes (HMAC truncation)

    // Hash function and chaining mode constructors
    hash func() hash.Hash
    mode func(block cipher.Block, iv []byte) cipher.Stream

    // Block cipher and HMAC initialized from the master key
    block cipher.Block
    hmac  hash.Hash
}

func newAesHmac(ctx *CodecCtx, key []byte) (Codec, *Error) {
    name, opts, mk := parseKey(key)
    if len(mk) == 0 {
        return nil, keyErr
    }
    defer wipe(mk)

    // Configure the codec
    c := &aesHmac{
        key:  key[:len(key)-len(mk)],
        tLen: 16,
        hash: sha1.New,
        mode: cipher.NewCTR,
    }
    suite := suiteId{
        Cipher:  "aes",
        KeySize: "128",
        Mode:    "ctr",
        MAC:     "hmac",
        Hash:    "sha1",
        Trunc:   "128",
    }
    kLen := 16
    if err := c.config(opts, &suite, &kLen); err != nil {
        return nil, err
    }

    // Derive encryption and authentication keys
    hLen := c.hash().Size()
    salt := make([]byte, hLen)
    copy(salt, name)
    dk := hkdf(mk, salt, kLen+hLen, c.hash)(suite.Id())
    defer wipe(dk)

    // Initialize the block cipher and HMAC
    var err error
    if c.block, err = aes.NewCipher(dk[:kLen]); err != nil {
        return nil, NewError(MISUSE, err.Error())
    }
    c.hmac = hmac.New(c.hash, dk[kLen:])
    return c, nil
}

func (c *aesHmac) Reserve() int {
    return aes.BlockSize + c.tLen
}

func (c *aesHmac) Resize(pageSize, reserve int) {
    if reserve != c.Reserve() {
        panic("sqlite3: codec reserve value mismatch")
    }
    hLen := c.hash().Size()
    c.buf = make([]byte, pageSize, pageSize-c.tLen+hLen)
}

func (c *aesHmac) Encode(p []byte, n uint32, op int) ([]byte, *Error) {
    iv := c.pIV(c.buf)
    if !rnd(iv) {
        return nil, prngErr
    }
    c.mode(c.block, iv).XORKeyStream(c.buf, c.pText(p))
    if n == 1 {
        copy(c.buf[16:], p[16:24])
    }
    c.auth(c.buf, n, false)
    return c.buf, nil
}

func (c *aesHmac) Decode(p []byte, n uint32, op int) *Error {
    if !c.auth(p, n, true) {
        return codecErr
    }
    if n == 1 {
        copy(c.buf, p[16:24])
    }
    c.mode(c.block, c.pIV(p)).XORKeyStream(p, c.pText(p))
    if n == 1 {
        copy(p[16:24], c.buf)
    }
    return nil
}

func (c *aesHmac) Key() []byte {
    return c.key
}

func (c *aesHmac) Free() {
    c.buf = nil
    c.block = nil
    c.hmac = nil
}

// config applies the codec options that were provided in the key.
func (c *aesHmac) config(opts map[string]string, s *suiteId, kLen *int) *Error {
    for k := range opts {
        switch k {
        case "192":
            s.KeySize = k
            *kLen = 24
        case "256":
            s.KeySize = k
            *kLen = 32
        case "ofb":
            s.Mode = k
            c.mode = cipher.NewOFB
        case "sha256":
            s.Hash = k
            c.hash = sha256.New
        default:
            return NewError(MISUSE, "invalid codec option: "+k)
        }
    }
    return nil
}

// auth calculates and verifies the HMAC tag for page p. It returns true iff the
// tag is successfully verified.
func (c *aesHmac) auth(p []byte, n uint32, verify bool) bool {
    c.hdr[0] = byte(n >> 24)
    c.hdr[1] = byte(n >> 16)
    c.hdr[2] = byte(n >> 8)
    c.hdr[3] = byte(n)

    tag := c.pTag(c.buf)
    c.hmac.Reset()
    c.hmac.Write(c.hdr[:])
    c.hmac.Write(c.pAuth(p))
    c.hmac.Sum(tag[:0])

    return verify && hmac.Equal(tag, c.pTag(p))
}

// pAuth returns the page subslice that gets authenticated.
func (c *aesHmac) pAuth(p []byte) []byte {
    return p[:len(p)-c.tLen]
}

// pText returns the page subslice that gets encrypted.
func (c *aesHmac) pText(p []byte) []byte {
    return p[:len(p)-c.tLen-aes.BlockSize]
}

// pIV returns the page initialization vector.
func (c *aesHmac) pIV(p []byte) []byte {
    return p[len(p)-c.tLen-aes.BlockSize : len(p)-c.tLen]
}

// pTag returns the page authentication tag.
func (c *aesHmac) pTag(p []byte) []byte {
    return p[len(p)-c.tLen:]
}

Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/codec/codec.go (new file, generated, vendored; 130 lines)
@@ -0,0 +1,130 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package codec

import (
    "bytes"
    "crypto/hmac"
    "crypto/rand"
    "hash"
    "io"
    "strings"

    . "code.google.com/p/go-sqlite/go1/sqlite3"
)

func init() {
    RegisterCodec("aes-hmac", newAesHmac)
    RegisterCodec("hexdump", newHexDump)
}

// Errors returned by codec implementations.
var (
    codecErr = NewError(ERROR, "unspecified codec error")
    prngErr  = NewError(ERROR, "csprng not available")
    keyErr   = NewError(MISUSE, "invalid codec key format")
)

// parseKey extracts the codec name, options, and anything left over from a key
// in the format "<name>:<options>:<tail...>".
func parseKey(key []byte) (name string, opts map[string]string, tail []byte) {
    k := bytes.SplitN(key, []byte{':'}, 3)
    name = string(k[0])
    opts = make(map[string]string)
    if len(k) > 1 && len(k[1]) > 0 {
        for _, opt := range strings.Split(string(k[1]), ",") {
            if i := strings.Index(opt, "="); i > 0 {
                opts[opt[:i]] = opt[i+1:]
            } else {
                opts[opt] = ""
            }
        }
    }
    if len(k) > 2 && len(k[2]) > 0 {
        tail = k[2]
    }
    return
}

// hkdf implements the HMAC-based Key Derivation Function, as described in RFC
// 5869. The extract step is skipped if salt == nil. It is the caller's
// responsibility to set salt "to a string of HashLen zeros," if such behavior
// is desired. It returns the function that performs the expand step using the
// provided info value, which must be appendable. The derived key is valid until
// the next expansion.
func hkdf(ikm, salt []byte, dkLen int, h func() hash.Hash) func(info []byte) []byte {
    if salt != nil {
        prf := hmac.New(h, salt)
        prf.Write(ikm)
        ikm = prf.Sum(nil)
    }
    prf := hmac.New(h, ikm)
    hLen := prf.Size()
    n := (dkLen + hLen - 1) / hLen
    dk := make([]byte, dkLen, n*hLen)

    return func(info []byte) []byte {
        info = append(info, 0)
        ctr := &info[len(info)-1]
        for i, t := 1, dk[:0]; i <= n; i++ {
            *ctr = byte(i)
            prf.Reset()
            prf.Write(t)
            prf.Write(info)
            t = prf.Sum(t[len(t):])
        }
        return dk
    }
}

// rnd fills b with bytes from a CSPRNG.
func rnd(b []byte) bool {
    _, err := io.ReadFull(rand.Reader, b)
    return err == nil
}

// wipe overwrites b with zeros.
func wipe(b []byte) {
    for i := range b {
        b[i] = 0
    }
}

// suiteId constructs a canonical cipher suite identifier.
type suiteId struct {
    Cipher  string
    KeySize string
    Mode    string
    MAC     string
    Hash    string
    Trunc   string
}

func (s *suiteId) Id() []byte {
    id := make([]byte, 0, 64)
    section := func(parts ...string) {
        for i, p := range parts {
            if p != "" {
                parts = parts[i:]
                goto write
            }
        }
        return
    write:
        if len(id) > 0 {
            id = append(id, ',')
        }
        id = append(id, parts[0]...)
        for _, p := range parts[1:] {
            if p != "" {
                id = append(id, '-')
                id = append(id, p...)
            }
        }
    }
    section(s.Cipher, s.KeySize, s.Mode)
    section(s.MAC, s.Hash, s.Trunc)
    return id
}
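
parseKey drives key handling for every codec in this package; tracing it on an aes-hmac-style key shows the split (the values follow directly from the code above; the key string itself is a made-up example):

    name, opts, tail := parseKey([]byte("aes-hmac:256,ofb:master-key"))
    // name == "aes-hmac"
    // opts == map[string]string{"256": "", "ofb": ""}
    // tail == []byte("master-key")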

Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/codec/codec_test.go (new file, generated, vendored; 128 lines)
@@ -0,0 +1,128 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package codec

import (
    "crypto/sha1"
    "crypto/sha256"
    "encoding/hex"
    "hash"
    "testing"
)

func TestHKDF(t *testing.T) {
    tests := []struct {
        ikm   string
        salt  string
        info  string
        dkLen int
        h     func() hash.Hash
        out   string
    }{
        // RFC 5869 Test Vectors
        {
            "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
            "000102030405060708090a0b0c",
            "f0f1f2f3f4f5f6f7f8f9",
            42,
            sha256.New,
            "3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865",
        }, {
            "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f",
            "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
            "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
            82,
            sha256.New,
            "b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71cc30c58179ec3e87c14c01d5c1f3434f1d87",
        }, {
            "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
            "",
            "",
            42,
            sha256.New,
            "8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8",
        }, {
            "0b0b0b0b0b0b0b0b0b0b0b",
            "000102030405060708090a0b0c",
            "f0f1f2f3f4f5f6f7f8f9",
            42,
            sha1.New,
            "085a01ea1b10f36933068b56efa5ad81a4f14b822f5b091568a9cdd4f155fda2c22e422478d305f3f896",
        }, {
            "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f",
            "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
            "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
            82,
            sha1.New,
            "0bd770a74d1160f7c9f12cd5912a06ebff6adcae899d92191fe4305673ba2ffe8fa3f1a4e5ad79f3f334b3b202b2173c486ea37ce3d397ed034c7f9dfeb15c5e927336d0441f4c4300e2cff0d0900b52d3b4",
        }, {
            "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
            "",
            "",
            42,
            sha1.New,
            "0ac1af7002b3d761d1e55298da9d0506b9ae52057220a306e07b6b87e8df21d0ea00033de03984d34918",
        }, {
            "0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c",
            "0000000000000000000000000000000000000000",
            "",
            42,
            sha1.New,
            "2c91117204d745f3500d636a62f64f0ab3bae548aa53d423b0d1f27ebba6f5e5673a081d70cce7acfc48",
        },
    }
    for i, test := range tests {
        ikm, _ := hex.DecodeString(test.ikm)
        salt, _ := hex.DecodeString(test.salt)
        info, _ := hex.DecodeString(test.info)
        dk := hkdf(ikm, salt, test.dkLen, test.h)(info)
        if out := hex.EncodeToString(dk); out != test.out {
            t.Errorf("hkdf(%d) expected %q; got %q", i, test.out, out)
        }
    }
}

func TestSuiteId(t *testing.T) {
    tests := []struct {
        suite suiteId
        out   string
    }{
        {suiteId{},
            ""},
        {suiteId{Cipher: "aes"},
            "aes"},
        {suiteId{KeySize: "128"},
            "128"},
        {suiteId{Cipher: "aes", KeySize: "128"},
            "aes-128"},
        {suiteId{Cipher: "aes", Mode: "ctr"},
            "aes-ctr"},
        {suiteId{Cipher: "aes", KeySize: "128", Mode: "ctr"},
            "aes-128-ctr"},
        {suiteId{MAC: "hmac"},
            "hmac"},
        {suiteId{MAC: "hmac", Hash: "sha1"},
            "hmac-sha1"},
        {suiteId{MAC: "hmac", Hash: "sha1", Trunc: "128"},
            "hmac-sha1-128"},
        {suiteId{Cipher: "aes", MAC: "hmac"},
            "aes,hmac"},
        {suiteId{Cipher: "aes", Hash: "sha1"},
            "aes,sha1"},
        {suiteId{Mode: "ctr", Hash: "sha1"},
            "ctr,sha1"},
        {suiteId{Cipher: "aes", KeySize: "128", MAC: "hmac", Hash: "sha256"},
            "aes-128,hmac-sha256"},
        {suiteId{Cipher: "aes", Mode: "ctr", Hash: "sha256", Trunc: "128"},
            "aes-ctr,sha256-128"},
        {suiteId{Cipher: "aes", KeySize: "256", Mode: "ctr", MAC: "hmac", Hash: "sha256", Trunc: "128"},
            "aes-256-ctr,hmac-sha256-128"},
    }
    for _, test := range tests {
        if out := string(test.suite.Id()); out != test.out {
            t.Errorf("%#v expected %q; got %q", test.suite, test.out, out)
        }
    }
}

Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/codec/doc.go (new file, generated, vendored; 110 lines)
@@ -0,0 +1,110 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package codec provides authenticated encryption and other codecs for the sqlite3
package.

This package has no public interface and should be imported with the blank
identifier to register the codecs. Use Conn.Key after opening a new connection,
or the KEY clause in an ATTACH statement, to use one of the codecs:

    c, _ := sqlite3.Open("file1.db")
    c.Key("main", []byte("aes-hmac::secretkey1"))
    c.Exec("ATTACH DATABASE 'file2.db' AS two KEY 'aes-hmac::secretkey2'")

If the KEY clause is omitted, SQLite uses the key from the main database, which
may no longer be valid depending on how the codec is implemented (e.g. aes-hmac
destroys the master key after initialization). Specify an empty string as the
key to disable this behavior.

Codec Operation

Each SQLite database and journal file consists of one or more pages of identical
size. Each page may have extra space reserved at the end, which SQLite will not
use in any way. The exact number of bytes reserved is stored at offset 20 of the
database file header, so the value is between 0 and 255. SQLite requires each
page to have at least 480 usable bytes, so the value cannot exceed 32 bytes with
a page size of 512. This extra space may be used by a codec to store per-page
Initialization Vectors (IVs), Message Authentication Codes (MACs), or any other
information.

CodecFunc is called to initialize a registered codec when a key with a matching
prefix is provided. If it returns a non-nil Codec implementation, Codec.Reserve
is called to determine how much space this codec needs reserved in each page for
correct operation. Codec.Resize is called to provide the current page size and
reserve values, and for all subsequent changes. The page size may be changed
before the database file is created. Once the first CREATE TABLE statement is
executed, the page size and reserve values are fixed.

Codec.Encode is called when a page is about to be written to the disk.
Codec.Decode is called when a page was just read from the disk. This happens for
both the main database file and the journal/WAL, so the pages are always encoded
on the disk and decoded in memory. Codec.Free is called to free all codec
resources when the database is detached.

AES-HMAC

The aes-hmac codec provides authenticated encryption using the Advanced
Encryption Standard (AES) cipher and the Hash-based Message Authentication Code
(HMAC) in Encrypt-then-MAC mode. Each page has an independent, pseudorandom IV,
which is regenerated every time the page is encrypted, and an authentication
tag, which is verified before the page is decrypted. The codec requires 32 bytes
per page to store this information.

The key format is "aes-hmac:<options>:<master-key>", where <options> is a
comma-separated list of codec options described below, and <master-key> is the
key from which separate encryption and authentication keys are derived.

SECURITY WARNING: The master key is called a "key" and not a "password" for a
reason. It is not passed through pbkdf2, bcrypt, scrypt, or any other key
stretching function. The application is expected to ensure that this key is
sufficiently resistant to brute-force attacks. Ideally, it should be obtained
from a cryptographically secure pseudorandom number generator (CSPRNG), such as
the one provided by the crypto/rand package.

The encryption and authentication keys are derived from the master key using the
HMAC-based Key Derivation Function (HKDF), as described in RFC 5869. The salt is
the codec name ("aes-hmac") extended with NULLs to HashLen bytes, and info is
the codec configuration string (e.g. "aes-128-ctr,hmac-sha1-128"). This is done
to obtain two keys of the required lengths, which are also bound to the codec
configuration.

The default configuration is AES-128-CTR cipher and HMAC-SHA1-128 authentication
(HMAC output is truncated to 128 bits). The following options may be used to
change the defaults:

    192
        AES-192 block cipher.
    256
        AES-256 block cipher.
    ofb
        Output feedback mode of operation.
    sha256
        SHA-256 hash function used by HKDF and HMAC.

For example, "aes-hmac:256,ofb,sha256:<master-key>" will use the AES-256-OFB
cipher and HMAC-SHA256-128 authentication.

HEXDUMP

The hexdump codec logs all method calls and dumps the page content for each
encode/decode operation to a file. It is intended to be used as an aid when
writing your own codecs.

The key format is "hexdump:<options>:<file>", where <options> is a
comma-separated list of codec options described below, and <file> is the output
destination. The default destination is stderr. Dash ("-") means stdout. For
obvious reasons, this codec cannot be used with an encrypted database except to
see the first Codec.Decode call for page 1.

The following options are supported:

    quiet
        Do not output a hex dump of each page.
    reserve=N
        Reserve N bytes in each page. The default is -1, which means don't
        change the current reserve value.
*/
package codec

Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/codec/hexdump.go (new file, generated, vendored; 105 lines)
@@ -0,0 +1,105 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package codec

import (
    "encoding/hex"
    "fmt"
    "os"
    "strconv"

    . "code.google.com/p/go-sqlite/go1/sqlite3"
)

type hexDump struct {
    key   []byte
    out   *os.File
    quiet bool
    res   int
}

func newHexDump(ctx *CodecCtx, key []byte) (Codec, *Error) {
    _, opts, tail := parseKey(key)
    c := &hexDump{key, os.Stderr, false, -1}

    // Set options
    for k, v := range opts {
        switch k {
        case "quiet":
            c.quiet = true
        case "reserve":
            if n, err := strconv.ParseUint(v, 10, 8); err == nil {
                c.res = int(n)
            }
        default:
            return nil, NewError(MISUSE, "invalid codec option: "+k)
        }
    }

    // Open output file
    switch file := string(tail); file {
    case "":
    case "-":
        c.out = os.Stdout
    default:
        var err error
        c.out, err = os.OpenFile(file, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
        if err != nil {
            return nil, NewError(ERROR, err.Error())
        }
    }

    fmt.Fprintf(c.out, "Init (\n"+
        "  Path=%s\n"+
        "  Name=%s\n"+
        "  PageSize=%d\n"+
        "  Reserve=%d\n"+
        "  Fixed=%t\n"+
        ")\n",
        ctx.Path, ctx.Name, ctx.PageSize, ctx.Reserve, ctx.Fixed)
    return c, nil
}

func (c *hexDump) Reserve() int {
    fmt.Fprintf(c.out, "Reserve\n")
    return c.res
}

func (c *hexDump) Resize(pageSize, reserve int) {
    fmt.Fprintf(c.out, "Resize (pageSize=%d, reserve=%d)\n", pageSize, reserve)
}

func (c *hexDump) Encode(page []byte, pageNum uint32, op int) ([]byte, *Error) {
    fmt.Fprintf(c.out, "Encode (pageNum=%d, op=%d)\n", pageNum, op)
    c.dump(page)
    return page, nil
}

func (c *hexDump) Decode(page []byte, pageNum uint32, op int) *Error {
    fmt.Fprintf(c.out, "Decode (pageNum=%d, op=%d)\n", pageNum, op)
    c.dump(page)
    return nil
}

func (c *hexDump) Key() []byte {
    fmt.Fprintf(c.out, "Key\n")
    return c.key
}

func (c *hexDump) Free() {
    fmt.Fprintf(c.out, "Free\n")
    if c.out != os.Stdout && c.out != os.Stderr {
        c.out.Close()
    }
}

func (c *hexDump) dump(b []byte) {
    if !c.quiet {
        hd := hex.Dumper(c.out)
        hd.Write(b)
        hd.Close()
        c.out.Write([]byte("\n"))
    }
}
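
Given the key format documented in doc.go ("hexdump:<options>:<file>"), wiring the hexdump codec into a connection is a one-liner. A sketch using the Conn.Key call shown in the package documentation (file names are illustrative):

    c, _ := sqlite3.Open("test.db")
    c.Key("main", []byte("hexdump::trace.log")) // trace calls and dump pages to trace.log
    // or suppress the page dumps and log to stdout instead:
    // c.Key("main", []byte("hexdump:quiet:-"))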

Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/const.go (new file, generated, vendored; 208 lines)
@@ -0,0 +1,208 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sqlite3

/*
#include "sqlite3.h"
*/
import "C"

// Fundamental SQLite data types. These are returned by the Stmt.DataTypes
// method.
// [http://www.sqlite.org/c3ref/c_blob.html]
const (
    INTEGER = C.SQLITE_INTEGER // 1
    FLOAT   = C.SQLITE_FLOAT   // 2
    TEXT    = C.SQLITE_TEXT    // 3
    BLOB    = C.SQLITE_BLOB    // 4
    NULL    = C.SQLITE_NULL    // 5
)

// General result codes returned by the SQLite API. When converted to an error,
// OK and ROW become nil, and DONE becomes either nil or io.EOF, depending on
// the context in which the statement is executed. All other codes are returned
// via the Error struct.
// [http://www.sqlite.org/c3ref/c_abort.html]
const (
    OK         = C.SQLITE_OK         // 0 = Successful result
    ERROR      = C.SQLITE_ERROR      // 1 = SQL error or missing database
    INTERNAL   = C.SQLITE_INTERNAL   // 2 = Internal logic error in SQLite
    PERM       = C.SQLITE_PERM       // 3 = Access permission denied
    ABORT      = C.SQLITE_ABORT      // 4 = Callback routine requested an abort
    BUSY       = C.SQLITE_BUSY       // 5 = The database file is locked
    LOCKED     = C.SQLITE_LOCKED     // 6 = A table in the database is locked
    NOMEM      = C.SQLITE_NOMEM      // 7 = A malloc() failed
    READONLY   = C.SQLITE_READONLY   // 8 = Attempt to write a readonly database
    INTERRUPT  = C.SQLITE_INTERRUPT  // 9 = Operation terminated by sqlite3_interrupt()
    IOERR      = C.SQLITE_IOERR      // 10 = Some kind of disk I/O error occurred
    CORRUPT    = C.SQLITE_CORRUPT    // 11 = The database disk image is malformed
    NOTFOUND   = C.SQLITE_NOTFOUND   // 12 = Unknown opcode in sqlite3_file_control()
    FULL       = C.SQLITE_FULL       // 13 = Insertion failed because database is full
    CANTOPEN   = C.SQLITE_CANTOPEN   // 14 = Unable to open the database file
    PROTOCOL   = C.SQLITE_PROTOCOL   // 15 = Database lock protocol error
    EMPTY      = C.SQLITE_EMPTY      // 16 = Database is empty
    SCHEMA     = C.SQLITE_SCHEMA     // 17 = The database schema changed
    TOOBIG     = C.SQLITE_TOOBIG     // 18 = String or BLOB exceeds size limit
    CONSTRAINT = C.SQLITE_CONSTRAINT // 19 = Abort due to constraint violation
    MISMATCH   = C.SQLITE_MISMATCH   // 20 = Data type mismatch
    MISUSE     = C.SQLITE_MISUSE     // 21 = Library used incorrectly
    NOLFS      = C.SQLITE_NOLFS      // 22 = Uses OS features not supported on host
    AUTH       = C.SQLITE_AUTH       // 23 = Authorization denied
    FORMAT     = C.SQLITE_FORMAT     // 24 = Auxiliary database format error
    RANGE      = C.SQLITE_RANGE      // 25 = 2nd parameter to sqlite3_bind out of range
    NOTADB     = C.SQLITE_NOTADB     // 26 = File opened that is not a database file
    NOTICE     = C.SQLITE_NOTICE     // 27 = Notifications from sqlite3_log()
    WARNING    = C.SQLITE_WARNING    // 28 = Warnings from sqlite3_log()
    ROW        = C.SQLITE_ROW        // 100 = sqlite3_step() has another row ready
    DONE       = C.SQLITE_DONE       // 101 = sqlite3_step() has finished executing
)

// Extended result codes returned by the SQLite API. Extended result codes are
// enabled by default for all new Conn objects. Use Error.Code()&0xFF to convert
// an extended code to a general one.
// [http://www.sqlite.org/c3ref/c_abort_rollback.html]
const (
    IOERR_READ              = C.SQLITE_IOERR_READ              // (SQLITE_IOERR | (1<<8))
    IOERR_SHORT_READ        = C.SQLITE_IOERR_SHORT_READ        // (SQLITE_IOERR | (2<<8))
    IOERR_WRITE             = C.SQLITE_IOERR_WRITE             // (SQLITE_IOERR | (3<<8))
    IOERR_FSYNC             = C.SQLITE_IOERR_FSYNC             // (SQLITE_IOERR | (4<<8))
    IOERR_DIR_FSYNC         = C.SQLITE_IOERR_DIR_FSYNC         // (SQLITE_IOERR | (5<<8))
    IOERR_TRUNCATE          = C.SQLITE_IOERR_TRUNCATE          // (SQLITE_IOERR | (6<<8))
    IOERR_FSTAT             = C.SQLITE_IOERR_FSTAT             // (SQLITE_IOERR | (7<<8))
    IOERR_UNLOCK            = C.SQLITE_IOERR_UNLOCK            // (SQLITE_IOERR | (8<<8))
    IOERR_RDLOCK            = C.SQLITE_IOERR_RDLOCK            // (SQLITE_IOERR | (9<<8))
    IOERR_DELETE            = C.SQLITE_IOERR_DELETE            // (SQLITE_IOERR | (10<<8))
    IOERR_BLOCKED           = C.SQLITE_IOERR_BLOCKED           // (SQLITE_IOERR | (11<<8))
    IOERR_NOMEM             = C.SQLITE_IOERR_NOMEM             // (SQLITE_IOERR | (12<<8))
    IOERR_ACCESS            = C.SQLITE_IOERR_ACCESS            // (SQLITE_IOERR | (13<<8))
    IOERR_CHECKRESERVEDLOCK = C.SQLITE_IOERR_CHECKRESERVEDLOCK // (SQLITE_IOERR | (14<<8))
    IOERR_LOCK              = C.SQLITE_IOERR_LOCK              // (SQLITE_IOERR | (15<<8))
    IOERR_CLOSE             = C.SQLITE_IOERR_CLOSE             // (SQLITE_IOERR | (16<<8))
    IOERR_DIR_CLOSE         = C.SQLITE_IOERR_DIR_CLOSE         // (SQLITE_IOERR | (17<<8))
    IOERR_SHMOPEN           = C.SQLITE_IOERR_SHMOPEN           // (SQLITE_IOERR | (18<<8))
    IOERR_SHMSIZE           = C.SQLITE_IOERR_SHMSIZE           // (SQLITE_IOERR | (19<<8))
    IOERR_SHMLOCK           = C.SQLITE_IOERR_SHMLOCK           // (SQLITE_IOERR | (20<<8))
    IOERR_SHMMAP            = C.SQLITE_IOERR_SHMMAP            // (SQLITE_IOERR | (21<<8))
    IOERR_SEEK              = C.SQLITE_IOERR_SEEK              // (SQLITE_IOERR | (22<<8))
    IOERR_DELETE_NOENT      = C.SQLITE_IOERR_DELETE_NOENT      // (SQLITE_IOERR | (23<<8))
    IOERR_MMAP              = C.SQLITE_IOERR_MMAP              // (SQLITE_IOERR | (24<<8))
    IOERR_GETTEMPPATH       = C.SQLITE_IOERR_GETTEMPPATH       // (SQLITE_IOERR | (25<<8))
    LOCKED_SHAREDCACHE      = C.SQLITE_LOCKED_SHAREDCACHE      // (SQLITE_LOCKED | (1<<8))
    BUSY_RECOVERY           = C.SQLITE_BUSY_RECOVERY           // (SQLITE_BUSY | (1<<8))
    BUSY_SNAPSHOT           = C.SQLITE_BUSY_SNAPSHOT           // (SQLITE_BUSY | (2<<8))
    CANTOPEN_NOTEMPDIR      = C.SQLITE_CANTOPEN_NOTEMPDIR      // (SQLITE_CANTOPEN | (1<<8))
    CANTOPEN_ISDIR          = C.SQLITE_CANTOPEN_ISDIR          // (SQLITE_CANTOPEN | (2<<8))
    CANTOPEN_FULLPATH       = C.SQLITE_CANTOPEN_FULLPATH       // (SQLITE_CANTOPEN | (3<<8))
    CORRUPT_VTAB            = C.SQLITE_CORRUPT_VTAB            // (SQLITE_CORRUPT | (1<<8))
    READONLY_RECOVERY       = C.SQLITE_READONLY_RECOVERY       // (SQLITE_READONLY | (1<<8))
    READONLY_CANTLOCK       = C.SQLITE_READONLY_CANTLOCK       // (SQLITE_READONLY | (2<<8))
    READONLY_ROLLBACK       = C.SQLITE_READONLY_ROLLBACK       // (SQLITE_READONLY | (3<<8))
    ABORT_ROLLBACK          = C.SQLITE_ABORT_ROLLBACK          // (SQLITE_ABORT | (2<<8))
    CONSTRAINT_CHECK        = C.SQLITE_CONSTRAINT_CHECK        // (SQLITE_CONSTRAINT | (1<<8))
    CONSTRAINT_COMMITHOOK   = C.SQLITE_CONSTRAINT_COMMITHOOK   // (SQLITE_CONSTRAINT | (2<<8))
    CONSTRAINT_FOREIGNKEY   = C.SQLITE_CONSTRAINT_FOREIGNKEY   // (SQLITE_CONSTRAINT | (3<<8))
    CONSTRAINT_FUNCTION     = C.SQLITE_CONSTRAINT_FUNCTION     // (SQLITE_CONSTRAINT | (4<<8))
    CONSTRAINT_NOTNULL      = C.SQLITE_CONSTRAINT_NOTNULL      // (SQLITE_CONSTRAINT | (5<<8))
    CONSTRAINT_PRIMARYKEY   = C.SQLITE_CONSTRAINT_PRIMARYKEY   // (SQLITE_CONSTRAINT | (6<<8))
    CONSTRAINT_TRIGGER      = C.SQLITE_CONSTRAINT_TRIGGER      // (SQLITE_CONSTRAINT | (7<<8))
    CONSTRAINT_UNIQUE       = C.SQLITE_CONSTRAINT_UNIQUE       // (SQLITE_CONSTRAINT | (8<<8))
    CONSTRAINT_VTAB         = C.SQLITE_CONSTRAINT_VTAB         // (SQLITE_CONSTRAINT | (9<<8))
    NOTICE_RECOVER_WAL      = C.SQLITE_NOTICE_RECOVER_WAL      // (SQLITE_NOTICE | (1<<8))
    NOTICE_RECOVER_ROLLBACK = C.SQLITE_NOTICE_RECOVER_ROLLBACK // (SQLITE_NOTICE | (2<<8))
    WARNING_AUTOINDEX       = C.SQLITE_WARNING_AUTOINDEX       // (SQLITE_WARNING | (1<<8))
)

// Codes used by SQLite to indicate the operation type when invoking authorizer
// and row update callbacks.
// [http://www.sqlite.org/c3ref/c_alter_table.html]
const (
    CREATE_INDEX        = C.SQLITE_CREATE_INDEX        // 1
    CREATE_TABLE        = C.SQLITE_CREATE_TABLE        // 2
    CREATE_TEMP_INDEX   = C.SQLITE_CREATE_TEMP_INDEX   // 3
    CREATE_TEMP_TABLE   = C.SQLITE_CREATE_TEMP_TABLE   // 4
    CREATE_TEMP_TRIGGER = C.SQLITE_CREATE_TEMP_TRIGGER // 5
    CREATE_TEMP_VIEW    = C.SQLITE_CREATE_TEMP_VIEW    // 6
    CREATE_TRIGGER      = C.SQLITE_CREATE_TRIGGER      // 7
    CREATE_VIEW         = C.SQLITE_CREATE_VIEW         // 8
    DELETE              = C.SQLITE_DELETE              // 9
    DROP_INDEX          = C.SQLITE_DROP_INDEX          // 10
    DROP_TABLE          = C.SQLITE_DROP_TABLE          // 11
    DROP_TEMP_INDEX     = C.SQLITE_DROP_TEMP_INDEX     // 12
    DROP_TEMP_TABLE     = C.SQLITE_DROP_TEMP_TABLE     // 13
    DROP_TEMP_TRIGGER   = C.SQLITE_DROP_TEMP_TRIGGER   // 14
    DROP_TEMP_VIEW      = C.SQLITE_DROP_TEMP_VIEW      // 15
    DROP_TRIGGER        = C.SQLITE_DROP_TRIGGER        // 16
    DROP_VIEW           = C.SQLITE_DROP_VIEW           // 17
    INSERT              = C.SQLITE_INSERT              // 18
    PRAGMA              = C.SQLITE_PRAGMA              // 19
    READ                = C.SQLITE_READ                // 20
    SELECT              = C.SQLITE_SELECT              // 21
    TRANSACTION         = C.SQLITE_TRANSACTION         // 22
    UPDATE              = C.SQLITE_UPDATE              // 23
    ATTACH              = C.SQLITE_ATTACH              // 24
    DETACH              = C.SQLITE_DETACH              // 25
    ALTER_TABLE         = C.SQLITE_ALTER_TABLE         // 26
    REINDEX             = C.SQLITE_REINDEX             // 27
    ANALYZE             = C.SQLITE_ANALYZE             // 28
    CREATE_VTABLE       = C.SQLITE_CREATE_VTABLE       // 29
    DROP_VTABLE         = C.SQLITE_DROP_VTABLE         // 30
    FUNCTION            = C.SQLITE_FUNCTION            // 31
    SAVEPOINT           = C.SQLITE_SAVEPOINT           // 32
)

// Core SQLite performance counters that can be queried with Status.
// [http://www.sqlite.org/c3ref/c_status_malloc_count.html]
const (
    STATUS_MEMORY_USED        = C.SQLITE_STATUS_MEMORY_USED        // 0
    STATUS_PAGECACHE_USED     = C.SQLITE_STATUS_PAGECACHE_USED     // 1
    STATUS_PAGECACHE_OVERFLOW = C.SQLITE_STATUS_PAGECACHE_OVERFLOW // 2
    STATUS_SCRATCH_USED       = C.SQLITE_STATUS_SCRATCH_USED       // 3
    STATUS_SCRATCH_OVERFLOW   = C.SQLITE_STATUS_SCRATCH_OVERFLOW   // 4
    STATUS_MALLOC_SIZE        = C.SQLITE_STATUS_MALLOC_SIZE        // 5
    STATUS_PARSER_STACK       = C.SQLITE_STATUS_PARSER_STACK       // 6
    STATUS_PAGECACHE_SIZE     = C.SQLITE_STATUS_PAGECACHE_SIZE     // 7
    STATUS_SCRATCH_SIZE       = C.SQLITE_STATUS_SCRATCH_SIZE       // 8
    STATUS_MALLOC_COUNT       = C.SQLITE_STATUS_MALLOC_COUNT       // 9
)

// Connection performance counters that can be queried with Conn.Status.
// [http://www.sqlite.org/c3ref/c_dbstatus_options.html]
const (
    DBSTATUS_LOOKASIDE_USED      = C.SQLITE_DBSTATUS_LOOKASIDE_USED      // 0
    DBSTATUS_CACHE_USED          = C.SQLITE_DBSTATUS_CACHE_USED          // 1
    DBSTATUS_SCHEMA_USED         = C.SQLITE_DBSTATUS_SCHEMA_USED         // 2
    DBSTATUS_STMT_USED           = C.SQLITE_DBSTATUS_STMT_USED           // 3
    DBSTATUS_LOOKASIDE_HIT       = C.SQLITE_DBSTATUS_LOOKASIDE_HIT       // 4
    DBSTATUS_LOOKASIDE_MISS_SIZE = C.SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE // 5
    DBSTATUS_LOOKASIDE_MISS_FULL = C.SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL // 6
    DBSTATUS_CACHE_HIT           = C.SQLITE_DBSTATUS_CACHE_HIT           // 7
    DBSTATUS_CACHE_MISS          = C.SQLITE_DBSTATUS_CACHE_MISS          // 8
    DBSTATUS_CACHE_WRITE         = C.SQLITE_DBSTATUS_CACHE_WRITE         // 9
    DBSTATUS_DEFERRED_FKS        = C.SQLITE_DBSTATUS_DEFERRED_FKS        // 10
)

// Statement performance counters that can be queried with Stmt.Status.
// [http://www.sqlite.org/c3ref/c_stmtstatus_counter.html]
const (
    STMTSTATUS_FULLSCAN_STEP = C.SQLITE_STMTSTATUS_FULLSCAN_STEP // 1
    STMTSTATUS_SORT          = C.SQLITE_STMTSTATUS_SORT          // 2
    STMTSTATUS_AUTOINDEX     = C.SQLITE_STMTSTATUS_AUTOINDEX     // 3
    STMTSTATUS_VM_STEP       = C.SQLITE_STMTSTATUS_VM_STEP       // 4
)

// Per-connection limits that can be queried and changed with Conn.Limit.
// [http://www.sqlite.org/c3ref/c_limit_attached.html]
const (
    LIMIT_LENGTH              = C.SQLITE_LIMIT_LENGTH              // 0
    LIMIT_SQL_LENGTH          = C.SQLITE_LIMIT_SQL_LENGTH          // 1
    LIMIT_COLUMN              = C.SQLITE_LIMIT_COLUMN              // 2
    LIMIT_EXPR_DEPTH          = C.SQLITE_LIMIT_EXPR_DEPTH          // 3
    LIMIT_COMPOUND_SELECT     = C.SQLITE_LIMIT_COMPOUND_SELECT     // 4
    LIMIT_VDBE_OP             = C.SQLITE_LIMIT_VDBE_OP             // 5
    LIMIT_FUNCTION_ARG        = C.SQLITE_LIMIT_FUNCTION_ARG        // 6
    LIMIT_ATTACHED            = C.SQLITE_LIMIT_ATTACHED            // 7
    LIMIT_LIKE_PATTERN_LENGTH = C.SQLITE_LIMIT_LIKE_PATTERN_LENGTH // 8
    LIMIT_VARIABLE_NUMBER     = C.SQLITE_LIMIT_VARIABLE_NUMBER     // 9
    LIMIT_TRIGGER_DEPTH       = C.SQLITE_LIMIT_TRIGGER_DEPTH       // 10
)
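
The extended-code comment above says to mask with 0xFF to recover the general code; in client code that looks like the following (a sketch assuming an *Error value as produced by this package):

    if err, ok := e.(*sqlite3.Error); ok {
        switch err.Code() & 0xFF { // strip the extended bits
        case sqlite3.BUSY, sqlite3.LOCKED:
            // safe to retry after a delay
        case sqlite3.CORRUPT:
            // stop and restore from a backup
        }
    }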

Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/doc.go (new file, generated, vendored; 176 lines)
|
@ -0,0 +1,176 @@
|
|||
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package sqlite3 provides an interface to SQLite version 3 databases.
|
||||
|
||||
Database connections are created either by using this package directly or with
|
||||
the "sqlite3" database/sql driver. The direct interface, which is described
|
||||
below, exposes SQLite-specific features, such as incremental I/O and online
|
||||
backups. The driver is recommended when your application has to support multiple
|
||||
database engines.
|
||||
|
||||
Installation
|
||||
|
||||
Minimum requirements are Go 1.1+ with CGO enabled and GCC/MinGW C compiler. The
|
||||
SQLite amalgamation version 3.8.0.2 (2013-09-03) is compiled as part of the
|
||||
package (see http://www.sqlite.org/amalgamation.html). Compilation options are
|
||||
defined at the top of sqlite3.go (#cgo CFLAGS). Dynamic linking with a shared
|
||||
SQLite library is not supported.
|
||||
|
||||
Windows users should install mingw-w64 (http://mingw-w64.sourceforge.net/),
|
||||
TDM64-GCC (http://tdm-gcc.tdragon.net/), or another MinGW distribution, and make
|
||||
sure that gcc.exe is available from the %PATH%. MSYS is not required.
|
||||
|
||||
Run 'go get code.google.com/p/go-sqlite/go1/sqlite3' to download, build, and
|
||||
install the package.
|
||||
|
||||
Concurrency
|
||||
|
||||
A single connection instance and all of its derived objects (prepared
|
||||
statements, backup operations, etc.) may NOT be used concurrently from multiple
|
||||
goroutines without external synchronization. The only exception is
|
||||
Conn.Interrupt(), which may be called from another goroutine to abort a
|
||||
long-running operation. It is safe to use separate connection instances
|
||||
concurrently, even if they are accessing the same database file. For example:
|
||||
|
||||
// ERROR (without any extra synchronization)
|
||||
c, _ := sqlite3.Open("sqlite.db")
|
||||
go use(c)
|
||||
go use(c)
|
||||
|
||||
// OK
|
||||
c1, _ := sqlite3.Open("sqlite.db")
|
||||
c2, _ := sqlite3.Open("sqlite.db")
|
||||
go use(c1)
|
||||
go use(c2)
|
||||
|
||||
Maps
|
||||
|
||||
Use NamedArgs map to bind values to named statement parameters (see
|
||||
http://www.sqlite.org/lang_expr.html#varparam). Use RowMap to retrieve the
|
||||
current row as a map of column/value pairs. Here is a short example with the
|
||||
error-handling code omitted for brevity:
|
||||
|
||||
c, _ := sqlite3.Open(":memory:")
|
||||
c.Exec("CREATE TABLE x(a, b, c)")
|
||||
|
||||
args := sqlite3.NamedArgs{"$a": 1, "$b": "demo"}
|
||||
c.Exec("INSERT INTO x VALUES($a, $b, $c)", args) // $c will be NULL
|
||||
|
||||
sql := "SELECT rowid, * FROM x"
|
||||
row := make(sqlite3.RowMap)
|
||||
for s, err := c.Query(sql); err == nil; err = s.Next() {
|
||||
var rowid int64
|
||||
s.Scan(&rowid, row) // Assigns 1st column to rowid, the rest to row
|
||||
fmt.Println(rowid, row) // Prints "1 map[a:1 b:demo c:<nil>]"
|
||||
}
|
||||
|
||||
Data Types
|
||||
|
||||
See http://www.sqlite.org/datatype3.html for a description of the SQLite data
|
||||
type system. The following Go data types are supported as arguments to prepared
|
||||
statements and may be used in NamedArgs:
|
||||
|
||||
Go Type SQLite Type Notes
|
||||
--------- ----------- ----------------------------------------------------
|
||||
<nil> NULL Unbound parameters are NULL by default.
|
||||
int INTEGER
|
||||
int64 INTEGER
|
||||
float64 FLOAT
|
||||
bool INTEGER Converted as false = 0, true = 1.
|
||||
string TEXT SQLite makes a private copy when the value is bound.
|
||||
[]byte BLOB SQLite makes a private copy when the value is bound.
|
||||
time.Time INTEGER Converted by calling Unix().
|
||||
RawString TEXT SQLite uses the value directly without copying. The
|
||||
caller must keep a reference to the value for the
|
||||
duration of the query to prevent garbage collection.
|
||||
RawBytes    BLOB      Same as RawString. The value must not be modified
                      for the duration of the query.
ZeroBlob    BLOB      Allocates a zero-filled BLOB of the specified length
                      (e.g. ZeroBlob(4096) allocates 4KB).

Note that the table above describes how the value is bound to the statement. The
final storage class is determined according to the column affinity rules.

See http://www.sqlite.org/c3ref/column_blob.html for a description of how column
values are retrieved from the results of a query. The following static Go data
types are supported for retrieving column values:

Go Type     Req. Type  Notes
----------  ---------  ---------------------------------------------------
*int        INTEGER
*int64      INTEGER
*float64    FLOAT
*bool       INTEGER    Converted as 0 = false, otherwise true.
*string     TEXT       The caller receives a copy of the value.
*[]byte     BLOB       The caller receives a copy of the value.
*time.Time  INTEGER    Converted by calling time.Unix(). Text values are not
                       supported, but the conversion can be performed with
                       the date and time SQL functions.
*RawString  TEXT       The value is used directly without copying and
                       remains valid until the next Stmt method call.
*RawBytes   BLOB       Same as *RawString. The value must not be modified.
                       Re-slicing is ok, but be careful with append().
io.Writer   BLOB       The value is written out directly into the writer.

For *interface{} and RowMap arguments, the Go data type is dynamically selected
based on the SQLite storage class and column declaration prefix:

SQLite Type  Col. Decl.  Go Type    Notes
-----------  ----------  ---------  ----------------------------------------
NULL                     <nil>
INTEGER      "DATE..."   time.Time  Converted by calling time.Unix().
INTEGER      "TIME..."   time.Time  Converted by calling time.Unix().
INTEGER      "BOOL..."   bool       Converted as 0 = false, otherwise true.
INTEGER                  int64
FLOAT                    float64
TEXT                     string
BLOB                     []byte

Database Names

Methods that require a database name as one of the arguments (e.g. Conn.Path())
expect the symbolic name by which the database is known to the connection, not a
path to a file. Valid database names are "main", "temp", or a name specified
after the AS clause in an ATTACH statement.

Callbacks

SQLite can execute callbacks for various internal events. The package provides
types and methods for registering callback handlers. Unless stated otherwise in
the SQLite documentation, callback handlers are not reentrant and must not do
anything to modify the associated database connection. This includes
preparing/running any other SQL statements. The safest bet is to avoid all
interactions with Conn, Stmt, and other related objects within the handler.

Codecs and Encryption

SQLite has an undocumented codec API, which operates between the pager and VFS
layers, and is used by the SQLite Encryption Extension (SEE) to encrypt database
and journal contents. Consider purchasing a SEE license if you require
production-quality encryption support (http://www.hwaci.com/sw/sqlite/see.html).

This package has an experimental API (read: unstable, may eat your data) for
writing codecs in Go. The "codec" subpackage provides additional documentation
and several existing codec implementations.

Codecs are registered via the RegisterCodec function for a specific key prefix.
For example, the "aes-hmac" codec is initialized when a key in the format
"aes-hmac:<...>" is provided to an attached database. The key format after the
first colon is codec-specific. See CodecFunc for more information.

The codec API has several limitations. Codecs cannot be used for in-memory or
temporary databases. Once a database is created, the page size and the amount of
reserved space at the end of each page cannot be changed (i.e. "PRAGMA
page_size=N; VACUUM;" will not work). Online backups will fail unless the
destination database has the same page size and reserve values. Bytes 16 through
23 of page 1 (the database header, see http://www.sqlite.org/fileformat2.html)
cannot be altered, so it is always possible to identify encrypted SQLite
databases.

The rekey function is currently not implemented. The key can only be changed via
the backup API or by dumping and restoring the database contents.
*/
package sqlite3
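To make the binding and scanning tables above concrete, here is a minimal,
hedged sketch. It assumes the package-level Open function and the Conn.Exec,
Conn.Prepare, Stmt.Query, and Stmt.Scan methods that appear elsewhere in this
package; error handling is abbreviated.

func typeMappings() error {
    c, err := sqlite3.Open(":memory:")
    if err != nil {
        return err
    }
    defer c.Close()

    c.Exec("CREATE TABLE t(a INTEGER, b TEXT)")
    // int binds as INTEGER and string binds as TEXT (first table above).
    if err := c.Exec("INSERT INTO t VALUES(?, ?)", 42, "hello"); err != nil {
        return err
    }

    s, err := c.Prepare("SELECT a, b FROM t")
    if err != nil {
        return err
    }
    defer s.Close()

    var a int64
    var b string
    if err := s.Query(); err != nil {
        return err
    }
    // *int64 retrieves INTEGER and *string receives a copy of TEXT
    // (second table above).
    return s.Scan(&a, &b)
}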
161 Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/driver.go generated vendored Normal file
@@ -0,0 +1,161 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sqlite3

import "C"

import (
    "database/sql"
    "database/sql/driver"
    "io"
    "reflect"
    "time"
    "unsafe"
)

// Driver implements the interface required by database/sql.
type Driver string

func register(name string) {
    defer func() { recover() }()
    sql.Register(name, Driver(name))
}

func (Driver) Open(name string) (driver.Conn, error) {
    c, err := Open(name)
    if err != nil {
        return nil, err
    }
    c.BusyTimeout(5 * time.Second)
    return &conn{c}, nil
}

// conn implements driver.Conn.
type conn struct {
    *Conn
}

func (c *conn) Prepare(query string) (driver.Stmt, error) {
    if c.Conn.db == nil {
        return nil, driver.ErrBadConn
    }
    s, err := c.Conn.Prepare(query)
    if err != nil {
        return nil, err
    }
    return &stmt{s, false}, nil
}

func (c *conn) Begin() (driver.Tx, error) {
    if c.Conn.db == nil {
        return nil, driver.ErrBadConn
    }
    if err := c.Conn.Begin(); err != nil {
        return nil, err
    }
    return c.Conn, nil
}

func (c *conn) Exec(query string, args []driver.Value) (driver.Result, error) {
    if c.Conn.db == nil {
        return nil, driver.ErrBadConn
    }
    if err := c.Conn.Exec(query, vtoi(args)...); err != nil {
        return nil, err
    }
    // TODO: Do the driver.Result values need to be cached?
    return result{c.Conn}, nil
}

// stmt implements driver.Stmt.
type stmt struct {
    *Stmt
    closed bool
}

func (s *stmt) Close() error {
    if !s.closed {
        s.closed = true
        if !s.Stmt.Busy() {
            return s.Stmt.Close()
        }
    }
    return nil
}

func (s *stmt) NumInput() int {
    return s.Stmt.NumParams()
}

func (s *stmt) Exec(args []driver.Value) (driver.Result, error) {
    if err := s.Stmt.Exec(vtoi(args)...); err != nil {
        return nil, err
    }
    return result{s.Stmt.Conn()}, nil
}

func (s *stmt) Query(args []driver.Value) (driver.Rows, error) {
    if err := s.Stmt.Query(vtoi(args)...); err != nil && err != io.EOF {
        return nil, err
    }
    return &rows{s, true}, nil
}

// result implements driver.Result.
type result struct {
    *Conn
}

func (r result) LastInsertId() (int64, error) {
    return int64(r.Conn.LastInsertId()), nil
}

func (r result) RowsAffected() (int64, error) {
    return int64(r.Conn.RowsAffected()), nil
}

// rows implements driver.Rows.
type rows struct {
    *stmt
    first bool
}

func (r *rows) Close() error {
    if r.stmt.closed {
        return r.stmt.Stmt.Close()
    }
    r.stmt.Stmt.Reset()
    return nil
}

func (r *rows) Next(dest []driver.Value) error {
    if r.first {
        r.first = false
        if !r.stmt.Stmt.Busy() {
            return io.EOF
        }
    } else if err := r.stmt.Stmt.Next(); err != nil {
        return err
    }
    for i := range dest {
        v := (*interface{})(&dest[i])
        err := r.stmt.Stmt.scanDynamic(C.int(i), v, true)
        if err != nil {
            return err
        }
    }
    return nil
}

// vtoi converts []driver.Value to []interface{} without copying the contents.
func vtoi(v []driver.Value) (i []interface{}) {
    if len(v) > 0 {
        h := (*reflect.SliceHeader)(unsafe.Pointer(&i))
        h.Data = uintptr(unsafe.Pointer(&v[0]))
        h.Len = len(v)
        h.Cap = cap(v)
    }
    return
}
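Since Driver implements database/sql/driver, the package can also be used
through the standard library. A hedged sketch follows; the driver name
"sqlite3" is an assumption, because the init code that calls register is not
part of this hunk.

package main

import (
    "database/sql"
    "log"

    _ "code.google.com/p/go-sqlite/go1/sqlite3" // registers the driver
)

func main() {
    // The driver name "sqlite3" is assumed; register(name) above picks the
    // name it is invoked with.
    db, err := sql.Open("sqlite3", ":memory:")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    if _, err := db.Exec("CREATE TABLE t(x INTEGER)"); err != nil {
        log.Fatal(err)
    }
    res, err := db.Exec("INSERT INTO t VALUES(?)", 1)
    if err != nil {
        log.Fatal(err)
    }
    id, _ := res.LastInsertId() // served by result.LastInsertId above
    log.Println("rowid:", id)
}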
182 Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/io.go generated vendored Normal file
@@ -0,0 +1,182 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sqlite3

/*
#include "sqlite3.h"
*/
import "C"

import (
    "io"
    "runtime"
)

// ErrBlobFull is returned by BlobIO.Write when there isn't enough space left to
// write the provided bytes.
var ErrBlobFull = &Error{ERROR, "incremental write failed, no space left"}

// BlobIO is a handle to a single BLOB (binary large object) or TEXT value
// opened for incremental I/O. This allows the value to be treated as a file for
// reading and writing. The value length cannot be changed using this API; use
// an UPDATE statement for that. The recommended way of allocating space for a
// BLOB is to use the ZeroBlob type or the zeroblob() SQL function.
// [http://www.sqlite.org/c3ref/blob.html]
type BlobIO struct {
    conn *Conn
    blob *C.sqlite3_blob

    row int64 // ROWID of the row containing the BLOB/TEXT value
    len int   // Value length in bytes
    off int   // Current read/write offset
}

// newBlobIO initializes an incremental I/O operation.
func newBlobIO(c *Conn, db, tbl, col string, row int64, rw bool) (*BlobIO, error) {
    db += "\x00"
    tbl += "\x00"
    col += "\x00"

    var blob *C.sqlite3_blob
    rc := C.sqlite3_blob_open(c.db, cStr(db), cStr(tbl), cStr(col),
        C.sqlite3_int64(row), cBool(rw), &blob)
    if rc != OK {
        return nil, libErr(rc, c.db)
    }

    b := &BlobIO{
        conn: c,
        blob: blob,
        row:  row,
        len:  int(C.sqlite3_blob_bytes(blob)),
    }
    runtime.SetFinalizer(b, (*BlobIO).Close)
    return b, nil
}

// Close releases all resources associated with the incremental I/O operation.
// It is important to check the error returned by this method, since disk I/O
// and other types of errors may not be reported until the changes are actually
// committed to the database.
// [http://www.sqlite.org/c3ref/blob_close.html]
func (b *BlobIO) Close() error {
    if blob := b.blob; blob != nil {
        b.blob = nil
        b.len = 0
        b.off = 0
        runtime.SetFinalizer(b, nil)
        if rc := C.sqlite3_blob_close(blob); rc != OK {
            return libErr(rc, b.conn.db)
        }
    }
    return nil
}

// Conn returns the connection that created this incremental I/O operation.
func (b *BlobIO) Conn() *Conn {
    return b.conn
}

// Row returns the ROWID of the row containing the BLOB/TEXT value.
func (b *BlobIO) Row() int64 {
    return b.row
}

// Len returns the length of the BLOB/TEXT value in bytes. It is not possible to
// read/write/seek beyond this length. The length changes to 0 if the I/O handle
// expires due to an update of any column in the same row. This condition is
// indicated by an ABORT error code returned from Read or Write. An expired
// handle is closed automatically and cannot be reopened. Any writes that
// occurred before the abort are not rolled back.
// [http://www.sqlite.org/c3ref/blob_bytes.html]
func (b *BlobIO) Len() int {
    return b.len
}

// Read implements the io.Reader interface.
// [http://www.sqlite.org/c3ref/blob_read.html]
func (b *BlobIO) Read(p []byte) (n int, err error) {
    if b.blob == nil {
        return 0, ErrBadIO
    }
    if b.off >= b.len {
        return 0, io.EOF
    }
    if n = b.len - b.off; len(p) < n {
        n = len(p)
    }
    rc := C.sqlite3_blob_read(b.blob, cBytes(p), C.int(n), C.int(b.off))
    return b.io(rc, n)
}

// Write implements the io.Writer interface. The number of bytes written is
// always either 0 or len(p). ErrBlobFull is returned if there isn't enough
// space left to write all of p.
// [http://www.sqlite.org/c3ref/blob_write.html]
func (b *BlobIO) Write(p []byte) (n int, err error) {
    if b.blob == nil {
        return 0, ErrBadIO
    }
    if n = len(p); b.off+n > b.len {
        // Doesn't make sense to do a partial write. Better to return quickly
        // and let the caller reallocate the BLOB.
        return 0, ErrBlobFull
    }
    rc := C.sqlite3_blob_write(b.blob, cBytes(p), C.int(n), C.int(b.off))
    return b.io(rc, n)
}

// Seek implements the io.Seeker interface.
func (b *BlobIO) Seek(offset int64, whence int) (ret int64, err error) {
    if b.blob == nil {
        return 0, ErrBadIO
    }
    switch whence {
    case 0:
    case 1:
        offset += int64(b.off)
    case 2:
        offset += int64(b.len)
    default:
        return 0, pkgErr(MISUSE, "invalid whence for BlobIO.Seek (%d)", whence)
    }
    if offset < 0 || offset > int64(b.len) {
        return 0, pkgErr(MISUSE, "invalid offset for BlobIO.Seek (%d)", offset)
    }
    b.off = int(offset)
    return offset, nil
}

// Reopen closes the current value and opens another one in the same column,
// specified by its ROWID. If an error is encountered, the I/O handle becomes
// unusable and is automatically closed.
// [http://www.sqlite.org/c3ref/blob_reopen.html]
func (b *BlobIO) Reopen(row int64) error {
    if b.blob == nil {
        return ErrBadIO
    }
    if rc := C.sqlite3_blob_reopen(b.blob, C.sqlite3_int64(row)); rc != OK {
        err := libErr(rc, b.conn.db)
        b.Close()
        return err
    }
    b.row = row
    b.len = int(C.sqlite3_blob_bytes(b.blob))
    b.off = 0
    return nil
}

// io handles the completion of a single Read/Write call.
func (b *BlobIO) io(rc C.int, n int) (int, error) {
    if rc == OK {
        b.off += n
        return n, nil
    }
    err := libErr(rc, b.conn.db)
    if rc == ABORT {
        b.Close()
    }
    return 0, err
}
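The following sketch shows how ZeroBlob allocation and BlobIO fit together.
It is hedged: the Conn.BlobIO constructor name is an assumption, since only
the unexported newBlobIO appears in this file.

func storeBlob(c *sqlite3.Conn, payload []byte) error {
    // Reserve exactly len(payload) zero-filled bytes.
    if err := c.Exec("INSERT INTO files(data) VALUES(?)",
        sqlite3.ZeroBlob(len(payload))); err != nil {
        return err
    }
    row := int64(c.LastInsertId())

    // Hypothetical exported wrapper around newBlobIO; rw = true.
    b, err := c.BlobIO("main", "files", "data", row, true)
    if err != nil {
        return err
    }
    defer b.Close()

    // Write fails with ErrBlobFull rather than doing a partial write.
    if _, err := b.Write(payload); err != nil {
        return err
    }
    return b.Close() // check this error: commit problems surface here
}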
117 Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/lib/codec.c generated vendored Normal file
@@ -0,0 +1,117 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#if defined(SQLITE_AMALGAMATION) && defined(SQLITE_HAS_CODEC)

#include "codec.h"

// codec.go exports.
int go_codec_init(const CodecCtx*,void**,char**);
int go_codec_reserve(void*);
void go_codec_resize(void*,int,int);
void *go_codec_exec(void*,void*,u32,int);
void go_codec_get_key(void*,void**,int*);
void go_codec_free(void*);

// sqlite3_key sets the codec key for the main database.
SQLITE_API int sqlite3_key(sqlite3 *db, const void *pKey, int nKey) {
    return sqlite3_key_v2(db, 0, pKey, nKey);
}

// sqlite3_key_v2 sets the codec key for the specified database.
SQLITE_API int sqlite3_key_v2(sqlite3 *db, const char *zDb, const void *pKey, int nKey) {
    int iDb = 0;
    int rc;
    sqlite3_mutex_enter(db->mutex);
    if (zDb && zDb[0]) {
        iDb = sqlite3FindDbName(db, zDb);
    }
    if (iDb < 0) {
        rc = SQLITE_ERROR;
        sqlite3Error(db, SQLITE_ERROR, "unknown database: %s", zDb);
    } else {
        rc = sqlite3CodecAttach(db, iDb, pKey, nKey);
    }
    rc = sqlite3ApiExit(db, rc);
    sqlite3_mutex_leave(db->mutex);
    return rc;
}

// sqlite3_rekey changes the codec key for the main database.
SQLITE_API int sqlite3_rekey(sqlite3 *db, const void *pKey, int nKey) {
    return sqlite3_rekey_v2(db, 0, pKey, nKey);
}

// sqlite3_rekey_v2 changes the codec key for the specified database.
SQLITE_API int sqlite3_rekey_v2(sqlite3 *db, const char *zDb, const void *pKey, int nKey) {
    int rc;
    sqlite3_mutex_enter(db->mutex);

    rc = SQLITE_ERROR;
    sqlite3Error(db, SQLITE_ERROR, "rekey is not implemented");

    rc = sqlite3ApiExit(db, rc);
    sqlite3_mutex_leave(db->mutex);
    return rc;
}

// sqlite3_activate_see isn't used by Go codecs, but it needs to be linked in.
SQLITE_API void sqlite3_activate_see(const char *zPassPhrase) {}

// sqlite3CodecAttach initializes the codec, reserves space at the end of each
// page, and attaches the codec to the specified database.
int sqlite3CodecAttach(sqlite3 *db, int iDb, const void *pKey, int nKey) {
    Btree *pBt = db->aDb[iDb].pBt;
    Pager *pPager = sqlite3BtreePager(pBt);
    CodecCtx ctx;
    void *pCodec = 0;
    char *zErrMsg = 0;
    int rc;

    // An empty KEY clause in an ATTACH statement disables the codec and SQLite
    // doesn't support codecs for in-memory databases.
    if (nKey <= 0 || pPager->memDb) return SQLITE_OK;

    ctx.db    = db;
    ctx.zPath = sqlite3BtreeGetFilename(pBt);
    ctx.zName = db->aDb[iDb].zName;
    ctx.nBuf  = sqlite3BtreeGetPageSize(pBt);
    ctx.nRes  = sqlite3BtreeGetReserve(pBt);
    ctx.pKey  = pKey;
    ctx.nKey  = nKey;

    sqlite3BtreeEnter(pBt);
    ctx.fixed = (pBt->pBt->btsFlags & BTS_PAGESIZE_FIXED) != 0;
    sqlite3BtreeLeave(pBt);

    if ((rc=go_codec_init(&ctx, &pCodec, &zErrMsg)) != SQLITE_OK) {
        sqlite3Error(db, rc, (zErrMsg ? "%s" : 0), zErrMsg);
        free(zErrMsg);
    } else if (pCodec) {
        int nRes = go_codec_reserve(pCodec);
        if (nRes != ctx.nRes && nRes >= 0) {
            rc = sqlite3BtreeSetPageSize(pBt, -1, nRes, 0);
        }
        if (rc != SQLITE_OK) {
            go_codec_free(pCodec);
            sqlite3Error(db, rc, "unable to reserve page space for the codec");
        } else {
            sqlite3PagerSetCodec(pPager, go_codec_exec, go_codec_resize, go_codec_free, pCodec);
        }
    }
    return rc;
}

// sqlite3CodecGetKey returns the codec key for the specified database.
void sqlite3CodecGetKey(sqlite3 *db, int iDb, void **pKey, int *nKey) {
    void *pCodec = sqlite3PagerGetCodec(sqlite3BtreePager(db->aDb[iDb].pBt));
    *pKey = 0;
    *nKey = 0;
    if (pCodec) {
        go_codec_get_key(pCodec, pKey, nKey);
    }
}

#endif
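From Go, keys reach sqlite3CodecAttach above when a database is attached with
a KEY clause, as the package documentation describes. A hedged sketch; the
exact key material after the "aes-hmac:" prefix is codec-specific and shown
only as a placeholder:

func attachEncrypted(c *sqlite3.Conn) error {
    // "aes-hmac" selects the codec registered for that prefix; the
    // "<key material>" part is a placeholder, not real syntax.
    return c.Exec("ATTACH DATABASE 'secret.db' AS enc KEY 'aes-hmac:<key material>'")
}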
24 Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/lib/codec.h generated vendored Normal file
@@ -0,0 +1,24 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#ifndef _CODEC_H_
#define _CODEC_H_

// Codec initialization context.
typedef struct {
    sqlite3 *db;
    const char *zPath;
    const char *zName;
    int nBuf;
    int nRes;
    int fixed;
    const void *pKey;
    int nKey;
} CodecCtx;

// SQLite codec hooks.
int sqlite3CodecAttach(sqlite3*,int,const void*,int);
void sqlite3CodecGetKey(sqlite3*,int,void**,int*);

#endif
141343 Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/lib/sqlite3.c generated vendored Normal file
File diff suppressed because it is too large
7297 Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/lib/sqlite3.h generated vendored Normal file
File diff suppressed because it is too large
6 Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/sqlite3.c generated vendored Normal file
@@ -0,0 +1,6 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "lib/sqlite3.c"
#include "lib/codec.c"
1137 Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/sqlite3.go generated vendored Normal file
File diff suppressed because it is too large
6 Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/sqlite3.h generated vendored Normal file
@@ -0,0 +1,6 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "lib/sqlite3.h"
#include "lib/codec.h"
1307 Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/sqlite3_test.go generated vendored Normal file
File diff suppressed because it is too large
375 Godeps/_workspace/src/code.google.com/p/go-sqlite/go1/sqlite3/util.go generated vendored Normal file
@@ -0,0 +1,375 @@
// Copyright 2013 The Go-SQLite Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sqlite3

/*
#include "sqlite3.h"
*/
import "C"

import (
    "bytes"
    "fmt"
    "reflect"
    "unsafe"
)

// NamedArgs is a name/value map of arguments passed to a prepared statement
// that uses ?NNN, :AAA, @AAA, and/or $AAA parameter formats. Name matching is
// case-sensitive and the prefix character (one of [?:@$]) must be included in
// the name. Names that are missing from the map are treated as NULL. Names that
// are not used in the prepared statement are ignored.
//
// It is not possible to mix named and anonymous ("?") parameters in the same
// statement.
// [http://www.sqlite.org/lang_expr.html#varparam]
type NamedArgs map[string]interface{}

// RowMap may be passed as the last (or only) argument to Stmt.Scan to create a
// map of all remaining column/value pairs in the current row. The map is not
// cleared before being populated with new column values. Assignment is
// performed in left-to-right column order, and values may be overwritten if the
// query returns two or more columns with identical names.
type RowMap map[string]interface{}

// RawString and RawBytes are special string and []byte types that may be used
// for database input and output without the cost of an extra copy operation.
//
// When used as an argument to a statement, the contents are bound using the
// SQLITE_STATIC flag instead of SQLITE_TRANSIENT. This requires the contents to
// remain valid and unmodified until the end of statement execution. In
// particular, the caller must keep a reference to the value to prevent it from
// being garbage collected.
//
// When used for retrieving query output, the internal string/[]byte pointer is
// set to reference memory belonging to SQLite. The memory remains valid until
// another method is called on the Stmt object and should not be modified.
type (
    RawString string
    RawBytes  []byte
)

// Copy returns a Go-managed copy of s.
func (s RawString) Copy() string {
    if s == "" {
        return ""
    }
    h := (*reflect.StringHeader)(unsafe.Pointer(&s))
    return C.GoStringN((*C.char)(unsafe.Pointer(h.Data)), C.int(h.Len))
}

// Copy returns a Go-managed copy of b.
func (b RawBytes) Copy() []byte {
    if len(b) == 0 {
        if b == nil {
            return nil
        }
        return []byte("")
    }
    h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
    return C.GoBytes(unsafe.Pointer(h.Data), C.int(h.Len))
}

// ZeroBlob is a special argument type used to allocate a zero-filled BLOB of
// the specified length. The BLOB can then be opened for incremental I/O to
// efficiently transfer a large amount of data. The maximum BLOB size can be
// queried with Conn.Limit(LIMIT_LENGTH, -1).
type ZeroBlob int

// BusyFunc is a callback function invoked by SQLite when it is unable to
// acquire a lock on a table. Count is the number of times that the callback has
// been invoked for this locking event so far. If the function returns false,
// then the operation is aborted. Otherwise, the function should block for a
// while before returning true and letting SQLite make another locking attempt.
type BusyFunc func(count int) (retry bool)

// CommitFunc is a callback function invoked by SQLite before a transaction is
// committed. If the function returns true, the transaction is rolled back.
type CommitFunc func() (abort bool)

// RollbackFunc is a callback function invoked by SQLite when a transaction is
// rolled back.
type RollbackFunc func()

// UpdateFunc is a callback function invoked by SQLite when a row is updated,
// inserted, or deleted.
type UpdateFunc func(op int, db, tbl RawString, row int64)

// Error is returned for all SQLite API result codes other than OK, ROW, and
// DONE.
type Error struct {
    rc  int
    msg string
}

// NewError creates a new Error instance using the specified SQLite result code
// and error message.
func NewError(rc int, msg string) *Error {
    return &Error{rc, msg}
}

// libErr reports an error originating in SQLite. The error message is obtained
// from the database connection when possible, which may include some additional
// information. Otherwise, the result code is translated to a generic message.
func libErr(rc C.int, db *C.sqlite3) error {
    if db != nil && rc == C.sqlite3_errcode(db) {
        return &Error{int(rc), C.GoString(C.sqlite3_errmsg(db))}
    }
    return &Error{int(rc), C.GoString(C.sqlite3_errstr(rc))}
}

// pkgErr reports an error originating in this package.
func pkgErr(rc int, msg string, v ...interface{}) error {
    if len(v) == 0 {
        return &Error{rc, msg}
    }
    return &Error{rc, fmt.Sprintf(msg, v...)}
}

// Code returns the SQLite extended result code.
func (err *Error) Code() int {
    return err.rc
}

// Error implements the error interface.
func (err *Error) Error() string {
    return fmt.Sprintf("sqlite3: %s [%d]", err.msg, err.rc)
}

// Errors returned for access attempts to closed or invalid objects.
var (
    ErrBadConn   = &Error{MISUSE, "closed or invalid connection"}
    ErrBadStmt   = &Error{MISUSE, "closed or invalid statement"}
    ErrBadIO     = &Error{MISUSE, "closed or invalid incremental I/O operation"}
    ErrBadBackup = &Error{MISUSE, "closed or invalid backup operation"}
)

// Complete returns true if sql appears to contain a complete statement that is
// ready to be parsed. This does not validate the statement syntax.
// [http://www.sqlite.org/c3ref/complete.html]
func Complete(sql string) bool {
    if initErr != nil {
        return false
    }
    sql += "\x00"
    return C.sqlite3_complete(cStr(sql)) == 1
}

// ReleaseMemory attempts to free n bytes of heap memory by deallocating
// non-essential memory held by the SQLite library. It returns the number of
// bytes actually freed.
//
// This function is currently a no-op because SQLite is not compiled with the
// SQLITE_ENABLE_MEMORY_MANAGEMENT option.
// [http://www.sqlite.org/c3ref/release_memory.html]
func ReleaseMemory(n int) int {
    if initErr != nil {
        return 0
    }
    return int(C.sqlite3_release_memory(C.int(n)))
}

// SingleThread returns true if the SQLite library was compiled with
// -DSQLITE_THREADSAFE=0. In this threading mode all mutex code is omitted and
// the package becomes unsafe for concurrent access, even to separate database
// connections.
//
// The SQLite source that's part of this package is compiled with
// -DSQLITE_THREADSAFE=2, so this function should always return false. It is
// kept for backward compatibility from when dynamic linking was supported in
// Go 1.0.
// [http://www.sqlite.org/threadsafe.html]
func SingleThread() bool {
    return initErr == nil && C.sqlite3_threadsafe() == 0
}

// SoftHeapLimit sets and/or queries the soft limit on the amount of heap memory
// that may be allocated by SQLite. A negative value for n keeps the current
// limit, while 0 removes the limit. The previous limit value is returned, with
// negative values indicating an error.
// [http://www.sqlite.org/c3ref/soft_heap_limit64.html]
func SoftHeapLimit(n int64) int64 {
    if initErr != nil {
        return -1
    }
    return int64(C.sqlite3_soft_heap_limit64(C.sqlite3_int64(n)))
}

// SourceId returns the check-in identifier of SQLite within its configuration
// management system.
// [http://www.sqlite.org/c3ref/c_source_id.html]
func SourceId() string {
    if initErr != nil {
        return ""
    }
    return C.GoString(C.sqlite3_sourceid())
}

// Status returns the current and peak values of a core performance counter,
// specified by one of the STATUS constants. If reset is true, the peak value is
// reset back down to the current value after retrieval.
// [http://www.sqlite.org/c3ref/status.html]
func Status(op int, reset bool) (cur, peak int, err error) {
    if initErr != nil {
        return 0, 0, initErr
    }
    var cCur, cPeak C.int
    rc := C.sqlite3_status(C.int(op), &cCur, &cPeak, cBool(reset))
    if rc != OK {
        return 0, 0, pkgErr(MISUSE, "invalid status op (%d)", op)
    }
    return int(cCur), int(cPeak), nil
}

// Version returns the SQLite version as a string in the format "X.Y.Z[.N]".
// [http://www.sqlite.org/c3ref/libversion.html]
func Version() string {
    if initErr != nil {
        return ""
    }
    return goStr(C.sqlite3_libversion())
}

// VersionNum returns the SQLite version as an integer in the format X*1000000 +
// Y*1000 + Z, where X is the major version, Y is the minor version, and Z is
// the release number.
func VersionNum() int {
    if initErr != nil {
        return 0
    }
    return int(C.sqlite3_libversion_number())
}

// Print prints out all rows returned by a query. This function is intended as a
// debugging aid and may be removed or altered in the future. Do not use it in
// production applications.
func Print(s *Stmt) error {
    if s == nil || s.NumColumns() == 0 {
        return nil
    }
    var err error
    if !s.Busy() {
        if err = s.Query(); err != nil {
            return err
        }
    }
    cols := s.Columns()
    buf := bytes.NewBuffer(make([]byte, 0, len(cols)*10))
    row := make(RowMap, len(cols))

    buf.WriteByte('~')
    for _, col := range cols {
        fmt.Fprintf(buf, " %s ~", col)
    }
    fmt.Println(buf)
    for ; err == nil; err = s.Next() {
        if err = s.Scan(row); err != nil {
            return err
        }
        buf.Reset()
        buf.WriteByte('|')
        for _, col := range cols {
            fmt.Fprintf(buf, " %*v |", len(col), row[col])
        }
        fmt.Println(buf)
    }
    return err
}

// raw casts s to a RawString.
func raw(s string) RawString {
    return RawString(s)
}

// cStr returns a pointer to the first byte in s.
func cStr(s string) *C.char {
    h := (*reflect.StringHeader)(unsafe.Pointer(&s))
    return (*C.char)(unsafe.Pointer(h.Data))
}

// cStrOffset returns the offset of p in s or -1 if p doesn't point into s.
func cStrOffset(s string, p *C.char) int {
    h := (*reflect.StringHeader)(unsafe.Pointer(&s))
    if off := uintptr(unsafe.Pointer(p)) - h.Data; off < uintptr(h.Len) {
        return int(off)
    }
    return -1
}

// cBytes returns a pointer to the first byte in b.
func cBytes(b []byte) unsafe.Pointer {
    return unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(&b)).Data)
}

// cBool returns a C representation of a Go bool (false = 0, true = 1).
func cBool(b bool) C.int {
    if b {
        return 1
    }
    return 0
}

// goStr returns a Go representation of a null-terminated C string.
func goStr(p *C.char) (s string) {
    if p != nil && *p != 0 {
        h := (*reflect.StringHeader)(unsafe.Pointer(&s))
        h.Data = uintptr(unsafe.Pointer(p))
        for *p != 0 {
            p = (*C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1)) // p++
        }
        h.Len = int(uintptr(unsafe.Pointer(p)) - h.Data)
    }
    return
}

// goStrN returns a Go representation of an n-byte C string.
func goStrN(p *C.char, n C.int) (s string) {
    if n > 0 {
        h := (*reflect.StringHeader)(unsafe.Pointer(&s))
        h.Data = uintptr(unsafe.Pointer(p))
        h.Len = int(n)
    }
    return
}

// goBytes returns a Go representation of an n-byte C array.
func goBytes(p unsafe.Pointer, n C.int) (b []byte) {
    if n > 0 {
        h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
        h.Data = uintptr(p)
        h.Len = int(n)
        h.Cap = int(n)
    }
    return
}

// bstr returns a string pointing into the byte slice b.
func bstr(b []byte) (s string) {
    if len(b) > 0 {
        h := (*reflect.StringHeader)(unsafe.Pointer(&s))
        h.Data = uintptr(unsafe.Pointer(&b[0]))
        h.Len = len(b)
    }
    return
}

//export go_busy_handler
func go_busy_handler(c unsafe.Pointer, count C.int) (retry C.int) {
    return cBool((*Conn)(c).busy(int(count)))
}

//export go_commit_hook
func go_commit_hook(c unsafe.Pointer) (abort C.int) {
    return cBool((*Conn)(c).commit())
}

//export go_rollback_hook
func go_rollback_hook(c unsafe.Pointer) {
    (*Conn)(c).rollback()
}

//export go_update_hook
func go_update_hook(c unsafe.Pointer, op C.int, db, tbl *C.char, row C.sqlite3_int64) {
    (*Conn)(c).update(int(op), raw(goStr(db)), raw(goStr(tbl)), int64(row))
}
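NamedArgs and RowMap compose as follows. A hedged sketch: it assumes
Exec/Query accept a single NamedArgs value as their argument list, as the
type's documentation implies.

func namedArgsExample(c *sqlite3.Conn) error {
    // Keys include the prefix character; missing names bind as NULL.
    args := sqlite3.NamedArgs{":a": int64(1), ":b": "two"}
    if err := c.Exec("INSERT INTO t(a, b) VALUES(:a, :b)", args); err != nil {
        return err
    }

    s, err := c.Prepare("SELECT a, b FROM t")
    if err != nil {
        return err
    }
    defer s.Close()

    if err := s.Query(); err != nil {
        return err // io.EOF here means the query returned no rows
    }
    // RowMap as the only Scan argument collects all remaining columns;
    // row["a"] arrives as int64 and row["b"] as string per the doc tables.
    row := make(sqlite3.RowMap)
    return s.Scan(row)
}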
@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,125 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package ed25519 implements the Ed25519 signature algorithm. See
// http://ed25519.cr.yp.to/.
package ed25519

// This code is a port of the public domain, "ref10" implementation of ed25519
// from SUPERCOP.

import (
    "crypto/sha512"
    "crypto/subtle"
    "io"

    "github.com/agl/ed25519/edwards25519"
)

const (
    PublicKeySize  = 32
    PrivateKeySize = 64
    SignatureSize  = 64
)

// GenerateKey generates a public/private key pair using randomness from rand.
func GenerateKey(rand io.Reader) (publicKey *[PublicKeySize]byte, privateKey *[PrivateKeySize]byte, err error) {
    privateKey = new([64]byte)
    publicKey = new([32]byte)
    _, err = io.ReadFull(rand, privateKey[:32])
    if err != nil {
        return nil, nil, err
    }

    h := sha512.New()
    h.Write(privateKey[:32])
    digest := h.Sum(nil)

    digest[0] &= 248
    digest[31] &= 127
    digest[31] |= 64

    var A edwards25519.ExtendedGroupElement
    var hBytes [32]byte
    copy(hBytes[:], digest)
    edwards25519.GeScalarMultBase(&A, &hBytes)
    A.ToBytes(publicKey)

    copy(privateKey[32:], publicKey[:])
    return
}

// Sign signs the message with privateKey and returns a signature.
func Sign(privateKey *[PrivateKeySize]byte, message []byte) *[SignatureSize]byte {
    h := sha512.New()
    h.Write(privateKey[:32])

    var digest1, messageDigest, hramDigest [64]byte
    var expandedSecretKey [32]byte
    h.Sum(digest1[:0])
    copy(expandedSecretKey[:], digest1[:])
    expandedSecretKey[0] &= 248
    expandedSecretKey[31] &= 63
    expandedSecretKey[31] |= 64

    h.Reset()
    h.Write(digest1[32:])
    h.Write(message)
    h.Sum(messageDigest[:0])

    var messageDigestReduced [32]byte
    edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
    var R edwards25519.ExtendedGroupElement
    edwards25519.GeScalarMultBase(&R, &messageDigestReduced)

    var encodedR [32]byte
    R.ToBytes(&encodedR)

    h.Reset()
    h.Write(encodedR[:])
    h.Write(privateKey[32:])
    h.Write(message)
    h.Sum(hramDigest[:0])
    var hramDigestReduced [32]byte
    edwards25519.ScReduce(&hramDigestReduced, &hramDigest)

    var s [32]byte
    edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)

    signature := new([64]byte)
    copy(signature[:], encodedR[:])
    copy(signature[32:], s[:])
    return signature
}

// Verify returns true iff sig is a valid signature of message by publicKey.
func Verify(publicKey *[PublicKeySize]byte, message []byte, sig *[SignatureSize]byte) bool {
    if sig[63]&224 != 0 {
        return false
    }

    var A edwards25519.ExtendedGroupElement
    if !A.FromBytes(publicKey) {
        return false
    }

    h := sha512.New()
    h.Write(sig[:32])
    h.Write(publicKey[:])
    h.Write(message)
    var digest [64]byte
    h.Sum(digest[:0])

    var hReduced [32]byte
    edwards25519.ScReduce(&hReduced, &digest)

    var R edwards25519.ProjectiveGroupElement
    var b [32]byte
    copy(b[:], sig[32:])
    edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)

    var checkR [32]byte
    R.ToBytes(&checkR)
    return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1
}
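The three exported functions compose directly; every identifier in this
minimal usage sketch appears in the file above.

package main

import (
    "crypto/rand"
    "fmt"

    "github.com/agl/ed25519"
)

func main() {
    pub, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        panic(err)
    }
    msg := []byte("hello, ed25519")
    sig := ed25519.Sign(priv, msg)
    fmt.Println(ed25519.Verify(pub, msg, sig)) // prints: true
}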
@@ -0,0 +1,105 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ed25519

import (
    "bufio"
    "bytes"
    "compress/gzip"
    "encoding/hex"
    "io"
    "os"
    "strings"
    "testing"
)

type zeroReader struct{}

func (zeroReader) Read(buf []byte) (int, error) {
    for i := range buf {
        buf[i] = 0
    }
    return len(buf), nil
}

func TestSignVerify(t *testing.T) {
    var zero zeroReader
    public, private, _ := GenerateKey(zero)

    message := []byte("test message")
    sig := Sign(private, message)
    if !Verify(public, message, sig) {
        t.Errorf("valid signature rejected")
    }

    wrongMessage := []byte("wrong message")
    if Verify(public, wrongMessage, sig) {
        t.Errorf("signature of different message accepted")
    }
}

func TestGolden(t *testing.T) {
    // sign.input.gz is a selection of test cases from
    // http://ed25519.cr.yp.to/python/sign.input
    testDataZ, err := os.Open("testdata/sign.input.gz")
    if err != nil {
        t.Fatal(err)
    }
    defer testDataZ.Close()
    testData, err := gzip.NewReader(testDataZ)
    if err != nil {
        t.Fatal(err)
    }
    defer testData.Close()

    in := bufio.NewReaderSize(testData, 1<<12)
    lineNo := 0
    for {
        lineNo++
        lineBytes, isPrefix, err := in.ReadLine()
        if isPrefix {
            t.Fatal("bufio buffer too small")
        }
        if err != nil {
            if err == io.EOF {
                break
            }
            t.Fatalf("error reading test data: %s", err)
        }

        line := string(lineBytes)
        parts := strings.Split(line, ":")
        if len(parts) != 5 {
            t.Fatalf("bad number of parts on line %d", lineNo)
        }

        privBytes, _ := hex.DecodeString(parts[0])
        pubKeyBytes, _ := hex.DecodeString(parts[1])
        msg, _ := hex.DecodeString(parts[2])
        sig, _ := hex.DecodeString(parts[3])
        // The signatures in the test vectors also include the message
        // at the end, but we just want R and S.
        sig = sig[:SignatureSize]

        if l := len(pubKeyBytes); l != PublicKeySize {
            t.Fatalf("bad public key length on line %d: got %d bytes", lineNo, l)
        }

        var priv [PrivateKeySize]byte
        copy(priv[:], privBytes)
        copy(priv[32:], pubKeyBytes)

        sig2 := Sign(&priv, msg)
        if !bytes.Equal(sig, sig2[:]) {
            t.Errorf("different signature result on line %d: %x vs %x", lineNo, sig, sig2)
        }

        var pubKey [PublicKeySize]byte
        copy(pubKey[:], pubKeyBytes)
        if !Verify(&pubKey, msg, sig2) {
            t.Errorf("signature failed to verify on line %d", lineNo)
        }
    }
}
1411 Godeps/_workspace/src/github.com/agl/ed25519/edwards25519/const.go generated vendored Normal file
File diff suppressed because it is too large
2127 Godeps/_workspace/src/github.com/agl/ed25519/edwards25519/edwards25519.go generated vendored Normal file
File diff suppressed because it is too large
344 Godeps/_workspace/src/github.com/agl/ed25519/extra25519/extra25519.go generated vendored Normal file
@@ -0,0 +1,344 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package extra25519

import (
    "crypto/sha512"

    "github.com/agl/ed25519/edwards25519"
)

// PrivateKeyToCurve25519 converts an ed25519 private key into a corresponding
// curve25519 private key such that the resulting curve25519 public key will
// equal the result from PublicKeyToCurve25519.
func PrivateKeyToCurve25519(curve25519Private *[32]byte, privateKey *[64]byte) {
    h := sha512.New()
    h.Write(privateKey[:32])
    digest := h.Sum(nil)

    digest[0] &= 248
    digest[31] &= 127
    digest[31] |= 64

    copy(curve25519Private[:], digest)
}

func edwardsToMontgomeryX(outX, y *edwards25519.FieldElement) {
    // We only need the x-coordinate of the curve25519 point, which I'll
    // call u. The isomorphism is u=(y+1)/(1-y), since y=Y/Z, this gives
    // u=(Y+Z)/(Z-Y). We know that Z=1, thus u=(Y+1)/(1-Y).
    var oneMinusY edwards25519.FieldElement
    edwards25519.FeOne(&oneMinusY)
    edwards25519.FeSub(&oneMinusY, &oneMinusY, y)
    edwards25519.FeInvert(&oneMinusY, &oneMinusY)

    edwards25519.FeOne(outX)
    edwards25519.FeAdd(outX, outX, y)

    edwards25519.FeMul(outX, outX, &oneMinusY)
}

// PublicKeyToCurve25519 converts an Ed25519 public key into the curve25519
// public key that would be generated from the same private key.
func PublicKeyToCurve25519(curve25519Public *[32]byte, publicKey *[32]byte) bool {
    var A edwards25519.ExtendedGroupElement
    if !A.FromBytes(publicKey) {
        return false
    }

    // A.Z = 1 as a postcondition of FromBytes.
    var x edwards25519.FieldElement
    edwardsToMontgomeryX(&x, &A.Y)
    edwards25519.FeToBytes(curve25519Public, &x)
    return true
}

// sqrtMinusA is sqrt(-486662)
var sqrtMinusA = edwards25519.FieldElement{
    12222970, 8312128, 11511410, -9067497, 15300785, 241793, -25456130, -14121551, 12187136, -3972024,
}

// sqrtMinusHalf is sqrt(-1/2)
var sqrtMinusHalf = edwards25519.FieldElement{
    -17256545, 3971863, 28865457, -1750208, 27359696, -16640980, 12573105, 1002827, -163343, 11073975,
}

// halfQMinus1Bytes is (2^255-20)/2 expressed in little endian form.
var halfQMinus1Bytes = [32]byte{
    0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f,
}

// feBytesLE returns one if a <= b and zero otherwise.
func feBytesLE(a, b *[32]byte) int32 {
    equalSoFar := int32(-1)
    greater := int32(0)

    for i := uint(31); i < 32; i-- {
        x := int32(a[i])
        y := int32(b[i])

        greater = (^equalSoFar & greater) | (equalSoFar & ((x - y) >> 31))
        equalSoFar = equalSoFar & (((x ^ y) - 1) >> 31)
    }

    return int32(^equalSoFar & 1 & greater)
}

// ScalarBaseMult computes a curve25519 public key from a private key and also
// a uniform representative for that public key. Note that this function will
// fail and return false for about half of private keys.
// See http://elligator.cr.yp.to/elligator-20130828.pdf.
func ScalarBaseMult(publicKey, representative, privateKey *[32]byte) bool {
    var maskedPrivateKey [32]byte
    copy(maskedPrivateKey[:], privateKey[:])

    maskedPrivateKey[0] &= 248
    maskedPrivateKey[31] &= 127
    maskedPrivateKey[31] |= 64

    var A edwards25519.ExtendedGroupElement
    edwards25519.GeScalarMultBase(&A, &maskedPrivateKey)

    var inv1 edwards25519.FieldElement
    edwards25519.FeSub(&inv1, &A.Z, &A.Y)
    edwards25519.FeMul(&inv1, &inv1, &A.X)
    edwards25519.FeInvert(&inv1, &inv1)

    var t0, u edwards25519.FieldElement
    edwards25519.FeMul(&u, &inv1, &A.X)
    edwards25519.FeAdd(&t0, &A.Y, &A.Z)
    edwards25519.FeMul(&u, &u, &t0)

    var v edwards25519.FieldElement
    edwards25519.FeMul(&v, &t0, &inv1)
    edwards25519.FeMul(&v, &v, &A.Z)
    edwards25519.FeMul(&v, &v, &sqrtMinusA)

    var b edwards25519.FieldElement
    edwards25519.FeAdd(&b, &u, &edwards25519.A)

    var c, b3, b8 edwards25519.FieldElement
    edwards25519.FeSquare(&b3, &b)   // 2
    edwards25519.FeMul(&b3, &b3, &b) // 3
    edwards25519.FeSquare(&c, &b3)   // 6
    edwards25519.FeMul(&c, &c, &b)   // 7
    edwards25519.FeMul(&b8, &c, &b)  // 8
    edwards25519.FeMul(&c, &c, &u)
    q58(&c, &c)

    var chi edwards25519.FieldElement
    edwards25519.FeSquare(&chi, &c)
    edwards25519.FeSquare(&chi, &chi)

    edwards25519.FeSquare(&t0, &u)
    edwards25519.FeMul(&chi, &chi, &t0)

    edwards25519.FeSquare(&t0, &b)   // 2
    edwards25519.FeMul(&t0, &t0, &b) // 3
    edwards25519.FeSquare(&t0, &t0)  // 6
    edwards25519.FeMul(&t0, &t0, &b) // 7
    edwards25519.FeSquare(&t0, &t0)  // 14
    edwards25519.FeMul(&chi, &chi, &t0)
    edwards25519.FeNeg(&chi, &chi)

    var chiBytes [32]byte
    edwards25519.FeToBytes(&chiBytes, &chi)
    // chi[1] is either 0 or 0xff
    if chiBytes[1] == 0xff {
        return false
    }

    // Calculate r1 = sqrt(-u/(2*(u+A)))
    var r1 edwards25519.FieldElement
    edwards25519.FeMul(&r1, &c, &u)
    edwards25519.FeMul(&r1, &r1, &b3)
    edwards25519.FeMul(&r1, &r1, &sqrtMinusHalf)

    var maybeSqrtM1 edwards25519.FieldElement
    edwards25519.FeSquare(&t0, &r1)
    edwards25519.FeMul(&t0, &t0, &b)
    edwards25519.FeAdd(&t0, &t0, &t0)
    edwards25519.FeAdd(&t0, &t0, &u)

    edwards25519.FeOne(&maybeSqrtM1)
    edwards25519.FeCMove(&maybeSqrtM1, &edwards25519.SqrtM1, edwards25519.FeIsNonZero(&t0))
    edwards25519.FeMul(&r1, &r1, &maybeSqrtM1)

    // Calculate r = sqrt(-(u+A)/(2u))
    var r edwards25519.FieldElement
    edwards25519.FeSquare(&t0, &c)   // 2
    edwards25519.FeMul(&t0, &t0, &c) // 3
    edwards25519.FeSquare(&t0, &t0)  // 6
    edwards25519.FeMul(&r, &t0, &c)  // 7

    edwards25519.FeSquare(&t0, &u)   // 2
    edwards25519.FeMul(&t0, &t0, &u) // 3
    edwards25519.FeMul(&r, &r, &t0)

    edwards25519.FeSquare(&t0, &b8)   // 16
    edwards25519.FeMul(&t0, &t0, &b8) // 24
    edwards25519.FeMul(&t0, &t0, &b)  // 25
    edwards25519.FeMul(&r, &r, &t0)
    edwards25519.FeMul(&r, &r, &sqrtMinusHalf)

    edwards25519.FeSquare(&t0, &r)
    edwards25519.FeMul(&t0, &t0, &u)
    edwards25519.FeAdd(&t0, &t0, &t0)
    edwards25519.FeAdd(&t0, &t0, &b)
    edwards25519.FeOne(&maybeSqrtM1)
    edwards25519.FeCMove(&maybeSqrtM1, &edwards25519.SqrtM1, edwards25519.FeIsNonZero(&t0))
    edwards25519.FeMul(&r, &r, &maybeSqrtM1)

    var vBytes [32]byte
    edwards25519.FeToBytes(&vBytes, &v)
    vInSquareRootImage := feBytesLE(&vBytes, &halfQMinus1Bytes)
    edwards25519.FeCMove(&r, &r1, vInSquareRootImage)

    edwards25519.FeToBytes(publicKey, &u)
    edwards25519.FeToBytes(representative, &r)
    return true
}

// q58 calculates out = z^((p-5)/8).
func q58(out, z *edwards25519.FieldElement) {
    var t1, t2, t3 edwards25519.FieldElement
    var i int

    edwards25519.FeSquare(&t1, z)     // 2^1
    edwards25519.FeMul(&t1, &t1, z)   // 2^1 + 2^0
    edwards25519.FeSquare(&t1, &t1)   // 2^2 + 2^1
    edwards25519.FeSquare(&t2, &t1)   // 2^3 + 2^2
    edwards25519.FeSquare(&t2, &t2)   // 2^4 + 2^3
    edwards25519.FeMul(&t2, &t2, &t1) // 4,3,2,1
    edwards25519.FeMul(&t1, &t2, z)   // 4..0
    edwards25519.FeSquare(&t2, &t1)   // 5..1
    for i = 1; i < 5; i++ { // 9,8,7,6,5
        edwards25519.FeSquare(&t2, &t2)
    }
    edwards25519.FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0
    edwards25519.FeSquare(&t2, &t1)   // 10..1
    for i = 1; i < 10; i++ { // 19..10
        edwards25519.FeSquare(&t2, &t2)
    }
    edwards25519.FeMul(&t2, &t2, &t1) // 19..0
    edwards25519.FeSquare(&t3, &t2)   // 20..1
    for i = 1; i < 20; i++ { // 39..20
        edwards25519.FeSquare(&t3, &t3)
    }
    edwards25519.FeMul(&t2, &t3, &t2) // 39..0
    edwards25519.FeSquare(&t2, &t2)   // 40..1
    for i = 1; i < 10; i++ { // 49..10
        edwards25519.FeSquare(&t2, &t2)
    }
    edwards25519.FeMul(&t1, &t2, &t1) // 49..0
    edwards25519.FeSquare(&t2, &t1)   // 50..1
    for i = 1; i < 50; i++ { // 99..50
        edwards25519.FeSquare(&t2, &t2)
    }
    edwards25519.FeMul(&t2, &t2, &t1) // 99..0
    edwards25519.FeSquare(&t3, &t2)   // 100..1
    for i = 1; i < 100; i++ { // 199..100
        edwards25519.FeSquare(&t3, &t3)
    }
    edwards25519.FeMul(&t2, &t3, &t2) // 199..0
    edwards25519.FeSquare(&t2, &t2)   // 200..1
    for i = 1; i < 50; i++ { // 249..50
        edwards25519.FeSquare(&t2, &t2)
    }
    edwards25519.FeMul(&t1, &t2, &t1) // 249..0
    edwards25519.FeSquare(&t1, &t1)   // 250..1
    edwards25519.FeSquare(&t1, &t1)   // 251..2
    edwards25519.FeMul(out, &t1, z)   // 251..2,0
}

// chi calculates out = z^((p-1)/2). The result is either 1, 0, or -1 depending
// on whether z is a non-zero square, zero, or a non-square.
func chi(out, z *edwards25519.FieldElement) {
    var t0, t1, t2, t3 edwards25519.FieldElement
    var i int

    edwards25519.FeSquare(&t0, z)     // 2^1
    edwards25519.FeMul(&t1, &t0, z)   // 2^1 + 2^0
    edwards25519.FeSquare(&t0, &t1)   // 2^2 + 2^1
    edwards25519.FeSquare(&t2, &t0)   // 2^3 + 2^2
    edwards25519.FeSquare(&t2, &t2)   // 4,3
    edwards25519.FeMul(&t2, &t2, &t0) // 4,3,2,1
    edwards25519.FeMul(&t1, &t2, z)   // 4..0
    edwards25519.FeSquare(&t2, &t1)   // 5..1
    for i = 1; i < 5; i++ { // 9,8,7,6,5
        edwards25519.FeSquare(&t2, &t2)
    }
    edwards25519.FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0
    edwards25519.FeSquare(&t2, &t1)   // 10..1
    for i = 1; i < 10; i++ { // 19..10
        edwards25519.FeSquare(&t2, &t2)
    }
    edwards25519.FeMul(&t2, &t2, &t1) // 19..0
    edwards25519.FeSquare(&t3, &t2)   // 20..1
    for i = 1; i < 20; i++ { // 39..20
        edwards25519.FeSquare(&t3, &t3)
    }
    edwards25519.FeMul(&t2, &t3, &t2) // 39..0
    edwards25519.FeSquare(&t2, &t2)   // 40..1
    for i = 1; i < 10; i++ { // 49..10
        edwards25519.FeSquare(&t2, &t2)
    }
    edwards25519.FeMul(&t1, &t2, &t1) // 49..0
    edwards25519.FeSquare(&t2, &t1)   // 50..1
    for i = 1; i < 50; i++ { // 99..50
        edwards25519.FeSquare(&t2, &t2)
    }
    edwards25519.FeMul(&t2, &t2, &t1) // 99..0
    edwards25519.FeSquare(&t3, &t2)   // 100..1
    for i = 1; i < 100; i++ { // 199..100
        edwards25519.FeSquare(&t3, &t3)
    }
    edwards25519.FeMul(&t2, &t3, &t2) // 199..0
    edwards25519.FeSquare(&t2, &t2)   // 200..1
    for i = 1; i < 50; i++ { // 249..50
        edwards25519.FeSquare(&t2, &t2)
    }
    edwards25519.FeMul(&t1, &t2, &t1) // 249..0
    edwards25519.FeSquare(&t1, &t1)   // 250..1
    for i = 1; i < 4; i++ { // 253..4
        edwards25519.FeSquare(&t1, &t1)
    }
    edwards25519.FeMul(out, &t1, &t0) // 253..4,2,1
}

// RepresentativeToPublicKey converts a uniform representative value for a
// curve25519 public key, as produced by ScalarBaseMult, to a curve25519 public
// key.
func RepresentativeToPublicKey(publicKey, representative *[32]byte) {
    var rr2, v, e edwards25519.FieldElement
    edwards25519.FeFromBytes(&rr2, representative)

    edwards25519.FeSquare2(&rr2, &rr2)
    rr2[0]++
    edwards25519.FeInvert(&rr2, &rr2)
    edwards25519.FeMul(&v, &edwards25519.A, &rr2)
    edwards25519.FeNeg(&v, &v)

    var v2, v3 edwards25519.FieldElement
    edwards25519.FeSquare(&v2, &v)
    edwards25519.FeMul(&v3, &v, &v2)
    edwards25519.FeAdd(&e, &v3, &v)
    edwards25519.FeMul(&v2, &v2, &edwards25519.A)
    edwards25519.FeAdd(&e, &v2, &e)
    chi(&e, &e)
    var eBytes [32]byte
    edwards25519.FeToBytes(&eBytes, &e)
    // eBytes[1] is either 0 (for e = 1) or 0xff (for e = -1)
    eIsMinus1 := int32(eBytes[1]) & 1
    var negV edwards25519.FieldElement
    edwards25519.FeNeg(&negV, &v)
    edwards25519.FeCMove(&v, &negV, eIsMinus1)

    edwards25519.FeZero(&v2)
    edwards25519.FeCMove(&v2, &edwards25519.A, eIsMinus1)
    edwards25519.FeSub(&v, &v, &v2)

    edwards25519.FeToBytes(publicKey, &v)
}
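A short sketch of deriving the curve25519 key pair that corresponds to an
Ed25519 key pair, using the two conversions defined above (the same round
trip is exercised by TestCurve25519Conversion in the test file that
follows). It assumes imports of crypto/rand, errors, github.com/agl/ed25519,
and this extra25519 package.

func toCurve25519() error {
    pub, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        return err
    }

    var curvePriv, curvePub [32]byte
    extra25519.PrivateKeyToCurve25519(&curvePriv, priv)
    if !extra25519.PublicKeyToCurve25519(&curvePub, pub) {
        return errors.New("public key is not a valid curve point")
    }
    // curve25519.ScalarBaseMult on curvePriv reproduces curvePub, which is
    // the property PrivateKeyToCurve25519 guarantees.
    return nil
}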
78 Godeps/_workspace/src/github.com/agl/ed25519/extra25519/extra25519_test.go generated vendored Normal file
@@ -0,0 +1,78 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package extra25519

import (
    "bytes"
    "crypto/rand"
    "testing"

    "code.google.com/p/go.crypto/curve25519"
    "github.com/agl/ed25519"
)

func TestCurve25519Conversion(t *testing.T) {
    public, private, _ := ed25519.GenerateKey(rand.Reader)

    var curve25519Public, curve25519Public2, curve25519Private [32]byte
    PrivateKeyToCurve25519(&curve25519Private, private)
    curve25519.ScalarBaseMult(&curve25519Public, &curve25519Private)

    if !PublicKeyToCurve25519(&curve25519Public2, public) {
        t.Fatalf("PublicKeyToCurve25519 failed")
    }

    if !bytes.Equal(curve25519Public[:], curve25519Public2[:]) {
        t.Errorf("Values didn't match: curve25519 produced %x, conversion produced %x", curve25519Public[:], curve25519Public2[:])
    }
}

func TestElligator(t *testing.T) {
    var publicKey, publicKey2, publicKey3, representative, privateKey [32]byte

    for i := 0; i < 1000; i++ {
        rand.Reader.Read(privateKey[:])

        if !ScalarBaseMult(&publicKey, &representative, &privateKey) {
            continue
        }
        RepresentativeToPublicKey(&publicKey2, &representative)
        if !bytes.Equal(publicKey[:], publicKey2[:]) {
            t.Fatal("The resulting public key doesn't match the initial one.")
        }

        curve25519.ScalarBaseMult(&publicKey3, &privateKey)
        if !bytes.Equal(publicKey[:], publicKey3[:]) {
            t.Fatal("The public key doesn't match the value that curve25519 produced.")
        }
    }
}

func BenchmarkKeyGeneration(b *testing.B) {
    var publicKey, representative, privateKey [32]byte

    // Find a private key that results in a point that's in the image of the map.
    for {
        rand.Reader.Read(privateKey[:])
        if ScalarBaseMult(&publicKey, &representative, &privateKey) {
            break
        }
    }

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        ScalarBaseMult(&publicKey, &representative, &privateKey)
    }
}

func BenchmarkMap(b *testing.B) {
    var publicKey, representative [32]byte
    rand.Reader.Read(representative[:])

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        RepresentativeToPublicKey(&publicKey, &representative)
    }
}
|
BIN
Godeps/_workspace/src/github.com/agl/ed25519/testdata/sign.input.gz
generated
vendored
Normal file
Binary file not shown.
@@ -0,0 +1 @@
/db/
@@ -0,0 +1,30 @@
language: go
go:
  - 1.4
  - tip

sudo: false

before_install:
  - go get golang.org/x/tools/cmd/cover

script:
  - go test -race -cover ./...

notifications:
  irc:
    channels:
      - "chat.freenode.net#flynn"
    use_notice: true
    skip_join: true
    on_success: change
    on_failure: always
    template:
      - "%{repository}/%{branch} - %{commit}: %{message} %{build_url}"
  email:
    on_success: never
    on_failure: always

matrix:
  allow_failures:
    - go: tip
60
Godeps/_workspace/src/github.com/endophage/go-tuf/Godeps/Godeps.json
generated
vendored
Normal file
@@ -0,0 +1,60 @@
{
	"ImportPath": "github.com/flynn/go-tuf",
	"GoVersion": "go1.4.1",
	"Packages": [
		"./..."
	],
	"Deps": [
		{
			"ImportPath": "github.com/agl/ed25519",
			"Rev": "d2b94fd789ea21d12fac1a4443dd3a3f79cda72c"
		},
		{
			"ImportPath": "github.com/boltdb/bolt",
			"Comment": "v1.0-19-g00c6357",
			"Rev": "00c635718fa0592764453e60194451889876eea0"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/term",
			"Comment": "v1.4.1-775-g70fbd45",
			"Rev": "70fbd45a5c88f6f39a07b04f81a07721bf5f3eed"
		},
		{
			"ImportPath": "github.com/dustin/go-humanize",
			"Rev": "145fabdb1ab757076a70a886d092a3af27f66f4c"
		},
		{
			"ImportPath": "github.com/flynn/go-docopt",
			"Comment": "0.6.1-rc2-26-gf6dd2eb",
			"Rev": "f6dd2ebbb31e9721c860cf1faf5c944aa73e3844"
		},
		{
			"ImportPath": "github.com/tent/canonical-json-go",
			"Rev": "96e4ba3a7613a1216cbd1badca4efe382adea337"
		},
		{
			"ImportPath": "golang.org/x/crypto/nacl/secretbox",
			"Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
		},
		{
			"ImportPath": "golang.org/x/crypto/pbkdf2",
			"Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
		},
		{
			"ImportPath": "golang.org/x/crypto/poly1305",
			"Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
		},
		{
			"ImportPath": "golang.org/x/crypto/salsa20/salsa",
			"Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
		},
		{
			"ImportPath": "golang.org/x/crypto/scrypt",
			"Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
		},
		{
			"ImportPath": "gopkg.in/check.v1",
			"Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673"
		}
	]
}
@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
@@ -0,0 +1,29 @@
Flynn is a trademark of Prime Directive, Inc.

Copyright (c) 2014-2015 Prime Directive, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Prime Directive, Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,2 @@
Jonathan Rudenberg <jonathan@flynn.io> (github: titanous)
Lewis Marshall <lewis@flynn.io> (github: lmars)
@@ -0,0 +1,511 @@
# go-tuf [Build Status](https://travis-ci.org/flynn/go-tuf)

This is a Go implementation of [The Update Framework (TUF)](http://theupdateframework.com/),
a framework for securing software update systems.

## Directory layout

A TUF repository has the following directory layout:

```
.
├── keys
├── repository
│   └── targets
└── staged
    └── targets
```

The directories contain the following files:

* `keys/` - signing keys (optionally encrypted) with filename pattern `ROLE.json`
* `repository/` - signed manifests
* `repository/targets/` - hashed target files
* `staged/` - either signed, unsigned or partially signed manifests
* `staged/targets/` - unhashed target files

## CLI

`go-tuf` provides a CLI for managing a local TUF repository.

### Install

```
go get github.com/flynn/go-tuf/cmd/tuf
```

### Commands

#### `tuf init [--consistent-snapshot=false]`

Initializes a new repository.

This is only required if the repository should not generate consistent
snapshots (i.e. by passing `--consistent-snapshot=false`). If consistent
snapshots should be generated, the repository will be implicitly
initialized to do so when generating keys.

#### `tuf gen-key <role>`

Prompts the user for an encryption passphrase (unless the
`--insecure-plaintext` flag is set), then generates a new signing key and
writes it to the relevant key file in the `keys` directory. It also stages
the addition of the new key to the `root` manifest.

#### `tuf add [<path>...]`

Hashes files in the `staged/targets` directory at the given path(s), then
updates and stages the `targets` manifest. Specifying no paths hashes all
files in the `staged/targets` directory.

#### `tuf remove [<path>...]`

Stages the removal of files with the given path(s) from the `targets` manifest
(they get removed from the filesystem when the change is committed). Specifying
no paths removes all files from the `targets` manifest.

#### `tuf snapshot [--compression=<format>]`

Expects a staged, fully signed `targets` manifest and stages an appropriate
`snapshot` manifest. It optionally compresses the staged `targets` manifest.

#### `tuf timestamp`

Stages an appropriate `timestamp` manifest. If a `snapshot` manifest is staged,
it must be fully signed.

#### `tuf sign ROLE`

Signs the given role's staged manifest with all keys present in the `keys`
directory for that role.

#### `tuf commit`

Verifies that all staged changes contain the correct information and are signed
to the correct threshold, then moves the staged files into the `repository`
directory. It also removes any target files which are not in the `targets`
manifest.

#### `tuf regenerate [--consistent-snapshot=false]`

Recreates the `targets` manifest based on the files in `repository/targets`.

#### `tuf clean`

Removes all staged manifests and targets.

#### `tuf root-keys`

Outputs a JSON serialized array of root keys to STDOUT. The resulting JSON
should be distributed to clients for performing initial updates.
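
As an illustration (a hedged sketch, not from the original README; `rootKeysJSON`
and `tufClient` are assumed to be defined by the embedding program, and the
threshold of `1` is an assumption), a client program could consume that JSON
with the client package documented below:

```
var rootKeys []*data.Key
if err := json.Unmarshal(rootKeysJSON, &rootKeys); err != nil {
	log.Fatal(err)
}
// Bootstrap trust from the distributed root keys; the second argument is
// the signature threshold required for root.json (assumed to be 1 here).
if err := tufClient.Init(rootKeys, 1); err != nil {
	log.Fatal(err)
}
```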

For a list of supported commands, run `tuf help` from the command line.

### Examples

The following are example workflows for managing a TUF repository with the CLI.

The `tree` commands do not need to be run, but their output serves as an
illustration of what files should exist after performing certain commands.

Although only two machines are referenced (i.e. the "root" and "repo" boxes),
the workflows can be trivially extended to many signing machines by copying
staged changes and signing on each machine in turn before finally committing.

Some key IDs are truncated for illustrative purposes.

#### Create signed root manifest

Generate a root key on the root box:

```
$ tuf gen-key root
Enter root keys passphrase:
Repeat root keys passphrase:
Generated root key with ID 184b133f

$ tree .
.
├── keys
│   └── root.json
├── repository
└── staged
    ├── root.json
    └── targets
```

Copy `staged/root.json` from the root box to the repo box and generate targets,
snapshot and timestamp keys:

```
$ tree .
.
├── keys
├── repository
└── staged
    ├── root.json
    └── targets

$ tuf gen-key targets
Enter targets keys passphrase:
Repeat targets keys passphrase:
Generated targets key with ID 8cf4810c

$ tuf gen-key snapshot
Enter snapshot keys passphrase:
Repeat snapshot keys passphrase:
Generated snapshot key with ID 3e070e53

$ tuf gen-key timestamp
Enter timestamp keys passphrase:
Repeat timestamp keys passphrase:
Generated timestamp key with ID a3768063

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
└── staged
    ├── root.json
    └── targets
```

Copy `staged/root.json` from the repo box back to the root box and sign it:

```
$ tree .
.
├── keys
│   └── root.json
├── repository
└── staged
    ├── root.json
    └── targets

$ tuf sign root.json
Enter root keys passphrase:
```

The staged `root.json` can now be copied back to the repo box ready to be
committed alongside other manifests.

#### Add a target file

Assuming a staged, signed `root` manifest and the file to add exists at
`staged/targets/foo/bar/baz.txt`:

```
$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
└── staged
    ├── root.json
    └── targets
        └── foo
            └── bar
                └── baz.txt

$ tuf add foo/bar/baz.txt
Enter targets keys passphrase:

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
└── staged
    ├── root.json
    ├── targets
    │   └── foo
    │       └── bar
    │           └── baz.txt
    └── targets.json

$ tuf snapshot
Enter snapshot keys passphrase:

$ tuf timestamp
Enter timestamp keys passphrase:

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
└── staged
    ├── root.json
    ├── snapshot.json
    ├── targets
    │   └── foo
    │       └── bar
    │           └── baz.txt
    ├── targets.json
    └── timestamp.json

$ tuf commit

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
```

#### Remove a target file

Assuming the file to remove is at `repository/targets/foo/bar/baz.txt`:

```
$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged

$ tuf remove foo/bar/baz.txt
Enter targets keys passphrase:

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
    └── targets.json

$ tuf snapshot
Enter snapshot keys passphrase:

$ tuf timestamp
Enter timestamp keys passphrase:

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
    ├── snapshot.json
    ├── targets.json
    └── timestamp.json

$ tuf commit

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
└── staged
```

#### Regenerate manifests based on targets tree

```
$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged

$ tuf regenerate
Enter targets keys passphrase:

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
    └── targets.json

$ tuf snapshot
Enter snapshot keys passphrase:

$ tuf timestamp
Enter timestamp keys passphrase:

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
    ├── snapshot.json
    ├── targets.json
    └── timestamp.json

$ tuf commit

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
```

#### Update timestamp.json

```
$ tree .
.
├── keys
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged

$ tuf timestamp
Enter timestamp keys passphrase:

$ tree .
.
├── keys
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
    └── timestamp.json

$ tuf commit

$ tree .
.
├── keys
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
```

#### Modify key thresholds

TODO

## Client

For the client package, see https://godoc.org/github.com/flynn/go-tuf/client.

For the client CLI, see https://github.com/flynn/go-tuf/tree/master/cmd/tuf-client.
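
As a rough usage sketch (a hedged illustration, not an official example from
this repository; the `RemoteStore` implementation, the `root-keys.json` path,
the threshold of `1` and the `/foo.txt` target name are all placeholder
assumptions):

```
package main

import (
	"encoding/json"
	"log"
	"os"

	"github.com/flynn/go-tuf/client"
	"github.com/flynn/go-tuf/data"
)

// fileDest adapts an *os.File to client.Destination (io.Writer plus Delete).
type fileDest struct{ *os.File }

func (d fileDest) Delete() error {
	d.Close()
	return os.Remove(d.Name())
}

func main() {
	// rootKeys is the JSON array produced by `tuf root-keys`.
	var rootKeys []*data.Key
	f, err := os.Open("root-keys.json")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := json.NewDecoder(f).Decode(&rootKeys); err != nil {
		log.Fatal(err)
	}

	// remote must be a client.RemoteStore implementation of your choosing;
	// it is left nil here purely to keep the sketch short.
	var remote client.RemoteStore

	c := client.NewClient(client.MemoryLocalStore(), remote)
	if err := c.Init(rootKeys, 1); err != nil {
		log.Fatal(err)
	}
	if _, err := c.Update(); err != nil && !client.IsLatestSnapshot(err) {
		log.Fatal(err)
	}

	out, err := os.Create("foo.txt")
	if err != nil {
		log.Fatal(err)
	}
	if err := c.Download("/foo.txt", fileDest{out}); err != nil {
		log.Fatal(err)
	}
}
```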
599
Godeps/_workspace/src/github.com/endophage/go-tuf/client/client.go
generated
vendored
Normal file
@@ -0,0 +1,599 @@
package client

import (
	"bytes"
	"encoding/json"
	"errors"
	"io"
	"io/ioutil"

	"github.com/flynn/go-tuf/data"
	"github.com/flynn/go-tuf/keys"
	"github.com/flynn/go-tuf/signed"
	"github.com/flynn/go-tuf/util"
)

// LocalStore is local storage for downloaded top-level metadata.
type LocalStore interface {
	// GetMeta returns top-level metadata from local storage. The keys are
	// in the form `ROLE.json`, with ROLE being a valid top-level role.
	GetMeta() (map[string]json.RawMessage, error)

	// SetMeta persists the given top-level metadata in local storage, the
	// name taking the same format as the keys returned by GetMeta.
	SetMeta(name string, meta json.RawMessage) error
}

// RemoteStore downloads top-level metadata and target files from a remote
// repository.
type RemoteStore interface {
	// GetMeta downloads the given metadata from remote storage.
	//
	// `name` is the filename of the metadata (e.g. "root.json")
	//
	// `err` is ErrNotFound if the given file does not exist.
	//
	// `size` is the size of the stream, -1 indicating an unknown length.
	GetMeta(name string) (stream io.ReadCloser, size int64, err error)

	// GetTarget downloads the given target file from remote storage.
	//
	// `path` is the path of the file relative to the root of the remote
	// targets directory (e.g. "/path/to/file.txt").
	//
	// `err` is ErrNotFound if the given file does not exist.
	//
	// `size` is the size of the stream, -1 indicating an unknown length.
	GetTarget(path string) (stream io.ReadCloser, size int64, err error)
}
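
// Added commentary (not part of the original file): a LocalStore can be as
// simple as a map. The MemoryLocalStore used by the tests below is roughly
// equivalent to the following sketch:
//
//	type memoryStore map[string]json.RawMessage
//
//	func (m memoryStore) GetMeta() (map[string]json.RawMessage, error) {
//		return m, nil
//	}
//
//	func (m memoryStore) SetMeta(name string, meta json.RawMessage) error {
//		m[name] = meta
//		return nil
//	}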

// Client provides methods for fetching updates from a remote repository and
// downloading remote target files.
type Client struct {
	local  LocalStore
	remote RemoteStore

	// The following four fields represent the versions of metadata either
	// from local storage or from recently downloaded metadata
	rootVer      int
	targetsVer   int
	snapshotVer  int
	timestampVer int

	// targets is the list of available targets, either from local storage
	// or from recently downloaded targets metadata
	targets data.Files

	// localMeta is the raw metadata from local storage and is used to
	// check whether remote metadata is present locally
	localMeta map[string]json.RawMessage

	// db is a key DB used for verifying metadata
	db *keys.DB

	// consistentSnapshot indicates whether the remote storage is using
	// consistent snapshots (as specified in root.json)
	consistentSnapshot bool
}

func NewClient(local LocalStore, remote RemoteStore) *Client {
	return &Client{
		local:  local,
		remote: remote,
	}
}

// Init initializes a local repository.
//
// The latest root.json is fetched from remote storage, verified using rootKeys
// and threshold, and then saved in local storage. It is expected that rootKeys
// were securely distributed with the software being updated.
func (c *Client) Init(rootKeys []*data.Key, threshold int) error {
	if len(rootKeys) < threshold {
		return ErrInsufficientKeys
	}
	rootJSON, err := c.downloadMetaUnsafe("root.json")
	if err != nil {
		return err
	}

	c.db = keys.NewDB()
	rootKeyIDs := make([]string, len(rootKeys))
	for i, key := range rootKeys {
		id := key.ID()
		rootKeyIDs[i] = id
		if err := c.db.AddKey(id, key); err != nil {
			return err
		}
	}
	role := &data.Role{Threshold: threshold, KeyIDs: rootKeyIDs}
	if err := c.db.AddRole("root", role); err != nil {
		return err
	}

	if err := c.decodeRoot(rootJSON); err != nil {
		return err
	}

	return c.local.SetMeta("root.json", rootJSON)
}

// Update downloads and verifies remote metadata and returns updated targets.
//
// It performs the update part of "The client application" workflow from
// section 5.1 of the TUF spec:
//
// https://github.com/theupdateframework/tuf/blob/v0.9.9/docs/tuf-spec.txt#L714
func (c *Client) Update() (data.Files, error) {
	return c.update(false)
}

func (c *Client) update(latestRoot bool) (data.Files, error) {
	// Always start the update using local metadata
	if err := c.getLocalMeta(); err != nil {
		if _, ok := err.(signed.ErrExpired); ok {
			if !latestRoot {
				return c.updateWithLatestRoot(nil)
			}
			// this should not be reached: if the latest root has
			// been downloaded and is expired, updateWithLatestRoot
			// should not have continued the update
			return nil, err
		}
		if latestRoot && err == signed.ErrRoleThreshold {
			// Root was updated with new keys, so our local metadata is no
			// longer valid. Read only the versions from the local metadata
			// and re-download everything.
			if err := c.getRootAndLocalVersionsUnsafe(); err != nil {
				return nil, err
			}
		} else {
			return nil, err
		}
	}

	// Get timestamp.json, extract snapshot.json file meta and save the
	// timestamp.json locally
	timestampJSON, err := c.downloadMetaUnsafe("timestamp.json")
	if err != nil {
		return nil, err
	}
	snapshotMeta, err := c.decodeTimestamp(timestampJSON)
	if err != nil {
		// ErrRoleThreshold could indicate timestamp keys have been
		// revoked, so retry with the latest root.json
		if isDecodeFailedWithErr(err, signed.ErrRoleThreshold) && !latestRoot {
			return c.updateWithLatestRoot(nil)
		}
		return nil, err
	}
	if err := c.local.SetMeta("timestamp.json", timestampJSON); err != nil {
		return nil, err
	}

	// Return ErrLatestSnapshot if we already have the latest snapshot.json
	if c.hasMeta("snapshot.json", snapshotMeta) {
		return nil, ErrLatestSnapshot{c.snapshotVer}
	}

	// Get snapshot.json, then extract root.json and targets.json file meta.
	//
	// The snapshot.json is only saved locally after checking root.json and
	// targets.json so that it will be re-downloaded on subsequent updates
	// if this update fails.
	snapshotJSON, err := c.downloadMeta("snapshot.json", snapshotMeta)
	if err != nil {
		return nil, err
	}
	rootMeta, targetsMeta, err := c.decodeSnapshot(snapshotJSON)
	if err != nil {
		// ErrRoleThreshold could indicate snapshot keys have been
		// revoked, so retry with the latest root.json
		if isDecodeFailedWithErr(err, signed.ErrRoleThreshold) && !latestRoot {
			return c.updateWithLatestRoot(nil)
		}
		return nil, err
	}

	// If we don't have the root.json, download it, save it in local
	// storage and restart the update
	if !c.hasMeta("root.json", rootMeta) {
		return c.updateWithLatestRoot(&rootMeta)
	}

	// If we don't have the targets.json, download it, determine updated
	// targets and save targets.json in local storage
	var updatedTargets data.Files
	if !c.hasMeta("targets.json", targetsMeta) {
		targetsJSON, err := c.downloadMeta("targets.json", targetsMeta)
		if err != nil {
			return nil, err
		}
		updatedTargets, err = c.decodeTargets(targetsJSON)
		if err != nil {
			return nil, err
		}
		if err := c.local.SetMeta("targets.json", targetsJSON); err != nil {
			return nil, err
		}
	}

	// Save the snapshot.json now that it has been processed successfully
	if err := c.local.SetMeta("snapshot.json", snapshotJSON); err != nil {
		return nil, err
	}

	return updatedTargets, nil
}

func (c *Client) updateWithLatestRoot(m *data.FileMeta) (data.Files, error) {
	var rootJSON json.RawMessage
	var err error
	if m == nil {
		rootJSON, err = c.downloadMetaUnsafe("root.json")
	} else {
		rootJSON, err = c.downloadMeta("root.json", *m)
	}
	if err != nil {
		return nil, err
	}
	if err := c.decodeRoot(rootJSON); err != nil {
		return nil, err
	}
	if err := c.local.SetMeta("root.json", rootJSON); err != nil {
		return nil, err
	}
	return c.update(true)
}

// getLocalMeta decodes and verifies metadata from local storage.
//
// The verification of local files is purely for consistency; if an attacker
// has compromised the local storage, there is no guarantee it can be trusted.
func (c *Client) getLocalMeta() error {
	meta, err := c.local.GetMeta()
	if err != nil {
		return err
	}

	if rootJSON, ok := meta["root.json"]; ok {
		// unmarshal root.json without verifying as we need the root
		// keys first
		s := &data.Signed{}
		if err := json.Unmarshal(rootJSON, s); err != nil {
			return err
		}
		root := &data.Root{}
		if err := json.Unmarshal(s.Signed, root); err != nil {
			return err
		}
		db := keys.NewDB()
		for id, k := range root.Keys {
			if err := db.AddKey(id, k); err != nil {
				return err
			}
		}
		for name, role := range root.Roles {
			if err := db.AddRole(name, role); err != nil {
				return err
			}
		}
		if err := signed.Verify(s, "root", 0, db); err != nil {
			return err
		}
		c.consistentSnapshot = root.ConsistentSnapshot
		c.db = db
	} else {
		return ErrNoRootKeys
	}

	if snapshotJSON, ok := meta["snapshot.json"]; ok {
		snapshot := &data.Snapshot{}
		if err := signed.UnmarshalTrusted(snapshotJSON, snapshot, "snapshot", c.db); err != nil {
			return err
		}
		c.snapshotVer = snapshot.Version
	}

	if targetsJSON, ok := meta["targets.json"]; ok {
		targets := &data.Targets{}
		if err := signed.UnmarshalTrusted(targetsJSON, targets, "targets", c.db); err != nil {
			return err
		}
		c.targetsVer = targets.Version
		c.targets = targets.Targets
	}

	if timestampJSON, ok := meta["timestamp.json"]; ok {
		timestamp := &data.Timestamp{}
		if err := signed.UnmarshalTrusted(timestampJSON, timestamp, "timestamp", c.db); err != nil {
			return err
		}
		c.timestampVer = timestamp.Version
	}

	c.localMeta = meta
	return nil
}

// maxMetaSize is the maximum number of bytes that will be downloaded when
// getting remote metadata without knowing its length.
const maxMetaSize = 50 * 1024

// downloadMetaUnsafe downloads top-level metadata from remote storage without
// verifying its length and hashes (used for example to download timestamp.json
// which has unknown size). It will download at most maxMetaSize bytes.
func (c *Client) downloadMetaUnsafe(name string) ([]byte, error) {
	r, size, err := c.remote.GetMeta(name)
	if err != nil {
		if IsNotFound(err) {
			return nil, ErrMissingRemoteMetadata{name}
		}
		return nil, ErrDownloadFailed{name, err}
	}
	defer r.Close()

	// return ErrMetaTooLarge if the reported size is greater than maxMetaSize
	if size > maxMetaSize {
		return nil, ErrMetaTooLarge{name, size}
	}

	// although the size has been checked above, use a LimitReader in case
	// the reported size is inaccurate, or size is -1 which indicates an
	// unknown length
	return ioutil.ReadAll(io.LimitReader(r, maxMetaSize))
}

// getRootAndLocalVersionsUnsafe decodes the versions stored in the local
// metadata without verifying signatures to protect against downgrade attacks
// when the root is replaced and contains new keys. It also sets the local meta
// cache to only contain the local root metadata.
func (c *Client) getRootAndLocalVersionsUnsafe() error {
	type versionData struct {
		Signed struct {
			Version int
		}
	}

	meta, err := c.local.GetMeta()
	if err != nil {
		return err
	}

	getVersion := func(name string) (int, error) {
		m, ok := meta[name]
		if !ok {
			return 0, nil
		}
		var data versionData
		if err := json.Unmarshal(m, &data); err != nil {
			return 0, err
		}
		return data.Signed.Version, nil
	}

	c.timestampVer, err = getVersion("timestamp.json")
	if err != nil {
		return err
	}
	c.snapshotVer, err = getVersion("snapshot.json")
	if err != nil {
		return err
	}
	c.targetsVer, err = getVersion("targets.json")
	if err != nil {
		return err
	}

	root, ok := meta["root.json"]
	if !ok {
		return errors.New("tuf: missing local root after downloading, this should not be possible")
	}
	c.localMeta = map[string]json.RawMessage{"root.json": root}

	return nil
}

// remoteGetFunc is the type of function the download method uses to download
// remote files
type remoteGetFunc func(string) (io.ReadCloser, int64, error)

// download downloads the given file from remote storage using the get function,
// adding hashes to the path if consistent snapshots are in use
func (c *Client) download(file string, get remoteGetFunc, hashes data.Hashes) (io.ReadCloser, int64, error) {
	if c.consistentSnapshot {
		// try each hashed path in turn, and either return the contents,
		// try the next one if a 404 is returned, or return an error
		for _, path := range util.HashedPaths(file, hashes) {
			r, size, err := get(path)
			if err != nil {
				if IsNotFound(err) {
					continue
				}
				return nil, 0, err
			}
			return r, size, nil
		}
		return nil, 0, ErrNotFound{file}
	} else {
		return get(file)
	}
}

// downloadMeta downloads top-level metadata from remote storage and verifies
// it using the given file metadata.
func (c *Client) downloadMeta(name string, m data.FileMeta) ([]byte, error) {
	r, size, err := c.download(name, c.remote.GetMeta, m.Hashes)
	if err != nil {
		if IsNotFound(err) {
			return nil, ErrMissingRemoteMetadata{name}
		}
		return nil, err
	}
	defer r.Close()

	// return ErrWrongSize if the reported size is known and incorrect
	if size >= 0 && size != m.Length {
		return nil, ErrWrongSize{name, size, m.Length}
	}

	// wrap the data in a LimitReader so we download at most m.Length bytes
	stream := io.LimitReader(r, m.Length)

	// read the data, simultaneously writing it to buf and generating metadata
	var buf bytes.Buffer
	meta, err := util.GenerateFileMeta(io.TeeReader(stream, &buf), m.HashAlgorithms()...)
	if err != nil {
		return nil, err
	}
	if err := util.FileMetaEqual(meta, m); err != nil {
		return nil, ErrDownloadFailed{name, err}
	}
	return buf.Bytes(), nil
}

// decodeRoot decodes and verifies root metadata.
func (c *Client) decodeRoot(b json.RawMessage) error {
	root := &data.Root{}
	if err := signed.Unmarshal(b, root, "root", c.rootVer, c.db); err != nil {
		return ErrDecodeFailed{"root.json", err}
	}
	c.rootVer = root.Version
	c.consistentSnapshot = root.ConsistentSnapshot
	return nil
}

// decodeSnapshot decodes and verifies snapshot metadata, and returns the new
// root and targets file meta.
func (c *Client) decodeSnapshot(b json.RawMessage) (data.FileMeta, data.FileMeta, error) {
	snapshot := &data.Snapshot{}
	if err := signed.Unmarshal(b, snapshot, "snapshot", c.snapshotVer, c.db); err != nil {
		return data.FileMeta{}, data.FileMeta{}, ErrDecodeFailed{"snapshot.json", err}
	}
	c.snapshotVer = snapshot.Version
	return snapshot.Meta["root.json"], snapshot.Meta["targets.json"], nil
}

// decodeTargets decodes and verifies targets metadata, sets c.targets and
// returns updated targets.
func (c *Client) decodeTargets(b json.RawMessage) (data.Files, error) {
	targets := &data.Targets{}
	if err := signed.Unmarshal(b, targets, "targets", c.targetsVer, c.db); err != nil {
		return nil, ErrDecodeFailed{"targets.json", err}
	}
	updatedTargets := make(data.Files)
	for path, meta := range targets.Targets {
		if local, ok := c.targets[path]; ok {
			if err := util.FileMetaEqual(local, meta); err == nil {
				continue
			}
		}
		updatedTargets[path] = meta
	}
	c.targetsVer = targets.Version
	c.targets = targets.Targets
	return updatedTargets, nil
}

// decodeTimestamp decodes and verifies timestamp metadata, and returns the
// new snapshot file meta.
func (c *Client) decodeTimestamp(b json.RawMessage) (data.FileMeta, error) {
	timestamp := &data.Timestamp{}
	if err := signed.Unmarshal(b, timestamp, "timestamp", c.timestampVer, c.db); err != nil {
		return data.FileMeta{}, ErrDecodeFailed{"timestamp.json", err}
	}
	c.timestampVer = timestamp.Version
	return timestamp.Meta["snapshot.json"], nil
}

// hasMeta checks whether local metadata has the given file meta
func (c *Client) hasMeta(name string, m data.FileMeta) bool {
	b, ok := c.localMeta[name]
	if !ok {
		return false
	}
	meta, err := util.GenerateFileMeta(bytes.NewReader(b), m.HashAlgorithms()...)
	if err != nil {
		return false
	}
	err = util.FileMetaEqual(meta, m)
	return err == nil
}

type Destination interface {
	io.Writer
	Delete() error
}

// Download downloads the given target file from remote storage into dest.
//
// dest will be deleted and an error returned in the following situations:
//
//   * The target does not exist in the local targets.json
//   * The target does not exist in remote storage
//   * Metadata cannot be generated for the downloaded data
//   * Generated metadata does not match local metadata for the given file
func (c *Client) Download(name string, dest Destination) (err error) {
	// delete dest if there is an error
	defer func() {
		if err != nil {
			dest.Delete()
		}
	}()

	// populate c.targets from local storage if not set
	if c.targets == nil {
		if err := c.getLocalMeta(); err != nil {
			return err
		}
	}

	// return ErrUnknownTarget if the file is not in the local targets.json
	normalizedName := util.NormalizeTarget(name)
	localMeta, ok := c.targets[normalizedName]
	if !ok {
		return ErrUnknownTarget{name}
	}

	// get the data from remote storage
	r, size, err := c.download(normalizedName, c.remote.GetTarget, localMeta.Hashes)
	if err != nil {
		return err
	}
	defer r.Close()

	// return ErrWrongSize if the reported size is known and incorrect
	if size >= 0 && size != localMeta.Length {
		return ErrWrongSize{name, size, localMeta.Length}
	}

	// wrap the data in a LimitReader so we download at most localMeta.Length bytes
	stream := io.LimitReader(r, localMeta.Length)

	// read the data, simultaneously writing it to dest and generating metadata
	actual, err := util.GenerateFileMeta(io.TeeReader(stream, dest), localMeta.HashAlgorithms()...)
	if err != nil {
		return ErrDownloadFailed{name, err}
	}

	// check the data has the correct length and hashes
	if err := util.FileMetaEqual(actual, localMeta); err != nil {
		if err == util.ErrWrongLength {
			return ErrWrongSize{name, actual.Length, localMeta.Length}
		}
		return ErrDownloadFailed{name, err}
	}

	return nil
}

// Targets returns the complete list of available targets.
func (c *Client) Targets() (data.Files, error) {
	// populate c.targets from local storage if not set
	if c.targets == nil {
		if err := c.getLocalMeta(); err != nil {
			return nil, err
		}
	}
	return c.targets, nil
}
836
Godeps/_workspace/src/github.com/endophage/go-tuf/client/client_test.go
generated
vendored
Normal file
@@ -0,0 +1,836 @@
package client

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/flynn/go-tuf"
	"github.com/flynn/go-tuf/data"
	"github.com/flynn/go-tuf/keys"
	"github.com/flynn/go-tuf/signed"
	"github.com/flynn/go-tuf/util"
	. "gopkg.in/check.v1"
)

// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }

type ClientSuite struct {
	store       tuf.LocalStore
	repo        *tuf.Repo
	local       LocalStore
	remote      *fakeRemoteStore
	expiredTime time.Time
	keyIDs      map[string]string
}

var _ = Suite(&ClientSuite{})

func newFakeRemoteStore() *fakeRemoteStore {
	return &fakeRemoteStore{
		meta:    make(map[string]*fakeFile),
		targets: make(map[string]*fakeFile),
	}
}

type fakeRemoteStore struct {
	meta    map[string]*fakeFile
	targets map[string]*fakeFile
}

func (f *fakeRemoteStore) GetMeta(name string) (io.ReadCloser, int64, error) {
	return f.get(name, f.meta)
}

func (f *fakeRemoteStore) GetTarget(path string) (io.ReadCloser, int64, error) {
	return f.get(path, f.targets)
}

func (f *fakeRemoteStore) get(name string, store map[string]*fakeFile) (io.ReadCloser, int64, error) {
	file, ok := store[name]
	if !ok {
		return nil, 0, ErrNotFound{name}
	}
	return file, file.size, nil
}

func newFakeFile(b []byte) *fakeFile {
	return &fakeFile{buf: bytes.NewReader(b), size: int64(len(b))}
}

type fakeFile struct {
	buf       *bytes.Reader
	bytesRead int
	size      int64
}

func (f *fakeFile) Read(p []byte) (int, error) {
	n, err := f.buf.Read(p)
	f.bytesRead += n
	return n, err
}

func (f *fakeFile) Close() error {
	f.buf.Seek(0, os.SEEK_SET)
	return nil
}

var targetFiles = map[string][]byte{
	"/foo.txt": []byte("foo"),
	"/bar.txt": []byte("bar"),
	"/baz.txt": []byte("baz"),
}

func (s *ClientSuite) SetUpTest(c *C) {
	s.store = tuf.MemoryStore(nil, targetFiles)

	// create a valid repo containing foo.txt
	var err error
	s.repo, err = tuf.NewRepo(s.store)
	c.Assert(err, IsNil)
	// don't use consistent snapshots to make testing easier (consistent
	// snapshots are tested explicitly elsewhere)
	c.Assert(s.repo.Init(false), IsNil)
	s.keyIDs = map[string]string{
		"root":      s.genKey(c, "root"),
		"targets":   s.genKey(c, "targets"),
		"snapshot":  s.genKey(c, "snapshot"),
		"timestamp": s.genKey(c, "timestamp"),
	}
	c.Assert(s.repo.AddTarget("foo.txt", nil), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)

	// create a remote store containing valid repo files
	s.remote = newFakeRemoteStore()
	s.syncRemote(c)
	for path, data := range targetFiles {
		s.remote.targets[path] = newFakeFile(data)
	}

	s.expiredTime = time.Now().Add(time.Hour)
}

func (s *ClientSuite) genKey(c *C, role string) string {
	id, err := s.repo.GenKey(role)
	c.Assert(err, IsNil)
	return id
}

func (s *ClientSuite) genKeyExpired(c *C, role string) string {
	id, err := s.repo.GenKeyWithExpires(role, s.expiredTime)
	c.Assert(err, IsNil)
	return id
}

// withMetaExpired sets signed.IsExpired throughout the invocation of f so that
// any metadata marked to expire at s.expiredTime will be expired (this avoids
// the need to sleep in the tests).
func (s *ClientSuite) withMetaExpired(f func()) {
	e := signed.IsExpired
	defer func() { signed.IsExpired = e }()
	signed.IsExpired = func(t time.Time) bool {
		return t.Unix() == s.expiredTime.Round(time.Second).Unix()
	}
	f()
}

func (s *ClientSuite) syncLocal(c *C) {
	meta, err := s.store.GetMeta()
	c.Assert(err, IsNil)
	for k, v := range meta {
		c.Assert(s.local.SetMeta(k, v), IsNil)
	}
}

func (s *ClientSuite) syncRemote(c *C) {
	meta, err := s.store.GetMeta()
	c.Assert(err, IsNil)
	for name, data := range meta {
		s.remote.meta[name] = newFakeFile(data)
	}
}

func (s *ClientSuite) addRemoteTarget(c *C, name string) {
	c.Assert(s.repo.AddTarget(name, nil), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)
}

func (s *ClientSuite) rootKeys(c *C) []*data.Key {
	rootKeys, err := s.repo.RootKeys()
	c.Assert(err, IsNil)
	c.Assert(rootKeys, HasLen, 1)
	return rootKeys
}

func (s *ClientSuite) newClient(c *C) *Client {
	s.local = MemoryLocalStore()
	client := NewClient(s.local, s.remote)
	c.Assert(client.Init(s.rootKeys(c), 1), IsNil)
	return client
}

func (s *ClientSuite) updatedClient(c *C) *Client {
	client := s.newClient(c)
	_, err := client.Update()
	c.Assert(err, IsNil)
	return client
}

func assertFiles(c *C, files data.Files, names []string) {
	c.Assert(files, HasLen, len(names))
	for _, name := range names {
		target, ok := targetFiles[name]
		if !ok {
			c.Fatalf("unknown target %s", name)
		}
		file, ok := files[name]
		if !ok {
			c.Fatalf("expected files to contain %s", name)
		}
		meta, err := util.GenerateFileMeta(bytes.NewReader(target), file.HashAlgorithms()...)
		c.Assert(err, IsNil)
		c.Assert(util.FileMetaEqual(file, meta), IsNil)
	}
}

func assertWrongHash(c *C, err error) {
	// just test the type of err rather than using DeepEquals, as it contains
	// hashes we don't necessarily need to check.
	e, ok := err.(ErrDownloadFailed)
	if !ok {
		c.Fatalf("expected err to have type ErrDownloadFailed, got %T", err)
	}
	if _, ok := e.Err.(util.ErrWrongHash); !ok {
		c.Fatalf("expected err.Err to have type util.ErrWrongHash, got %T", err)
	}
}

func (s *ClientSuite) assertErrExpired(c *C, err error, file string) {
	decodeErr, ok := err.(ErrDecodeFailed)
	if !ok {
		c.Fatalf("expected err to have type ErrDecodeFailed, got %T", err)
	}
	c.Assert(decodeErr.File, Equals, file)
	expiredErr, ok := decodeErr.Err.(signed.ErrExpired)
	if !ok {
		c.Fatalf("expected err.Err to have type signed.ErrExpired, got %T", err)
	}
	c.Assert(expiredErr.Expired.Unix(), Equals, s.expiredTime.Round(time.Second).Unix())
}

func (s *ClientSuite) TestInitRootTooLarge(c *C) {
	client := NewClient(MemoryLocalStore(), s.remote)
	s.remote.meta["root.json"] = newFakeFile(make([]byte, maxMetaSize+1))
	c.Assert(client.Init(s.rootKeys(c), 0), Equals, ErrMetaTooLarge{"root.json", maxMetaSize + 1})
}

func (s *ClientSuite) TestInitRootExpired(c *C) {
	s.genKeyExpired(c, "targets")
	s.syncRemote(c)
	client := NewClient(MemoryLocalStore(), s.remote)
	s.withMetaExpired(func() {
		s.assertErrExpired(c, client.Init(s.rootKeys(c), 1), "root.json")
	})
}

func (s *ClientSuite) TestInit(c *C) {
	client := NewClient(MemoryLocalStore(), s.remote)

	// check Init() returns keys.ErrInvalidThreshold with an invalid threshold
	c.Assert(client.Init(s.rootKeys(c), 0), Equals, keys.ErrInvalidThreshold)

	// check Init() returns signed.ErrRoleThreshold when not enough keys
	c.Assert(client.Init(s.rootKeys(c), 2), Equals, ErrInsufficientKeys)

	// check Update() returns ErrNoRootKeys when uninitialized
	_, err := client.Update()
	c.Assert(err, Equals, ErrNoRootKeys)

	// check Update() does not return ErrNoRootKeys after initialization
	c.Assert(client.Init(s.rootKeys(c), 1), IsNil)
	_, err = client.Update()
	c.Assert(err, Not(Equals), ErrNoRootKeys)
}

func (s *ClientSuite) TestFirstUpdate(c *C) {
	files, err := s.newClient(c).Update()
	c.Assert(err, IsNil)
	c.Assert(files, HasLen, 1)
	assertFiles(c, files, []string{"/foo.txt"})
}

func (s *ClientSuite) TestMissingRemoteMetadata(c *C) {
	client := s.newClient(c)

	delete(s.remote.meta, "targets.json")
	_, err := client.Update()
	c.Assert(err, Equals, ErrMissingRemoteMetadata{"targets.json"})

	delete(s.remote.meta, "timestamp.json")
	_, err = client.Update()
	c.Assert(err, Equals, ErrMissingRemoteMetadata{"timestamp.json"})
}

func (s *ClientSuite) TestNoChangeUpdate(c *C) {
	client := s.newClient(c)
	_, err := client.Update()
	c.Assert(err, IsNil)
	_, err = client.Update()
	c.Assert(IsLatestSnapshot(err), Equals, true)
}

func (s *ClientSuite) TestNewTimestamp(c *C) {
	client := s.updatedClient(c)
	version := client.timestampVer
	c.Assert(version > 0, Equals, true)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)
	_, err := client.Update()
	c.Assert(IsLatestSnapshot(err), Equals, true)
	c.Assert(client.timestampVer > version, Equals, true)
}

func (s *ClientSuite) TestNewRoot(c *C) {
	client := s.newClient(c)

	// replace all keys
	newKeyIDs := make(map[string]string)
	for role, id := range s.keyIDs {
		c.Assert(s.repo.RevokeKey(role, id), IsNil)
		newKeyIDs[role] = s.genKey(c, role)
	}

	// update metadata
	c.Assert(s.repo.Sign("targets.json"), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)

	// check update gets new root version
	c.Assert(client.getLocalMeta(), IsNil)
	version := client.rootVer
	c.Assert(version > 0, Equals, true)
	_, err := client.Update()
	c.Assert(err, IsNil)
	c.Assert(client.rootVer > version, Equals, true)

	// check old keys are not in db
	for _, id := range s.keyIDs {
		c.Assert(client.db.GetKey(id), IsNil)
	}

	// check new keys are in db
	for name, id := range newKeyIDs {
		key := client.db.GetKey(id)
		c.Assert(key, NotNil)
		c.Assert(key.ID, Equals, id)
		role := client.db.GetRole(name)
		c.Assert(role, NotNil)
		c.Assert(role.KeyIDs, DeepEquals, map[string]struct{}{id: {}})
	}
}

func (s *ClientSuite) TestNewTargets(c *C) {
	client := s.newClient(c)
	files, err := client.Update()
	c.Assert(err, IsNil)
	assertFiles(c, files, []string{"/foo.txt"})

	s.addRemoteTarget(c, "bar.txt")
	s.addRemoteTarget(c, "baz.txt")

	files, err = client.Update()
	c.Assert(err, IsNil)
	assertFiles(c, files, []string{"/bar.txt", "/baz.txt"})

	// Adding the same exact file should not lead to an update
	s.addRemoteTarget(c, "bar.txt")
	files, err = client.Update()
	c.Assert(err, IsNil)
	c.Assert(files, HasLen, 0)
}

func (s *ClientSuite) TestNewTimestampKey(c *C) {
	client := s.newClient(c)

	// replace key
	oldID := s.keyIDs["timestamp"]
	c.Assert(s.repo.RevokeKey("timestamp", oldID), IsNil)
	newID := s.genKey(c, "timestamp")

	// generate new snapshot (because root has changed) and timestamp
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)

	// check update gets new root and timestamp
	c.Assert(client.getLocalMeta(), IsNil)
	rootVer := client.rootVer
	timestampVer := client.timestampVer
	_, err := client.Update()
	c.Assert(err, IsNil)
	c.Assert(client.rootVer > rootVer, Equals, true)
	c.Assert(client.timestampVer > timestampVer, Equals, true)

	// check key has been replaced in db
	c.Assert(client.db.GetKey(oldID), IsNil)
	key := client.db.GetKey(newID)
	c.Assert(key, NotNil)
	c.Assert(key.ID, Equals, newID)
	role := client.db.GetRole("timestamp")
	c.Assert(role, NotNil)
	c.Assert(role.KeyIDs, DeepEquals, map[string]struct{}{newID: {}})
}

func (s *ClientSuite) TestNewSnapshotKey(c *C) {
	client := s.newClient(c)

	// replace key
	oldID := s.keyIDs["snapshot"]
	c.Assert(s.repo.RevokeKey("snapshot", oldID), IsNil)
	newID := s.genKey(c, "snapshot")

	// generate new snapshot and timestamp
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)

	// check update gets new root, snapshot and timestamp
	c.Assert(client.getLocalMeta(), IsNil)
	rootVer := client.rootVer
	snapshotVer := client.snapshotVer
	timestampVer := client.timestampVer
	_, err := client.Update()
	c.Assert(err, IsNil)
	c.Assert(client.rootVer > rootVer, Equals, true)
	c.Assert(client.snapshotVer > snapshotVer, Equals, true)
	c.Assert(client.timestampVer > timestampVer, Equals, true)

	// check key has been replaced in db
	c.Assert(client.db.GetKey(oldID), IsNil)
	key := client.db.GetKey(newID)
	c.Assert(key, NotNil)
	c.Assert(key.ID, Equals, newID)
	role := client.db.GetRole("snapshot")
	c.Assert(role, NotNil)
	c.Assert(role.KeyIDs, DeepEquals, map[string]struct{}{newID: {}})
}

func (s *ClientSuite) TestNewTargetsKey(c *C) {
	client := s.newClient(c)

	// replace key
	oldID := s.keyIDs["targets"]
	c.Assert(s.repo.RevokeKey("targets", oldID), IsNil)
	newID := s.genKey(c, "targets")

	// re-sign targets and generate new snapshot and timestamp
	c.Assert(s.repo.Sign("targets.json"), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)

	// check update gets new metadata
	c.Assert(client.getLocalMeta(), IsNil)
	rootVer := client.rootVer
	targetsVer := client.targetsVer
	snapshotVer := client.snapshotVer
	timestampVer := client.timestampVer
	_, err := client.Update()
	c.Assert(err, IsNil)
	c.Assert(client.rootVer > rootVer, Equals, true)
	c.Assert(client.targetsVer > targetsVer, Equals, true)
	c.Assert(client.snapshotVer > snapshotVer, Equals, true)
	c.Assert(client.timestampVer > timestampVer, Equals, true)

	// check key has been replaced in db
	c.Assert(client.db.GetKey(oldID), IsNil)
	key := client.db.GetKey(newID)
	c.Assert(key, NotNil)
	c.Assert(key.ID, Equals, newID)
	role := client.db.GetRole("targets")
	c.Assert(role, NotNil)
	c.Assert(role.KeyIDs, DeepEquals, map[string]struct{}{newID: {}})
}

func (s *ClientSuite) TestLocalExpired(c *C) {
	client := s.newClient(c)

	// locally expired timestamp.json is ok
	version := client.timestampVer
	c.Assert(s.repo.TimestampWithExpires(s.expiredTime), IsNil)
	s.syncLocal(c)
	s.withMetaExpired(func() {
		c.Assert(client.getLocalMeta(), IsNil)
		c.Assert(client.timestampVer > version, Equals, true)
	})

	// locally expired snapshot.json is ok
	version = client.snapshotVer
	c.Assert(s.repo.SnapshotWithExpires(tuf.CompressionTypeNone, s.expiredTime), IsNil)
	s.syncLocal(c)
	s.withMetaExpired(func() {
		c.Assert(client.getLocalMeta(), IsNil)
		c.Assert(client.snapshotVer > version, Equals, true)
	})

	// locally expired targets.json is ok
	version = client.targetsVer
	c.Assert(s.repo.AddTargetWithExpires("foo.txt", nil, s.expiredTime), IsNil)
	s.syncLocal(c)
	s.withMetaExpired(func() {
		c.Assert(client.getLocalMeta(), IsNil)
		c.Assert(client.targetsVer > version, Equals, true)
	})

	// locally expired root.json is not ok
	version = client.rootVer
	s.genKeyExpired(c, "targets")
	s.syncLocal(c)
	s.withMetaExpired(func() {
		err := client.getLocalMeta()
		if _, ok := err.(signed.ErrExpired); !ok {
			c.Fatalf("expected err to have type signed.ErrExpired, got %T", err)
		}
		c.Assert(client.rootVer, Equals, version)
	})
}

func (s *ClientSuite) TestTimestampTooLarge(c *C) {
	s.remote.meta["timestamp.json"] = newFakeFile(make([]byte, maxMetaSize+1))
	_, err := s.newClient(c).Update()
	c.Assert(err, Equals, ErrMetaTooLarge{"timestamp.json", maxMetaSize + 1})
}

func (s *ClientSuite) TestUpdateLocalRootExpired(c *C) {
	client := s.newClient(c)

	// add a soon-to-expire root.json to local storage
	s.genKeyExpired(c, "timestamp")
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncLocal(c)

	// add a far-expiring root.json to remote storage
	s.genKey(c, "timestamp")
	s.addRemoteTarget(c, "bar.txt")
	s.syncRemote(c)

	// check the update downloads the non-expired remote root.json and
	// restarts itself, thus successfully updating
	s.withMetaExpired(func() {
		err := client.getLocalMeta()
		if _, ok := err.(signed.ErrExpired); !ok {
			c.Fatalf("expected err to have type signed.ErrExpired, got %T", err)
		}
		_, err = client.Update()
		c.Assert(err, IsNil)
	})
}

func (s *ClientSuite) TestUpdateRemoteExpired(c *C) {
|
||||
client := s.updatedClient(c)
|
||||
|
||||
// expired remote metadata should always be rejected
|
||||
c.Assert(s.repo.TimestampWithExpires(s.expiredTime), IsNil)
|
||||
s.syncRemote(c)
|
||||
s.withMetaExpired(func() {
|
||||
_, err := client.Update()
|
||||
s.assertErrExpired(c, err, "timestamp.json")
|
||||
})
|
||||
|
||||
c.Assert(s.repo.SnapshotWithExpires(tuf.CompressionTypeNone, s.expiredTime), IsNil)
|
||||
c.Assert(s.repo.Timestamp(), IsNil)
|
||||
s.syncRemote(c)
|
||||
s.withMetaExpired(func() {
|
||||
_, err := client.Update()
|
||||
s.assertErrExpired(c, err, "snapshot.json")
|
||||
})
|
||||
|
||||
c.Assert(s.repo.AddTargetWithExpires("bar.txt", nil, s.expiredTime), IsNil)
|
||||
c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
|
||||
c.Assert(s.repo.Timestamp(), IsNil)
|
||||
s.syncRemote(c)
|
||||
s.withMetaExpired(func() {
|
||||
_, err := client.Update()
|
||||
s.assertErrExpired(c, err, "targets.json")
|
||||
})
|
||||
|
||||
s.genKeyExpired(c, "timestamp")
|
||||
c.Assert(s.repo.RemoveTarget("bar.txt"), IsNil)
|
||||
c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
|
||||
c.Assert(s.repo.Timestamp(), IsNil)
|
||||
s.syncRemote(c)
|
||||
s.withMetaExpired(func() {
|
||||
_, err := client.Update()
|
||||
s.assertErrExpired(c, err, "root.json")
|
||||
})
|
||||
}
|
||||
|
||||
func (s *ClientSuite) TestUpdateLocalRootExpiredKeyChange(c *C) {
|
||||
client := s.newClient(c)
|
||||
|
||||
// add soon to expire root.json to local storage
|
||||
s.genKeyExpired(c, "timestamp")
|
||||
c.Assert(s.repo.Timestamp(), IsNil)
|
||||
s.syncLocal(c)
|
||||
|
||||
// replace all keys
|
||||
newKeyIDs := make(map[string]string)
|
||||
for role, id := range s.keyIDs {
|
||||
c.Assert(s.repo.RevokeKey(role, id), IsNil)
|
||||
newKeyIDs[role] = s.genKey(c, role)
|
||||
}
|
||||
|
||||
// update metadata
|
||||
c.Assert(s.repo.Sign("targets.json"), IsNil)
|
||||
c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
|
||||
c.Assert(s.repo.Timestamp(), IsNil)
|
||||
s.syncRemote(c)
|
||||
|
||||
// check the update downloads the non expired remote root.json and
|
||||
// restarts itself, thus successfully updating
|
||||
s.withMetaExpired(func() {
|
||||
err := client.getLocalMeta()
|
||||
c.Assert(err, FitsTypeOf, signed.ErrExpired{})
|
||||
|
||||
_, err = client.Update()
|
||||
c.Assert(err, IsNil)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *ClientSuite) TestUpdateMixAndMatchAttack(c *C) {
|
||||
// generate metadata with an explicit expires so we can make predictable changes
|
||||
expires := time.Now().Add(time.Hour)
|
||||
c.Assert(s.repo.AddTargetWithExpires("foo.txt", nil, expires), IsNil)
|
||||
c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
|
||||
c.Assert(s.repo.Timestamp(), IsNil)
|
||||
s.syncRemote(c)
|
||||
client := s.updatedClient(c)
|
||||
|
||||
// grab the remote targets.json
|
||||
oldTargets, ok := s.remote.meta["targets.json"]
|
||||
if !ok {
|
||||
c.Fatal("missing remote targets.json")
|
||||
}
|
||||
|
||||
// generate new remote metadata, but replace targets.json with the old one
|
||||
c.Assert(s.repo.AddTargetWithExpires("bar.txt", nil, expires), IsNil)
|
||||
c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
|
||||
c.Assert(s.repo.Timestamp(), IsNil)
|
||||
s.syncRemote(c)
|
||||
newTargets, ok := s.remote.meta["targets.json"]
|
||||
if !ok {
|
||||
c.Fatal("missing remote targets.json")
|
||||
}
|
||||
s.remote.meta["targets.json"] = oldTargets
|
||||
|
||||
// check update returns ErrWrongSize for targets.json
|
||||
_, err := client.Update()
|
||||
c.Assert(err, DeepEquals, ErrWrongSize{"targets.json", oldTargets.size, newTargets.size})
|
||||
|
||||
// do the same but keep the size the same
|
||||
c.Assert(s.repo.RemoveTargetWithExpires("foo.txt", expires), IsNil)
|
||||
c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
|
||||
c.Assert(s.repo.Timestamp(), IsNil)
|
||||
s.syncRemote(c)
|
||||
s.remote.meta["targets.json"] = oldTargets
|
||||
|
||||
// check update returns ErrWrongHash
|
||||
_, err = client.Update()
|
||||
assertWrongHash(c, err)
|
||||
}
|
||||
|
||||
func (s *ClientSuite) TestUpdateReplayAttack(c *C) {
|
||||
client := s.updatedClient(c)
|
||||
|
||||
// grab the remote timestamp.json
|
||||
oldTimestamp, ok := s.remote.meta["timestamp.json"]
|
||||
if !ok {
|
||||
c.Fatal("missing remote timestamp.json")
|
||||
}
|
||||
|
||||
// generate a new timestamp and sync with the client
|
||||
version := client.timestampVer
|
||||
c.Assert(version > 0, Equals, true)
|
||||
c.Assert(s.repo.Timestamp(), IsNil)
|
||||
s.syncRemote(c)
|
||||
_, err := client.Update()
|
||||
c.Assert(IsLatestSnapshot(err), Equals, true)
|
||||
c.Assert(client.timestampVer > version, Equals, true)
|
||||
|
||||
// replace remote timestamp.json with the old one
|
||||
s.remote.meta["timestamp.json"] = oldTimestamp
|
||||
|
||||
// check update returns ErrLowVersion
|
||||
_, err = client.Update()
|
||||
c.Assert(err, DeepEquals, ErrDecodeFailed{"timestamp.json", signed.ErrLowVersion{version, client.timestampVer}})
|
||||
}
|
||||
|
||||
func (s *ClientSuite) TestUpdateTamperedTargets(c *C) {
|
||||
client := s.newClient(c)
|
||||
|
||||
// get local targets.json
|
||||
meta, err := s.store.GetMeta()
|
||||
c.Assert(err, IsNil)
|
||||
targetsJSON, ok := meta["targets.json"]
|
||||
if !ok {
|
||||
c.Fatal("missing targets.json")
|
||||
}
|
||||
targets := &data.Signed{}
|
||||
c.Assert(json.Unmarshal(targetsJSON, targets), IsNil)
|
||||
|
||||
// update remote targets.json to have different content but same size
|
||||
c.Assert(targets.Signatures, HasLen, 1)
|
||||
targets.Signatures[0].Method = "xxxxxxx"
|
||||
tamperedJSON, err := json.Marshal(targets)
|
||||
c.Assert(err, IsNil)
|
||||
s.store.SetMeta("targets.json", tamperedJSON)
|
||||
s.syncRemote(c)
|
||||
_, err = client.Update()
|
||||
assertWrongHash(c, err)
|
||||
|
||||
// update remote targets.json to have the wrong size
|
||||
targets.Signatures[0].Method = "xxx"
|
||||
tamperedJSON, err = json.Marshal(targets)
|
||||
c.Assert(err, IsNil)
|
||||
s.store.SetMeta("targets.json", tamperedJSON)
|
||||
s.syncRemote(c)
|
||||
_, err = client.Update()
|
||||
c.Assert(err, DeepEquals, ErrWrongSize{"targets.json", int64(len(tamperedJSON)), int64(len(targetsJSON))})
|
||||
}
|
||||
|
||||
func (s *ClientSuite) TestUpdateHTTP(c *C) {
|
||||
tmp := c.MkDir()
|
||||
|
||||
// start file server
|
||||
addr, cleanup := startFileServer(c, tmp)
|
||||
defer cleanup()
|
||||
|
||||
for _, consistentSnapshot := range []bool{false, true} {
|
||||
dir := fmt.Sprintf("consistent-snapshot-%t", consistentSnapshot)
|
||||
|
||||
// generate repository
|
||||
repo := generateRepoFS(c, filepath.Join(tmp, dir), targetFiles, consistentSnapshot)
|
||||
|
||||
// initialize a client
|
||||
remote, err := HTTPRemoteStore(fmt.Sprintf("http://%s/%s/repository", addr, dir), nil)
|
||||
c.Assert(err, IsNil)
|
||||
client := NewClient(MemoryLocalStore(), remote)
|
||||
rootKeys, err := repo.RootKeys()
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(rootKeys, HasLen, 1)
|
||||
c.Assert(client.Init(rootKeys, 1), IsNil)
|
||||
|
||||
// check update is ok
|
||||
targets, err := client.Update()
|
||||
c.Assert(err, IsNil)
|
||||
assertFiles(c, targets, []string{"/foo.txt", "/bar.txt", "/baz.txt"})
|
||||
|
||||
// check can download files
|
||||
for name, data := range targetFiles {
|
||||
var dest testDestination
|
||||
c.Assert(client.Download(name, &dest), IsNil)
|
||||
c.Assert(dest.deleted, Equals, false)
|
||||
c.Assert(dest.String(), Equals, string(data))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type testDestination struct {
|
||||
bytes.Buffer
|
||||
deleted bool
|
||||
}
|
||||
|
||||
func (t *testDestination) Delete() error {
|
||||
t.deleted = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ClientSuite) TestDownloadUnknownTarget(c *C) {
|
||||
client := s.updatedClient(c)
|
||||
var dest testDestination
|
||||
c.Assert(client.Download("/nonexistent", &dest), Equals, ErrUnknownTarget{"/nonexistent"})
|
||||
c.Assert(dest.deleted, Equals, true)
|
||||
}
|
||||
|
||||
func (s *ClientSuite) TestDownloadNoExist(c *C) {
|
||||
client := s.updatedClient(c)
|
||||
delete(s.remote.targets, "/foo.txt")
|
||||
var dest testDestination
|
||||
c.Assert(client.Download("/foo.txt", &dest), Equals, ErrNotFound{"/foo.txt"})
|
||||
c.Assert(dest.deleted, Equals, true)
|
||||
}
|
||||
|
||||
func (s *ClientSuite) TestDownloadOK(c *C) {
|
||||
client := s.updatedClient(c)
|
||||
// the filename is normalized if necessary
|
||||
for _, name := range []string{"/foo.txt", "foo.txt"} {
|
||||
var dest testDestination
|
||||
c.Assert(client.Download(name, &dest), IsNil)
|
||||
c.Assert(dest.deleted, Equals, false)
|
||||
c.Assert(dest.String(), Equals, "foo")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ClientSuite) TestDownloadWrongSize(c *C) {
|
||||
client := s.updatedClient(c)
|
||||
remoteFile := &fakeFile{buf: bytes.NewReader([]byte("wrong-size")), size: 10}
|
||||
s.remote.targets["/foo.txt"] = remoteFile
|
||||
var dest testDestination
|
||||
c.Assert(client.Download("/foo.txt", &dest), DeepEquals, ErrWrongSize{"/foo.txt", 10, 3})
|
||||
c.Assert(remoteFile.bytesRead, Equals, 0)
|
||||
c.Assert(dest.deleted, Equals, true)
|
||||
}
|
||||
|
||||
func (s *ClientSuite) TestDownloadTargetTooLong(c *C) {
|
||||
client := s.updatedClient(c)
|
||||
remoteFile := s.remote.targets["/foo.txt"]
|
||||
remoteFile.buf = bytes.NewReader([]byte("foo-ooo"))
|
||||
var dest testDestination
|
||||
c.Assert(client.Download("/foo.txt", &dest), IsNil)
|
||||
c.Assert(remoteFile.bytesRead, Equals, 3)
|
||||
c.Assert(dest.deleted, Equals, false)
|
||||
c.Assert(dest.String(), Equals, "foo")
|
||||
}
|
||||
|
||||
func (s *ClientSuite) TestDownloadTargetTooShort(c *C) {
|
||||
client := s.updatedClient(c)
|
||||
remoteFile := s.remote.targets["/foo.txt"]
|
||||
remoteFile.buf = bytes.NewReader([]byte("fo"))
|
||||
var dest testDestination
|
||||
c.Assert(client.Download("/foo.txt", &dest), DeepEquals, ErrWrongSize{"/foo.txt", 2, 3})
|
||||
c.Assert(dest.deleted, Equals, true)
|
||||
}
|
||||
|
||||
func (s *ClientSuite) TestDownloadTargetCorruptData(c *C) {
|
||||
client := s.updatedClient(c)
|
||||
remoteFile := s.remote.targets["/foo.txt"]
|
||||
remoteFile.buf = bytes.NewReader([]byte("corrupt"))
|
||||
var dest testDestination
|
||||
assertWrongHash(c, client.Download("/foo.txt", &dest))
|
||||
c.Assert(dest.deleted, Equals, true)
|
||||
}
|
||||
|
||||
func (s *ClientSuite) TestAvailableTargets(c *C) {
|
||||
client := s.updatedClient(c)
|
||||
files, err := client.Targets()
|
||||
c.Assert(err, IsNil)
|
||||
assertFiles(c, files, []string{"/foo.txt"})
|
||||
|
||||
s.addRemoteTarget(c, "bar.txt")
|
||||
s.addRemoteTarget(c, "baz.txt")
|
||||
_, err = client.Update()
|
||||
c.Assert(err, IsNil)
|
||||
files, err = client.Targets()
|
||||
c.Assert(err, IsNil)
|
||||
assertFiles(c, files, []string{"/foo.txt", "/bar.txt", "/baz.txt"})
|
||||
}
|
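The tests above exercise the full update-and-download flow end to end. For orientation, here is a minimal sketch of the same flow outside the test harness; the import paths, repository URL, and key file location are assumptions, and the pinned root key would normally ship with the application:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"os"

	// import paths assumed from the vendored layout above
	"github.com/endophage/go-tuf/client"
	"github.com/endophage/go-tuf/data"
)

// bufferDestination plays the role of testDestination above: Download
// writes the verified target into it and calls Delete on failure.
type bufferDestination struct{ bytes.Buffer }

func (d *bufferDestination) Delete() error {
	d.Reset()
	return nil
}

func main() {
	// hypothetical repository URL
	remote, err := client.HTTPRemoteStore("http://localhost:8080/repository", nil)
	if err != nil {
		log.Fatal(err)
	}
	c := client.NewClient(client.MemoryLocalStore(), remote)

	// load a pinned copy of the repository's root key (path is hypothetical)
	f, err := os.Open("root_key.pub")
	if err != nil {
		log.Fatal(err)
	}
	key := &data.Key{}
	if err := json.NewDecoder(f).Decode(key); err != nil {
		log.Fatal(err)
	}
	if err := c.Init([]*data.Key{key}, 1); err != nil {
		log.Fatal(err)
	}

	// fetch new metadata and download any changed targets
	targets, err := c.Update()
	if err != nil && !client.IsLatestSnapshot(err) {
		log.Fatal(err)
	}
	for name := range targets {
		var dest bufferDestination
		if err := c.Download(name, &dest); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("downloaded %s (%d bytes)\n", name, dest.Len())
	}
}
```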
106
Godeps/_workspace/src/github.com/endophage/go-tuf/client/errors.go
generated
vendored
Normal file

@ -0,0 +1,106 @@
package client

import (
	"errors"
	"fmt"
)

var (
	ErrNoRootKeys       = errors.New("tuf: no root keys found in local meta store")
	ErrInsufficientKeys = errors.New("tuf: insufficient keys to meet threshold")
)

type ErrMissingRemoteMetadata struct {
	Name string
}

func (e ErrMissingRemoteMetadata) Error() string {
	return fmt.Sprintf("tuf: missing remote metadata %s", e.Name)
}

type ErrDownloadFailed struct {
	File string
	Err  error
}

func (e ErrDownloadFailed) Error() string {
	return fmt.Sprintf("tuf: failed to download %s: %s", e.File, e.Err)
}

type ErrDecodeFailed struct {
	File string
	Err  error
}

func (e ErrDecodeFailed) Error() string {
	return fmt.Sprintf("tuf: failed to decode %s: %s", e.File, e.Err)
}

func isDecodeFailedWithErr(err, expected error) bool {
	e, ok := err.(ErrDecodeFailed)
	if !ok {
		return false
	}
	return e.Err == expected
}

type ErrNotFound struct {
	File string
}

func (e ErrNotFound) Error() string {
	return fmt.Sprintf("tuf: file not found: %s", e.File)
}

func IsNotFound(err error) bool {
	_, ok := err.(ErrNotFound)
	return ok
}

type ErrWrongSize struct {
	File     string
	Actual   int64
	Expected int64
}

func (e ErrWrongSize) Error() string {
	return fmt.Sprintf("tuf: unexpected file size: %s (expected %d bytes, got %d bytes)", e.File, e.Expected, e.Actual)
}

type ErrLatestSnapshot struct {
	Version int
}

func (e ErrLatestSnapshot) Error() string {
	return fmt.Sprintf("tuf: the local snapshot version (%d) is the latest", e.Version)
}

func IsLatestSnapshot(err error) bool {
	_, ok := err.(ErrLatestSnapshot)
	return ok
}

type ErrUnknownTarget struct {
	Name string
}

func (e ErrUnknownTarget) Error() string {
	return fmt.Sprintf("tuf: unknown target file: %s", e.Name)
}

type ErrMetaTooLarge struct {
	Name string
	Size int64
}

func (e ErrMetaTooLarge) Error() string {
	return fmt.Sprintf("tuf: %s size %d bytes greater than maximum %d bytes", e.Name, e.Size, maxMetaSize)
}

type ErrInvalidURL struct {
	URL string
}

func (e ErrInvalidURL) Error() string {
	return fmt.Sprintf("tuf: invalid repository URL %s", e.URL)
}
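Since these are concrete value types, callers can compare them directly (as the tests above do with DeepEquals) or use the exported predicates. A small sketch of the intended call-site pattern, assuming the *Client type defined elsewhere in this package:

```go
// updateOnce is a hypothetical helper showing how callers branch on the
// error types defined above.
func updateOnce(c *Client) error {
	files, err := c.Update()
	switch {
	case err == nil:
		// files holds the targets that changed in this update
		_ = files
		return nil
	case IsLatestSnapshot(err):
		// the local snapshot is already current; not a failure
		return nil
	case IsNotFound(err):
		// the remote store is missing a metadata or target file
		return err
	default:
		return err
	}
}
```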
180
Godeps/_workspace/src/github.com/endophage/go-tuf/client/interop_test.go
generated
vendored
Normal file

@ -0,0 +1,180 @@
package client

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"github.com/agl/ed25519"
	"github.com/flynn/go-tuf"
	"github.com/flynn/go-tuf/data"
	"github.com/flynn/go-tuf/util"
	. "gopkg.in/check.v1"
)

type InteropSuite struct{}

var _ = Suite(&InteropSuite{})

var pythonTargets = map[string][]byte{
	"/file1.txt":     []byte("file1.txt"),
	"/dir/file2.txt": []byte("file2.txt"),
}

func (InteropSuite) TestGoClientPythonGenerated(c *C) {
	// start file server
	cwd, err := os.Getwd()
	c.Assert(err, IsNil)
	testDataDir := filepath.Join(cwd, "testdata")
	addr, cleanup := startFileServer(c, testDataDir)
	defer cleanup()

	for _, dir := range []string{"with-consistent-snapshot", "without-consistent-snapshot"} {
		remote, err := HTTPRemoteStore(
			fmt.Sprintf("http://%s/%s/repository", addr, dir),
			&HTTPRemoteOptions{MetadataPath: "metadata", TargetsPath: "targets"},
		)
		c.Assert(err, IsNil)

		// initiate a client with the root keys
		f, err := os.Open(filepath.Join("testdata", dir, "keystore", "root_key.pub"))
		c.Assert(err, IsNil)
		key := &data.Key{}
		c.Assert(json.NewDecoder(f).Decode(key), IsNil)
		c.Assert(key.Type, Equals, "ed25519")
		c.Assert(key.Value.Public, HasLen, ed25519.PublicKeySize)
		client := NewClient(MemoryLocalStore(), remote)
		c.Assert(client.Init([]*data.Key{key}, 1), IsNil)

		// check update returns the correct updated targets
		files, err := client.Update()
		c.Assert(err, IsNil)
		c.Assert(files, HasLen, len(pythonTargets))
		for name, data := range pythonTargets {
			file, ok := files[name]
			if !ok {
				c.Fatalf("expected updated targets to contain %s", name)
			}
			meta, err := util.GenerateFileMeta(bytes.NewReader(data), file.HashAlgorithms()...)
			c.Assert(err, IsNil)
			c.Assert(util.FileMetaEqual(file, meta), IsNil)
		}

		// download the files and check they have the correct content
		for name, data := range pythonTargets {
			var dest testDestination
			c.Assert(client.Download(name, &dest), IsNil)
			c.Assert(dest.deleted, Equals, false)
			c.Assert(dest.String(), Equals, string(data))
		}
	}
}

func generateRepoFS(c *C, dir string, files map[string][]byte, consistentSnapshot bool) *tuf.Repo {
	repo, err := tuf.NewRepo(tuf.FileSystemStore(dir, nil))
	c.Assert(err, IsNil)
	if !consistentSnapshot {
		c.Assert(repo.Init(false), IsNil)
	}
	for _, role := range []string{"root", "snapshot", "targets", "timestamp"} {
		_, err := repo.GenKey(role)
		c.Assert(err, IsNil)
	}
	for file, data := range files {
		path := filepath.Join(dir, "staged", "targets", file)
		c.Assert(os.MkdirAll(filepath.Dir(path), 0755), IsNil)
		c.Assert(ioutil.WriteFile(path, data, 0644), IsNil)
		c.Assert(repo.AddTarget(file, nil), IsNil)
	}
	c.Assert(repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(repo.Timestamp(), IsNil)
	c.Assert(repo.Commit(), IsNil)
	return repo
}

func (InteropSuite) TestPythonClientGoGenerated(c *C) {
	// clone the Python client if necessary
	cwd, err := os.Getwd()
	c.Assert(err, IsNil)
	tufDir := filepath.Join(cwd, "testdata", "tuf")
	if _, err := os.Stat(tufDir); os.IsNotExist(err) {
		c.Assert(exec.Command(
			"git",
			"clone",
			"--quiet",
			"--branch=v0.9.9",
			"--depth=1",
			"https://github.com/theupdateframework/tuf.git",
			tufDir,
		).Run(), IsNil)
	}

	tmp := c.MkDir()
	files := map[string][]byte{
		"foo.txt":     []byte("foo"),
		"bar/baz.txt": []byte("baz"),
	}

	// start file server
	addr, cleanup := startFileServer(c, tmp)
	defer cleanup()

	// setup Python env
	environ := os.Environ()
	pythonEnv := make([]string, 0, len(environ)+1)
	// remove any existing PYTHONPATH from the environment
	for _, e := range environ {
		if strings.HasPrefix(e, "PYTHONPATH=") {
			continue
		}
		pythonEnv = append(pythonEnv, e)
	}
	pythonEnv = append(pythonEnv, "PYTHONPATH="+tufDir)

	for _, consistentSnapshot := range []bool{false, true} {
		// generate repository
		name := fmt.Sprintf("consistent-snapshot-%t", consistentSnapshot)
		dir := filepath.Join(tmp, name)
		generateRepoFS(c, dir, files, consistentSnapshot)

		// create initial files for Python client
		clientDir := filepath.Join(dir, "client")
		currDir := filepath.Join(clientDir, "metadata", "current")
		prevDir := filepath.Join(clientDir, "metadata", "previous")
		c.Assert(os.MkdirAll(currDir, 0755), IsNil)
		c.Assert(os.MkdirAll(prevDir, 0755), IsNil)
		rootJSON, err := ioutil.ReadFile(filepath.Join(dir, "repository", "root.json"))
		c.Assert(err, IsNil)
		c.Assert(ioutil.WriteFile(filepath.Join(currDir, "root.json"), rootJSON, 0644), IsNil)

		// run Python client update
		cmd := exec.Command("python", filepath.Join(cwd, "testdata", "client.py"), "--repo=http://"+addr+"/"+name)
		cmd.Env = pythonEnv
		cmd.Dir = clientDir
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		c.Assert(cmd.Run(), IsNil)

		// check the target files got downloaded
		for path, expected := range files {
			actual, err := ioutil.ReadFile(filepath.Join(clientDir, "targets", path))
			c.Assert(err, IsNil)
			c.Assert(actual, DeepEquals, expected)
		}
	}
}

func startFileServer(c *C, dir string) (string, func() error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	c.Assert(err, IsNil)
	addr := l.Addr().String()
	go http.Serve(l, http.FileServer(http.Dir(dir)))
	return addr, l.Close
}
65
Godeps/_workspace/src/github.com/endophage/go-tuf/client/local_store.go
generated
vendored
Normal file

@ -0,0 +1,65 @@
package client

import (
	"encoding/json"
	"time"

	"github.com/boltdb/bolt"
)

func MemoryLocalStore() LocalStore {
	return make(memoryLocalStore)
}

type memoryLocalStore map[string]json.RawMessage

func (m memoryLocalStore) GetMeta() (map[string]json.RawMessage, error) {
	return m, nil
}

func (m memoryLocalStore) SetMeta(name string, meta json.RawMessage) error {
	m[name] = meta
	return nil
}

const dbBucket = "tuf-client"

func FileLocalStore(path string) (LocalStore, error) {
	db, err := bolt.Open(path, 0600, &bolt.Options{Timeout: time.Second})
	if err != nil {
		return nil, err
	}
	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte(dbBucket))
		return err
	}); err != nil {
		return nil, err
	}
	return &fileLocalStore{db: db}, nil
}

type fileLocalStore struct {
	db *bolt.DB
}

func (f *fileLocalStore) GetMeta() (map[string]json.RawMessage, error) {
	meta := make(map[string]json.RawMessage)
	if err := f.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(dbBucket))
		b.ForEach(func(k, v []byte) error {
			meta[string(k)] = v
			return nil
		})
		return nil
	}); err != nil {
		return nil, err
	}
	return meta, nil
}

func (f *fileLocalStore) SetMeta(name string, meta json.RawMessage) error {
	return f.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(dbBucket))
		return b.Put([]byte(name), meta)
	})
}
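MemoryLocalStore suits tests, while FileLocalStore persists trusted metadata across restarts in a single BoltDB bucket. A minimal sketch of the file-backed store, assuming the LocalStore interface defined elsewhere in this package; the database path is hypothetical:

```go
// hypothetical example; the tuf.db path is an assumption
func openStore() (LocalStore, error) {
	store, err := FileLocalStore("/var/lib/example/tuf.db")
	if err != nil {
		return nil, err
	}
	// metadata written here is visible to future processes using the same path
	if err := store.SetMeta("root.json", []byte(`{"_type":"Root"}`)); err != nil {
		return nil, err
	}
	return store, nil
}
```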
46
Godeps/_workspace/src/github.com/endophage/go-tuf/client/local_store_test.go
generated
vendored
Normal file

@ -0,0 +1,46 @@
package client

import (
	"encoding/json"
	"path/filepath"

	. "gopkg.in/check.v1"
)

type LocalStoreSuite struct{}

var _ = Suite(&LocalStoreSuite{})

func (LocalStoreSuite) TestFileLocalStore(c *C) {
	tmp := c.MkDir()
	path := filepath.Join(tmp, "tuf.db")
	store, err := FileLocalStore(path)
	c.Assert(err, IsNil)

	type meta map[string]json.RawMessage

	assertGet := func(expected meta) {
		actual, err := store.GetMeta()
		c.Assert(err, IsNil)
		c.Assert(meta(actual), DeepEquals, expected)
	}

	// initial GetMeta should return empty meta
	assertGet(meta{})

	// SetMeta should persist
	rootJSON := []byte(`{"_type":"Root"}`)
	c.Assert(store.SetMeta("root.json", rootJSON), IsNil)
	assertGet(meta{"root.json": rootJSON})

	// SetMeta should add to existing meta
	targetsJSON := []byte(`{"_type":"Target"}`)
	c.Assert(store.SetMeta("targets.json", targetsJSON), IsNil)
	assertGet(meta{"root.json": rootJSON, "targets.json": targetsJSON})

	// a new store should get the same meta
	c.Assert(store.(*fileLocalStore).db.Close(), IsNil)
	store, err = FileLocalStore(path)
	c.Assert(err, IsNil)
	assertGet(meta{"root.json": rootJSON, "targets.json": targetsJSON})
}
83
Godeps/_workspace/src/github.com/endophage/go-tuf/client/remote_store.go
generated
vendored
Normal file

@ -0,0 +1,83 @@
package client

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
)

type HTTPRemoteOptions struct {
	MetadataPath string
	TargetsPath  string
	UserAgent    string
}

func HTTPRemoteStore(baseURL string, opts *HTTPRemoteOptions) (RemoteStore, error) {
	if !strings.HasPrefix(baseURL, "http") {
		return nil, ErrInvalidURL{baseURL}
	}
	if opts == nil {
		opts = &HTTPRemoteOptions{}
	}
	if opts.TargetsPath == "" {
		opts.TargetsPath = "targets"
	}
	return &httpRemoteStore{baseURL, opts}, nil
}

type httpRemoteStore struct {
	baseURL string
	opts    *HTTPRemoteOptions
}

func (h *httpRemoteStore) GetMeta(name string) (io.ReadCloser, int64, error) {
	return h.get(path.Join(h.opts.MetadataPath, name))
}

func (h *httpRemoteStore) GetTarget(name string) (io.ReadCloser, int64, error) {
	return h.get(path.Join(h.opts.TargetsPath, name))
}

func (h *httpRemoteStore) get(s string) (io.ReadCloser, int64, error) {
	u := h.url(s)
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, 0, err
	}
	if h.opts.UserAgent != "" {
		req.Header.Set("User-Agent", h.opts.UserAgent)
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, 0, err
	}

	if res.StatusCode == http.StatusNotFound {
		res.Body.Close()
		return nil, 0, ErrNotFound{s}
	} else if res.StatusCode != http.StatusOK {
		res.Body.Close()
		return nil, 0, &url.Error{
			Op:  "GET",
			URL: u,
			Err: fmt.Errorf("unexpected HTTP status %d", res.StatusCode),
		}
	}

	size, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 0)
	if err != nil {
		return res.Body, -1, nil
	}
	return res.Body, size, nil
}

func (h *httpRemoteStore) url(path string) string {
	if !strings.HasPrefix(path, "/") {
		path = "/" + path
	}
	return h.baseURL + path
}
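A brief sketch of constructing a remote store with non-default paths, mirroring the options used in the interop test above; the base URL and user agent strings are assumptions:

```go
// hypothetical configuration; the layout mirrors the interop test repository
func newRemote() (RemoteStore, error) {
	return HTTPRemoteStore("http://localhost:8080/repository", &HTTPRemoteOptions{
		MetadataPath: "metadata", // metadata served under /repository/metadata
		TargetsPath:  "targets",  // targets served under /repository/targets
		UserAgent:    "example-tuf-client/1.0",
	})
}
```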
2
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/.gitignore
generated
vendored
Normal file

@ -0,0 +1,2 @@
tuf.log
tuf
66
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/LICENSE.txt
generated
vendored
Normal file

@ -0,0 +1,66 @@
This file contains the license for TUF: The Update Framework.

It also lists license information for components and source
code used by TUF: The Update Framework.

If you got this file as a part of a larger bundle,
there may be other license terms that you should be aware of.

===============================================================================
TUF: The Update Framework is distributed under this license:

Copyright (c) 2010, Justin Samuel and Justin Cappos.

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and/or hardware specification (the “Work”) to deal in the Work
without restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies of the Work,
and to permit persons to whom the Work is furnished to do so, subject to the
following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Work.

THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER
DEALINGS IN THE WORK.
===============================================================================
Many files are modified from Thandy and are licensed under the
following license:

Thandy is distributed under this license:

Copyright (c) 2008, The Tor Project, Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.

* Neither the names of the copyright owners nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
===============================================================================
8
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/Makefile
generated
vendored
Normal file

@ -0,0 +1,8 @@
all:
	docker build -t tuf-gen ./generate
	docker run tuf-gen | tar x

clean:
	rm -rf with{,out}-consistent-snapshot

.PHONY: all clean
47
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/README.md
generated
vendored
Normal file

@ -0,0 +1,47 @@
# TUF testdata

TUF testdata generated by the Python implementation which is used to test that
the Go client is compatible with files generated by the Python repository tool.

## Generate

The `generate` directory contains scripts and a Dockerfile for generating the
test data files.

Run `make` to regenerate the test files:

```
$ make clean
rm -rf keystore repository

$ make
docker build -t tuf-gen ./generate
...
Successfully built ac1fba1d0b3b
docker run tuf-gen | tar x
Files generated:
.
|-- keystore
|   |-- root_key
|   |-- root_key.pub
|   |-- snapshot_key
|   |-- snapshot_key.pub
|   |-- targets_key
|   |-- targets_key.pub
|   |-- timestamp_key
|   `-- timestamp_key.pub
|-- repository
|   |-- metadata
|   |   |-- root.json
|   |   |-- snapshot.json
|   |   |-- targets.json
|   |   |-- targets.json.gz
|   |   `-- timestamp.json
|   `-- targets
|       |-- dir
|       |   `-- file2.txt
|       `-- file1.txt
`-- tuf.log

5 directories, 16 files
```
232
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/client.py
generated
vendored
Normal file

@ -0,0 +1,232 @@
#!/usr/bin/env python
#
# A script to download updates from a remote TUF repository.
#
# A modification of basic_client.py from the Python implementation:
# https://github.com/theupdateframework/tuf/blob/v0.9.9/tuf/client/basic_client.py

"""
<Program Name>
  basic_client.py

<Author>
  Vladimir Diaz <vladimir.v.diaz@gmail.com>

<Started>
  September 2012

<Copyright>
  See LICENSE for licensing information.

<Purpose>
  Provide a basic TUF client that can update all of the metadata and target
  files provided by the user-specified repository mirror. Updated files are
  saved to the 'targets' directory in the current working directory. The
  repository mirror is specified by the user through the '--repo' command-
  line option.

  Normally, a software updater integrating TUF will develop their own custom
  client module by importing 'tuf.client.updater', instantiating the required
  object, and calling the desired methods to perform an update. This basic
  client is provided to users who wish to give TUF a quick test run without
  the hassle of writing client code. This module can also be used by updaters
  that do not need the customization and only require their clients to perform
  an update of all the files provided by their repository mirror(s).

  For software updaters that DO require customization, see the
  'example_client.py' script. The 'example_client.py' script provides an
  outline of the client code that software updaters may develop and then
  tailor to their specific software updater or package manager.

  Additional tools for clients running legacy applications will also be made
  available. These tools will allow secure software updates using The Update
  Framework without the need to modify the original application.

<Usage>
  $ python basic_client.py --repo http://localhost:8001
  $ python basic_client.py --repo http://localhost:8001 --verbose 3

<Options>
  --verbose:
    Set the verbosity level of logging messages. Accepts values 1-5.

  --repo:
    Set the repository mirror that will be responding to client requests.
    E.g., 'http://localhost:8001'.
"""

# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

import sys
import traceback
import optparse
import logging

import tuf
import tuf.formats
import tuf.client.updater
import tuf.log

# See 'log.py' to learn how logging is handled in TUF.
logger = logging.getLogger('tuf.basic_client')


def update_client(repository_mirror):
  """
  <Purpose>
    Perform an update of the metadata and target files located at
    'repository_mirror'. Target files are saved to the 'targets' directory
    in the current working directory. The current directory must already
    include a 'metadata' directory, which in turn must contain the 'current'
    and 'previous' directories. At a minimum, these two directories require
    the 'root.json' metadata file.

  <Arguments>
    repository_mirror:
      The URL to the repository mirror hosting the metadata and target
      files. E.g., 'http://localhost:8001'

  <Exceptions>
    tuf.RepositoryError, if 'repository_mirror' is improperly formatted.

  <Side Effects>
    Connects to a repository mirror and updates the metadata files and
    any target files. Obsolete targets are also removed locally.

  <Returns>
    None.
  """

  # Does 'repository_mirror' have the correct format?
  try:
    tuf.formats.URL_SCHEMA.check_match(repository_mirror)
  except tuf.FormatError as e:
    message = 'The repository mirror supplied is invalid.'
    raise tuf.RepositoryError(message)

  # Set the local repository directory containing all of the metadata files.
  tuf.conf.repository_directory = '.'

  # Set the repository mirrors. This dictionary is needed by the Updater
  # class of updater.py.
  repository_mirrors = {'mirror': {'url_prefix': repository_mirror,
                                   'metadata_path': 'repository',
                                   'targets_path': 'repository/targets',
                                   'confined_target_dirs': ['']}}

  # Create the repository object using the repository name 'repository'
  # and the repository mirrors defined above.
  updater = tuf.client.updater.Updater('repository', repository_mirrors)

  # The local destination directory to save the target files.
  destination_directory = './targets'

  # Refresh the repository's top-level roles, store the target information for
  # all the targets tracked, and determine which of these targets have been
  # updated.
  updater.refresh()
  all_targets = updater.all_targets()
  updated_targets = updater.updated_targets(all_targets, destination_directory)

  # Download each of these updated targets and save them locally.
  for target in updated_targets:
    try:
      updater.download_target(target, destination_directory)
    except tuf.DownloadError as e:
      pass

  # Remove any files from the destination directory that are no longer being
  # tracked.
  updater.remove_obsolete_targets(destination_directory)




def parse_options():
  """
  <Purpose>
    Parse the command-line options and set the logging level
    as specified by the user through the --verbose option.
    'basic_client' expects the '--repo' to be set by the user.

    Example:
      $ python basic_client.py --repo http://localhost:8001

    If the required option is unset, a parser error is printed
    and the script exits.

  <Arguments>
    None.

  <Exceptions>
    None.

  <Side Effects>
    Sets the logging level for TUF logging.

  <Returns>
    The 'options.REPOSITORY_MIRROR' string.
  """

  parser = optparse.OptionParser()

  # Add the options supported by 'basic_client' to the option parser.
  parser.add_option('--verbose', dest='VERBOSE', type=int, default=2,
                    help='Set the verbosity level of logging messages. '
                    'The lower the setting, the greater the verbosity.')

  parser.add_option('--repo', dest='REPOSITORY_MIRROR', type='string',
                    help='Specify the repository mirror\'s URL prefix '
                    '(e.g., http://www.example.com:8001/tuf/).'
                    ' The client will download updates from this mirror.')

  options, args = parser.parse_args()

  # Set the logging level.
  if options.VERBOSE == 5:
    tuf.log.set_log_level(logging.CRITICAL)
  elif options.VERBOSE == 4:
    tuf.log.set_log_level(logging.ERROR)
  elif options.VERBOSE == 3:
    tuf.log.set_log_level(logging.WARNING)
  elif options.VERBOSE == 2:
    tuf.log.set_log_level(logging.INFO)
  elif options.VERBOSE == 1:
    tuf.log.set_log_level(logging.DEBUG)
  else:
    tuf.log.set_log_level(logging.NOTSET)

  # Ensure the '--repo' option was set by the user.
  if options.REPOSITORY_MIRROR is None:
    message = '"--repo" must be set on the command-line.'
    parser.error(message)

  # Return the repository mirror containing the metadata and target files.
  return options.REPOSITORY_MIRROR



if __name__ == '__main__':

  # Parse the options and set the logging level.
  repository_mirror = parse_options()

  # Perform an update of all the files in the 'targets' directory located in
  # the current directory.
  try:
    update_client(repository_mirror)

  except (tuf.NoWorkingMirrorError, tuf.RepositoryError) as e:
    traceback.print_exc()
    sys.stderr.write('Error: '+str(e)+'\n')
    sys.exit(1)

  # Successfully updated the client's target files.
  sys.exit(0)
12
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/Dockerfile
generated
vendored
Normal file

@ -0,0 +1,12 @@
FROM ubuntu:trusty

RUN apt-get update
RUN apt-get install -y python python-dev python-pip libffi-dev tree

# Use the develop branch of tuf for the following fix:
# https://github.com/theupdateframework/tuf/commit/38005fe
RUN apt-get install -y git
RUN pip install --no-use-wheel git+https://github.com/theupdateframework/tuf.git@develop && pip install tuf[tools]

ADD generate.py generate.sh /
CMD /generate.sh
82
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.py
generated
vendored
Normal file

@ -0,0 +1,82 @@
#!/usr/bin/env python
#
# A script to generate TUF repository files.
#
# A modification of generate.py from the Python implementation:
# https://github.com/theupdateframework/tuf/blob/v0.9.9/tests/repository_data/generate.py

import shutil
import datetime
import optparse
import stat

from tuf.repository_tool import *
import tuf.util

parser = optparse.OptionParser()
parser.add_option("-c", "--consistent-snapshot", action='store_true', dest="consistent_snapshot",
                  help="Generate consistent snapshot", default=False)
(options, args) = parser.parse_args()

repository = create_new_repository('repository')

root_key_file = 'keystore/root_key'
targets_key_file = 'keystore/targets_key'
snapshot_key_file = 'keystore/snapshot_key'
timestamp_key_file = 'keystore/timestamp_key'

generate_and_write_ed25519_keypair(root_key_file, password='password')
generate_and_write_ed25519_keypair(targets_key_file, password='password')
generate_and_write_ed25519_keypair(snapshot_key_file, password='password')
generate_and_write_ed25519_keypair(timestamp_key_file, password='password')

root_public = import_ed25519_publickey_from_file(root_key_file+'.pub')
targets_public = import_ed25519_publickey_from_file(targets_key_file+'.pub')
snapshot_public = import_ed25519_publickey_from_file(snapshot_key_file+'.pub')
timestamp_public = import_ed25519_publickey_from_file(timestamp_key_file+'.pub')

root_private = import_ed25519_privatekey_from_file(root_key_file, 'password')
targets_private = import_ed25519_privatekey_from_file(targets_key_file, 'password')
snapshot_private = import_ed25519_privatekey_from_file(snapshot_key_file, 'password')
timestamp_private = import_ed25519_privatekey_from_file(timestamp_key_file, 'password')

repository.root.add_verification_key(root_public)
repository.targets.add_verification_key(targets_public)
repository.snapshot.add_verification_key(snapshot_public)
repository.timestamp.add_verification_key(timestamp_public)

repository.root.load_signing_key(root_private)
repository.targets.load_signing_key(targets_private)
repository.snapshot.load_signing_key(snapshot_private)
repository.timestamp.load_signing_key(timestamp_private)

target1_filepath = 'repository/targets/file1.txt'
tuf.util.ensure_parent_dir(target1_filepath)
target2_filepath = 'repository/targets/dir/file2.txt'
tuf.util.ensure_parent_dir(target2_filepath)

with open(target1_filepath, 'wt') as file_object:
  file_object.write('file1.txt')

with open(target2_filepath, 'wt') as file_object:
  file_object.write('file2.txt')

octal_file_permissions = oct(os.stat(target1_filepath).st_mode)[4:]
file_permissions = {'file_permissions': octal_file_permissions}
repository.targets.add_target(target1_filepath, file_permissions)
repository.targets.add_target(target2_filepath)

repository.root.expiration = datetime.datetime(2030, 1, 1, 0, 0)
repository.targets.expiration = datetime.datetime(2030, 1, 1, 0, 0)
repository.snapshot.expiration = datetime.datetime(2030, 1, 1, 0, 0)
repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 0, 0)

repository.targets.compressions = ['gz']

if options.consistent_snapshot:
  repository.write(False, True)

else:
  repository.write()

shutil.move('repository/metadata.staged', 'repository/metadata')
40
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.sh
generated
vendored
Normal file

@ -0,0 +1,40 @@
#!/bin/bash
#
# A script to generate TUF repository files using the Python implementation.
#
# A list of generated files is printed to STDERR and a tar of the files to STDOUT.

set -e

main() {
  local dir="$(mktemp -d)"
  trap "rm -rf ${dir}" EXIT

  pushd "${dir}" >/dev/null
  generate_consistent
  generate_non_consistent
  list_files >&2
  tar c .
  popd >/dev/null
}

generate_consistent() {
  mkdir "with-consistent-snapshot"
  pushd "with-consistent-snapshot" >/dev/null
  /generate.py --consistent-snapshot
  popd >/dev/null
}

generate_non_consistent() {
  mkdir "without-consistent-snapshot"
  pushd "without-consistent-snapshot" >/dev/null
  /generate.py
  popd >/dev/null
}

list_files() {
  echo "Files generated:"
  tree
}

main $@
1
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/root_key
generated
vendored
Normal file

@ -0,0 +1 @@
4fc13ddb4979dbe54ff8ac93cab9b307@@@@100000@@@@e837119fd0a754046e1175445effdf8cdeda587fc94787e6ab27bc468dfb8bb0@@@@0335c7f8953301f91aa11d3be991096d@@@@d55c63737dbc5de581f6814fa37a341723465b8ea5157eca4302a2271b0cee93d48c4e48707a4ab34ecb649e5879577eb5e7bdf95627c8cbdf611fbc7cfa360d48b819525f20050ba7829ff016fc348916ce3154f031d7aed9cd91cbf89bc2d7e03ec4b5f98c4e4b2e4e576dbd9eefb3736aa94a6753875bf328727bbefb582ced865ff2512bd2c46b8c15d4a81ff244a296307949a8e58013588b47c65ae8334fd4df0c25d95778dc03a728969ce121d63f5dc34cd21d71b8ee6c05d85eeab4f4ff7c153b896f459304aa43ac9ef3a4b34701156e8cff3ddcaf91f6bef1dadba2f275cc1c8f675a8bc026023d25428b5a5a6730e76fb3d9a0630f1eb9662b06ef6523816f4e8b71966aa6e2

1
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/root_key.pub
generated
vendored
Normal file

@ -0,0 +1 @@
{"keytype": "ed25519", "keyval": {"public": "4b9d6523d4fca15b88694b87f714c839f3b1db296884c6d235cfea617e5a3df0"}}

1
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/snapshot_key
generated
vendored
Normal file

@ -0,0 +1 @@
412601f80d76e03115cdc5620d803bf6@@@@100000@@@@90e058b5b1db460fb28e81105228bfd2b0a4ea748b524e9e349f8e24c8564fad@@@@9922a40a7b682a052b20cea3043018b2@@@@e83737f842c4a847b0302eb8cfba70b790083ce588e8c1fedf1f2a27590ef3a656b4abd0c1bec83c46a907083447b84d64d4307b98d4bbc673f9a12ef05814f550540ca187dc210de3d4147f36700da721433efcde095c98dc8ef0bc39bd40785842b6541c678b5d77b14f9a1170fabcf21dc4c86980776a721d2ac5068fcaa0703d636a60f8f6575e23b2238dd2d603ccaaeb8d4d2ca5794c0036811f0dd09409f07c137361a84358e0eeeba8e7d870652a17a5891c4f7e830672b61bd73b56f04c5e694caf87ecd255c3d7ec263a7e72c13d2fb62d97ec07b4b981776c9cc767d778e38ba1f36964a744114acd081ef7c442086eadd03d3875ad5ce04d273e685547a14b73aff739128873

1
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/snapshot_key.pub
generated
vendored
Normal file

@ -0,0 +1 @@
{"keytype": "ed25519", "keyval": {"public": "a63faff1ac94ba5ceaf3bb6d73cf5552e75959cd562bef4f91884260720da313"}}

1
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/targets_key
generated
vendored
Normal file

@ -0,0 +1 @@
6c8f2240657a414a478625eb6d86bb81@@@@100000@@@@bc6067a008c1c02592178927c8e6f561a3727827ea801291798b964bf31b2680@@@@384fb8f6e7e050f54ceffbd5e070581d@@@@4c5c8d7eeae6db767bd53e8dacfbd7d6db36584280b3cf3740fadb885442bf038650908ff9cb17f9d2c746d326a17ec7ce513ffb7e3c11abd875b17c92caa71ea556d71a69427741f0e80989df402e75ed23cfb85c79441a7cdf92e20e489abd09977d9028aae944ddc63116a5170dbdbd8607523a1338c61559fa106c164aee8c58d5961ed02195a46fcff615939c4c4adfc49d37b3cb2760b53dfe5961a63a29d2261310f23e568a58fcf71bf61db5816b00284bf15b7c89f1e9b929e1f3374119c0fd201b40c491e30542b5f547eb4a6828aae416bf6ea1c8b3c52ee0a98cc306f3725868e356143869bda527aee680b56abf660579b0a7539032b97b4266015a0ea6693904ef77002e39

1
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/targets_key.pub
generated
vendored
Normal file

@ -0,0 +1 @@
{"keytype": "ed25519", "keyval": {"public": "80891271acffbe8e901dafce378134b06ec2665d4bc59de26b50354e62c8c9c6"}}

1
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/timestamp_key
generated
vendored
Normal file

@ -0,0 +1 @@
59dc296f796a2049c9205a473e05b332@@@@100000@@@@50cda771fbc7d9f2e7209e16565289d73ad43a0b3ee620a9553f911f5ba38ca1@@@@9546236c4cd50458127fc69278037f8c@@@@63170139aa164fa9cb8f03e4015bae2bdee27f05cf22e140d931cded959c51104912eb58df06d5bcc422c28e368e80c2fbaa20a0618841fe650c88b1fde72b7cef32e07aca0d963a293c6c6db7d8e0885c6a17450e8307fc92be36d80e5c168b0abdc214dfa9048b5c44a05f17899176a128c7b8307130e085530a07258ac5047b5f439245b0eceeb0e61bd96315b6386282d40b4977fcc04c6098b7390fb4d538c1f0650e62298b235e4a38840254d7033eff9dddce55c347659632c29cc49ed828d9eba5a8e5b4b75956006014a57c8fc5c7f54d232a8eb78bb49423dc54997e7768d07186b295a5be1518be6c76777e55fd2d227070fece6cf2530d7e40e42468da7cc7413fcdf4091ec2

1
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/timestamp_key.pub
generated
vendored
Normal file

@ -0,0 +1 @@
{"keytype": "ed25519", "keyval": {"public": "6107dea464cd596b87e53eda99f3f6245967a2b6f25a77c0f14bf2a49d79ae7e"}}
@@ -0,0 +1,35 @@
{
  "signatures": [
    {
      "keyid": "daa395038e78a453990a9fe23c538172aa57022e21652f381635b08611c80c55",
      "method": "ed25519",
      "sig": "0b021b054508971b8636ea6aceca0aaef0a9245c278e2b05aa5c20cd7ff055ba45473ab47e4c8a2475a257c432fff52c4ed551f1b7362461669d85c8f7a67e04"
    }
  ],
  "signed": {
    "_type": "Targets",
    "delegations": {
      "keys": {},
      "roles": []
    },
    "expires": "2030-01-01T00:00:00Z",
    "targets": {
      "/dir/file2.txt": {
        "hashes": {
          "sha256": "04e2f59431a9d219321baf7d21b8cc797d7615dc3e9515c782c49d2075658701"
        },
        "length": 9
      },
      "/file1.txt": {
        "custom": {
          "file_permissions": "644"
        },
        "hashes": {
          "sha256": "55ae75d991c770d8f3ef07cbfde124ffce9c420da5db6203afab700b27e10cf9"
        },
        "length": 9
      }
    },
    "version": 1
  }
}
@@ -0,0 +1,67 @@
{
  "signatures": [
    {
      "keyid": "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b",
      "method": "ed25519",
      "sig": "7c9f8155cd074a9666c76cde1a1f83a9b6d965e4e7f6afa95ece5bedf68ce5caea137099110e9ca16aba5b6fd4a554c0c42032a436c8ab37fd89e596144b230e"
    }
  ],
  "signed": {
    "_type": "Root",
    "consistent_snapshot": true,
    "expires": "2030-01-01T00:00:00Z",
    "keys": {
      "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1": {
        "keytype": "ed25519",
        "keyval": {
          "public": "6107dea464cd596b87e53eda99f3f6245967a2b6f25a77c0f14bf2a49d79ae7e"
        }
      },
      "bba8929df86dae6d9947d2c37eab9de1d0ee828967677fea13fd11a90967f6f5": {
        "keytype": "ed25519",
        "keyval": {
          "public": "a63faff1ac94ba5ceaf3bb6d73cf5552e75959cd562bef4f91884260720da313"
        }
      },
      "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b": {
        "keytype": "ed25519",
        "keyval": {
          "public": "4b9d6523d4fca15b88694b87f714c839f3b1db296884c6d235cfea617e5a3df0"
        }
      },
      "daa395038e78a453990a9fe23c538172aa57022e21652f381635b08611c80c55": {
        "keytype": "ed25519",
        "keyval": {
          "public": "80891271acffbe8e901dafce378134b06ec2665d4bc59de26b50354e62c8c9c6"
        }
      }
    },
    "roles": {
      "root": {
        "keyids": [
          "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b"
        ],
        "threshold": 1
      },
      "snapshot": {
        "keyids": [
          "bba8929df86dae6d9947d2c37eab9de1d0ee828967677fea13fd11a90967f6f5"
        ],
        "threshold": 1
      },
      "targets": {
        "keyids": [
          "daa395038e78a453990a9fe23c538172aa57022e21652f381635b08611c80c55"
        ],
        "threshold": 1
      },
      "timestamp": {
        "keyids": [
          "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1"
        ],
        "threshold": 1
      }
    },
    "version": 1
  }
}
Binary file not shown.
@@ -0,0 +1,28 @@
{
  "signatures": [
    {
      "keyid": "bba8929df86dae6d9947d2c37eab9de1d0ee828967677fea13fd11a90967f6f5",
      "method": "ed25519",
      "sig": "006082dcb87a9f1c2d312d28886cad12eec8f9dcd7b3028b7ba83424261381c8323df331be9e0e54c5aca830cd2637dc9bfcfe6b7a01f312a9786d0e54c0a600"
    }
  ],
  "signed": {
    "_type": "Snapshot",
    "expires": "2030-01-01T00:00:00Z",
    "meta": {
      "root.json": {
        "hashes": {
          "sha256": "a68b4847c117ec84f3787b9adabd607785bf30d3a9a4646661761bddc1a11e62"
        },
        "length": 1808
      },
      "targets.json": {
        "hashes": {
          "sha256": "32f37ab8ba96d5a3b2d10cc716ce408c860d82b4ba00e6a7a479df6bcfee2864"
        },
        "length": 790
      }
    },
    "version": 1
  }
}
@@ -0,0 +1,22 @@
{
  "signatures": [
    {
      "keyid": "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1",
      "method": "ed25519",
      "sig": "ca207b22bf344aac04c25a09cb5c78a416123ace5064599e13df925d63cce835477b37933c3d6a72af19bb547ce108df9154dcf9b3cb4df733fed5ac9d1cb60d"
    }
  ],
  "signed": {
    "_type": "Timestamp",
    "expires": "2030-01-01T00:00:00Z",
    "meta": {
      "snapshot.json": {
        "hashes": {
          "sha256": "d03b00f125367bcd2237c6a65c442f865b3aac0ba11864d64c0f69ced766e011"
        },
        "length": 682
      }
    },
    "version": 1
  }
}
67 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/root.json generated vendored Normal file
@@ -0,0 +1,67 @@
{
  "signatures": [
    {
      "keyid": "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b",
      "method": "ed25519",
      "sig": "7c9f8155cd074a9666c76cde1a1f83a9b6d965e4e7f6afa95ece5bedf68ce5caea137099110e9ca16aba5b6fd4a554c0c42032a436c8ab37fd89e596144b230e"
    }
  ],
  "signed": {
    "_type": "Root",
    "consistent_snapshot": true,
    "expires": "2030-01-01T00:00:00Z",
    "keys": {
      "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1": {
        "keytype": "ed25519",
        "keyval": {
          "public": "6107dea464cd596b87e53eda99f3f6245967a2b6f25a77c0f14bf2a49d79ae7e"
        }
      },
      "bba8929df86dae6d9947d2c37eab9de1d0ee828967677fea13fd11a90967f6f5": {
        "keytype": "ed25519",
        "keyval": {
          "public": "a63faff1ac94ba5ceaf3bb6d73cf5552e75959cd562bef4f91884260720da313"
        }
      },
      "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b": {
        "keytype": "ed25519",
        "keyval": {
          "public": "4b9d6523d4fca15b88694b87f714c839f3b1db296884c6d235cfea617e5a3df0"
        }
      },
      "daa395038e78a453990a9fe23c538172aa57022e21652f381635b08611c80c55": {
        "keytype": "ed25519",
        "keyval": {
          "public": "80891271acffbe8e901dafce378134b06ec2665d4bc59de26b50354e62c8c9c6"
        }
      }
    },
    "roles": {
      "root": {
        "keyids": [
          "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b"
        ],
        "threshold": 1
      },
      "snapshot": {
        "keyids": [
          "bba8929df86dae6d9947d2c37eab9de1d0ee828967677fea13fd11a90967f6f5"
        ],
        "threshold": 1
      },
      "targets": {
        "keyids": [
          "daa395038e78a453990a9fe23c538172aa57022e21652f381635b08611c80c55"
        ],
        "threshold": 1
      },
      "timestamp": {
        "keyids": [
          "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1"
        ],
        "threshold": 1
      }
    },
    "version": 1
  }
}
22 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/timestamp.json generated vendored Normal file
@@ -0,0 +1,22 @@
{
  "signatures": [
    {
      "keyid": "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1",
      "method": "ed25519",
      "sig": "ca207b22bf344aac04c25a09cb5c78a416123ace5064599e13df925d63cce835477b37933c3d6a72af19bb547ce108df9154dcf9b3cb4df733fed5ac9d1cb60d"
    }
  ],
  "signed": {
    "_type": "Timestamp",
    "expires": "2030-01-01T00:00:00Z",
    "meta": {
      "snapshot.json": {
        "hashes": {
          "sha256": "d03b00f125367bcd2237c6a65c442f865b3aac0ba11864d64c0f69ced766e011"
        },
        "length": 682
      }
    },
    "version": 1
  }
}
@@ -0,0 +1 @@
file1.txt
@@ -0,0 +1 @@
file2.txt
1 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/targets/dir/file2.txt generated vendored Normal file
@@ -0,0 +1 @@
file2.txt
1 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/targets/file1.txt generated vendored Normal file
@@ -0,0 +1 @@
file1.txt
1 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/root_key generated vendored Normal file
@@ -0,0 +1 @@
9d2587d3e9e964aa50595d39535c1cdc@@@@100000@@@@ea2bb63b57beb3a1802547f9b1e36742c9a59d651fdc44cdd60c1e1d9003a1ec@@@@731ffdec7d29dd53a17a8e35560fd117@@@@3af4ea3d8458d0652396f422e5fefbc08e7183526b1631eed4bec2f30c5c6847a17b25c3bb30482eb6df7448f8286abb6aad38d0bd9224679ffb5b9c2fc1ee4ed1b61167a04d2d3b31118cbd0a29737dcffe3a828b7f934ef7deecce72e5424da0eef199201ee1b351c7c91dc01a3f7aa6483ac8e58df245934681aa850ce16e8ad878fde8d20fcee3282bd01fb92050b361ed5d5bd1949232d075d43af87a0af0f37a5231c03f5a864da0f7e91dfe2a40a64b427405ad00c0cf344b1f712cecd005d31798b58b8e5f1708e2c30fd3000588cdfe5162733680046f3b73ce82827f6b10ee76971f790c407631858bfac860fc46c240b9a998724efc56fa1dfc9ef3fe5d344f168dc11f6e77fe
1 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/root_key.pub generated vendored Normal file
@@ -0,0 +1 @@
{"keytype": "ed25519", "keyval": {"public": "12230f2d671ebaa1df573b83c8d9eafe643bc75dd69475314e658862b708f654"}}
1 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/snapshot_key generated vendored Normal file
@@ -0,0 +1 @@
3201c015530f11df6f4805a825277245@@@@100000@@@@75faa825155045e47f461b3f3dbecea299e095e0b2eb326721859b6583f82648@@@@e045a7a8cfddd3b395f5085cea1c0f91@@@@b52c8e28543277aa692de6b04d88918cbb41157b7e6e7814bf3a1c9f8a82d93a0e708ef99e4183d8c768be1bc919af36e542b64511c2cd66a3ba4c8fd9715abb0257162ca35b0bfc4b03c71059f93d1a8e86c7d0dec84f5a05092f510c6cb65cee701a2f109b329c16adcb0cf4ea7d5e6b22ba794176882814e5a98b586c9dc9ed36c96929d3bc8b395af242229432691508a713aa961037f896548a3fa17213ec644553b94ca193f34ad7e281650f3ac105708f384ddf5167cd2a9eb38e42819e1a137d9a18369efa28dbc7b3b7abdc8e00243a8c8a67ae160efcec847ee4ff0f5c4f64e9528c40017ad100d33568bafdbca76e3d6023bf1ea1a2c7869dba40ed278ab10ed536ce6e6e5143
1 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/snapshot_key.pub generated vendored Normal file
@@ -0,0 +1 @@
{"keytype": "ed25519", "keyval": {"public": "17627894686328e24f3bca469cd5edc3a70b5cd58c76fdd7c7d582d3db3d6a8d"}}
1 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/targets_key generated vendored Normal file
@@ -0,0 +1 @@
c37f71aa8441503da8e0cab4e69c4144@@@@100000@@@@d78abf885fd898ae7cb4fe359413ccbd362eb9aacf22ffaab11aeda84aa6e1ad@@@@6780cab4d7843366c06b057243dc1f0f@@@@bde05980a1f3b3675db97549aff5c235dfc4898f3dd060ee09799801114a1a6f9450d95dbc0c57a2dbd52a6264d92e5e58d4aeba9a6a4dc6d85d8bf62ba8e9bc2338525892609eb66575af0c838613a2c007bb483a3d254fee14aac210a43b419cf77794bc28f5fabccc81d20cd726d1f8b5914ea9caef432728447c54ce76cac87b2d6df5fc2b2ea606d0d6a1d83226452c11a8ea3a0a677c0131225707609818b2b1f10556393ee797da690bc526fca990dd6c7940052d1832dceda3c1c59ba9b5700ca39b9425af77b9726f5531bc9d47a5d1381f740d96a8119f4469a012a73156e353f173a86ca508e953bd569bd7c28a8eb8d7b49b515ecaeac1f64511fe9b3a0bfb345c5bb2dd0602
1 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/targets_key.pub generated vendored Normal file
@@ -0,0 +1 @@
{"keytype": "ed25519", "keyval": {"public": "7e344543b8cb1cae3f930b8a50bf47104a1f6fcf044105643b32ba98916c59f6"}}
1 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/timestamp_key generated vendored Normal file
@@ -0,0 +1 @@
367ecd4b77a9c279871f62a31ea06982@@@@100000@@@@64c698fdc72778ef46ce4e93177e04c23e07b3dea315eb752003b9050964d94c@@@@5c67079066f6d258fa48a7b3df3fd0e4@@@@28cb044ce969cc57a51f5567f0e560525ada56cb44bbbc9858f998d289639e0103bc522bc304b2187647d3eef00d05da209f8e220e586504eaf6402faecbe1a584cd7c0f9369f9464ea4f5ca8fc68561c368f10b4eb70d841f76c730c9c8051ae56fbb90c1acca27d73f8e7c7374bc07eb1676cdb0755c015b29faf9d1a0188df9b782b8e15dba2ff2be987aa7dffd7d8f821183180076960a982e537e10cb13a613b8ccf9baa6aab3de2634af82ad9ee6a7b908483420d3900caea1bfdddde0eac89d5b4583352828c335a8b849d23ab53cc7ca7f43220a72f0e7d9df8bb07294f915ad294cdbe4936735515586ab788160d1c4d7d70e941efdc7a7ac5524e790d7c2606f001e2619ee0750
1 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/timestamp_key.pub generated vendored Normal file
@@ -0,0 +1 @@
{"keytype": "ed25519", "keyval": {"public": "776f245fe7c5626ca0dc3c24644f7345a35741bab9a25031a7042bfcb3c4356b"}}
67 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/metadata/root.json generated vendored Normal file
@@ -0,0 +1,67 @@
{
  "signatures": [
    {
      "keyid": "7bd4a38edbb838e77fd65857ce1b8da742a3ec143e285b1029e1091ab10a52fa",
      "method": "ed25519",
      "sig": "dd0d0dbb3be0c259562990582919d480ba41dd752c0c529c2989ed09fd09c99ddfabab21362d6f2b717ca736cd8827180b68dcf85715e80a6e1345d621d6dd08"
    }
  ],
  "signed": {
    "_type": "Root",
    "consistent_snapshot": false,
    "expires": "2030-01-01T00:00:00Z",
    "keys": {
      "253b33da0194b7a61f47d820ec069c26e160be74a19e9545ba6615fc9a28eb62": {
        "keytype": "ed25519",
        "keyval": {
          "public": "776f245fe7c5626ca0dc3c24644f7345a35741bab9a25031a7042bfcb3c4356b"
        }
      },
      "7bd4a38edbb838e77fd65857ce1b8da742a3ec143e285b1029e1091ab10a52fa": {
        "keytype": "ed25519",
        "keyval": {
          "public": "12230f2d671ebaa1df573b83c8d9eafe643bc75dd69475314e658862b708f654"
        }
      },
      "9117d84fd79e2f7db3044a447e3399dd58600af8fcc03369c86641b89a797906": {
        "keytype": "ed25519",
        "keyval": {
          "public": "17627894686328e24f3bca469cd5edc3a70b5cd58c76fdd7c7d582d3db3d6a8d"
        }
      },
      "ecc259a2126ff1c1f175a137fada7e2d5bb93fdafeb6b0577b3b5c68af184ff8": {
        "keytype": "ed25519",
        "keyval": {
          "public": "7e344543b8cb1cae3f930b8a50bf47104a1f6fcf044105643b32ba98916c59f6"
        }
      }
    },
    "roles": {
      "root": {
        "keyids": [
          "7bd4a38edbb838e77fd65857ce1b8da742a3ec143e285b1029e1091ab10a52fa"
        ],
        "threshold": 1
      },
      "snapshot": {
        "keyids": [
          "9117d84fd79e2f7db3044a447e3399dd58600af8fcc03369c86641b89a797906"
        ],
        "threshold": 1
      },
      "targets": {
        "keyids": [
          "ecc259a2126ff1c1f175a137fada7e2d5bb93fdafeb6b0577b3b5c68af184ff8"
        ],
        "threshold": 1
      },
      "timestamp": {
        "keyids": [
          "253b33da0194b7a61f47d820ec069c26e160be74a19e9545ba6615fc9a28eb62"
        ],
        "threshold": 1
      }
    },
    "version": 1
  }
}
34 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/metadata/snapshot.json generated vendored Normal file
@@ -0,0 +1,34 @@
{
  "signatures": [
    {
      "keyid": "9117d84fd79e2f7db3044a447e3399dd58600af8fcc03369c86641b89a797906",
      "method": "ed25519",
      "sig": "ffa7e6ea81f87ec7b3c65b7501a25ecc27bd3b6c400b54b2da49a7a2992f5b0faac3126236b889dd2462a68d1dba6c82622480bb06dd52a429b8ce061fb86b04"
    }
  ],
  "signed": {
    "_type": "Snapshot",
    "expires": "2030-01-01T00:00:00Z",
    "meta": {
      "root.json": {
        "hashes": {
          "sha256": "0afc32c79ab3dbcd29477ef1749f859349d2f78bf6305f012bc7c6ca93143300"
        },
        "length": 1809
      },
      "targets.json": {
        "hashes": {
          "sha256": "b9a821a57d4d61a23ba70e1c7d1681497aaf31c86c3eb9dd9cda023a8057528b"
        },
        "length": 790
      },
      "targets.json.gz": {
        "hashes": {
          "sha256": "8964b7a9166a437d0b9db4b07f020a1afeab266d41af8d3599aea4b03913e092"
        },
        "length": 460
      }
    },
    "version": 1
  }
}
35 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/metadata/targets.json generated vendored Normal file
@@ -0,0 +1,35 @@
{
  "signatures": [
    {
      "keyid": "ecc259a2126ff1c1f175a137fada7e2d5bb93fdafeb6b0577b3b5c68af184ff8",
      "method": "ed25519",
      "sig": "92ed3e32c061c87ddf41c9ee606fa88a320513458b634ac259c6f2383bffe2e983d53c00ab78991c6ed965f21284c24246907e79d96100d955087a517761c10d"
    }
  ],
  "signed": {
    "_type": "Targets",
    "delegations": {
      "keys": {},
      "roles": []
    },
    "expires": "2030-01-01T00:00:00Z",
    "targets": {
      "/dir/file2.txt": {
        "hashes": {
          "sha256": "04e2f59431a9d219321baf7d21b8cc797d7615dc3e9515c782c49d2075658701"
        },
        "length": 9
      },
      "/file1.txt": {
        "custom": {
          "file_permissions": "644"
        },
        "hashes": {
          "sha256": "55ae75d991c770d8f3ef07cbfde124ffce9c420da5db6203afab700b27e10cf9"
        },
        "length": 9
      }
    },
    "version": 1
  }
}
BIN Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/metadata/targets.json.gz generated vendored Normal file
Binary file not shown.
22 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/metadata/timestamp.json generated vendored Normal file
@@ -0,0 +1,22 @@
{
  "signatures": [
    {
      "keyid": "253b33da0194b7a61f47d820ec069c26e160be74a19e9545ba6615fc9a28eb62",
      "method": "ed25519",
      "sig": "b5e3aad4caad2aef5b6ffbe4547e181a8c3c73382271ded933a6eed5754ff09890e826460e90d0032371def25a7c16ede4622758b91c87105f20f83864b4b601"
    }
  ],
  "signed": {
    "_type": "Timestamp",
    "expires": "2030-01-01T00:00:00Z",
    "meta": {
      "snapshot.json": {
        "hashes": {
          "sha256": "f56dd748c9c0a7dd3c81f575795d72d788b9743687a9fcc1c0e178296ebd2800"
        },
        "length": 835
      }
    },
    "version": 1
  }
}
1 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/targets/dir/file2.txt generated vendored Normal file
@@ -0,0 +1 @@
file2.txt
1 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/targets/file1.txt generated vendored Normal file
@@ -0,0 +1 @@
file1.txt
82 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tools/main.go generated vendored Normal file
@@ -0,0 +1,82 @@
package main

import (
	"fmt"
	"log"

	"github.com/endophage/go-tuf"
	"github.com/endophage/go-tuf/store"
	"github.com/endophage/go-tuf/util"
	"github.com/flynn/go-docopt"
)

func main() {
	log.SetFlags(0)

	usage := `usage: tuftools [-h|--help] <command> [<args>...]

Options:
  -h, --help

Commands:
  help         Show usage for a specific command
  meta         Generate metadata from the given file path

See "tuf help <command>" for more information on a specific command
`

	args, _ := docopt.Parse(usage, nil, true, "", true)
	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `tuf help`
			fmt.Println(usage)
			return
		} else { // `tuf help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	if err := runCommand(cmd, cmdArgs); err != nil {
		log.Fatalln("ERROR:", err)
	}
}

type cmdFunc func(*docopt.Args, *tuf.Repo) error

type command struct {
	usage string
	f     cmdFunc
}

var commands = make(map[string]*command)

func register(name string, f cmdFunc, usage string) {
	commands[name] = &command{usage: usage, f: f}
}

func runCommand(name string, args []string) error {
	argv := make([]string, 1, 1+len(args))
	argv[0] = name
	argv = append(argv, args...)

	cmd, ok := commands[name]
	if !ok {
		return fmt.Errorf("%s is not a tuf command. See 'tuf help'", name)
	}

	parsedArgs, err := docopt.Parse(cmd.usage, argv, true, "", true)
	if err != nil {
		return err
	}

	db := util.GetSqliteDB()
	local := store.DBStore(db, "")
	repo, err := tuf.NewRepo(local)
	if err != nil {
		return err
	}
	return cmd.f(parsedArgs, repo)
}
46 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tools/meta.go generated vendored Normal file
@@ -0,0 +1,46 @@
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"

	"github.com/endophage/go-tuf"
	"github.com/endophage/go-tuf/util"
	"github.com/flynn/go-docopt"
)

func init() {
	register("meta", cmdMeta, `
usage: tuftools meta [<path>...]

Generate sample metadata for file(s) given by path.

`)
}

func cmdMeta(args *docopt.Args, repo *tuf.Repo) error {
	paths := args.All["<path>"].([]string)
	for _, file := range paths {
		// Check the open and metadata-generation errors rather than
		// discarding them, so a missing file fails loudly.
		reader, err := os.Open(file)
		if err != nil {
			return err
		}
		meta, err := util.GenerateFileMeta(reader, "sha256")
		reader.Close()
		if err != nil {
			return err
		}
		jsonBytes, err := json.Marshal(meta)
		if err != nil {
			return err
		}
		filename := fmt.Sprintf("%s.meta.json", file)
		err = ioutil.WriteFile(filename, jsonBytes, 0644)
		if err != nil {
			return err
		}
	}
	return nil
}
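For reference, a minimal console sketch of the `meta` tool above; the input path and the printed metadata shape are illustrative, only the `<file>.meta.json` naming comes from `cmdMeta`:

```
$ tuftools meta targets/file1.txt
$ cat targets/file1.txt.meta.json
{"hashes":{"sha256":"..."},"length":9}
```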
48 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/README.md generated vendored Normal file
@@ -0,0 +1,48 @@
# go-tuf client CLI

## Install

```
go get github.com/flynn/go-tuf/cmd/tuf-client
```

## Usage

The CLI provides three commands:

* `tuf-client init` - initialize a local file store using root keys (e.g. from
  the output of `tuf root-keys`)
* `tuf-client list` - list available targets and their file sizes
* `tuf-client get` - get a target file and write to STDOUT

All commands require the base URL of the TUF repository as the first non-flag
argument, and accept an optional `--store` flag which is the path to the local
storage.

Run `tuf-client help` from the command line to get more detailed usage
information.

## Examples

```
# init
$ tuf-client init https://example.com/path/to/repo

# init with a custom store path
$ tuf-client init --store /tmp/tuf.db https://example.com/path/to/repo

# list available targets
$ tuf-client list https://example.com/path/to/repo
PATH      SIZE
/foo.txt  1.6KB
/bar.txt  336B
/baz.txt  1.5KB

# get a target
$ tuf-client get https://example.com/path/to/repo /foo.txt
the contents of foo.txt

# the prefixed / is optional
$ tuf-client get https://example.com/path/to/repo foo.txt
the contents of foo.txt
```
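Since `cmdInit` (in init.go below) falls back to STDIN when no root-keys file is given, the repository's root keys can be piped straight in from the `tuf` tool; a sketch, with the URL as a placeholder:

```
$ tuf root-keys | tuf-client init https://example.com/path/to/repo
```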
52 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/get.go generated vendored Normal file
@@ -0,0 +1,52 @@
package main

import (
	"io"
	"io/ioutil"
	"os"

	"github.com/flynn/go-docopt"
	tuf "github.com/flynn/go-tuf/client"
	"github.com/flynn/go-tuf/util"
)

func init() {
	register("get", cmdGet, `
usage: tuf-client get [-s|--store=<path>] <url> <target>

Options:
  -s <path>    The path to the local file store [default: tuf.db]

Get a target from the repository.
`)
}

type tmpFile struct {
	*os.File
}

func (t *tmpFile) Delete() error {
	t.Close()
	return os.Remove(t.Name())
}

func cmdGet(args *docopt.Args, client *tuf.Client) error {
	if _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) {
		return err
	}
	target := util.NormalizeTarget(args.String["<target>"])
	file, err := ioutil.TempFile("", "go-tuf")
	if err != nil {
		return err
	}
	tmp := tmpFile{file}
	defer tmp.Delete() // deferred before Download so the temp file is removed even if the download fails
	if err := client.Download(target, &tmp); err != nil {
		return err
	}
	if _, err := tmp.Seek(0, os.SEEK_SET); err != nil {
		return err
	}
	_, err = io.Copy(os.Stdout, file)
	return err
}
41 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/init.go generated vendored Normal file
@@ -0,0 +1,41 @@
package main

import (
	"encoding/json"
	"io"
	"os"

	"github.com/flynn/go-docopt"
	tuf "github.com/flynn/go-tuf/client"
	"github.com/flynn/go-tuf/data"
)

func init() {
	register("init", cmdInit, `
usage: tuf-client init [-s|--store=<path>] <url> [<root-keys-file>]

Options:
  -s <path>    The path to the local file store [default: tuf.db]

Initialize the local file store with root keys.
`)
}

func cmdInit(args *docopt.Args, client *tuf.Client) error {
	file := args.String["<root-keys-file>"]
	var in io.Reader
	if file == "" || file == "-" {
		in = os.Stdin
	} else {
		var err error
		in, err = os.Open(file)
		if err != nil {
			return err
		}
	}
	var rootKeys []*data.Key
	if err := json.NewDecoder(in).Decode(&rootKeys); err != nil {
		return err
	}
	return client.Init(rootKeys, len(rootKeys))
}
39 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/list.go generated vendored Normal file
@@ -0,0 +1,39 @@
package main

import (
	"fmt"
	"os"
	"text/tabwriter"

	"github.com/dustin/go-humanize"
	"github.com/flynn/go-docopt"
	tuf "github.com/flynn/go-tuf/client"
)

func init() {
	register("list", cmdList, `
usage: tuf-client list [-s|--store=<path>] <url>

Options:
  -s <path>    The path to the local file store [default: tuf.db]

List available target files.
`)
}

func cmdList(args *docopt.Args, client *tuf.Client) error {
	if _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) {
		return err
	}
	targets, err := client.Targets()
	if err != nil {
		return err
	}
	w := tabwriter.NewWriter(os.Stdout, 1, 2, 2, ' ', 0)
	defer w.Flush()
	fmt.Fprintln(w, "PATH\tSIZE")
	for path, meta := range targets {
		fmt.Fprintf(w, "%s\t%s\n", path, humanize.Bytes(uint64(meta.Length)))
	}
	return nil
}
96 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/main.go generated vendored Normal file
@@ -0,0 +1,96 @@
package main

import (
	"fmt"
	"log"

	"github.com/flynn/go-docopt"
	tuf "github.com/flynn/go-tuf/client"
)

func main() {
	log.SetFlags(0)

	usage := `usage: tuf-client [-h|--help] <command> [<args>...]

Options:
  -h, --help

Commands:
  help         Show usage for a specific command
  init         Initialize with root keys
  list         List available target files
  get          Get a target file

See "tuf-client help <command>" for more information on a specific command.
`

	args, _ := docopt.Parse(usage, nil, true, "", true)
	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `tuf-client help`
			fmt.Println(usage)
			return
		} else { // `tuf-client help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	if err := runCommand(cmd, cmdArgs); err != nil {
		log.Fatalln("ERROR:", err)
	}
}

type cmdFunc func(*docopt.Args, *tuf.Client) error

type command struct {
	usage string
	f     cmdFunc
}

var commands = make(map[string]*command)

func register(name string, f cmdFunc, usage string) {
	commands[name] = &command{usage: usage, f: f}
}

func runCommand(name string, args []string) error {
	argv := make([]string, 1, 1+len(args))
	argv[0] = name
	argv = append(argv, args...)

	cmd, ok := commands[name]
	if !ok {
		return fmt.Errorf("%s is not a tuf-client command. See 'tuf-client help'", name)
	}

	parsedArgs, err := docopt.Parse(cmd.usage, argv, true, "", true)
	if err != nil {
		return err
	}

	client, err := tufClient(parsedArgs)
	if err != nil {
		return err
	}
	return cmd.f(parsedArgs, client)
}

func tufClient(args *docopt.Args) (*tuf.Client, error) {
	store, ok := args.String["--store"]
	if !ok {
		store = args.String["-s"]
	}
	local, err := tuf.FileLocalStore(store)
	if err != nil {
		return nil, err
	}
	remote, err := tuf.HTTPRemoteStore(args.String["<url>"], nil)
	if err != nil {
		return nil, err
	}
	return tuf.NewClient(local, remote), nil
}
@@ -0,0 +1,36 @@
package main

import (
	"encoding/json"

	"github.com/flynn/go-docopt"
	"github.com/flynn/go-tuf"
)

func init() {
	register("add", cmdAdd, `
usage: tuf add [--expires=<days>] [--custom=<data>] [<path>...]

Add target file(s).

Options:
  --expires=<days>   Set the targets manifest to expire <days> days from now.
  --custom=<data>    Set custom JSON data for the target(s).
`)
}

func cmdAdd(args *docopt.Args, repo *tuf.Repo) error {
	var custom json.RawMessage
	if c := args.String["--custom"]; c != "" {
		custom = json.RawMessage(c)
	}
	paths := args.All["<path>"].([]string)
	if arg := args.String["--expires"]; arg != "" {
		expires, err := parseExpires(arg)
		if err != nil {
			return err
		}
		return repo.AddTargetsWithExpires(paths, custom, expires)
	}
	return repo.AddTargets(paths, custom)
}
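A hypothetical `add` invocation matching the `custom` data seen in the targets metadata earlier in this diff:

```
$ tuf add --custom='{"file_permissions": "644"}' file1.txt
```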
18 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/clean.go generated vendored Normal file
@@ -0,0 +1,18 @@
package main

import (
	"github.com/flynn/go-docopt"
	"github.com/flynn/go-tuf"
)

func init() {
	register("clean", cmdClean, `
usage: tuf clean

Remove all staged manifests.
`)
}

func cmdClean(args *docopt.Args, repo *tuf.Repo) error {
	return repo.Clean()
}
18 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/commit.go generated vendored Normal file
@@ -0,0 +1,18 @@
package main

import (
	"github.com/flynn/go-docopt"
	"github.com/flynn/go-tuf"
)

func init() {
	register("commit", cmdCommit, `
usage: tuf commit

Commit staged files to the repository.
`)
}

func cmdCommit(args *docopt.Args, repo *tuf.Repo) error {
	return repo.Commit()
}
43 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/gen_key.go generated vendored Normal file
@@ -0,0 +1,43 @@
package main

import (
	"fmt"

	"github.com/flynn/go-docopt"
	"github.com/flynn/go-tuf"
)

func init() {
	register("gen-key", cmdGenKey, `
usage: tuf gen-key [--expires=<days>] <role>

Generate a new signing key for the given role.

The key will be serialized to JSON and written to the "keys" directory with
filename pattern "ROLE-KEYID.json". The root manifest will also be staged
with the addition of the key's ID to the role's list of key IDs.

Options:
  --expires=<days>   Set the root manifest to expire <days> days from now.
`)
}

func cmdGenKey(args *docopt.Args, repo *tuf.Repo) error {
	role := args.String["<role>"]
	var id string
	var err error
	if arg := args.String["--expires"]; arg != "" {
		expires, perr := parseExpires(arg) // distinct name so GenKeyWithExpires assigns to the outer err instead of a shadowed one
		if perr != nil {
			return perr
		}
		id, err = repo.GenKeyWithExpires(role, expires)
	} else {
		id, err = repo.GenKey(role)
	}
	if err != nil {
		return err
	}
	fmt.Println("Generated", role, "key with ID", id)
	return nil
}
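A console sketch of `gen-key` above; the role and expiry are examples, and the printed ID is whatever `repo.GenKey*` returns:

```
$ tuf gen-key --expires=365 root
Generated root key with ID <keyid>
```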
@@ -0,0 +1,23 @@
package main

import (
	"github.com/flynn/go-docopt"
	"github.com/flynn/go-tuf"
)

func init() {
	register("init", cmdInit, `
usage: tuf init [--consistent-snapshot=false]

Initialize a new repository.

This is only required if the repository should not generate consistent
snapshots (i.e. by passing "--consistent-snapshot=false"). If consistent
snapshots should be generated, the repository will be implicitly
initialized to do so when generating keys.
`)
}

func cmdInit(args *docopt.Args, repo *tuf.Repo) error {
	return repo.Init(args.String["--consistent-snapshot"] != "false")
}
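As the usage text above notes, running `init` explicitly only matters when opting out of consistent snapshots; a sketch:

```
$ tuf init --consistent-snapshot=false
```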
163 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/main.go generated vendored Normal file
@@ -0,0 +1,163 @@
package main

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"log"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/docker/docker/pkg/term"
	"github.com/flynn/go-docopt"
	"github.com/flynn/go-tuf"
	"github.com/flynn/go-tuf/util"
)

func main() {
	log.SetFlags(0)

	usage := `usage: tuf [-h|--help] [-d|--dir=<dir>] [--insecure-plaintext] <command> [<args>...]

Options:
  -h, --help
  -d <dir>              The path to the repository (defaults to the current working directory)
  --insecure-plaintext  Don't encrypt signing keys

Commands:
  help         Show usage for a specific command
  gen-key      Generate a new signing key for a specific manifest
  revoke-key   Revoke a signing key
  add          Add target file(s)
  remove       Remove a target file
  snapshot     Update the snapshot manifest
  timestamp    Update the timestamp manifest
  sign         Sign a manifest
  commit       Commit staged files to the repository
  regenerate   Recreate the targets manifest
  clean        Remove all staged manifests
  root-keys    Output a JSON serialized array of root keys to STDOUT

See "tuf help <command>" for more information on a specific command
`

	args, _ := docopt.Parse(usage, nil, true, "", true)
	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `tuf help`
			fmt.Println(usage)
			return
		} else { // `tuf help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	dir, ok := args.String["-d"]
	if !ok {
		dir = args.String["--dir"]
	}
	if dir == "" {
		var err error
		dir, err = os.Getwd()
		if err != nil {
			log.Fatal(err)
		}
	}

	if err := runCommand(cmd, cmdArgs, dir, args.Bool["--insecure-plaintext"]); err != nil {
		log.Fatalln("ERROR:", err)
	}
}

type cmdFunc func(*docopt.Args, *tuf.Repo) error

type command struct {
	usage string
	f     cmdFunc
}

var commands = make(map[string]*command)

func register(name string, f cmdFunc, usage string) {
	commands[name] = &command{usage: usage, f: f}
}

func runCommand(name string, args []string, dir string, insecure bool) error {
	argv := make([]string, 1, 1+len(args))
	argv[0] = name
	argv = append(argv, args...)

	cmd, ok := commands[name]
	if !ok {
		return fmt.Errorf("%s is not a tuf command. See 'tuf help'", name)
	}

	parsedArgs, err := docopt.Parse(cmd.usage, argv, true, "", true)
	if err != nil {
		return err
	}

	var p util.PassphraseFunc
	if !insecure {
		p = getPassphrase
	}
	repo, err := tuf.NewRepo(tuf.FileSystemStore(dir, p))
	if err != nil {
		return err
	}
	return cmd.f(parsedArgs, repo)
}

func parseExpires(arg string) (time.Time, error) {
	days, err := strconv.Atoi(arg)
	if err != nil {
		return time.Time{}, fmt.Errorf("failed to parse --expires arg: %s", err)
	}
	return time.Now().AddDate(0, 0, days).UTC(), nil
}

func getPassphrase(role string, confirm bool) ([]byte, error) {
	if pass := os.Getenv(fmt.Sprintf("TUF_%s_PASSPHRASE", strings.ToUpper(role))); pass != "" {
		return []byte(pass), nil
	}

	state, err := term.SaveState(0)
	if err != nil {
		return nil, err
	}
	term.DisableEcho(0, state)
	defer term.RestoreTerminal(0, state)

	stdin := bufio.NewReader(os.Stdin)

	fmt.Printf("Enter %s keys passphrase: ", role)
	passphrase, err := stdin.ReadBytes('\n')
	fmt.Println()
	if err != nil {
		return nil, err
	}
	passphrase = passphrase[0 : len(passphrase)-1]

	if !confirm {
		return passphrase, nil
	}

	fmt.Printf("Repeat %s keys passphrase: ", role)
	confirmation, err := stdin.ReadBytes('\n')
	fmt.Println()
	if err != nil {
		return nil, err
	}
	confirmation = confirmation[0 : len(confirmation)-1]

	if !bytes.Equal(passphrase, confirmation) {
		return nil, errors.New("The entered passphrases do not match")
	}
	return passphrase, nil
}
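`getPassphrase` above checks a `TUF_<ROLE>_PASSPHRASE` environment variable before prompting, so signing can run non-interactively; a sketch with placeholder values:

```
$ TUF_SNAPSHOT_PASSPHRASE=example tuf snapshot
$ TUF_TIMESTAMP_PASSPHRASE=example tuf timestamp
```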
Some files were not shown because too many files have changed in this diff.