mirror of https://github.com/docker/docs.git

rewriting imports to 'gotuf', adding config to set log level, making
restart/shutdown clean up connections after a timeout, updating godeps

parent 953d79888a
commit cc0782d3d2
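The commit message mentions a new config option for the log level. The details of that option are not shown in this excerpt; the following is only a rough sketch of how such a setting is typically wired up with the Sirupsen/logrus dependency pinned below (the flag name and default are assumptions, not taken from the commit):

package main

import (
    "flag"

    "github.com/Sirupsen/logrus"
)

func main() {
    // Hypothetical flag; the commit's actual config key is not shown in this excerpt.
    level := flag.String("loglevel", "info", "logging level (debug, info, warning, error)")
    flag.Parse()

    lvl, err := logrus.ParseLevel(*level)
    if err != nil {
        logrus.Fatalf("invalid log level %q: %v", *level, err)
    }
    logrus.SetLevel(lvl)
    logrus.Debug("debug logging enabled")
}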
@@ -6,9 +6,13 @@
    ],
    "Deps": [
        {
            "ImportPath": "code.google.com/p/gosqlite/sqlite3",
            "Comment": "null-16",
            "Rev": "74691fb6f83716190870cde1b658538dd4b18eb0"
            "ImportPath": "code.google.com/p/go-uuid/uuid",
            "Comment": "null-15",
            "Rev": "35bc42037350f0078e3c974c6ea690f1926603ab"
        },
        {
            "ImportPath": "github.com/Sirupsen/logrus",
            "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d"
        },
        {
            "ImportPath": "github.com/agl/ed25519",
@@ -18,6 +22,21 @@
            "ImportPath": "github.com/bradfitz/http2",
            "Rev": "97124afb234048ae0c91b8883c59fcd890bf8145"
        },
        {
            "ImportPath": "github.com/docker/distribution/context",
            "Comment": "v2.0.0-228-gb230183",
            "Rev": "b230183b0fe8b8ed3c9ae2898c47c8c8618dc80f"
        },
        {
            "ImportPath": "github.com/docker/distribution/registry/auth",
            "Comment": "v2.0.0-228-gb230183",
            "Rev": "b230183b0fe8b8ed3c9ae2898c47c8c8618dc80f"
        },
        {
            "ImportPath": "github.com/docker/distribution/uuid",
            "Comment": "v2.0.0-228-gb230183",
            "Rev": "b230183b0fe8b8ed3c9ae2898c47c8c8618dc80f"
        },
        {
            "ImportPath": "github.com/docker/libtrust",
            "Rev": "fa567046d9b14f6aa788882a950d69651d230b21"
@@ -27,8 +46,20 @@
            "Rev": "61b53384b24bfa83e8e0a5f11f28ae83457fd80c"
        },
        {
            "ImportPath": "github.com/endophage/go-tuf",
            "Rev": "73f8528774f93cfe707851ab2ff7503a8756ff4b"
            "ImportPath": "github.com/endophage/gotuf/data",
            "Rev": "930a4e1cc71f866a412aea60c960ee4345f0c76a"
        },
        {
            "ImportPath": "github.com/endophage/gotuf/errors",
            "Rev": "930a4e1cc71f866a412aea60c960ee4345f0c76a"
        },
        {
            "ImportPath": "github.com/endophage/gotuf/keys",
            "Rev": "930a4e1cc71f866a412aea60c960ee4345f0c76a"
        },
        {
            "ImportPath": "github.com/endophage/gotuf/signed",
            "Rev": "930a4e1cc71f866a412aea60c960ee4345f0c76a"
        },
        {
            "ImportPath": "github.com/golang/protobuf/proto",
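The hunk above replaces the single github.com/endophage/go-tuf dependency with the per-package gotuf paths (data, errors, keys, signed). In the source files this amounts to a pure import-path rewrite, along these lines (an illustration, not a file from the commit; blank imports keep the snippet compilable):

package example

import (
    _ "github.com/endophage/gotuf/data"   // was github.com/endophage/go-tuf/data
    _ "github.com/endophage/gotuf/keys"   // was github.com/endophage/go-tuf/keys
    _ "github.com/endophage/gotuf/signed" // was github.com/endophage/go-tuf/signed
)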
@@ -42,34 +73,10 @@
            "ImportPath": "github.com/gorilla/mux",
            "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf"
        },
        {
            "ImportPath": "github.com/Sirupsen/logrus",
            "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d"
        },
        {
            "ImportPath": "github.com/tent/canonical-json-go",
            "Rev": "96e4ba3a7613a1216cbd1badca4efe382adea337"
        },
        {
            "ImportPath": "golang.org/x/crypto/nacl/secretbox",
            "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
        },
        {
            "ImportPath": "golang.org/x/crypto/pbkdf2",
            "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
        },
        {
            "ImportPath": "golang.org/x/crypto/poly1305",
            "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
        },
        {
            "ImportPath": "golang.org/x/crypto/salsa20/salsa",
            "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
        },
        {
            "ImportPath": "golang.org/x/crypto/scrypt",
            "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
        },
        {
            "ImportPath": "golang.org/x/net/context",
            "Rev": "1dfe7915deaf3f80b962c163b918868d8a6d8974"
@@ -1,6 +1,4 @@
Flynn is a trademark of Prime Directive, Inc.

Copyright (c) 2014-2015 Prime Directive, Inc. All rights reserved.
Copyright (c) 2009,2014 Google Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -12,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.

* Neither the name of Prime Directive, Inc. nor the names of its
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
@@ -0,0 +1,84 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
    "encoding/binary"
    "fmt"
    "os"
)

// A Domain represents a Version 2 domain.
type Domain byte

// Domain constants for DCE Security (Version 2) UUIDs.
const (
    Person = Domain(0)
    Group  = Domain(1)
    Org    = Domain(2)
)

// NewDCESecurity returns a DCE Security (Version 2) UUID.
//
// The domain should be one of Person, Group or Org.
// On a POSIX system the id should be the user's UID for the Person
// domain and the user's GID for the Group. The meaning of id for
// the domain Org or on non-POSIX systems is site defined.
//
// For a given domain/id pair the same token may be returned for up to
// 7 minutes and 10 seconds.
func NewDCESecurity(domain Domain, id uint32) UUID {
    uuid := NewUUID()
    if uuid != nil {
        uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
        uuid[9] = byte(domain)
        binary.BigEndian.PutUint32(uuid[0:], id)
    }
    return uuid
}

// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
//    NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() UUID {
    return NewDCESecurity(Person, uint32(os.Getuid()))
}

// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
//    NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() UUID {
    return NewDCESecurity(Group, uint32(os.Getgid()))
}

// Domain returns the domain for a Version 2 UUID or false.
func (uuid UUID) Domain() (Domain, bool) {
    if v, _ := uuid.Version(); v != 2 {
        return 0, false
    }
    return Domain(uuid[9]), true
}

// Id returns the id for a Version 2 UUID or false.
func (uuid UUID) Id() (uint32, bool) {
    if v, _ := uuid.Version(); v != 2 {
        return 0, false
    }
    return binary.BigEndian.Uint32(uuid[0:4]), true
}

func (d Domain) String() string {
    switch d {
    case Person:
        return "Person"
    case Group:
        return "Group"
    case Org:
        return "Org"
    }
    return fmt.Sprintf("Domain%d", int(d))
}
@@ -0,0 +1,8 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// The uuid package generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
package uuid
@@ -0,0 +1,53 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
    "crypto/md5"
    "crypto/sha1"
    "hash"
)

// Well known Name Space IDs and UUIDs
var (
    NameSpace_DNS  = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
    NameSpace_URL  = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
    NameSpace_OID  = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
    NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
    NIL            = Parse("00000000-0000-0000-0000-000000000000")
)

// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 bytes in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
    h.Reset()
    h.Write(space)
    h.Write([]byte(data))
    s := h.Sum(nil)
    uuid := make([]byte, 16)
    copy(uuid, s)
    uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
    uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
    return uuid
}

// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data.
//
//    NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
    return NewHash(md5.New(), space, data, 3)
}

// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data.
//
//    NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
    return NewHash(sha1.New(), space, data, 5)
}
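The name-based constructors above are deterministic: the same namespace and name always yield the same UUID. A small usage sketch (the expected strings come from this package's own tests further down):

package main

import (
    "fmt"

    "code.google.com/p/go-uuid/uuid"
)

func main() {
    fmt.Println(uuid.NewMD5(uuid.NameSpace_DNS, []byte("python.org")))
    // 6fa459ea-ee8a-3ca4-894e-db77e160355e
    fmt.Println(uuid.NewSHA1(uuid.NameSpace_DNS, []byte("python.org")))
    // 886313e1-3b8a-5372-9b90-0c9aee199e5d
}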
@@ -0,0 +1,30 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import "errors"

func (u UUID) MarshalJSON() ([]byte, error) {
    if len(u) == 0 {
        return []byte(`""`), nil
    }
    return []byte(`"` + u.String() + `"`), nil
}

func (u *UUID) UnmarshalJSON(data []byte) error {
    if len(data) == 0 || string(data) == `""` {
        return nil
    }
    if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
        return errors.New("invalid UUID format")
    }
    data = data[1 : len(data)-1]
    uu := Parse(string(data))
    if uu == nil {
        return errors.New("invalid UUID format")
    }
    *u = uu
    return nil
}
@@ -0,0 +1,32 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
    "encoding/json"
    "reflect"
    "testing"
)

var testUUID = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")

func TestJSON(t *testing.T) {
    type S struct {
        ID1 UUID
        ID2 UUID
    }
    s1 := S{ID1: testUUID}
    data, err := json.Marshal(&s1)
    if err != nil {
        t.Fatal(err)
    }
    var s2 S
    if err := json.Unmarshal(data, &s2); err != nil {
        t.Fatal(err)
    }
    if !reflect.DeepEqual(&s1, &s2) {
        t.Errorf("got %#v, want %#v", s2, s1)
    }
}
@@ -0,0 +1,101 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import "net"

var (
    interfaces []net.Interface // cached list of interfaces
    ifname     string          // name of interface being used
    nodeID     []byte          // hardware for version 1 UUIDs
)

// NodeInterface returns the name of the interface from which the NodeID was
// derived. The interface "user" is returned if the NodeID was set by
// SetNodeID.
func NodeInterface() string {
    return ifname
}

// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
// If name is "" then the first usable interface found will be used or a random
// Node ID will be generated. If a named interface cannot be found then false
// is returned.
//
// SetNodeInterface never fails when name is "".
func SetNodeInterface(name string) bool {
    if interfaces == nil {
        var err error
        interfaces, err = net.Interfaces()
        if err != nil && name != "" {
            return false
        }
    }

    for _, ifs := range interfaces {
        if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
            if setNodeID(ifs.HardwareAddr) {
                ifname = ifs.Name
                return true
            }
        }
    }

    // We found no interfaces with a valid hardware address. If name
    // does not specify a specific interface generate a random Node ID
    // (section 4.1.6)
    if name == "" {
        if nodeID == nil {
            nodeID = make([]byte, 6)
        }
        randomBits(nodeID)
        return true
    }
    return false
}

// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
// if not already set.
func NodeID() []byte {
    if nodeID == nil {
        SetNodeInterface("")
    }
    nid := make([]byte, 6)
    copy(nid, nodeID)
    return nid
}

// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
// of id are used. If id is less than 6 bytes then false is returned and the
// Node ID is not set.
func SetNodeID(id []byte) bool {
    if setNodeID(id) {
        ifname = "user"
        return true
    }
    return false
}

func setNodeID(id []byte) bool {
    if len(id) < 6 {
        return false
    }
    if nodeID == nil {
        nodeID = make([]byte, 6)
    }
    copy(nodeID, id)
    return true
}

// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) NodeID() []byte {
    if len(uuid) != 16 {
        return nil
    }
    node := make([]byte, 6)
    copy(node, uuid[10:])
    return node
}
@@ -0,0 +1,66 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
    "flag"
    "runtime"
    "testing"
    "time"
)

// This test is only run when --regressions is passed on the go test line.
var regressions = flag.Bool("regressions", false, "run uuid regression tests")

// TestClockSeqRace tests for a particular race condition of returning two
// identical Version1 UUIDs. The duration of 1 minute was chosen as the race
// condition, before being fixed, nearly always occurred in under 30 seconds.
func TestClockSeqRace(t *testing.T) {
    if !*regressions {
        t.Skip("skipping regression tests")
    }
    duration := time.Minute

    done := make(chan struct{})
    defer close(done)

    ch := make(chan UUID, 10000)
    ncpu := runtime.NumCPU()
    switch ncpu {
    case 0, 1:
        // We can't run the test effectively.
        t.Skip("skipping race test, only one CPU detected")
        return
    default:
        runtime.GOMAXPROCS(ncpu)
    }
    for i := 0; i < ncpu; i++ {
        go func() {
            for {
                select {
                case <-done:
                    return
                case ch <- NewUUID():
                }
            }
        }()
    }

    uuids := make(map[string]bool)
    cnt := 0
    start := time.Now()
    for u := range ch {
        s := u.String()
        if uuids[s] {
            t.Errorf("duplicate uuid after %d in %v: %s", cnt, time.Since(start), s)
            return
        }
        uuids[s] = true
        if time.Since(start) > duration {
            return
        }
        cnt++
    }
}
@@ -0,0 +1,132 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
    "encoding/binary"
    "sync"
    "time"
)

// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64

const (
    lillian    = 2299160          // Julian day of 15 Oct 1582
    unix       = 2440587          // Julian day of 1 Jan 1970
    epoch      = unix - lillian   // Days between epochs
    g1582      = epoch * 86400    // seconds between epochs
    g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
)

var (
    mu        sync.Mutex
    lasttime  uint64 // last time we returned
    clock_seq uint16 // clock sequence for this run

    timeNow = time.Now // for testing
)

// UnixTime converts t to the number of seconds and nanoseconds using the Unix
// epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
    sec = int64(t - g1582ns100)
    nsec = (sec % 10000000) * 100
    sec /= 10000000
    return sec, nsec
}

// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
    defer mu.Unlock()
    mu.Lock()
    return getTime()
}

func getTime() (Time, uint16, error) {
    t := timeNow()

    // If we don't have a clock sequence already, set one.
    if clock_seq == 0 {
        setClockSequence(-1)
    }
    now := uint64(t.UnixNano()/100) + g1582ns100

    // If time has gone backwards with this clock sequence then we
    // increment the clock sequence
    if now <= lasttime {
        clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
    }
    lasttime = now
    return Time(now), clock_seq, nil
}

// ClockSequence returns the current clock sequence, generating one if not
// already set. The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
func ClockSequence() int {
    defer mu.Unlock()
    mu.Lock()
    return clockSequence()
}

func clockSequence() int {
    if clock_seq == 0 {
        setClockSequence(-1)
    }
    return int(clock_seq & 0x3fff)
}

// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
    defer mu.Unlock()
    mu.Lock()
    setClockSequence(seq)
}

func setClockSequence(seq int) {
    if seq == -1 {
        var b [2]byte
        randomBits(b[:]) // clock sequence
        seq = int(b[0])<<8 | int(b[1])
    }
    old_seq := clock_seq
    clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
    if old_seq != clock_seq {
        lasttime = 0
    }
}

// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. It returns false if uuid is not valid. The time is only well defined
// for version 1 and 2 UUIDs.
func (uuid UUID) Time() (Time, bool) {
    if len(uuid) != 16 {
        return 0, false
    }
    time := int64(binary.BigEndian.Uint32(uuid[0:4]))
    time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
    time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
    return Time(time), true
}

// ClockSequence returns the clock sequence encoded in uuid. It returns false
// if uuid is not valid. The clock sequence is only well defined for version 1
// and 2 UUIDs.
func (uuid UUID) ClockSequence() (int, bool) {
    if len(uuid) != 16 {
        return 0, false
    }
    return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
}
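Since a uuid.Time counts 100ns ticks since 15 Oct 1582, UnixTime above is the bridge to the standard time package. A minimal sketch of reading the timestamp back out of a Version 1 UUID (illustrative, not part of the commit):

package main

import (
    "fmt"
    "time"

    "code.google.com/p/go-uuid/uuid"
)

func main() {
    u := uuid.NewUUID() // Version 1; may be nil if no node ID or time source
    if t, ok := u.Time(); ok {
        sec, nsec := t.UnixTime()
        fmt.Println(time.Unix(sec, nsec).UTC())
    }
}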
@@ -0,0 +1,43 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
    "io"
)

// randomBits completely fills slice b with random data.
func randomBits(b []byte) {
    if _, err := io.ReadFull(rander, b); err != nil {
        panic(err.Error()) // rand should never fail
    }
}

// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = []byte{
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
    255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}

// xtob converts the first two hex bytes of x into a byte.
func xtob(x string) (byte, bool) {
    b1 := xvalues[x[0]]
    b2 := xvalues[x[1]]
    return (b1 << 4) | b2, b1 != 255 && b2 != 255
}
@@ -0,0 +1,163 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
    "bytes"
    "crypto/rand"
    "fmt"
    "io"
    "strings"
)

// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
// 4122.
type UUID []byte

// A Version represents a UUID's version.
type Version byte

// A Variant represents a UUID's variant.
type Variant byte

// Constants returned by Variant.
const (
    Invalid   = Variant(iota) // Invalid UUID
    RFC4122                   // The variant specified in RFC4122
    Reserved                  // Reserved, NCS backward compatibility.
    Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
    Future                    // Reserved for future definition.
)

var rander = rand.Reader // random function

// New returns a new random (version 4) UUID as a string. It is a convenience
// function for NewRandom().String().
func New() string {
    return NewRandom().String()
}

// Parse decodes s into a UUID or returns nil. Both the UUID form of
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
func Parse(s string) UUID {
    if len(s) == 36+9 {
        if strings.ToLower(s[:9]) != "urn:uuid:" {
            return nil
        }
        s = s[9:]
    } else if len(s) != 36 {
        return nil
    }
    if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
        return nil
    }
    uuid := make([]byte, 16)
    for i, x := range []int{
        0, 2, 4, 6,
        9, 11,
        14, 16,
        19, 21,
        24, 26, 28, 30, 32, 34} {
        if v, ok := xtob(s[x:]); !ok {
            return nil
        } else {
            uuid[i] = v
        }
    }
    return uuid
}

// Equal returns true if uuid1 and uuid2 are equal.
func Equal(uuid1, uuid2 UUID) bool {
    return bytes.Equal(uuid1, uuid2)
}

// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
// or "" if uuid is invalid.
func (uuid UUID) String() string {
    if uuid == nil || len(uuid) != 16 {
        return ""
    }
    b := []byte(uuid)
    return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
        b[:4], b[4:6], b[6:8], b[8:10], b[10:])
}

// URN returns the RFC 2141 URN form of uuid,
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
func (uuid UUID) URN() string {
    if uuid == nil || len(uuid) != 16 {
        return ""
    }
    b := []byte(uuid)
    return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x",
        b[:4], b[4:6], b[6:8], b[8:10], b[10:])
}

// Variant returns the variant encoded in uuid. It returns Invalid if
// uuid is invalid.
func (uuid UUID) Variant() Variant {
    if len(uuid) != 16 {
        return Invalid
    }
    switch {
    case (uuid[8] & 0xc0) == 0x80:
        return RFC4122
    case (uuid[8] & 0xe0) == 0xc0:
        return Microsoft
    case (uuid[8] & 0xe0) == 0xe0:
        return Future
    default:
        return Reserved
    }
    panic("unreachable")
}

// Version returns the version of uuid. It returns false if uuid is not
// valid.
func (uuid UUID) Version() (Version, bool) {
    if len(uuid) != 16 {
        return 0, false
    }
    return Version(uuid[6] >> 4), true
}

func (v Version) String() string {
    if v > 15 {
        return fmt.Sprintf("BAD_VERSION_%d", v)
    }
    return fmt.Sprintf("VERSION_%d", v)
}

func (v Variant) String() string {
    switch v {
    case RFC4122:
        return "RFC4122"
    case Reserved:
        return "Reserved"
    case Microsoft:
        return "Microsoft"
    case Future:
        return "Future"
    case Invalid:
        return "Invalid"
    }
    return fmt.Sprintf("BadVariant%d", int(v))
}

// SetRand sets the random number generator to r, which implements io.Reader.
// If r.Read returns an error when the package requests random data then
// a panic will be issued.
//
// Calling SetRand with nil sets the random number generator to the default
// generator.
func SetRand(r io.Reader) {
    if r == nil {
        rander = rand.Reader
        return
    }
    rander = r
}
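A quick sketch of the core uuid.go API above; the example value is one of the RFC 4122 test vectors used in uuid_test.go below (illustrative, not part of the commit):

package main

import (
    "fmt"

    "code.google.com/p/go-uuid/uuid"
)

func main() {
    // Parse accepts both the bare form and the urn:uuid: form.
    u := uuid.Parse("urn:uuid:f47ac10b-58cc-4372-a567-0e02b2c3d479")
    if u == nil {
        panic("parse failed")
    }
    v, _ := u.Version()
    fmt.Println(u.String(), v, u.Variant()) // f47ac10b-... VERSION_4 RFC4122
}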
390  Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go  generated  vendored  Normal file
@@ -0,0 +1,390 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
    "bytes"
    "fmt"
    "os"
    "strings"
    "testing"
    "time"
)

type test struct {
    in      string
    version Version
    variant Variant
    isuuid  bool
}

var tests = []test{
    {"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true},
    {"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true},
    {"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true},
    {"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true},
    {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
    {"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true},
    {"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true},
    {"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true},
    {"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true},
    {"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true},
    {"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true},
    {"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true},
    {"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true},
    {"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true},
    {"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true},
    {"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true},

    {"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
    {"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
    {"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
    {"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true},
    {"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true},
    {"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true},
    {"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true},
    {"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true},
    {"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true},
    {"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true},
    {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
    {"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true},
    {"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true},
    {"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true},
    {"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true},
    {"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true},
    {"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true},
    {"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true},

    {"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false},
    {"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false},
    {"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false},
    {"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false},
    {"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false},
    {"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false},
}

var constants = []struct {
    c    interface{}
    name string
}{
    {Person, "Person"},
    {Group, "Group"},
    {Org, "Org"},
    {Invalid, "Invalid"},
    {RFC4122, "RFC4122"},
    {Reserved, "Reserved"},
    {Microsoft, "Microsoft"},
    {Future, "Future"},
    {Domain(17), "Domain17"},
    {Variant(42), "BadVariant42"},
}

func testTest(t *testing.T, in string, tt test) {
    uuid := Parse(in)
    if ok := (uuid != nil); ok != tt.isuuid {
        t.Errorf("Parse(%s) got %v expected %v\n", in, ok, tt.isuuid)
    }
    if uuid == nil {
        return
    }

    if v := uuid.Variant(); v != tt.variant {
        t.Errorf("Variant(%s) got %d expected %d\n", in, v, tt.variant)
    }
    if v, _ := uuid.Version(); v != tt.version {
        t.Errorf("Version(%s) got %d expected %d\n", in, v, tt.version)
    }
}

func TestUUID(t *testing.T) {
    for _, tt := range tests {
        testTest(t, tt.in, tt)
        testTest(t, strings.ToUpper(tt.in), tt)
    }
}

func TestConstants(t *testing.T) {
    for x, tt := range constants {
        v, ok := tt.c.(fmt.Stringer)
        if !ok {
            t.Errorf("%x: %v: not a stringer", x, v)
        } else if s := v.String(); s != tt.name {
            v, _ := tt.c.(int)
            t.Errorf("%x: Constant %T:%d gives %q, expected %q\n", x, tt.c, v, s, tt.name)
        }
    }
}

func TestRandomUUID(t *testing.T) {
    m := make(map[string]bool)
    for x := 1; x < 32; x++ {
        uuid := NewRandom()
        s := uuid.String()
        if m[s] {
            t.Errorf("NewRandom returned duplicated UUID %s\n", s)
        }
        m[s] = true
        if v, _ := uuid.Version(); v != 4 {
            t.Errorf("Random UUID of version %s\n", v)
        }
        if uuid.Variant() != RFC4122 {
            t.Errorf("Random UUID is variant %d\n", uuid.Variant())
        }
    }
}

func TestNew(t *testing.T) {
    m := make(map[string]bool)
    for x := 1; x < 32; x++ {
        s := New()
        if m[s] {
            t.Errorf("New returned duplicated UUID %s\n", s)
        }
        m[s] = true
        uuid := Parse(s)
        if uuid == nil {
            t.Errorf("New returned %q which does not decode\n", s)
            continue
        }
        if v, _ := uuid.Version(); v != 4 {
            t.Errorf("Random UUID of version %s\n", v)
        }
        if uuid.Variant() != RFC4122 {
            t.Errorf("Random UUID is variant %d\n", uuid.Variant())
        }
    }
}

func clockSeq(t *testing.T, uuid UUID) int {
    seq, ok := uuid.ClockSequence()
    if !ok {
        t.Fatalf("%s: invalid clock sequence\n", uuid)
    }
    return seq
}

func TestClockSeq(t *testing.T) {
    // Fake time.Now for this test to return a monotonically advancing time; restore it at end.
    defer func(orig func() time.Time) { timeNow = orig }(timeNow)
    monTime := time.Now()
    timeNow = func() time.Time {
        monTime = monTime.Add(1 * time.Second)
        return monTime
    }

    SetClockSequence(-1)
    uuid1 := NewUUID()
    uuid2 := NewUUID()

    if clockSeq(t, uuid1) != clockSeq(t, uuid2) {
        t.Errorf("clock sequence %d != %d\n", clockSeq(t, uuid1), clockSeq(t, uuid2))
    }

    SetClockSequence(-1)
    uuid2 = NewUUID()

    // Just on the very off chance we generated the same sequence
    // two times we try again.
    if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
        SetClockSequence(-1)
        uuid2 = NewUUID()
    }
    if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
        t.Errorf("Duplicate clock sequence %d\n", clockSeq(t, uuid1))
    }

    SetClockSequence(0x1234)
    uuid1 = NewUUID()
    if seq := clockSeq(t, uuid1); seq != 0x1234 {
        t.Errorf("%s: expected seq 0x1234 got 0x%04x\n", uuid1, seq)
    }
}

func TestCoding(t *testing.T) {
    text := "7d444840-9dc0-11d1-b245-5ffdce74fad2"
    urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2"
    data := UUID{
        0x7d, 0x44, 0x48, 0x40,
        0x9d, 0xc0,
        0x11, 0xd1,
        0xb2, 0x45,
        0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2,
    }
    if v := data.String(); v != text {
        t.Errorf("%x: encoded to %s, expected %s\n", data, v, text)
    }
    if v := data.URN(); v != urn {
        t.Errorf("%x: urn is %s, expected %s\n", data, v, urn)
    }

    uuid := Parse(text)
    if !Equal(uuid, data) {
        t.Errorf("%s: decoded to %s, expected %s\n", text, uuid, data)
    }
}

func TestVersion1(t *testing.T) {
    uuid1 := NewUUID()
    uuid2 := NewUUID()

    if Equal(uuid1, uuid2) {
        t.Errorf("%s: duplicate uuid\n", uuid1)
    }
    if v, _ := uuid1.Version(); v != 1 {
        t.Errorf("%s: version %s expected 1\n", uuid1, v)
    }
    if v, _ := uuid2.Version(); v != 1 {
        t.Errorf("%s: version %s expected 1\n", uuid2, v)
    }
    n1 := uuid1.NodeID()
    n2 := uuid2.NodeID()
    if !bytes.Equal(n1, n2) {
        t.Errorf("Different nodes %x != %x\n", n1, n2)
    }
    t1, ok := uuid1.Time()
    if !ok {
        t.Errorf("%s: invalid time\n", uuid1)
    }
    t2, ok := uuid2.Time()
    if !ok {
        t.Errorf("%s: invalid time\n", uuid2)
    }
    q1, ok := uuid1.ClockSequence()
    if !ok {
        t.Errorf("%s: invalid clock sequence\n", uuid1)
    }
    q2, ok := uuid2.ClockSequence()
    if !ok {
        t.Errorf("%s: invalid clock sequence", uuid2)
    }

    switch {
    case t1 == t2 && q1 == q2:
        t.Errorf("time stopped\n")
    case t1 > t2 && q1 == q2:
        t.Errorf("time reversed\n")
    case t1 < t2 && q1 != q2:
        t.Errorf("clock sequence changed unexpectedly\n")
    }
}

func TestNodeAndTime(t *testing.T) {
    // Time is February 5, 1998 12:30:23.136364800 AM GMT

    uuid := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
    node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2}

    ts, ok := uuid.Time()
    if ok {
        c := time.Unix(ts.UnixTime())
        want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC)
        if !c.Equal(want) {
            t.Errorf("Got time %v, want %v", c, want)
        }
    } else {
        t.Errorf("%s: bad time\n", uuid)
    }
    if !bytes.Equal(node, uuid.NodeID()) {
        t.Errorf("Expected node %v got %v\n", node, uuid.NodeID())
    }
}

func TestMD5(t *testing.T) {
    uuid := NewMD5(NameSpace_DNS, []byte("python.org")).String()
    want := "6fa459ea-ee8a-3ca4-894e-db77e160355e"
    if uuid != want {
        t.Errorf("MD5: got %q expected %q\n", uuid, want)
    }
}

func TestSHA1(t *testing.T) {
    uuid := NewSHA1(NameSpace_DNS, []byte("python.org")).String()
    want := "886313e1-3b8a-5372-9b90-0c9aee199e5d"
    if uuid != want {
        t.Errorf("SHA1: got %q expected %q\n", uuid, want)
    }
}

func TestNodeID(t *testing.T) {
    nid := []byte{1, 2, 3, 4, 5, 6}
    SetNodeInterface("")
    s := NodeInterface()
    if s == "" || s == "user" {
        t.Errorf("NodeInterface %q after SetNodeInterface\n", s)
    }
    node1 := NodeID()
    if node1 == nil {
        t.Errorf("NodeID nil after SetNodeInterface %q\n", s)
    }
    SetNodeID(nid)
    s = NodeInterface()
    if s != "user" {
        t.Errorf("Expected NodeInterface %q got %q\n", "user", s)
    }
    node2 := NodeID()
    if node2 == nil {
        t.Errorf("NodeID nil after SetNodeID %q\n", s)
    }
    if bytes.Equal(node1, node2) {
        t.Errorf("NodeID not changed after SetNodeID %q\n", s)
    } else if !bytes.Equal(nid, node2) {
        t.Errorf("NodeID is %x, expected %x\n", node2, nid)
    }
}

func testDCE(t *testing.T, name string, uuid UUID, domain Domain, id uint32) {
    if uuid == nil {
        t.Errorf("%s failed\n", name)
        return
    }
    if v, _ := uuid.Version(); v != 2 {
        t.Errorf("%s: %s: expected version 2, got %s\n", name, uuid, v)
        return
    }
    if v, ok := uuid.Domain(); !ok || v != domain {
        if !ok {
            t.Errorf("%s: %d: Domain failed\n", name, uuid)
        } else {
            t.Errorf("%s: %s: expected domain %d, got %d\n", name, uuid, domain, v)
        }
    }
    if v, ok := uuid.Id(); !ok || v != id {
        if !ok {
            t.Errorf("%s: %d: Id failed\n", name, uuid)
        } else {
            t.Errorf("%s: %s: expected id %d, got %d\n", name, uuid, id, v)
        }
    }
}

func TestDCE(t *testing.T) {
    testDCE(t, "NewDCESecurity", NewDCESecurity(42, 12345678), 42, 12345678)
    testDCE(t, "NewDCEPerson", NewDCEPerson(), Person, uint32(os.Getuid()))
    testDCE(t, "NewDCEGroup", NewDCEGroup(), Group, uint32(os.Getgid()))
}

type badRand struct{}

func (r badRand) Read(buf []byte) (int, error) {
    for i := range buf {
        buf[i] = byte(i)
    }
    return len(buf), nil
}

func TestBadRand(t *testing.T) {
    SetRand(badRand{})
    uuid1 := New()
    uuid2 := New()
    if uuid1 != uuid2 {
        t.Errorf("expected duplicates, got %q and %q\n", uuid1, uuid2)
    }
    SetRand(nil)
    uuid1 = New()
    uuid2 = New()
    if uuid1 == uuid2 {
        t.Errorf("unexpected duplicates, got %q\n", uuid1)
    }
}
@@ -0,0 +1,41 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
    "encoding/binary"
)

// NewUUID returns a Version 1 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns nil. If clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewUUID returns nil.
func NewUUID() UUID {
    if nodeID == nil {
        SetNodeInterface("")
    }

    now, seq, err := GetTime()
    if err != nil {
        return nil
    }

    uuid := make([]byte, 16)

    time_low := uint32(now & 0xffffffff)
    time_mid := uint16((now >> 32) & 0xffff)
    time_hi := uint16((now >> 48) & 0x0fff)
    time_hi |= 0x1000 // Version 1

    binary.BigEndian.PutUint32(uuid[0:], time_low)
    binary.BigEndian.PutUint16(uuid[4:], time_mid)
    binary.BigEndian.PutUint16(uuid[6:], time_hi)
    binary.BigEndian.PutUint16(uuid[8:], seq)
    copy(uuid[10:], nodeID)

    return uuid
}
@@ -0,0 +1,25 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

// NewRandom returns a Random (Version 4) UUID or panics.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
//    Randomly generated UUIDs have 122 random bits.  One's annual risk of being
//    hit by a meteorite is estimated to be one chance in 17 billion, that
//    means the probability is about 0.00000000006 (6 × 10⁻¹¹),
//    equivalent to the odds of creating a few tens of trillions of UUIDs in a
//    year and having one duplicate.
func NewRandom() UUID {
    uuid := make([]byte, 16)
    randomBits([]byte(uuid))
    uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
    uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
    return uuid
}
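New and NewRandom differ only in return type; New is the convenience string form. A short sketch (illustrative, not part of the commit):

package main

import (
    "fmt"

    "code.google.com/p/go-uuid/uuid"
)

func main() {
    s := uuid.New()       // random Version 4 UUID, already formatted as a string
    u := uuid.NewRandom() // the same thing as a 16-byte uuid.UUID value
    fmt.Println(s, u.String())
}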
@ -1,498 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package sqlite3 provides access to the SQLite library, version 3.
|
||||
//
|
||||
// The package has no exported API.
|
||||
// It registers a driver for the standard Go database/sql package.
|
||||
//
|
||||
// import _ "code.google.com/p/gosqlite/sqlite3"
|
||||
//
|
||||
// (For an alternate, earlier API, see the code.google.com/p/gosqlite/sqlite package.)
|
||||
package sqlite
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lsqlite3
|
||||
|
||||
#include <sqlite3.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
// These wrappers are necessary because SQLITE_TRANSIENT
|
||||
// is a pointer constant, and cgo doesn't translate them correctly.
|
||||
// The definition in sqlite3.h is:
|
||||
//
|
||||
// typedef void (*sqlite3_destructor_type)(void*);
|
||||
// #define SQLITE_STATIC ((sqlite3_destructor_type)0)
|
||||
// #define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1)
|
||||
|
||||
static int my_bind_text(sqlite3_stmt *stmt, int n, char *p, int np) {
|
||||
return sqlite3_bind_text(stmt, n, p, np, SQLITE_TRANSIENT);
|
||||
}
|
||||
static int my_bind_blob(sqlite3_stmt *stmt, int n, void *p, int np) {
|
||||
return sqlite3_bind_blob(stmt, n, p, np, SQLITE_TRANSIENT);
|
||||
}
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func init() {
|
||||
sql.Register("sqlite3", impl{})
|
||||
}
|
||||
|
||||
type errno int
|
||||
|
||||
func (e errno) Error() string {
|
||||
s := errText[e]
|
||||
if s == "" {
|
||||
return fmt.Sprintf("errno %d", int(e))
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var (
|
||||
errError error = errno(1) // /* SQL error or missing database */
|
||||
errInternal error = errno(2) // /* Internal logic error in SQLite */
|
||||
errPerm error = errno(3) // /* Access permission denied */
|
||||
errAbort error = errno(4) // /* Callback routine requested an abort */
|
||||
errBusy error = errno(5) // /* The database file is locked */
|
||||
errLocked error = errno(6) // /* A table in the database is locked */
|
||||
errNoMem error = errno(7) // /* A malloc() failed */
|
||||
errReadOnly error = errno(8) // /* Attempt to write a readonly database */
|
||||
errInterrupt error = errno(9) // /* Operation terminated by sqlite3_interrupt()*/
|
||||
errIOErr error = errno(10) // /* Some kind of disk I/O error occurred */
|
||||
errCorrupt error = errno(11) // /* The database disk image is malformed */
|
||||
errFull error = errno(13) // /* Insertion failed because database is full */
|
||||
errCantOpen error = errno(14) // /* Unable to open the database file */
|
||||
errEmpty error = errno(16) // /* Database is empty */
|
||||
errSchema error = errno(17) // /* The database schema changed */
|
||||
errTooBig error = errno(18) // /* String or BLOB exceeds size limit */
|
||||
errConstraint error = errno(19) // /* Abort due to constraint violation */
|
||||
errMismatch error = errno(20) // /* Data type mismatch */
|
||||
errMisuse error = errno(21) // /* Library used incorrectly */
|
||||
errNolfs error = errno(22) // /* Uses OS features not supported on host */
|
||||
errAuth error = errno(23) // /* Authorization denied */
|
||||
errFormat error = errno(24) // /* Auxiliary database format error */
|
||||
errRange error = errno(25) // /* 2nd parameter to sqlite3_bind out of range */
|
||||
errNotDB error = errno(26) // /* File opened that is not a database file */
|
||||
stepRow = errno(100) // /* sqlite3_step() has another row ready */
|
||||
stepDone = errno(101) // /* sqlite3_step() has finished executing */
|
||||
)
|
||||
|
||||
var errText = map[errno]string{
|
||||
1: "SQL error or missing database",
|
||||
2: "Internal logic error in SQLite",
|
||||
3: "Access permission denied",
|
||||
4: "Callback routine requested an abort",
|
||||
5: "The database file is locked",
|
||||
6: "A table in the database is locked",
|
||||
7: "A malloc() failed",
|
||||
8: "Attempt to write a readonly database",
|
||||
9: "Operation terminated by sqlite3_interrupt()*/",
|
||||
10: "Some kind of disk I/O error occurred",
|
||||
11: "The database disk image is malformed",
|
||||
12: "NOT USED. Table or record not found",
|
||||
13: "Insertion failed because database is full",
|
||||
14: "Unable to open the database file",
|
||||
15: "NOT USED. Database lock protocol error",
|
||||
16: "Database is empty",
|
||||
17: "The database schema changed",
|
||||
18: "String or BLOB exceeds size limit",
|
||||
19: "Abort due to constraint violation",
|
||||
20: "Data type mismatch",
|
||||
21: "Library used incorrectly",
|
||||
22: "Uses OS features not supported on host",
|
||||
23: "Authorization denied",
|
||||
24: "Auxiliary database format error",
|
||||
25: "2nd parameter to sqlite3_bind out of range",
|
||||
26: "File opened that is not a database file",
|
||||
100: "sqlite3_step() has another row ready",
|
||||
101: "sqlite3_step() has finished executing",
|
||||
}
|
||||
|
||||
type impl struct{}
|
||||
|
||||
func (impl) Open(name string) (driver.Conn, error) {
|
||||
if C.sqlite3_threadsafe() == 0 {
|
||||
return nil, errors.New("sqlite library was not compiled for thread-safe operation")
|
||||
}
|
||||
|
||||
var db *C.sqlite3
|
||||
cname := C.CString(name)
|
||||
defer C.free(unsafe.Pointer(cname))
|
||||
rv := C.sqlite3_open_v2(cname, &db,
|
||||
C.SQLITE_OPEN_FULLMUTEX|
|
||||
C.SQLITE_OPEN_READWRITE|
|
||||
C.SQLITE_OPEN_CREATE,
|
||||
nil)
|
||||
if rv != 0 {
|
||||
return nil, errno(rv)
|
||||
}
|
||||
if db == nil {
|
||||
return nil, errors.New("sqlite succeeded without returning a database")
|
||||
}
|
||||
return &conn{db: db}, nil
|
||||
}
|
||||
|
||||
type conn struct {
|
||||
db *C.sqlite3
|
||||
closed bool
|
||||
tx bool
|
||||
}
|
||||
|
||||
func (c *conn) error(rv C.int) error {
|
||||
if rv == 0 {
|
||||
return nil
|
||||
}
|
||||
if rv == 21 || c.closed {
|
||||
return errno(rv)
|
||||
}
|
||||
return errors.New(errno(rv).Error() + ": " + C.GoString(C.sqlite3_errmsg(c.db)))
|
||||
}
|
||||
|
||||
func (c *conn) Prepare(cmd string) (driver.Stmt, error) {
|
||||
if c.closed {
|
||||
panic("database/sql/driver: misuse of sqlite driver: Prepare after Close")
|
||||
}
|
||||
cmdstr := C.CString(cmd)
|
||||
defer C.free(unsafe.Pointer(cmdstr))
|
||||
var s *C.sqlite3_stmt
|
||||
var tail *C.char
|
||||
rv := C.sqlite3_prepare_v2(c.db, cmdstr, C.int(len(cmd)+1), &s, &tail)
|
||||
if rv != 0 {
|
||||
return nil, c.error(rv)
|
||||
}
|
||||
return &stmt{c: c, stmt: s, sql: cmd, t0: time.Now()}, nil
|
||||
}
|
||||
|
||||
func (c *conn) Close() error {
|
||||
if c.closed {
|
||||
panic("database/sql/driver: misuse of sqlite driver: multiple Close")
|
||||
}
|
||||
c.closed = true
|
||||
rv := C.sqlite3_close(c.db)
|
||||
c.db = nil
|
||||
return c.error(rv)
|
||||
}
|
||||
|
||||
func (c *conn) exec(cmd string) error {
|
||||
cstring := C.CString(cmd)
|
||||
defer C.free(unsafe.Pointer(cstring))
|
||||
rv := C.sqlite3_exec(c.db, cstring, nil, nil, nil)
|
||||
return c.error(rv)
|
||||
}
|
||||
|
||||
func (c *conn) Begin() (driver.Tx, error) {
|
||||
if c.tx {
|
||||
panic("database/sql/driver: misuse of sqlite driver: multiple Tx")
|
||||
}
|
||||
if err := c.exec("BEGIN TRANSACTION"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.tx = true
|
||||
return &tx{c}, nil
|
||||
}
|
||||
|
||||
type tx struct {
|
||||
c *conn
|
||||
}
|
||||
|
||||
func (t *tx) Commit() error {
|
||||
if t.c == nil || !t.c.tx {
|
||||
panic("database/sql/driver: misuse of sqlite driver: extra Commit")
|
||||
}
|
||||
t.c.tx = false
|
||||
err := t.c.exec("COMMIT TRANSACTION")
|
||||
t.c = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *tx) Rollback() error {
|
||||
if t.c == nil || !t.c.tx {
|
||||
panic("database/sql/driver: misuse of sqlite driver: extra Rollback")
|
||||
}
|
||||
t.c.tx = false
|
||||
err := t.c.exec("ROLLBACK")
|
||||
t.c = nil
|
||||
return err
|
||||
}
|
||||
|
||||
type stmt struct {
|
||||
c *conn
|
||||
stmt *C.sqlite3_stmt
|
||||
err error
|
||||
t0 time.Time
|
||||
sql string
|
||||
args string
|
||||
closed bool
|
||||
rows bool
|
||||
colnames []string
|
||||
coltypes []string
|
||||
}
|
||||
|
||||
func (s *stmt) Close() error {
|
||||
if s.rows {
|
||||
panic("database/sql/driver: misuse of sqlite driver: Close with active Rows")
|
||||
}
|
||||
if s.closed {
|
||||
panic("database/sql/driver: misuse of sqlite driver: double Close of Stmt")
|
||||
}
|
||||
s.closed = true
|
||||
rv := C.sqlite3_finalize(s.stmt)
|
||||
if rv != 0 {
|
||||
return s.c.error(rv)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stmt) NumInput() int {
|
||||
if s.closed {
|
||||
panic("database/sql/driver: misuse of sqlite driver: NumInput after Close")
|
||||
}
|
||||
return int(C.sqlite3_bind_parameter_count(s.stmt))
|
||||
}
|
||||
|
||||
func (s *stmt) reset() error {
|
||||
return s.c.error(C.sqlite3_reset(s.stmt))
|
||||
}
|
||||
|
||||
func (s *stmt) start(args []driver.Value) error {
|
||||
if err := s.reset(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n := int(C.sqlite3_bind_parameter_count(s.stmt))
|
||||
if n != len(args) {
|
||||
return fmt.Errorf("incorrect argument count for command: have %d want %d", len(args), n)
|
||||
}
|
||||
|
||||
for i, v := range args {
|
||||
var str string
|
||||
switch v := v.(type) {
|
||||
case nil:
|
||||
if rv := C.sqlite3_bind_null(s.stmt, C.int(i+1)); rv != 0 {
|
||||
return s.c.error(rv)
|
||||
}
|
||||
continue
|
||||
|
||||
case float64:
|
||||
if rv := C.sqlite3_bind_double(s.stmt, C.int(i+1), C.double(v)); rv != 0 {
|
||||
return s.c.error(rv)
|
||||
}
|
||||
continue
|
||||
|
||||
case int64:
|
||||
if rv := C.sqlite3_bind_int64(s.stmt, C.int(i+1), C.sqlite3_int64(v)); rv != 0 {
|
||||
return s.c.error(rv)
|
			}
			continue

		case []byte:
			var p *byte
			if len(v) > 0 {
				p = &v[0]
			}
			if rv := C.my_bind_blob(s.stmt, C.int(i+1), unsafe.Pointer(p), C.int(len(v))); rv != 0 {
				return s.c.error(rv)
			}
			continue

		case bool:
			var vi int64
			if v {
				vi = 1
			}
			if rv := C.sqlite3_bind_int64(s.stmt, C.int(i+1), C.sqlite3_int64(vi)); rv != 0 {
				return s.c.error(rv)
			}
			continue

		case time.Time:
			str = v.UTC().Format(timefmt[0])

		case string:
			str = v

		default:
			str = fmt.Sprint(v)
		}

		cstr := C.CString(str)
		rv := C.my_bind_text(s.stmt, C.int(i+1), cstr, C.int(len(str)))
		C.free(unsafe.Pointer(cstr))
		if rv != 0 {
			return s.c.error(rv)
		}
	}

	return nil
}

func (s *stmt) Exec(args []driver.Value) (driver.Result, error) {
	if s.closed {
		panic("database/sql/driver: misuse of sqlite driver: Exec after Close")
	}
	if s.rows {
		panic("database/sql/driver: misuse of sqlite driver: Exec with active Rows")
	}

	err := s.start(args)
	if err != nil {
		return nil, err
	}

	rv := C.sqlite3_step(s.stmt)
	if errno(rv) != stepDone {
		if rv == 0 {
			rv = 21 // errMisuse
		}
		return nil, s.c.error(rv)
	}

	id := int64(C.sqlite3_last_insert_rowid(s.c.db))
	rows := int64(C.sqlite3_changes(s.c.db))
	return &result{id, rows}, nil
}

func (s *stmt) Query(args []driver.Value) (driver.Rows, error) {
	if s.closed {
		panic("database/sql/driver: misuse of sqlite driver: Query after Close")
	}
	if s.rows {
		panic("database/sql/driver: misuse of sqlite driver: Query with active Rows")
	}

	err := s.start(args)
	if err != nil {
		return nil, err
	}

	s.rows = true
	if s.colnames == nil {
		n := int64(C.sqlite3_column_count(s.stmt))
		s.colnames = make([]string, n)
		s.coltypes = make([]string, n)
		for i := range s.colnames {
			s.colnames[i] = C.GoString(C.sqlite3_column_name(s.stmt, C.int(i)))
			s.coltypes[i] = strings.ToLower(C.GoString(C.sqlite3_column_decltype(s.stmt, C.int(i))))
		}
	}
	return &rows{s}, nil
}

type rows struct {
	s *stmt
}

func (r *rows) Columns() []string {
	if r.s == nil {
		panic("database/sql/driver: misuse of sqlite driver: Columns of closed Rows")
	}
	return r.s.colnames
}

const maxslice = 1<<31 - 1

var timefmt = []string{
	"2006-01-02 15:04:05.999999999",
	"2006-01-02T15:04:05.999999999",
	"2006-01-02 15:04:05",
	"2006-01-02T15:04:05",
	"2006-01-02 15:04",
	"2006-01-02T15:04",
	"2006-01-02",
}

func (r *rows) Next(dst []driver.Value) error {
	if r.s == nil {
		panic("database/sql/driver: misuse of sqlite driver: Next of closed Rows")
	}

	rv := C.sqlite3_step(r.s.stmt)
	if errno(rv) != stepRow {
		if errno(rv) == stepDone {
			return io.EOF
		}
		if rv == 0 {
			rv = 21
		}
		return r.s.c.error(rv)
	}

	for i := range dst {
		switch typ := C.sqlite3_column_type(r.s.stmt, C.int(i)); typ {
		default:
			return fmt.Errorf("unexpected sqlite3 column type %d", typ)
		case C.SQLITE_INTEGER:
			val := int64(C.sqlite3_column_int64(r.s.stmt, C.int(i)))
			switch r.s.coltypes[i] {
			case "timestamp", "datetime":
				dst[i] = time.Unix(val, 0).UTC()
			case "boolean":
				dst[i] = val > 0
			default:
				dst[i] = val
			}

		case C.SQLITE_FLOAT:
			dst[i] = float64(C.sqlite3_column_double(r.s.stmt, C.int(i)))

		case C.SQLITE_BLOB, C.SQLITE_TEXT:
			n := int(C.sqlite3_column_bytes(r.s.stmt, C.int(i)))
			var b []byte
			if n > 0 {
				p := C.sqlite3_column_blob(r.s.stmt, C.int(i))
				b = (*[maxslice]byte)(unsafe.Pointer(p))[:n]
			}
			dst[i] = b
			switch r.s.coltypes[i] {
			case "timestamp", "datetime":
				dst[i] = time.Time{}
				s := string(b)
				for _, f := range timefmt {
					if t, err := time.Parse(f, s); err == nil {
						dst[i] = t
						break
					}
				}
			}

		case C.SQLITE_NULL:
			dst[i] = nil
		}
	}
	return nil
}

func (r *rows) Close() error {
	if r.s == nil {
		panic("database/sql/driver: misuse of sqlite driver: Close of closed Rows")
	}
	r.s.rows = false
	r.s = nil
	return nil
}

type result struct {
	id   int64
	rows int64
}

func (r *result) LastInsertId() (int64, error) {
	return r.id, nil
}

func (r *result) RowsAffected() (int64, error) {
	return r.rows, nil
}
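The bind/step/scan path above is easiest to see end to end from database/sql. Below is a minimal sketch, assuming this driver registers itself under the name "sqlite3" (the registration and connection code live in an earlier part of this file, not shown in this hunk); the table and values are illustrative.

package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "code.google.com/p/gosqlite/sqlite3" // driver import; path taken from Godeps
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Exec flows through stmt.Exec above: the bool is bound as an int64 via
	// sqlite3_bind_int64 and the time.Time is formatted with timefmt[0]
	// before my_bind_text.
	if _, err := db.Exec(`CREATE TABLE events (name TEXT, done BOOLEAN, at DATETIME)`); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`INSERT INTO events VALUES (?, ?, ?)`, "build", true, time.Now()); err != nil {
		log.Fatal(err)
	}

	// QueryRow flows through stmt.Query/rows.Next above: the "boolean" and
	// "datetime" decltypes steer the SQLITE_INTEGER/SQLITE_TEXT conversions.
	var name string
	var done bool
	var at time.Time
	if err := db.QueryRow(`SELECT name, done, at FROM events`).Scan(&name, &done, &at); err != nil {
		log.Fatal(err)
	}
	fmt.Println(name, done, at)
}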
76	Godeps/_workspace/src/github.com/docker/distribution/context/context.go generated vendored Normal file
@@ -0,0 +1,76 @@
package context

import (
	"github.com/docker/distribution/uuid"
	"golang.org/x/net/context"
)

// Context is a copy of Context from the golang.org/x/net/context package.
type Context interface {
	context.Context
}

// instanceContext is a context that provides only an instance id. It is
// provided as the main background context.
type instanceContext struct {
	Context
	id string // id of context, logged as "instance.id"
}

func (ic *instanceContext) Value(key interface{}) interface{} {
	if key == "instance.id" {
		return ic.id
	}

	return ic.Context.Value(key)
}

var background = &instanceContext{
	Context: context.Background(),
	id:      uuid.Generate().String(),
}

// Background returns a non-nil, empty Context. The background context
// provides a single key, "instance.id" that is globally unique to the
// process.
func Background() Context {
	return background
}

// WithValue returns a copy of parent in which the value associated with key is
// val. Use context Values only for request-scoped data that transits processes
// and APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key, val interface{}) Context {
	return context.WithValue(parent, key, val)
}

// stringMapContext is a simple context implementation that checks a map for a
// key, falling back to a parent if not present.
type stringMapContext struct {
	context.Context
	m map[string]interface{}
}

// WithValues returns a context that proxies lookups through a map. Only
// supports string keys.
func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
	mo := make(map[string]interface{}, len(m)) // make our own copy.
	for k, v := range m {
		mo[k] = v
	}

	return stringMapContext{
		Context: ctx,
		m:       mo,
	}
}

func (smc stringMapContext) Value(key interface{}) interface{} {
	if ks, ok := key.(string); ok {
		if v, ok := smc.m[ks]; ok {
			return v
		}
	}

	return smc.Context.Value(key)
}
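As a quick orientation, here is a hedged sketch of how these pieces chain together, written as if from inside this package; the keys other than "instance.id" are made up for illustration.

func exampleValueChain() {
	ctx := Background()                     // carries the process-unique "instance.id"
	ctx = WithValue(ctx, "version", "v1.2") // one key/value layered on top

	ctx2 := WithValues(ctx, map[string]interface{}{
		"color": "blue",
	})

	_ = ctx2.Value("color")       // "blue": found in the stringMapContext map
	_ = ctx2.Value("version")     // "v1.2": falls through to the WithValue parent
	_ = ctx2.Value("instance.id") // resolved by the background instanceContext
}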
76	Godeps/_workspace/src/github.com/docker/distribution/context/doc.go generated vendored Normal file
@@ -0,0 +1,76 @@
// Package context provides several utilities for working with
// golang.org/x/net/context in http requests. Primarily, the focus is on
// logging relevant request information but this package is not limited to
// that purpose.
//
// Logging
//
// The most useful aspect of this package is GetLogger. This function takes
// any context.Context interface and returns the current logger from the
// context. Canonical usage looks like this:
//
// 	GetLogger(ctx).Infof("something interesting happened")
//
// GetLogger also takes optional key arguments. The keys will be looked up in
// the context and reported with the logger. The following example would
// return a logger that prints the version with each log message:
//
// 	ctx := WithValue(Background(), "version", version)
// 	GetLogger(ctx, "version").Infof("this log message has a version field")
//
// The above would print out a log message like this:
//
// 	INFO[0000] this log message has a version field    version=v2.0.0-alpha.2.m
//
// When used with WithLogger, we gain the ability to decorate the context with
// loggers that have information from disparate parts of the call stack.
// Following from the version example, we can build a new context with the
// configured logger such that we always print the version field:
//
// 	ctx = WithLogger(ctx, GetLogger(ctx, "version"))
//
// Since the logger has been pushed to the context, we can now get the version
// field for free with our log messages. Future calls to GetLogger on the new
// context will have the version field:
//
// 	GetLogger(ctx).Infof("this log message has a version field")
//
// This becomes more powerful when we start stacking loggers. Let's say we
// have the version logger from above but also want a request id. Using the
// context above, in our request scoped function, we place another logger in
// the context:
//
// 	ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
// 	ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
//
// When GetLogger is called on the new context, "http.request.id" will be
// included as a logger field, along with the original "version" field:
//
// 	INFO[0000] this log message has a version field    http.request.id=unique id version=v2.0.0-alpha.2.m
//
// Note that this only affects the new context: the previous context, with the
// version field, can be used independently. Put another way, the new logger,
// added to the request context, is unique to that context and can have
// request scoped variables.
//
// HTTP Requests
//
// This package also contains several methods for working with http requests.
// The concepts are very similar to those described above. We simply place the
// request in the context using WithRequest. This makes the request variables
// available. GetRequestLogger can then be called to get request specific
// variables in a log line:
//
// 	ctx = WithRequest(ctx, req)
// 	GetRequestLogger(ctx).Infof("request variables")
//
// Like above, if we want to include the request data in all log messages in
// the context, we push the logger to a new context and use that one:
//
// 	ctx = WithLogger(ctx, GetRequestLogger(ctx))
//
// The concept is fairly powerful and ensures that calls throughout the stack
// can be traced in log messages. Using the fields like "http.request.id", one
// can analyze call flow for a particular request with a simple grep of the
// logs.
package context
336	Godeps/_workspace/src/github.com/docker/distribution/context/http.go generated vendored Normal file
@@ -0,0 +1,336 @@
package context

import (
	"errors"
	"net"
	"net/http"
	"strings"
	"sync"
	"time"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/distribution/uuid"
	"github.com/gorilla/mux"
)

// Common errors used with this package.
var (
	ErrNoRequestContext        = errors.New("no http request in context")
	ErrNoResponseWriterContext = errors.New("no http response in context")
)

func parseIP(ipStr string) net.IP {
	ip := net.ParseIP(ipStr)
	if ip == nil {
		log.Warnf("invalid remote IP address: %q", ipStr)
	}
	return ip
}

// RemoteAddr extracts the remote address of the request, taking into
// account proxy headers.
func RemoteAddr(r *http.Request) string {
	if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
		proxies := strings.Split(prior, ",")
		if len(proxies) > 0 {
			remoteAddr := strings.Trim(proxies[0], " ")
			if parseIP(remoteAddr) != nil {
				return remoteAddr
			}
		}
	}
	// X-Real-Ip is less supported, but worth checking in the
	// absence of X-Forwarded-For
	if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
		if parseIP(realIP) != nil {
			return realIP
		}
	}

	return r.RemoteAddr
}

// RemoteIP extracts the remote IP of the request, taking into
// account proxy headers.
func RemoteIP(r *http.Request) string {
	addr := RemoteAddr(r)

	// Try parsing it as "IP:port"
	if ip, _, err := net.SplitHostPort(addr); err == nil {
		return ip
	}

	return addr
}

// WithRequest places the request on the context. The context of the request
// is assigned a unique id, available at "http.request.id". The request itself
// is available at "http.request". Other common attributes are available under
// the prefix "http.request.". If a request is already present on the context,
// this method will panic.
func WithRequest(ctx Context, r *http.Request) Context {
	if ctx.Value("http.request") != nil {
		// NOTE(stevvooe): This needs to be considered a programming error. It
		// is unlikely that we'd want to have more than one request in
		// context.
		panic("only one request per context")
	}

	return &httpRequestContext{
		Context:   ctx,
		startedAt: time.Now(),
		id:        uuid.Generate().String(),
		r:         r,
	}
}

// GetRequest returns the http request in the given context. Returns
// ErrNoRequestContext if the context does not have an http request associated
// with it.
func GetRequest(ctx Context) (*http.Request, error) {
	if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
		return r, nil
	}
	return nil, ErrNoRequestContext
}

// GetRequestID attempts to resolve the current request id, if possible. The
// empty string is returned if the id is not available on the context.
func GetRequestID(ctx Context) string {
	return GetStringValue(ctx, "http.request.id")
}

// WithResponseWriter returns a new context and response writer that makes
// interesting response statistics available within the context.
func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
	irw := &instrumentedResponseWriter{
		ResponseWriter: w,
		Context:        ctx,
	}

	return irw, irw
}

// GetResponseWriter returns the http.ResponseWriter from the provided
// context. If not present, ErrNoResponseWriterContext is returned. The
// returned instance provides instrumentation in the context.
func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
	v := ctx.Value("http.response")

	rw, ok := v.(http.ResponseWriter)
	if !ok || rw == nil {
		return nil, ErrNoResponseWriterContext
	}

	return rw, nil
}

// getVarsFromRequest lets us change the request vars implementation for
// testing and maybe future changes.
var getVarsFromRequest = mux.Vars

// WithVars extracts gorilla/mux vars and makes them available on the returned
// context. Variables are available at keys with the prefix "vars.". For
// example, if looking for the variable "name", it can be accessed as
// "vars.name". Implementations that are accessing values need not know that
// the underlying context is implemented with gorilla/mux vars.
func WithVars(ctx Context, r *http.Request) Context {
	return &muxVarsContext{
		Context: ctx,
		vars:    getVarsFromRequest(r),
	}
}

// GetRequestLogger returns a logger that contains fields from the request in
// the current context. If the request is not available in the context, no
// fields will display. Request loggers can safely be pushed onto the context.
func GetRequestLogger(ctx Context) Logger {
	return GetLogger(ctx,
		"http.request.id",
		"http.request.method",
		"http.request.host",
		"http.request.uri",
		"http.request.referer",
		"http.request.useragent",
		"http.request.remoteaddr",
		"http.request.contenttype")
}

// GetResponseLogger reads the current response stats and builds a logger.
// Because the values are read at call time, pushing a logger returned from
// this function on the context will lead to missing or invalid data. Only
// call this at the end of a request, after the response has been written.
func GetResponseLogger(ctx Context) Logger {
	l := getLogrusLogger(ctx,
		"http.response.written",
		"http.response.status",
		"http.response.contenttype")

	duration := Since(ctx, "http.request.startedat")

	if duration > 0 {
		l = l.WithField("http.response.duration", duration.String())
	}

	return l
}

// httpRequestContext makes information about a request available to context.
type httpRequestContext struct {
	Context

	startedAt time.Time
	id        string
	r         *http.Request
}

// Value returns a keyed element of the request for use in the context. To get
// the request itself, query "http.request". For other components, access them
// as "http.request.<component>". For example, "http.request.uri" returns
// r.RequestURI.
func (ctx *httpRequestContext) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "http.request" {
			return ctx.r
		}

		if !strings.HasPrefix(keyStr, "http.request.") {
			goto fallback
		}

		parts := strings.Split(keyStr, ".")

		if len(parts) != 3 {
			goto fallback
		}

		switch parts[2] {
		case "uri":
			return ctx.r.RequestURI
		case "remoteaddr":
			return RemoteAddr(ctx.r)
		case "method":
			return ctx.r.Method
		case "host":
			return ctx.r.Host
		case "referer":
			referer := ctx.r.Referer()
			if referer != "" {
				return referer
			}
		case "useragent":
			return ctx.r.UserAgent()
		case "id":
			return ctx.id
		case "startedat":
			return ctx.startedAt
		case "contenttype":
			ct := ctx.r.Header.Get("Content-Type")
			if ct != "" {
				return ct
			}
		}
	}

fallback:
	return ctx.Context.Value(key)
}

type muxVarsContext struct {
	Context
	vars map[string]string
}

func (ctx *muxVarsContext) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "vars" {
			return ctx.vars
		}

		if strings.HasPrefix(keyStr, "vars.") {
			keyStr = strings.TrimPrefix(keyStr, "vars.")
		}

		if v, ok := ctx.vars[keyStr]; ok {
			return v
		}
	}

	return ctx.Context.Value(key)
}

// instrumentedResponseWriter provides response writer information in a
// context.
type instrumentedResponseWriter struct {
	http.ResponseWriter
	Context

	mu      sync.Mutex
	status  int
	written int64
}

func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
	n, err = irw.ResponseWriter.Write(p)

	irw.mu.Lock()
	irw.written += int64(n)

	// Guess the likely status if not set.
	if irw.status == 0 {
		irw.status = http.StatusOK
	}

	irw.mu.Unlock()

	return
}

func (irw *instrumentedResponseWriter) WriteHeader(status int) {
	irw.ResponseWriter.WriteHeader(status)

	irw.mu.Lock()
	irw.status = status
	irw.mu.Unlock()
}

func (irw *instrumentedResponseWriter) Flush() {
	if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
		flusher.Flush()
	}
}

func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "http.response" {
			return irw
		}

		if !strings.HasPrefix(keyStr, "http.response.") {
			goto fallback
		}

		parts := strings.Split(keyStr, ".")

		if len(parts) != 3 {
			goto fallback
		}

		irw.mu.Lock()
		defer irw.mu.Unlock()

		switch parts[2] {
		case "written":
			return irw.written
		case "status":
			return irw.status
		case "contenttype":
			contentType := irw.Header().Get("Content-Type")
			if contentType != "" {
				return contentType
			}
		}
	}

fallback:
	return irw.Context.Value(key)
}
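Pulling the pieces together, here is a sketch (written as if inside this package) of how a handler might thread a request through these helpers; the handler itself is illustrative and not part of this file.

func exampleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := WithRequest(Background(), r) // panics if a request is already present
	ctx, w = WithResponseWriter(ctx, w) // rebind w so response stats get recorded

	// Push the request fields so every later GetLogger call carries them.
	ctx = WithLogger(ctx, GetRequestLogger(ctx))
	GetLogger(ctx).Infof("handling %s", GetRequestID(ctx))

	w.WriteHeader(http.StatusNoContent)

	// Reads the stats at call time; only meaningful after the response is written.
	GetResponseLogger(ctx).Infof("request complete")
}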
285	Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go generated vendored Normal file
@@ -0,0 +1,285 @@
package context

import (
	"net/http"
	"net/http/httptest"
	"net/http/httputil"
	"net/url"
	"reflect"
	"testing"
	"time"
)

func TestWithRequest(t *testing.T) {
	var req http.Request

	start := time.Now()
	req.Method = "GET"
	req.Host = "example.com"
	req.RequestURI = "/test-test"
	req.Header = make(http.Header)
	req.Header.Set("Referer", "foo.com/referer")
	req.Header.Set("User-Agent", "test/0.1")

	ctx := WithRequest(Background(), &req)
	for _, testcase := range []struct {
		key      string
		expected interface{}
	}{
		{
			key:      "http.request",
			expected: &req,
		},
		{
			key: "http.request.id",
		},
		{
			key:      "http.request.method",
			expected: req.Method,
		},
		{
			key:      "http.request.host",
			expected: req.Host,
		},
		{
			key:      "http.request.uri",
			expected: req.RequestURI,
		},
		{
			key:      "http.request.referer",
			expected: req.Referer(),
		},
		{
			key:      "http.request.useragent",
			expected: req.UserAgent(),
		},
		{
			key:      "http.request.remoteaddr",
			expected: req.RemoteAddr,
		},
		{
			key: "http.request.startedat",
		},
	} {
		v := ctx.Value(testcase.key)

		if v == nil {
			t.Fatalf("value not found for %q", testcase.key)
		}

		if testcase.expected != nil && v != testcase.expected {
			t.Fatalf("%s: %v != %v", testcase.key, v, testcase.expected)
		}

		// Key specific checks!
		switch testcase.key {
		case "http.request.id":
			if _, ok := v.(string); !ok {
				t.Fatalf("request id not a string: %v", v)
			}
		case "http.request.startedat":
			vt, ok := v.(time.Time)
			if !ok {
				t.Fatalf("value not a time: %v", v)
			}

			now := time.Now()
			if vt.After(now) {
				t.Fatalf("time generated too late: %v > %v", vt, now)
			}

			if vt.Before(start) {
				t.Fatalf("time generated too early: %v < %v", vt, start)
			}
		}
	}
}

type testResponseWriter struct {
	flushed bool
	status  int
	written int64
	header  http.Header
}

func (trw *testResponseWriter) Header() http.Header {
	if trw.header == nil {
		trw.header = make(http.Header)
	}

	return trw.header
}

func (trw *testResponseWriter) Write(p []byte) (n int, err error) {
	if trw.status == 0 {
		trw.status = http.StatusOK
	}

	n = len(p)
	trw.written += int64(n)
	return
}

func (trw *testResponseWriter) WriteHeader(status int) {
	trw.status = status
}

func (trw *testResponseWriter) Flush() {
	trw.flushed = true
}

func TestWithResponseWriter(t *testing.T) {
	trw := testResponseWriter{}
	ctx, rw := WithResponseWriter(Background(), &trw)

	if ctx.Value("http.response") != rw {
		t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw)
	}

	grw, err := GetResponseWriter(ctx)
	if err != nil {
		t.Fatalf("error getting response writer: %v", err)
	}

	if grw != rw {
		t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw)
	}

	if ctx.Value("http.response.status") != 0 {
		t.Fatalf("response status should always be a number and should be zero here: %v != 0", ctx.Value("http.response.status"))
	}

	if n, err := rw.Write(make([]byte, 1024)); err != nil {
		t.Fatalf("unexpected error writing: %v", err)
	} else if n != 1024 {
		t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024)
	}

	if ctx.Value("http.response.status") != http.StatusOK {
		t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusOK)
	}

	if ctx.Value("http.response.written") != int64(1024) {
		t.Fatalf("unexpected number reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024)
	}

	// Make sure flush propagates
	rw.(http.Flusher).Flush()

	if !trw.flushed {
		t.Fatalf("response writer not flushed")
	}

	// Write another status and make sure context is correct. This normally
	// wouldn't work except for in this contrived testcase.
	rw.WriteHeader(http.StatusBadRequest)

	if ctx.Value("http.response.status") != http.StatusBadRequest {
		t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusBadRequest)
	}
}

func TestWithVars(t *testing.T) {
	var req http.Request
	vars := map[string]string{
		"foo": "asdf",
		"bar": "qwer",
	}

	getVarsFromRequest = func(r *http.Request) map[string]string {
		if r != &req {
			t.Fatalf("unexpected request: %v != %v", r, req)
		}

		return vars
	}

	ctx := WithVars(Background(), &req)
	for _, testcase := range []struct {
		key      string
		expected interface{}
	}{
		{
			key:      "vars",
			expected: vars,
		},
		{
			key:      "vars.foo",
			expected: "asdf",
		},
		{
			key:      "vars.bar",
			expected: "qwer",
		},
	} {
		v := ctx.Value(testcase.key)

		if !reflect.DeepEqual(v, testcase.expected) {
			t.Fatalf("%q: %v != %v", testcase.key, v, testcase.expected)
		}
	}
}

// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test
// RemoteAddr(). A fake RemoteAddr cannot be set on the HTTP request - it is overwritten
// at the transport layer to 127.0.0.1:<port> . However, as the X-Forwarded-For header
// just contains the IP address, it is different enough for testing.
func TestRemoteAddr(t *testing.T) {
	var expectedRemote string
	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()

		if r.RemoteAddr == expectedRemote {
			t.Errorf("Unexpected matching remote addresses")
		}

		actualRemote := RemoteAddr(r)
		if expectedRemote != actualRemote {
			t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote)
		}

		w.WriteHeader(200)
	}))

	defer backend.Close()
	backendURL, err := url.Parse(backend.URL)
	if err != nil {
		t.Fatal(err)
	}

	proxy := httputil.NewSingleHostReverseProxy(backendURL)
	frontend := httptest.NewServer(proxy)
	defer frontend.Close()

	// X-Forwarded-For set by proxy
	expectedRemote = "127.0.0.1"
	proxyReq, err := http.NewRequest("GET", frontend.URL, nil)
	if err != nil {
		t.Fatal(err)
	}

	_, err = http.DefaultClient.Do(proxyReq)
	if err != nil {
		t.Fatal(err)
	}

	// RemoteAddr in X-Real-Ip
	getReq, err := http.NewRequest("GET", backend.URL, nil)
	if err != nil {
		t.Fatal(err)
	}

	expectedRemote = "1.2.3.4"
	getReq.Header["X-Real-ip"] = []string{expectedRemote}
	_, err = http.DefaultClient.Do(getReq)
	if err != nil {
		t.Fatal(err)
	}

	// Valid X-Real-Ip and invalid X-Forwarded-For
	getReq.Header["X-forwarded-for"] = []string{"1.2.3"}
	_, err = http.DefaultClient.Do(getReq)
	if err != nil {
		t.Fatal(err)
	}
}
108	Godeps/_workspace/src/github.com/docker/distribution/context/logger.go generated vendored Normal file
@@ -0,0 +1,108 @@
package context

import (
	"fmt"

	"github.com/docker/distribution/uuid"

	"github.com/Sirupsen/logrus"
)

// Logger provides a leveled-logging interface.
type Logger interface {
	// standard logger methods
	Print(args ...interface{})
	Printf(format string, args ...interface{})
	Println(args ...interface{})

	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
	Fatalln(args ...interface{})

	Panic(args ...interface{})
	Panicf(format string, args ...interface{})
	Panicln(args ...interface{})

	// Leveled methods, from logrus
	Debug(args ...interface{})
	Debugf(format string, args ...interface{})
	Debugln(args ...interface{})

	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Errorln(args ...interface{})

	Info(args ...interface{})
	Infof(format string, args ...interface{})
	Infoln(args ...interface{})

	Warn(args ...interface{})
	Warnf(format string, args ...interface{})
	Warnln(args ...interface{})
}

// WithLogger creates a new context with provided logger.
func WithLogger(ctx Context, logger Logger) Context {
	return WithValue(ctx, "logger", logger)
}

// GetLoggerWithField returns a logger instance with the specified field key
// and value without affecting the context. Extra specified keys will be
// resolved from the context.
func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
	return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
}

// GetLoggerWithFields returns a logger instance with the specified fields
// without affecting the context. Extra specified keys will be resolved from
// the context.
func GetLoggerWithFields(ctx Context, fields map[string]interface{}, keys ...interface{}) Logger {
	return getLogrusLogger(ctx, keys...).WithFields(logrus.Fields(fields))
}

// GetLogger returns the logger from the current context, if present. If one
// or more keys are provided, they will be resolved on the context and
// included in the logger. While context.Value takes an interface, any key
// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
// a logging key field. If context keys are integer constants, for example,
// it's recommended that a String method is implemented.
func GetLogger(ctx Context, keys ...interface{}) Logger {
	return getLogrusLogger(ctx, keys...)
}

// getLogrusLogger returns the logrus logger for the context. If one or more
// keys are provided, they will be resolved on the context and included in
// the logger. Only use this function if specific logrus functionality is
// required.
func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
	var logger *logrus.Entry

	// Get a logger, if it is present.
	loggerInterface := ctx.Value("logger")
	if loggerInterface != nil {
		if lgr, ok := loggerInterface.(*logrus.Entry); ok {
			logger = lgr
		}
	}

	if logger == nil {
		// If no logger is found, just return the standard logger.
		logger = logrus.NewEntry(logrus.StandardLogger())
	}

	fields := logrus.Fields{}

	for _, key := range keys {
		v := ctx.Value(key)
		if v != nil {
			fields[fmt.Sprint(key)] = v
		}
	}

	return logger.WithFields(fields)
}

func init() {
	// inject a logger into the uuid library.
	uuid.Loggerf = GetLogger(Background()).Warnf
}
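A short sketch (again written as if inside the package) contrasting the two decoration styles; the field names are invented for illustration.

func exampleLoggers(ctx Context) {
	// Decorate once, without touching the context.
	GetLoggerWithField(ctx, "component", "registry").Debug("one-off field")

	// Push the decorated logger so callees inherit the fields for free.
	ctx = WithLogger(ctx, GetLoggerWithFields(ctx, map[string]interface{}{
		"component": "registry",
		"attempt":   2,
	}))
	GetLogger(ctx).Warn("this and later messages carry component and attempt")
}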
104	Godeps/_workspace/src/github.com/docker/distribution/context/trace.go generated vendored Normal file
@@ -0,0 +1,104 @@
package context

import (
	"runtime"
	"time"

	"github.com/docker/distribution/uuid"
)

// WithTrace allocates a traced timing span in a new context. This allows a
// caller to track the time between calling WithTrace and the returned done
// function. When the done function is called, a log message is emitted with a
// "trace.duration" field, corresponding to the elapsed time and a
// "trace.func" field, corresponding to the function that called WithTrace.
//
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
// dapper-like tracing. This function should be complemented with a WithSpan
// method that could be used for tracing distributed RPC calls.
//
// The main benefit of this function is to post-process log messages or
// intercept them in a hook to provide timing data. Trace ids and parent ids
// can also be linked to provide call tracing, if so required.
//
// Here is an example of the usage:
//
// 	func timedOperation(ctx Context) {
// 		ctx, done := WithTrace(ctx)
// 		defer done("this will be the log message")
// 		// ... function body ...
// 	}
//
// If the function ran for roughly 1s, such a usage would emit a log message
// as follows:
//
// 	INFO[0001] this will be the log message  trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id=<id> ...
//
// Notice that the function name is automatically resolved, along with the
// package and a trace id is emitted that can be linked with parent ids.
func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
	if ctx == nil {
		ctx = Background()
	}

	pc, file, line, _ := runtime.Caller(1)
	f := runtime.FuncForPC(pc)
	ctx = &traced{
		Context: ctx,
		id:      uuid.Generate().String(),
		start:   time.Now(),
		parent:  GetStringValue(ctx, "trace.id"),
		fnname:  f.Name(),
		file:    file,
		line:    line,
	}

	return ctx, func(format string, a ...interface{}) {
		GetLogger(ctx,
			"trace.duration",
			"trace.id",
			"trace.parent.id",
			"trace.func",
			"trace.file",
			"trace.line").
			Debugf(format, a...)
	}
}

// traced represents a context that is traced for function call timing. It
// also provides fast lookup for the various attributes that are available on
// the trace.
type traced struct {
	Context
	id     string
	parent string
	start  time.Time
	fnname string
	file   string
	line   int
}

func (ts *traced) Value(key interface{}) interface{} {
	switch key {
	case "trace.start":
		return ts.start
	case "trace.duration":
		return time.Since(ts.start)
	case "trace.id":
		return ts.id
	case "trace.parent.id":
		if ts.parent == "" {
			return nil // must return nil to signal no parent.
		}

		return ts.parent
	case "trace.func":
		return ts.fnname
	case "trace.file":
		return ts.file
	case "trace.line":
		return ts.line
	}

	return ts.Context.Value(key)
}
85	Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go generated vendored Normal file
@@ -0,0 +1,85 @@
package context

import (
	"runtime"
	"testing"
	"time"
)

// TestWithTrace ensures that tracing has the expected values in the context.
func TestWithTrace(t *testing.T) {
	pc, file, _, _ := runtime.Caller(0) // get current caller.
	f := runtime.FuncForPC(pc)

	base := []valueTestCase{
		{
			key:           "trace.id",
			notnilorempty: true,
		},

		{
			key:           "trace.file",
			expected:      file,
			notnilorempty: true,
		},
		{
			key:           "trace.line",
			notnilorempty: true,
		},
		{
			key:           "trace.start",
			notnilorempty: true,
		},
	}

	ctx, done := WithTrace(Background())
	defer done("this will be emitted at end of test")

	checkContextForValues(t, ctx, append(base, valueTestCase{
		key:      "trace.func",
		expected: f.Name(),
	}))

	traced := func() {
		parentID := ctx.Value("trace.id") // ensure the parent trace id is correct.

		pc, _, _, _ := runtime.Caller(0) // get current caller.
		f := runtime.FuncForPC(pc)
		ctx, done := WithTrace(ctx)
		defer done("this should be subordinate to the other trace")
		time.Sleep(time.Second)
		checkContextForValues(t, ctx, append(base, valueTestCase{
			key:      "trace.func",
			expected: f.Name(),
		}, valueTestCase{
			key:      "trace.parent.id",
			expected: parentID,
		}))
	}
	traced()

	time.Sleep(time.Second)
}

type valueTestCase struct {
	key           string
	expected      interface{}
	notnilorempty bool // just check not empty/not nil
}

func checkContextForValues(t *testing.T, ctx Context, values []valueTestCase) {

	for _, testcase := range values {
		v := ctx.Value(testcase.key)
		if testcase.notnilorempty {
			if v == nil || v == "" {
				t.Fatalf("value was nil or empty for %q: %#v", testcase.key, v)
			}
			continue
		}

		if v != testcase.expected {
			t.Fatalf("unexpected value for key %q: %v != %v", testcase.key, v, testcase.expected)
		}
	}
}
32	Godeps/_workspace/src/github.com/docker/distribution/context/util.go generated vendored Normal file
@@ -0,0 +1,32 @@
package context

import (
	"time"
)

// Since looks up key, which should be a time.Time, and returns the duration
// since that time. If the key is not found, the value returned will be zero.
// This is helpful when inferring metrics related to context execution times.
func Since(ctx Context, key interface{}) time.Duration {
	startedAtI := ctx.Value(key)
	if startedAtI != nil {
		if startedAt, ok := startedAtI.(time.Time); ok {
			return time.Since(startedAt)
		}
	}

	return 0
}

// GetStringValue returns a string value from the context. The empty string
// will be returned if not found.
func GetStringValue(ctx Context, key string) (value string) {
	stringi := ctx.Value(key)
	if stringi != nil {
		if valuev, ok := stringi.(string); ok {
			value = valuev
		}
	}

	return value
}
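GetResponseLogger above relies on exactly this pattern with the "http.request.startedat" key; a tiny in-package sketch with a made-up key:

func exampleSince() time.Duration {
	ctx := WithValue(Background(), "op.startedat", time.Now()) // "op.startedat" is illustrative
	// ... do the timed work here ...
	return Since(ctx, "op.startedat") // zero if the key is absent or not a time.Time
}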
142	Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go generated vendored Normal file
@@ -0,0 +1,142 @@
// Package auth defines a standard interface for request access controllers.
//
// An access controller has a simple interface with a single `Authorized`
// method which checks that a given request is authorized to perform one or
// more actions on one or more resources. This method should return a non-nil
// error if the request is not authorized.
//
// An implementation registers its access controller by name with a constructor
// which accepts an options map for configuring the access controller.
//
// 	options := map[string]interface{}{"sillySecret": "whysosilly?"}
// 	accessController, _ := auth.GetAccessController("silly", options)
//
// This `accessController` can then be used in a request handler like so:
//
// 	func updateOrder(w http.ResponseWriter, r *http.Request) {
// 		orderNumber := r.FormValue("orderNumber")
// 		resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
// 		access := auth.Access{Resource: resource, Action: "update"}
//
// 		if ctx, err := accessController.Authorized(ctx, access); err != nil {
// 			if challenge, ok := err.(auth.Challenge); ok {
// 				// Let the challenge write the response.
// 				challenge.ServeHTTP(w, r)
// 			} else {
// 				// Some other error.
// 			}
// 		}
// 	}
//
package auth

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
)

// UserInfo carries information about
// an authenticated/authorized client.
type UserInfo struct {
	Name string
}

// Resource describes a resource by type and name.
type Resource struct {
	Type string
	Name string
}

// Access describes a specific action that is
// requested or allowed for a given resource.
type Access struct {
	Resource
	Action string
}

// Challenge is a special error type which is used for HTTP 401 Unauthorized
// responses and is able to write the response with WWW-Authenticate challenge
// header values based on the error.
type Challenge interface {
	error
	// ServeHTTP prepares the request to conduct the appropriate challenge
	// response. For most implementations, simply calling ServeHTTP should be
	// sufficient. Because no body is written, users may write a custom body after
	// calling ServeHTTP, but any headers must be written before the call and may
	// be overwritten.
	ServeHTTP(w http.ResponseWriter, r *http.Request)
}

// AccessController controls access to registry resources based on a request
// and required access levels for a request. Implementations can support both
// complete denial and http authorization challenges.
type AccessController interface {
	// Authorized returns a new authorized context if the request is granted
	// access and a non-nil error otherwise. If one or more Access structs are
	// provided, the requested access will be compared with what is available
	// to the context. The given context will contain a "http.request" key with
	// a `*http.Request` value. If the error is non-nil, access should always
	// be denied. The error may be of type Challenge, in which case the caller
	// may have the Challenge handle the request or choose what action to take
	// based on the Challenge header or response status. The returned context
	// object should have an "auth.user" value set to a UserInfo struct.
	Authorized(ctx context.Context, access ...Access) (context.Context, error)
}

// WithUser returns a context with the authorized user info.
func WithUser(ctx context.Context, user UserInfo) context.Context {
	return userInfoContext{
		Context: ctx,
		user:    user,
	}
}

type userInfoContext struct {
	context.Context
	user UserInfo
}

func (uic userInfoContext) Value(key interface{}) interface{} {
	switch key {
	case "auth.user":
		return uic.user
	case "auth.user.name":
		return uic.user.Name
	}

	return uic.Context.Value(key)
}

// InitFunc is the type of an AccessController factory function and is used
// to register the constructor for different AccessController backends.
type InitFunc func(options map[string]interface{}) (AccessController, error)

var accessControllers map[string]InitFunc

func init() {
	accessControllers = make(map[string]InitFunc)
}

// Register is used to register an InitFunc for
// an AccessController backend with the given name.
func Register(name string, initFunc InitFunc) error {
	if _, exists := accessControllers[name]; exists {
		return fmt.Errorf("name already registered: %s", name)
	}

	accessControllers[name] = initFunc

	return nil
}

// GetAccessController constructs an AccessController
// with the given options using the named backend.
func GetAccessController(name string, options map[string]interface{}) (AccessController, error) {
	if initFunc, exists := accessControllers[name]; exists {
		return initFunc(options)
	}

	return nil, fmt.Errorf("no access controller registered with name: %s", name)
}
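A hedged sketch of what implementing and registering a backend looks like against this interface; the "allowall" package, name, and grant-everything behavior are invented purely for illustration.

package allowall // hypothetical backend package

import (
	"github.com/docker/distribution/registry/auth"
	"golang.org/x/net/context"
)

type allowAll struct{}

// Authorized grants every request and stamps a placeholder user, satisfying
// the contract that the returned context carries "auth.user".
func (allowAll) Authorized(ctx context.Context, access ...auth.Access) (context.Context, error) {
	return auth.WithUser(ctx, auth.UserInfo{Name: "anonymous"}), nil
}

func init() {
	// After this, auth.GetAccessController("allowall", nil) builds the controller.
	auth.Register("allowall", auth.InitFunc(func(options map[string]interface{}) (auth.AccessController, error) {
		return allowAll{}, nil
	}))
}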
96	Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go generated vendored Normal file
@@ -0,0 +1,96 @@
// Package silly provides a simple authentication scheme that checks for the
// existence of an Authorization header and issues access if it is present
// and non-empty.
//
// This package is present as an example implementation of a minimal
// auth.AccessController and for testing. This is not suitable for any kind of
// production security.
package silly

import (
	"fmt"
	"net/http"
	"strings"

	ctxu "github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/auth"
	"golang.org/x/net/context"
)

// accessController provides a simple implementation of auth.AccessController
// that simply checks for a non-empty Authorization header. It is useful for
// demonstration and testing.
type accessController struct {
	realm   string
	service string
}

var _ auth.AccessController = &accessController{}

func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
	realm, present := options["realm"]
	if _, ok := realm.(string); !present || !ok {
		return nil, fmt.Errorf(`"realm" must be set for silly access controller`)
	}

	service, present := options["service"]
	if _, ok := service.(string); !present || !ok {
		return nil, fmt.Errorf(`"service" must be set for silly access controller`)
	}

	return &accessController{realm: realm.(string), service: service.(string)}, nil
}

// Authorized simply checks for the existence of the authorization header,
// responding with a bearer challenge if it doesn't exist.
func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) {
	req, err := ctxu.GetRequest(ctx)
	if err != nil {
		return nil, err
	}

	if req.Header.Get("Authorization") == "" {
		challenge := challenge{
			realm:   ac.realm,
			service: ac.service,
		}

		if len(accessRecords) > 0 {
			var scopes []string
			for _, access := range accessRecords {
				scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action))
			}
			challenge.scope = strings.Join(scopes, " ")
		}

		return nil, &challenge
	}

	return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil
}

type challenge struct {
	realm   string
	service string
	scope   string
}

func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service)

	if ch.scope != "" {
		header = fmt.Sprintf("%s,scope=%q", header, ch.scope)
	}

	w.Header().Set("WWW-Authenticate", header)
	w.WriteHeader(http.StatusUnauthorized)
}

func (ch *challenge) Error() string {
	return fmt.Sprintf("silly authentication challenge: %#v", ch)
}

// init registers the silly auth backend.
func init() {
	auth.Register("silly", auth.InitFunc(newAccessController))
}
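Usage then goes through the registry, mirroring the package auth doc comment; a sketch with placeholder realm/service values, not part of this file:

func exampleSillyUsage(w http.ResponseWriter, r *http.Request) {
	ac, err := auth.GetAccessController("silly", map[string]interface{}{
		"realm":   "registry",      // placeholder
		"service": "token-service", // placeholder
	})
	if err != nil {
		panic(err) // construction fails unless both options are strings
	}

	// The context must carry "http.request" for ctxu.GetRequest to succeed.
	ctx := ctxu.WithRequest(ctxu.Background(), r)

	grantedCtx, err := ac.Authorized(ctx)
	if challenge, ok := err.(auth.Challenge); ok {
		challenge.ServeHTTP(w, r) // writes the WWW-Authenticate bearer header and a 401
		return
	} else if err != nil {
		return
	}

	_ = grantedCtx.Value("auth.user") // auth.UserInfo{Name: "silly"}
}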
70	Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go generated vendored Normal file
@@ -0,0 +1,70 @@
package silly

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/docker/distribution/registry/auth"
	"golang.org/x/net/context"
)

func TestSillyAccessController(t *testing.T) {
	ac := &accessController{
		realm:   "test-realm",
		service: "test-service",
	}

	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithValue(nil, "http.request", r)
		authCtx, err := ac.Authorized(ctx)
		if err != nil {
			switch err := err.(type) {
			case auth.Challenge:
				err.ServeHTTP(w, r)
				return
			default:
				t.Fatalf("unexpected error authorizing request: %v", err)
			}
		}

		userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo)
		if !ok {
			t.Fatal("silly accessController did not set auth.user context")
		}

		if userInfo.Name != "silly" {
			t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name)
		}

		w.WriteHeader(http.StatusNoContent)
	}))

	resp, err := http.Get(server.URL)
	if err != nil {
		t.Fatalf("unexpected error during GET: %v", err)
	}
	defer resp.Body.Close()

	// Request should not be authorized
	if resp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized)
	}

	req, err := http.NewRequest("GET", server.URL, nil)
	if err != nil {
		t.Fatalf("unexpected error creating new request: %v", err)
	}
	req.Header.Set("Authorization", "seriously, anything")

	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("unexpected error during GET: %v", err)
	}
	defer resp.Body.Close()

	// Request should now be authorized
	if resp.StatusCode != http.StatusNoContent {
		t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent)
	}
}
274
Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go
generated
vendored
Normal file
274
Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go
generated
vendored
Normal file
|
@ -0,0 +1,274 @@
|
|||
package token
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
ctxu "github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/registry/auth"
|
||||
"github.com/docker/libtrust"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// accessSet maps a typed, named resource to
|
||||
// a set of actions requested or authorized.
|
||||
type accessSet map[auth.Resource]actionSet
|
||||
|
||||
// newAccessSet constructs an accessSet from
|
||||
// a variable number of auth.Access items.
|
||||
func newAccessSet(accessItems ...auth.Access) accessSet {
|
||||
accessSet := make(accessSet, len(accessItems))
|
||||
|
||||
for _, access := range accessItems {
|
||||
resource := auth.Resource{
|
||||
Type: access.Type,
|
||||
Name: access.Name,
|
||||
}
|
||||
|
||||
set, exists := accessSet[resource]
|
||||
if !exists {
|
||||
set = newActionSet()
|
||||
accessSet[resource] = set
|
||||
}
|
||||
|
||||
set.add(access.Action)
|
||||
}
|
||||
|
||||
return accessSet
|
||||
}
|
||||
|
||||
// contains returns whether or not the given access is in this accessSet.
|
||||
func (s accessSet) contains(access auth.Access) bool {
|
||||
actionSet, ok := s[access.Resource]
|
||||
if ok {
|
||||
return actionSet.contains(access.Action)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// scopeParam returns a collection of scopes which can
|
||||
// be used for a WWW-Authenticate challenge parameter.
|
||||
// See https://tools.ietf.org/html/rfc6750#section-3
|
||||
func (s accessSet) scopeParam() string {
|
||||
scopes := make([]string, 0, len(s))
|
||||
|
||||
for resource, actionSet := range s {
|
||||
actions := strings.Join(actionSet.keys(), ",")
|
||||
scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions))
|
||||
}
|
||||
|
||||
return strings.Join(scopes, " ")
|
||||
}
|
||||
|
||||
// Errors used and exported by this package.
|
||||
var (
|
||||
ErrInsufficientScope = errors.New("insufficient scope")
|
||||
ErrTokenRequired = errors.New("authorization token required")
|
||||
)
|
||||
|
||||
// authChallenge implements the auth.Challenge interface.
|
||||
type authChallenge struct {
|
||||
err error
|
||||
realm string
|
||||
service string
|
||||
accessSet accessSet
|
||||
}
|
||||
|
||||
// Error returns the internal error string for this authChallenge.
|
||||
func (ac *authChallenge) Error() string {
|
||||
return ac.err.Error()
|
||||
}
|
||||
|
||||
// Status returns the HTTP Response Status Code for this authChallenge.
|
||||
func (ac *authChallenge) Status() int {
|
||||
return http.StatusUnauthorized
|
||||
}
|
||||
|
||||
// challengeParams constructs the value to be used in
|
||||
// the WWW-Authenticate response challenge header.
|
||||
// See https://tools.ietf.org/html/rfc6750#section-3
|
||||
func (ac *authChallenge) challengeParams() string {
|
||||
str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service)
|
||||
|
||||
if scope := ac.accessSet.scopeParam(); scope != "" {
|
||||
str = fmt.Sprintf("%s,scope=%q", str, scope)
|
||||
}
|
||||
|
||||
if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken {
|
||||
str = fmt.Sprintf("%s,error=%q", str, "invalid_token")
|
||||
} else if ac.err == ErrInsufficientScope {
|
||||
str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope")
|
||||
}
|
||||
|
||||
return str
|
||||
}
|
||||
|
||||
// SetHeader sets the WWW-Authenticate value for the given header.
|
||||
func (ac *authChallenge) SetHeader(header http.Header) {
|
||||
header.Add("WWW-Authenticate", ac.challengeParams())
|
||||
}
|
||||
|
||||
// ServeHttp handles writing the challenge response
|
||||
// by setting the challenge header and status code.
|
||||
func (ac *authChallenge) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
ac.SetHeader(w.Header())
|
||||
w.WriteHeader(ac.Status())
|
||||
}
|
||||
|
||||
// accessController implements the auth.AccessController interface.
type accessController struct {
	realm       string
	issuer      string
	service     string
	rootCerts   *x509.CertPool
	trustedKeys map[string]libtrust.PublicKey
}

// tokenAccessOptions is a convenience type for handling
// options to the constructor of an accessController.
type tokenAccessOptions struct {
	realm          string
	issuer         string
	service        string
	rootCertBundle string
}

// checkOptions gathers the necessary options
// for an accessController from the given map.
func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) {
	var opts tokenAccessOptions

	keys := []string{"realm", "issuer", "service", "rootcertbundle"}
	vals := make([]string, 0, len(keys))
	for _, key := range keys {
		val, ok := options[key].(string)
		if !ok {
			return opts, fmt.Errorf("token auth requires a valid option string: %q", key)
		}
		vals = append(vals, val)
	}

	opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3]

	return opts, nil
}

// newAccessController creates an accessController using the given options.
func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
	config, err := checkOptions(options)
	if err != nil {
		return nil, err
	}

	fp, err := os.Open(config.rootCertBundle)
	if err != nil {
		return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err)
	}
	defer fp.Close()

	rawCertBundle, err := ioutil.ReadAll(fp)
	if err != nil {
		return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err)
	}

	var rootCerts []*x509.Certificate
	pemBlock, rawCertBundle := pem.Decode(rawCertBundle)
	for pemBlock != nil {
		cert, err := x509.ParseCertificate(pemBlock.Bytes)
		if err != nil {
			return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err)
		}

		rootCerts = append(rootCerts, cert)

		pemBlock, rawCertBundle = pem.Decode(rawCertBundle)
	}

	if len(rootCerts) == 0 {
		return nil, errors.New("token auth requires at least one token signing root certificate")
	}

	rootPool := x509.NewCertPool()
	trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts))
	for _, rootCert := range rootCerts {
		rootPool.AddCert(rootCert)
		pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey))
		if err != nil {
			return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err)
		}
		trustedKeys[pubKey.KeyID()] = pubKey
	}

	return &accessController{
		realm:       config.realm,
		issuer:      config.issuer,
		service:     config.service,
		rootCerts:   rootPool,
		trustedKeys: trustedKeys,
	}, nil
}

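checkOptions requires all four options to be present as strings, so constructing the controller directly looks like the sketch below (all values hypothetical; the same keys are exercised by the test changes further down):

```
options := map[string]interface{}{
	"realm":          "https://auth.example.com/token", // hypothetical
	"issuer":         "auth.example.com",               // hypothetical
	"service":        "registry.example.com",           // hypothetical
	"rootcertbundle": "/path/to/root_bundle.pem",       // hypothetical path
}
ac, err := newAccessController(options)
```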
// Authorized handles checking whether the given request is authorized
// for actions on resources described by the given access items.
func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) {
	challenge := &authChallenge{
		realm:     ac.realm,
		service:   ac.service,
		accessSet: newAccessSet(accessItems...),
	}

	req, err := ctxu.GetRequest(ctx)
	if err != nil {
		return nil, err
	}

	parts := strings.Split(req.Header.Get("Authorization"), " ")

	if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" {
		challenge.err = ErrTokenRequired
		return nil, challenge
	}

	rawToken := parts[1]

	token, err := NewToken(rawToken)
	if err != nil {
		challenge.err = err
		return nil, challenge
	}

	verifyOpts := VerifyOptions{
		TrustedIssuers:    []string{ac.issuer},
		AcceptedAudiences: []string{ac.service},
		Roots:             ac.rootCerts,
		TrustedKeys:       ac.trustedKeys,
	}

	if err = token.Verify(verifyOpts); err != nil {
		challenge.err = err
		return nil, challenge
	}

	accessSet := token.accessSet()
	for _, access := range accessItems {
		if !accessSet.contains(access) {
			challenge.err = ErrInsufficientScope
			return nil, challenge
		}
	}

	return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil
}

// init handles registering the token auth backend.
func init() {
	auth.Register("token", auth.InitFunc(newAccessController))
}
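Since init registers the backend under the name "token", callers are expected to resolve it by name rather than calling newAccessController directly. A hedged sketch (GetAccessController is the lookup exposed by docker/distribution's registry/auth package):

```
// Assumes auth.GetAccessController exists as in docker/distribution.
controller, err := auth.GetAccessController("token", options)
if err != nil {
	// "token" was not registered, or the options failed checkOptions
}
```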
@@ -7,13 +7,13 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"log"
 	"strings"
 	"time"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/libtrust"
 
-	"github.com/docker/vetinari/auth"
+	"github.com/docker/distribution/registry/auth"
 )
 
 const (
@@ -22,10 +22,17 @@ const (
 	TokenSeparator = "."
 )
 
+// Errors used by token parsing and verification.
+var (
+	ErrMalformedToken = errors.New("malformed token")
+	ErrInvalidToken   = errors.New("invalid token")
+)
+
 // ResourceActions stores allowed actions on a named and typed resource.
 type ResourceActions struct {
-	auth.Resource
-	Actions []auth.SimpleScope `json:"actions"`
+	Type    string   `json:"type"`
+	Name    string   `json:"name"`
+	Actions []string `json:"actions"`
 }
 
 // ClaimSet describes the main section of a JSON Web Token.
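With the struct tags above, each element of a token's access claim is plain JSON; an illustrative fragment (repository name and actions taken from the tests below):

```
{
  "type": "repository",
  "name": "foo/bar",
  "actions": ["pull", "push"]
}
```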
@@ -78,14 +85,14 @@ func NewToken(rawToken string) (*Token, error) {
 	}
 
-	var (
-		rawHeader, rawClaims = parts[0], parts[1]
-		headerJSON, claimsJSON []byte
-		err error
-	)
+	rawHeader, rawClaims := parts[0], parts[1]
 
 	defer func() {
 		if err != nil {
-			log.Printf("error while unmarshalling raw token: %s", err)
+			log.Errorf("error while unmarshalling raw token: %s", err)
 		}
 	}()
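As a reminder of what NewToken is splitting on: a JSON Web Token is three base64url-encoded sections joined by TokenSeparator ("."), schematically:

```
base64url(header) "." base64url(claims) "." base64url(signature)
```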
@@ -125,39 +132,39 @@ func NewToken(rawToken string) (*Token, error) {
 func (t *Token) Verify(verifyOpts VerifyOptions) error {
 	// Verify that the Issuer claim is a trusted authority.
 	if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) {
-		log.Printf("token from untrusted issuer: %q", t.Claims.Issuer)
+		log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer)
 		return ErrInvalidToken
 	}
 
 	// Verify that the Audience claim is allowed.
 	if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) {
-		log.Printf("token intended for another audience: %q", t.Claims.Audience)
+		log.Errorf("token intended for another audience: %q", t.Claims.Audience)
 		return ErrInvalidToken
 	}
 
 	// Verify that the token is currently usable and not expired.
 	currentUnixTime := time.Now().Unix()
 	if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) {
-		log.Printf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime)
+		log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime)
 		return ErrInvalidToken
 	}
 
 	// Verify the token signature.
 	if len(t.Signature) == 0 {
-		log.Println("token has no signature")
+		log.Error("token has no signature")
 		return ErrInvalidToken
 	}
 
 	// Verify that the signing key is trusted.
-	signingKey, err := t.verifySigningKey(verifyOpts)
+	signingKey, err := t.VerifySigningKey(verifyOpts)
 	if err != nil {
-		log.Println(err)
+		log.Error(err)
 		return ErrInvalidToken
 	}
 
 	// Finally, verify the signature of the token using the key which signed it.
 	if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil {
-		log.Printf("unable to verify token signature: %s", err)
+		log.Errorf("unable to verify token signature: %s", err)
 		return ErrInvalidToken
 	}
 
@@ -175,7 +182,7 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error {
 // the trustedKeys field of the given verify options.
 // Each of these methods is tried in that order of preference until the
 // signing key is found or an error is returned.
-func (t *Token) verifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) {
+func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) {
 	// First attempt to get an x509 certificate chain from the header.
 	var (
 		x5c = t.Header.X5c
@@ -304,22 +311,31 @@ func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pub
 // accessSet returns a set of actions available for the resource
 // actions listed in the `access` section of this token.
-func (t *Token) scopes(resource auth.Resource) []auth.Scope {
-	scopes := make([]auth.Scope, 0, 1)
+func (t *Token) accessSet() accessSet {
+	if t.Claims == nil {
+		return nil
+	}
 
-	if t.Claims != nil {
-		for _, resourceActions := range t.Claims.Access {
-			if resourceActions.Type != resource.Type || resourceActions.Name != resource.Name {
-				continue
-			}
-			for _, act := range resourceActions.Actions {
-				scopes = append(scopes, &act)
-			}
+	accessSet := make(accessSet, len(t.Claims.Access))
+
+	for _, resourceActions := range t.Claims.Access {
+		resource := auth.Resource{
+			Type: resourceActions.Type,
+			Name: resourceActions.Name,
 		}
+
+		set, exists := accessSet[resource]
+		if !exists {
+			set = newActionSet()
+			accessSet[resource] = set
+		}
+
+		for _, action := range resourceActions.Actions {
+			set.add(action)
+		}
 	}
 
-	return scopes
+	return accessSet
 }
 
 func (t *Token) compactRaw() string {
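To see the new shape in use: Authorized above asks the accessSet whether each requested action is present. A short sketch with values from the tests (types as defined in this package):

```
set := token.accessSet()
ok := set.contains(auth.Access{
	Resource: auth.Resource{Type: "repository", Name: "foo/bar"},
	Action:   "pull",
})
```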
@@ -15,8 +15,9 @@ import (
 	"testing"
 	"time"
 
+	"github.com/docker/distribution/registry/auth"
 	"github.com/docker/libtrust"
-	"github.com/docker/vetinari/auth"
+	"golang.org/x/net/context"
 )
 
 func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) {
@@ -149,11 +150,6 @@ func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey l
 	return NewToken(tokenString)
 }
 
-// Some people put this in the non-test files... I prefer to have it as a test.
-func TestAuthorizerInterface(t *testing.T) {
-	var _ auth.Authorizer = &tokenAuthorizer{}
-}
-
 // This test makes 4 tokens with a varying number of intermediate
 // certificates ranging from no intermediate chain to a length of 3
 // intermediates.
@@ -164,11 +160,9 @@ func TestTokenVerify(t *testing.T) {
 		audience = "test-audience"
 		access   = []*ResourceActions{
 			{
-				Resource: auth.Resource{
-					Type: "repository",
-					Name: "foo/bar",
-				},
-				Actions: []auth.SimpleScope{auth.SimpleScope("pull"), auth.SimpleScope("push")},
+				Type:    "repository",
+				Name:    "foo/bar",
+				Actions: []string{"pull", "push"},
 			},
 		}
 	)
@@ -263,51 +257,54 @@ func TestAccessController(t *testing.T) {
 	issuer := "test-issuer.example.com"
 	service := "test-service.example.com"
 
-	jsonConf := fmt.Sprintf(
-		"{\"realm\":\"%s\", \"issuer\":\"%s\", \"service\":\"%s\", \"root_cert_bundle\":\"%s\"}",
-		realm,
-		issuer,
-		service,
-		rootCertBundleFilename,
-	)
-	options := json.RawMessage{}
-	options.UnmarshalJSON([]byte(jsonConf))
+	options := map[string]interface{}{
+		"realm":          realm,
+		"issuer":         issuer,
+		"service":        service,
+		"rootcertbundle": rootCertBundleFilename,
+	}
 
-	accessController, err := NewTokenAuthorizer(options)
+	accessController, err := newAccessController(options)
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	// 1. Make a mock http.Request with no token.
-	req, err := http.NewRequest("GET", "http://example.com/foo/bar/init", nil)
+	req, err := http.NewRequest("GET", "http://example.com/foo", nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	testAccess := auth.SimpleScope("baz")
-	testResource := auth.Resource{Type: "repo", Name: "foo/bar"}
+	testAccess := auth.Access{
+		Resource: auth.Resource{
+			Type: "foo",
+			Name: "bar",
+		},
+		Action: "baz",
+	}
 
-	//ctx := context.WithValue(nil, "http.request", req)
-	userInfo, err := accessController.Authorize(req, testAccess)
-	challenge, ok := err.(*authChallenge)
+	ctx := context.WithValue(nil, "http.request", req)
+	authCtx, err := accessController.Authorized(ctx, testAccess)
+	challenge, ok := err.(auth.Challenge)
 	if !ok {
-		t.Fatalf("accessController did not return a challenge")
+		t.Fatal("accessController did not return a challenge")
 	}
 
 	if challenge.Error() != ErrTokenRequired.Error() {
 		t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrTokenRequired)
 	}
 
-	if userInfo != nil {
-		t.Fatalf("expected nil User but got %s", userInfo.Name)
+	if authCtx != nil {
+		t.Fatalf("expected nil auth context but got %s", authCtx)
 	}
 
 	// 2. Supply an invalid token.
 	token, err := makeTestToken(
 		issuer, service,
 		[]*ResourceActions{{
-			Resource: testResource,
-			Actions:  []auth.SimpleScope{testAccess},
+			Type:    testAccess.Type,
+			Name:    testAccess.Name,
+			Actions: []string{testAccess.Action},
 		}},
 		rootKeys[1], 1, // Everything is valid except the key which signed it.
 	)
@@ -317,8 +314,8 @@ func TestAccessController(t *testing.T) {
 	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))
 
-	userInfo, err = accessController.Authorize(req, testAccess)
-	challenge, ok = err.(*authChallenge)
+	authCtx, err = accessController.Authorized(ctx, testAccess)
+	challenge, ok = err.(auth.Challenge)
 	if !ok {
 		t.Fatal("accessController did not return a challenge")
 	}
@@ -327,8 +324,8 @@ func TestAccessController(t *testing.T) {
 		t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrTokenRequired)
 	}
 
-	if userInfo != nil {
-		t.Fatalf("expected nil User but got %s", userInfo.Name)
+	if authCtx != nil {
+		t.Fatalf("expected nil auth context but got %s", authCtx)
 	}
 
 	// 3. Supply a token with insufficient access.
@@ -343,8 +340,8 @@ func TestAccessController(t *testing.T) {
 	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))
 
-	userInfo, err = accessController.Authorize(req, testAccess)
-	challenge, ok = err.(*authChallenge)
+	authCtx, err = accessController.Authorized(ctx, testAccess)
+	challenge, ok = err.(auth.Challenge)
 	if !ok {
 		t.Fatal("accessController did not return a challenge")
 	}
@@ -353,16 +350,17 @@ func TestAccessController(t *testing.T) {
 		t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope)
 	}
 
-	if userInfo != nil {
-		t.Fatalf("expected nil User but got %s", userInfo.Name)
+	if authCtx != nil {
+		t.Fatalf("expected nil auth context but got %s", authCtx)
 	}
 
 	// 4. Supply the token we need, or deserve, or whatever.
 	token, err = makeTestToken(
 		issuer, service,
 		[]*ResourceActions{{
-			Resource: testResource,
-			Actions:  []auth.SimpleScope{testAccess},
+			Type:    testAccess.Type,
+			Name:    testAccess.Name,
+			Actions: []string{testAccess.Action},
 		}},
 		rootKeys[0], 1,
 	)
@@ -372,16 +370,13 @@ func TestAccessController(t *testing.T) {
 	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))
 
-	// 5. We've tested the various failure cases, we now have to treat accessController specifically as
-	// a tokenAuthorizer to test the success case via the .authorize method. This lets us inject the
-	// correct parsed resource as mux will not parse the resource correctly from the test req object.
-	auther, _ := accessController.(*tokenAuthorizer)
-	userInfo, err = auther.authorize(token, testResource, testAccess)
+	authCtx, err = accessController.Authorized(ctx, testAccess)
 	if err != nil {
 		t.Fatalf("accessController returned unexpected error: %s", err)
 	}
 
-	if userInfo == nil {
+	userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo)
+	if !ok {
 		t.Fatal("token accessController did not set auth.user context")
 	}
Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go (generated, vendored, new file; 112 lines)
@@ -0,0 +1,112 @@
// Package uuid provides simple UUID generation. Only version 4 style UUIDs
// can be generated.
//
// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
package uuid

import (
	"crypto/rand"
	"fmt"
	"io"
	"log"
	"syscall"
	"time"
)

const (
	// Bits is the number of bits in a UUID
	Bits = 128

	// Size is the number of bytes in a UUID
	Size = Bits / 8

	format = "%08x-%04x-%04x-%04x-%012x"
)

var (
	// ErrUUIDInvalid indicates a parsed string is not a valid uuid.
	ErrUUIDInvalid = fmt.Errorf("invalid uuid")

	// Loggerf can be used to override the default logging destination. Such
	// log messages in this library should be logged at warning or higher.
	Loggerf = log.Printf
)

// UUID represents a UUID value. UUIDs can be compared and set to other values
// and accessed by byte.
type UUID [Size]byte

// Generate creates a new, version 4 uuid.
func Generate() (u UUID) {
	const (
		// ensures we backoff for less than 450ms total. Use the following to
		// select new value, in units of 10ms:
		//   n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2
		maxretries = 9
		backoff    = time.Millisecond * 10
	)

	var (
		totalBackoff time.Duration
		retries      int
	)

	for {
		// This should never block but the read may fail. Because of this,
		// we just try to read the random number generator until we get
		// something. This is a very rare condition but may happen.
		b := time.Duration(retries) * backoff
		time.Sleep(b)
		totalBackoff += b

		_, err := io.ReadFull(rand.Reader, u[:])
		if err != nil {
			if err == syscall.EPERM {
				// EPERM represents an entropy pool exhaustion, a condition under
				// which we backoff and retry.
				if retries < maxretries {
					retries++
					Loggerf("error generating version 4 uuid, retrying: %v", err)
					continue
				}
			}

			// Any other errors represent a system problem. What did someone
			// do to /dev/urandom?
			panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff, err))
		}

		break
	}

	u[6] = (u[6] & 0x0f) | 0x40 // set version byte
	u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}

	return u
}

// Parse attempts to extract a uuid from the string or returns an error.
func Parse(s string) (u UUID, err error) {
	if len(s) != 36 {
		return UUID{}, ErrUUIDInvalid
	}

	// create stack addresses for each section of the uuid.
	p := make([][]byte, 5)

	if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil {
		return u, err
	}

	copy(u[0:4], p[0])
	copy(u[4:6], p[1])
	copy(u[6:8], p[2])
	copy(u[8:10], p[3])
	copy(u[10:16], p[4])

	return
}

func (u UUID) String() string {
	return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:])
}
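A quick usage sketch for this vendored package (the printed value only shows the shape; every call generates a fresh random UUID):

```
id := uuid.Generate()
fmt.Println(id) // e.g. 20cc7775-2671-43c7-8742-51d1cfa23258

parsed, err := uuid.Parse(id.String())
if err != nil || parsed != id {
	// the round trip should always succeed for generated values
}
```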
Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go (generated, vendored, new file; 48 lines)
@@ -0,0 +1,48 @@
package uuid

import (
	"testing"
)

const iterations = 1000

func TestUUID4Generation(t *testing.T) {
	for i := 0; i < iterations; i++ {
		u := Generate()

		if u[6]&0xf0 != 0x40 {
			t.Fatalf("version byte not correctly set: %v, %08b %08b", u, u[6], u[6]&0xf0)
		}

		if u[8]&0xc0 != 0x80 {
			t.Fatalf("top order 8th byte not correctly set: %v, %b", u, u[8])
		}
	}
}

func TestParseAndEquality(t *testing.T) {
	for i := 0; i < iterations; i++ {
		u := Generate()

		parsed, err := Parse(u.String())
		if err != nil {
			t.Fatalf("error parsing uuid %v: %v", u, err)
		}

		if parsed != u {
			t.Fatalf("parsing round trip failed: %v != %v", parsed, u)
		}
	}

	for _, c := range []string{
		"bad",
		"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",  // correct length, incorrect format
		" 20cc7775-2671-43c7-8742-51d1cfa23258", // leading space
		"20cc7775-2671-43c7-8742-51d1cfa23258 ", // trailing space
		"00000000-0000-0000-0000-x00000000000",  // out of range character
	} {
		if _, err := Parse(c); err == nil {
			t.Fatalf("parsing %q should have failed", c)
		}
	}
}
@@ -1 +0,0 @@
/db/
@@ -1,30 +0,0 @@
language: go
go:
  - 1.4
  - tip

sudo: false

before_install:
  - go get golang.org/x/tools/cmd/cover

script:
  - go test -race -cover ./...

notifications:
  irc:
    channels:
      - "chat.freenode.net#flynn"
    use_notice: true
    skip_join: true
    on_success: change
    on_failure: always
    template:
      - "%{repository}/%{branch} - %{commit}: %{message} %{build_url}"
  email:
    on_success: never
    on_failure: always

matrix:
  allow_failures:
    - go: tip
@@ -1,60 +0,0 @@
{
	"ImportPath": "github.com/flynn/go-tuf",
	"GoVersion": "go1.4.1",
	"Packages": [
		"./..."
	],
	"Deps": [
		{
			"ImportPath": "github.com/agl/ed25519",
			"Rev": "d2b94fd789ea21d12fac1a4443dd3a3f79cda72c"
		},
		{
			"ImportPath": "github.com/boltdb/bolt",
			"Comment": "v1.0-19-g00c6357",
			"Rev": "00c635718fa0592764453e60194451889876eea0"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/term",
			"Comment": "v1.4.1-775-g70fbd45",
			"Rev": "70fbd45a5c88f6f39a07b04f81a07721bf5f3eed"
		},
		{
			"ImportPath": "github.com/dustin/go-humanize",
			"Rev": "145fabdb1ab757076a70a886d092a3af27f66f4c"
		},
		{
			"ImportPath": "github.com/flynn/go-docopt",
			"Comment": "0.6.1-rc2-26-gf6dd2eb",
			"Rev": "f6dd2ebbb31e9721c860cf1faf5c944aa73e3844"
		},
		{
			"ImportPath": "github.com/tent/canonical-json-go",
			"Rev": "96e4ba3a7613a1216cbd1badca4efe382adea337"
		},
		{
			"ImportPath": "golang.org/x/crypto/nacl/secretbox",
			"Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
		},
		{
			"ImportPath": "golang.org/x/crypto/pbkdf2",
			"Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
		},
		{
			"ImportPath": "golang.org/x/crypto/poly1305",
			"Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
		},
		{
			"ImportPath": "golang.org/x/crypto/salsa20/salsa",
			"Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
		},
		{
			"ImportPath": "golang.org/x/crypto/scrypt",
			"Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98"
		},
		{
			"ImportPath": "gopkg.in/check.v1",
			"Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673"
		}
	]
}
@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
@@ -1,2 +0,0 @@
Jonathan Rudenberg <jonathan@flynn.io> (github: titanous)
Lewis Marshall <lewis@flynn.io> (github: lmars)
@@ -1,511 +0,0 @@
# go-tuf [](https://travis-ci.org/flynn/go-tuf)

This is a Go implementation of [The Update Framework (TUF)](http://theupdateframework.com/),
a framework for securing software update systems.

## Directory layout

A TUF repository has the following directory layout:

```
.
├── keys
├── repository
│   └── targets
└── staged
    └── targets
```

The directories contain the following files:

* `keys/` - signing keys (optionally encrypted) with filename pattern `ROLE.json`
* `repository/` - signed manifests
* `repository/targets/` - hashed target files
* `staged/` - either signed, unsigned or partially signed manifests
* `staged/targets/` - unhashed target files

## CLI

`go-tuf` provides a CLI for managing a local TUF repository.

### Install

```
go get github.com/flynn/go-tuf/cmd/tuf
```

### Commands

#### `tuf init [--consistent-snapshot=false]`

Initializes a new repository.

This is only required if the repository should not generate consistent
snapshots (i.e. by passing `--consistent-snapshot=false`). If consistent
snapshots should be generated, the repository will be implicitly
initialized to do so when generating keys.

#### `tuf gen-key <role>`

Prompts the user for an encryption passphrase (unless the
`--insecure-plaintext` flag is set), then generates a new signing key and
writes it to the relevant key file in the `keys` directory. It also stages
the addition of the new key to the `root` manifest.

#### `tuf add [<path>...]`

Hashes files in the `staged/targets` directory at the given path(s), then
updates and stages the `targets` manifest. Specifying no paths hashes all
files in the `staged/targets` directory.

#### `tuf remove [<path>...]`

Stages the removal of files with the given path(s) from the `targets` manifest
(they get removed from the filesystem when the change is committed). Specifying
no paths removes all files from the `targets` manifest.

#### `tuf snapshot [--compression=<format>]`

Expects a staged, fully signed `targets` manifest and stages an appropriate
`snapshot` manifest. It optionally compresses the staged `targets` manifest.

#### `tuf timestamp`

Stages an appropriate `timestamp` manifest. If a `snapshot` manifest is staged,
it must be fully signed.

#### `tuf sign ROLE`

Signs the given role's staged manifest with all keys present in the `keys`
directory for that role.

#### `tuf commit`

Verifies that all staged changes contain the correct information and are signed
to the correct threshold, then moves the staged files into the `repository`
directory. It also removes any target files which are not in the `targets`
manifest.

#### `tuf regenerate [--consistent-snapshot=false]`

Recreates the `targets` manifest based on the files in `repository/targets`.

#### `tuf clean`

Removes all staged manifests and targets.

#### `tuf root-keys`

Outputs a JSON serialized array of root keys to STDOUT. The resulting JSON
should be distributed to clients for performing initial updates.

For a list of supported commands, run `tuf help` from the command line.

### Examples

The following are example workflows for managing a TUF repository with the CLI.

The `tree` commands do not need to be run, but their output serves as an
illustration of what files should exist after performing certain commands.

Although only two machines are referenced (i.e. the "root" and "repo" boxes),
the workflows can be trivially extended to many signing machines by copying
staged changes and signing on each machine in turn before finally committing.

Some key IDs are truncated for illustrative purposes.

#### Create signed root manifest

Generate a root key on the root box:

```
$ tuf gen-key root
Enter root keys passphrase:
Repeat root keys passphrase:
Generated root key with ID 184b133f

$ tree .
.
├── keys
│   └── root.json
├── repository
└── staged
    ├── root.json
    └── targets
```

Copy `staged/root.json` from the root box to the repo box and generate targets,
snapshot and timestamp keys:

```
$ tree .
.
├── keys
├── repository
└── staged
    ├── root.json
    └── targets

$ tuf gen-key targets
Enter targets keys passphrase:
Repeat targets keys passphrase:
Generated targets key with ID 8cf4810c

$ tuf gen-key snapshot
Enter snapshot keys passphrase:
Repeat snapshot keys passphrase:
Generated snapshot key with ID 3e070e53

$ tuf gen-key timestamp
Enter timestamp keys passphrase:
Repeat timestamp keys passphrase:
Generated timestamp key with ID a3768063

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
└── staged
    ├── root.json
    └── targets
```

Copy `staged/root.json` from the repo box back to the root box and sign it:

```
$ tree .
.
├── keys
│   ├── root.json
├── repository
└── staged
    ├── root.json
    └── targets

$ tuf sign root.json
Enter root keys passphrase:
```

The staged `root.json` can now be copied back to the repo box ready to be
committed alongside other manifests.

#### Add a target file

Assuming a staged, signed `root` manifest and the file to add exists at
`staged/targets/foo/bar/baz.txt`:

```
$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
└── staged
    ├── root.json
    └── targets
        └── foo
            └── bar
                └── baz.txt

$ tuf add foo/bar/baz.txt
Enter targets keys passphrase:

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
└── staged
    ├── root.json
    ├── targets
    │   └── foo
    │       └── bar
    │           └── baz.txt
    └── targets.json

$ tuf snapshot
Enter snapshot keys passphrase:

$ tuf timestamp
Enter timestamp keys passphrase:

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
└── staged
    ├── root.json
    ├── snapshot.json
    ├── targets
    │   └── foo
    │       └── bar
    │           └── baz.txt
    ├── targets.json
    └── timestamp.json

$ tuf commit

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
```

#### Remove a target file

Assuming the file to remove is at `repository/targets/foo/bar/baz.txt`:

```
$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged

$ tuf remove foo/bar/baz.txt
Enter targets keys passphrase:

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
    └── targets.json

$ tuf snapshot
Enter snapshot keys passphrase:

$ tuf timestamp
Enter timestamp keys passphrase:

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
    ├── snapshot.json
    ├── targets.json
    └── timestamp.json

$ tuf commit

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
└── staged
```

#### Regenerate manifests based on targets tree

```
$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged

$ tuf regenerate
Enter targets keys passphrase:

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
    └── targets.json

$ tuf snapshot
Enter snapshot keys passphrase:

$ tuf timestamp
Enter timestamp keys passphrase:

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
    ├── snapshot.json
    ├── targets.json
    └── timestamp.json

$ tuf commit

$ tree .
.
├── keys
│   ├── snapshot.json
│   ├── targets.json
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
```

#### Update timestamp.json

```
$ tree .
.
├── keys
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged

$ tuf timestamp
Enter timestamp keys passphrase:

$ tree .
.
├── keys
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
    └── timestamp.json

$ tuf commit

$ tree .
.
├── keys
│   └── timestamp.json
├── repository
│   ├── root.json
│   ├── snapshot.json
│   ├── targets
│   │   └── foo
│   │       └── bar
│   │           └── baz.txt
│   ├── targets.json
│   └── timestamp.json
└── staged
```

#### Modify key thresholds

TODO

## Client

For the client package, see https://godoc.org/github.com/flynn/go-tuf/client.

For the client CLI, see https://github.com/flynn/go-tuf/tree/master/cmd/tuf-client.
@@ -1,627 +0,0 @@
package client

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/endophage/go-tuf/data"
	"github.com/endophage/go-tuf/keys"
	"github.com/endophage/go-tuf/signed"
	"github.com/endophage/go-tuf/util"
)

// LocalStore is local storage for downloaded top-level metadata.
type LocalStore interface {
	// GetMeta returns top-level metadata from local storage. The keys are
	// in the form `ROLE.json`, with ROLE being a valid top-level role.
	GetMeta() (map[string]json.RawMessage, error)

	// SetMeta persists the given top-level metadata in local storage, the
	// name taking the same format as the keys returned by GetMeta.
	SetMeta(name string, meta json.RawMessage) error
}

// RemoteStore downloads top-level metadata and target files from a remote
// repository.
type RemoteStore interface {
	// GetMeta downloads the given metadata from remote storage.
	//
	// `name` is the filename of the metadata (e.g. "root.json")
	//
	// `err` is ErrNotFound if the given file does not exist.
	//
	// `size` is the size of the stream, -1 indicating an unknown length.
	GetMeta(name string) (stream io.ReadCloser, size int64, err error)

	// GetTarget downloads the given target file from remote storage.
	//
	// `path` is the path of the file relative to the root of the remote
	// targets directory (e.g. "/path/to/file.txt").
	//
	// `err` is ErrNotFound if the given file does not exist.
	//
	// `size` is the size of the stream, -1 indicating an unknown length.
	GetTarget(path string) (stream io.ReadCloser, size int64, err error)
}

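LocalStore is small enough that a memory-backed implementation is a one-liner per method; a minimal sketch (not part of the original package, useful for tests or examples):

```
// memoryStore is a hypothetical in-memory LocalStore.
type memoryStore map[string]json.RawMessage

func (m memoryStore) GetMeta() (map[string]json.RawMessage, error) {
	return m, nil
}

func (m memoryStore) SetMeta(name string, meta json.RawMessage) error {
	m[name] = meta
	return nil
}
```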
// Client provides methods for fetching updates from a remote repository and
// downloading remote target files.
type Client struct {
	local  LocalStore
	remote RemoteStore

	// The following four fields represent the versions of metadata either
	// from local storage or from recently downloaded metadata
	rootVer      int
	targetsVer   int
	snapshotVer  int
	timestampVer int

	// targets is the list of available targets, either from local storage
	// or from recently downloaded targets metadata
	targets data.Files

	// localMeta is the raw metadata from local storage and is used to
	// check whether remote metadata is present locally
	localMeta map[string]json.RawMessage

	// db is a key DB used for verifying metadata
	db *keys.DB

	// consistentSnapshot indicates whether the remote storage is using
	// consistent snapshots (as specified in root.json)
	consistentSnapshot bool
}

func NewClient(local LocalStore, remote RemoteStore) *Client {
	return &Client{
		local:  local,
		remote: remote,
	}
}

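A hedged end-to-end sketch of the intended call sequence, using the hypothetical memoryStore above and any RemoteStore implementation (Init and Update are defined below; rootKeys would be distributed out of band, e.g. via `tuf root-keys`):

```
local := memoryStore{}
c := NewClient(local, remote) // remote: any RemoteStore implementation

if err := c.Init(rootKeys, 1); err != nil {
	// handle initialization failure
}

updated, err := c.Update() // returns targets that changed since the last update
```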
// Init initializes a local repository.
//
// The latest root.json is fetched from remote storage, verified using rootKeys
// and threshold, and then saved in local storage. It is expected that rootKeys
// were securely distributed with the software being updated.
func (c *Client) Init(rootKeys []*data.Key, threshold int) error {
	if len(rootKeys) < threshold {
		return ErrInsufficientKeys
	}
	rootJSON, err := c.downloadMetaUnsafe("root.json")
	if err != nil {
		return err
	}

	c.db = keys.NewDB()
	rootKeyIDs := make([]string, len(rootKeys))
	for i, key := range rootKeys {
		id := key.ID()
		pk := keys.PublicKey{*key, id}
		rootKeyIDs[i] = id
		if err := c.db.AddKey(&pk); err != nil {
			return err
		}
	}
	role := &data.Role{Threshold: threshold, KeyIDs: rootKeyIDs}
	if err := c.db.AddRole("root", role); err != nil {
		fmt.Println("Error adding role:", err.Error())
		return err
	}

	if err := c.decodeRoot(rootJSON); err != nil {
		return err
	}

	return c.local.SetMeta("root.json", rootJSON)
}

// Update downloads and verifies remote metadata and returns updated targets.
//
// It performs the update part of "The client application" workflow from
// section 5.1 of the TUF spec:
//
// https://github.com/theupdateframework/tuf/blob/v0.9.9/docs/tuf-spec.txt#L714
func (c *Client) Update() (data.Files, error) {
	return c.update(false)
}

func (c *Client) update(latestRoot bool) (data.Files, error) {
	// Always start the update using local metadata
	fmt.Println("tuf client: update()")
	if err := c.getLocalMeta(); err != nil {
		fmt.Println("tuf client: error on getLocalMeta", err.Error())
		if !latestRoot {
			fmt.Println("tuf client: latestRoot is false, calling updateWithLatestRoot()")
			return c.updateWithLatestRoot(nil)
		} else if latestRoot && err == signed.ErrRoleThreshold {
			fmt.Println("tuf client: have latest root and err is signing threshold")
			// Root was updated with new keys, so our local metadata no
			// longer validates. Read only the versions from the local metadata
			// and re-download everything.
			if err := c.getRootAndLocalVersionsUnsafe(); err != nil {
				fmt.Println("tuf client: err on getRootAndLocalVersionsUnsafe")
				return nil, err
			}
		} else {
			fmt.Println("tuf client: got other err: ", err.Error())
			return nil, err
		}
	}

	// Get timestamp.json, extract snapshot.json file meta and save the
	// timestamp.json locally
	fmt.Println("tuf client: downloading timestamp")
	timestampJSON, err := c.downloadMetaUnsafe("timestamp.json")
	if err != nil {
		return nil, err
	}
	snapshotMeta, err := c.decodeTimestamp(timestampJSON)
	if err != nil {
		// ErrRoleThreshold could indicate timestamp keys have been
		// revoked, so retry with the latest root.json
		if isDecodeFailedWithErr(err, signed.ErrRoleThreshold) && !latestRoot {
			return c.updateWithLatestRoot(nil)
		}
		return nil, err
	}
	if err := c.local.SetMeta("timestamp.json", timestampJSON); err != nil {
		return nil, err
	}

	// Return ErrLatestSnapshot if we already have the latest snapshot.json
	if c.hasMeta("snapshot.json", snapshotMeta) {
		return nil, ErrLatestSnapshot{c.snapshotVer}
	}

	// Get snapshot.json, then extract root.json and targets.json file meta.
	//
	// The snapshot.json is only saved locally after checking root.json and
	// targets.json so that it will be re-downloaded on subsequent updates
	// if this update fails.
	fmt.Println("tuf client: downloading snapshot")
	snapshotJSON, err := c.downloadMeta("snapshot.json", snapshotMeta)
	if err != nil {
		return nil, err
	}
	rootMeta, targetsMeta, err := c.decodeSnapshot(snapshotJSON)
	if err != nil {
		// ErrRoleThreshold could indicate snapshot keys have been
		// revoked, so retry with the latest root.json
		if isDecodeFailedWithErr(err, signed.ErrRoleThreshold) && !latestRoot {
			return c.updateWithLatestRoot(nil)
		}
		return nil, err
	}

	// If we don't have the root.json, download it, save it in local
	// storage and restart the update
	if !c.hasMeta("root.json", rootMeta) {
		return c.updateWithLatestRoot(&rootMeta)
	}

	// If we don't have the targets.json, download it, determine updated
	// targets and save targets.json in local storage
	var updatedTargets data.Files
	if !c.hasMeta("targets.json", targetsMeta) {
		fmt.Println("tuf client: downloading targets")
		targetsJSON, err := c.downloadMeta("targets.json", targetsMeta)
		if err != nil {
			return nil, err
		}
		updatedTargets, err = c.decodeTargets(targetsJSON)
		if err != nil {
			return nil, err
		}
		if err := c.local.SetMeta("targets.json", targetsJSON); err != nil {
			return nil, err
		}
	}

	// Save the snapshot.json now that it has been processed successfully
	if err := c.local.SetMeta("snapshot.json", snapshotJSON); err != nil {
		return nil, err
	}

	return updatedTargets, nil
}

func (c *Client) updateWithLatestRoot(m *data.FileMeta) (data.Files, error) {
	var rootJSON json.RawMessage
	var err error
	if m == nil {
		rootJSON, err = c.downloadMetaUnsafe("root.json")
	} else {
		rootJSON, err = c.downloadMeta("root.json", *m)
	}
	fmt.Println("Root JSON")
	fmt.Println(string(rootJSON))
	fmt.Println("End root JSON")
	if err != nil {
		return nil, err
	}
	if err := c.decodeRoot(rootJSON); err != nil {
		return nil, err
	}
	if err := c.local.SetMeta("root.json", rootJSON); err != nil {
		return nil, err
	}
	return c.update(true)
}

// getLocalMeta decodes and verifies metadata from local storage.
//
// The verification of local files is purely for consistency; if an attacker
// has compromised the local storage, there is no guarantee it can be trusted.
func (c *Client) getLocalMeta() error {
	meta, err := c.local.GetMeta()
	if err != nil {
		return err
	}

	if rootJSON, ok := meta["root.json"]; ok {
		// unmarshal root.json without verifying as we need the root
		// keys first
		s := &data.Signed{}
		if err := json.Unmarshal(rootJSON, s); err != nil {
			return err
		}
		root := &data.Root{}
		if err := json.Unmarshal(s.Signed, root); err != nil {
			return err
		}
		db := keys.NewDB()
		for _, k := range root.Keys {
			pk := keys.PublicKey{*k, k.ID()}
			if err := db.AddKey(&pk); err != nil {
				return err
			}
		}
		for name, role := range root.Roles {
			fmt.Println("Adding Role:", name)
			if err := db.AddRole(name, role); err != nil {
				return err
			}
		}
		if err := signed.Verify(s, "root", 0, db); err != nil {
			return err
		}
		c.consistentSnapshot = root.ConsistentSnapshot
		c.db = db
	} else {
		return ErrNoRootKeys
	}

	if snapshotJSON, ok := meta["snapshot.json"]; ok {
		snapshot := &data.Snapshot{}
		if err := signed.UnmarshalTrusted(snapshotJSON, snapshot, "snapshot", c.db); err != nil {
			return err
		}
		c.snapshotVer = snapshot.Version
	}

	if targetsJSON, ok := meta["targets.json"]; ok {
		targets := &data.Targets{}
		if err := signed.UnmarshalTrusted(targetsJSON, targets, "targets", c.db); err != nil {
			return err
		}
		c.targetsVer = targets.Version
		c.targets = targets.Targets
	}

	if timestampJSON, ok := meta["timestamp.json"]; ok {
		timestamp := &data.Timestamp{}
		if err := signed.UnmarshalTrusted(timestampJSON, timestamp, "timestamp", c.db); err != nil {
			return err
		}
		c.timestampVer = timestamp.Version
	}

	c.localMeta = meta
	return nil
}

// maxMetaSize is the maximum number of bytes that will be downloaded when
// getting remote metadata without knowing its length.
const maxMetaSize = 50 * 1024

// downloadMetaUnsafe downloads top-level metadata from remote storage without
// verifying its length and hashes (used for example to download timestamp.json
// which has unknown size). It will download at most maxMetaSize bytes.
func (c *Client) downloadMetaUnsafe(name string) ([]byte, error) {
	r, size, err := c.remote.GetMeta(name)
	if err != nil {
		if IsNotFound(err) {
			return nil, ErrMissingRemoteMetadata{name}
		}
		return nil, ErrDownloadFailed{name, err}
	}
	defer r.Close()

	// return ErrMetaTooLarge if the reported size is greater than maxMetaSize
	if size > maxMetaSize {
		return nil, ErrMetaTooLarge{name, size}
	}

	// although the size has been checked above, use a LimitReader in case
	// the reported size is inaccurate, or size is -1 which indicates an
	// unknown length
	return ioutil.ReadAll(io.LimitReader(r, maxMetaSize))
}

// getRootAndLocalVersionsUnsafe decodes the versions stored in the local
// metadata without verifying signatures to protect against downgrade attacks
// when the root is replaced and contains new keys. It also sets the local meta
// cache to only contain the local root metadata.
func (c *Client) getRootAndLocalVersionsUnsafe() error {
	type versionData struct {
		Signed struct {
			Version int
		}
	}

	meta, err := c.local.GetMeta()
	if err != nil {
		return err
	}

	getVersion := func(name string) (int, error) {
		m, ok := meta[name]
		if !ok {
			return 0, nil
		}
		var data versionData
		if err := json.Unmarshal(m, &data); err != nil {
			return 0, err
		}
		return data.Signed.Version, nil
	}

	c.timestampVer, err = getVersion("timestamp.json")
	if err != nil {
		return err
	}
	c.snapshotVer, err = getVersion("snapshot.json")
	if err != nil {
		return err
	}
	c.targetsVer, err = getVersion("targets.json")
	if err != nil {
		return err
	}

	root, ok := meta["root.json"]
	if !ok {
		return errors.New("tuf: missing local root after downloading, this should not be possible")
	}
	c.localMeta = map[string]json.RawMessage{"root.json": root}

	return nil
}

// remoteGetFunc is the type of function the download method uses to download
// remote files
type remoteGetFunc func(string) (io.ReadCloser, int64, error)

// download downloads the given file from remote storage using the get function,
// adding hashes to the path if consistent snapshots are in use
func (c *Client) download(file string, get remoteGetFunc, hashes data.Hashes) (io.ReadCloser, int64, error) {
	if c.consistentSnapshot {
		// try each hashed path in turn, and either return the contents,
		// try the next one if a 404 is returned, or return an error
		for _, path := range util.HashedPaths(file, hashes) {
			r, size, err := get(path)
			if err != nil {
				if IsNotFound(err) {
					continue
				}
				return nil, 0, err
			}
			return r, size, nil
		}
		return nil, 0, ErrNotFound{file}
	} else {
		return get(file)
	}
}

// downloadMeta downloads top-level metadata from remote storage and verifies
// it using the given file metadata.
func (c *Client) downloadMeta(name string, m data.FileMeta) ([]byte, error) {
	r, size, err := c.download(name, c.remote.GetMeta, m.Hashes)
	if err != nil {
		if IsNotFound(err) {
			return nil, ErrMissingRemoteMetadata{name}
		}
		return nil, err
	}
	defer r.Close()

	// return ErrWrongSize if the reported size is known and incorrect
	if size >= 0 && size != m.Length {
		return nil, ErrWrongSize{name, size, m.Length}
	}

	// wrap the data in a LimitReader so we download at most m.Length bytes
	stream := io.LimitReader(r, m.Length)

	// read the data, simultaneously writing it to buf and generating metadata
	var buf bytes.Buffer
	meta, err := util.GenerateFileMeta(io.TeeReader(stream, &buf), m.HashAlgorithms()...)
	if err != nil {
		return nil, err
	}
	if err := util.FileMetaEqual(meta, m); err != nil {
		return nil, ErrDownloadFailed{name, err}
	}
	return buf.Bytes(), nil
}

// decodeRoot decodes and verifies root metadata.
func (c *Client) decodeRoot(b json.RawMessage) error {
	root := &data.Root{}
	fmt.Println("tuf client: db:", c.db)
	if err := signed.Unmarshal(b, root, "root", c.rootVer, c.db); err != nil {
		return ErrDecodeFailed{"root.json", err}
	}
	c.rootVer = root.Version
	c.consistentSnapshot = root.ConsistentSnapshot
	return nil
}

// decodeSnapshot decodes and verifies snapshot metadata, and returns the new
// root and targets file meta.
func (c *Client) decodeSnapshot(b json.RawMessage) (data.FileMeta, data.FileMeta, error) {
	snapshot := &data.Snapshot{}
	if err := signed.Unmarshal(b, snapshot, "snapshot", c.snapshotVer, c.db); err != nil {
		return data.FileMeta{}, data.FileMeta{}, ErrDecodeFailed{"snapshot.json", err}
	}
	c.snapshotVer = snapshot.Version
	return snapshot.Meta["root.json"], snapshot.Meta["targets.json"], nil
}

// decodeTargets decodes and verifies targets metadata, sets c.targets and
// returns updated targets.
func (c *Client) decodeTargets(b json.RawMessage) (data.Files, error) {
	targets := &data.Targets{}
	if err := signed.Unmarshal(b, targets, "targets", c.targetsVer, c.db); err != nil {
		return nil, ErrDecodeFailed{"targets.json", err}
	}
	updatedTargets := make(data.Files)
	for path, meta := range targets.Targets {
		if local, ok := c.targets[path]; ok {
			if err := util.FileMetaEqual(local, meta); err == nil {
				continue
			}
		}
		updatedTargets[path] = meta
	}
	c.targetsVer = targets.Version
	c.targets = targets.Targets
	return updatedTargets, nil
}

// decodeTimestamp decodes and verifies timestamp metadata, and returns the
// new snapshot file meta.
func (c *Client) decodeTimestamp(b json.RawMessage) (data.FileMeta, error) {
	timestamp := &data.Timestamp{}
	if err := signed.Unmarshal(b, timestamp, "timestamp", c.timestampVer, c.db); err != nil {
		return data.FileMeta{}, ErrDecodeFailed{"timestamp.json", err}
	}
	c.timestampVer = timestamp.Version
	return timestamp.Meta["snapshot.json"], nil
}

// hasMeta checks whether local metadata has the given file meta
func (c *Client) hasMeta(name string, m data.FileMeta) bool {
	b, ok := c.localMeta[name]
	if !ok {
		return false
	}
	meta, err := util.GenerateFileMeta(bytes.NewReader(b), m.HashAlgorithms()...)
	if err != nil {
		return false
	}
	err = util.FileMetaEqual(meta, m)
	return err == nil
}

type Destination interface {
	io.Writer
	Delete() error
}

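A Destination is just a writer that can clean up after itself; a hypothetical file-backed implementation (not part of the original source; assumes "os" is imported):

```
// fileDestination is a hypothetical Destination backed by an *os.File.
type fileDestination struct {
	*os.File
}

// Delete closes the file and removes it from disk, satisfying Destination.
func (f fileDestination) Delete() error {
	if err := f.Close(); err != nil {
		return err
	}
	return os.Remove(f.Name())
}
```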
// Download downloads the given target file from remote storage into dest.
|
||||
//
|
||||
// dest will be deleted and an error returned in the following situations:
|
||||
//
|
||||
// * The target does not exist in the local targets.json
|
||||
// * The target does not exist in remote storage
|
||||
// * Metadata cannot be generated for the downloaded data
|
||||
// * Generated metadata does not match local metadata for the given file
|
||||
func (c *Client) Download(name string, dest Destination) (err error) {
|
||||
// delete dest if there is an error
|
||||
defer func() {
|
||||
if err != nil {
|
||||
dest.Delete()
|
||||
}
|
||||
}()
|
||||
|
||||
// populate c.targets from local storage if not set
|
||||
if c.targets == nil {
|
||||
if err := c.getLocalMeta(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// return ErrUnknownTarget if the file is not in the local targets.json
|
||||
normalizedName := util.NormalizeTarget(name)
|
||||
localMeta, ok := c.targets[normalizedName]
|
||||
if !ok {
|
||||
return ErrUnknownTarget{name}
|
||||
}
|
||||
|
||||
// get the data from remote storage
|
||||
r, size, err := c.download(normalizedName, c.remote.GetTarget, localMeta.Hashes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
return c.Verify(name, r, size, dest)
|
||||
}
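
// Editor's sketch, not part of the original file: one way a caller might
// satisfy the Destination interface with a file on disk and rely on
// Download's delete-on-error contract. fileDestination and downloadToFile
// are hypothetical names, and the snippet assumes "os" is imported.
//
// type fileDestination struct {
// 	*os.File
// }
//
// // Delete closes and removes the underlying file, satisfying Destination.
// func (d fileDestination) Delete() error {
// 	d.Close()
// 	return os.Remove(d.Name())
// }
//
// // downloadToFile fetches the named target into path; on verification
// // failure Download has already deleted the destination file.
// func downloadToFile(c *Client, name, path string) error {
// 	f, err := os.Create(path)
// 	if err != nil {
// 		return err
// 	}
// 	if err := c.Download(name, fileDestination{f}); err != nil {
// 		return err
// 	}
// 	return f.Close()
// }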

func (c *Client) Verify(name string, r io.Reader, size int64, dest Destination) error {
	normalizedName := util.NormalizeTarget(name)
	if c.targets == nil {
		return ErrUnknownTarget{name}
	}
	localMeta, ok := c.targets[normalizedName]
	if !ok {
		return ErrUnknownTarget{name}
	}

	// return ErrWrongSize if the reported size is known and incorrect
	if size >= 0 && size != localMeta.Length {
		return ErrWrongSize{name, size, localMeta.Length}
	}

	// wrap the data in a LimitReader so we download at most localMeta.Length bytes
	stream := io.LimitReader(r, localMeta.Length)

	// read the data, simultaneously writing it to dest and generating metadata
	actual, err := util.GenerateFileMeta(io.TeeReader(stream, dest), localMeta.HashAlgorithms()...)
	if err != nil {
		return ErrDownloadFailed{name, err}
	}

	// check the data has the correct length and hashes
	if err := util.FileMetaEqual(actual, localMeta); err != nil {
		if err == util.ErrWrongLength {
			return ErrWrongSize{name, actual.Length, localMeta.Length}
		}
		return ErrDownloadFailed{name, err}
	}

	return nil
}
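
// Editor's sketch, not part of the original file: Verify can also be called
// directly when the caller fetched the target bytes through its own
// transport. verifyBytes is a hypothetical helper name.
func verifyBytes(c *Client, name string, b []byte, dest Destination) error {
	// the size is known here, so Verify can reject a wrong length before hashing
	return c.Verify(name, bytes.NewReader(b), int64(len(b)), dest)
}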

// Targets returns the complete list of available targets.
func (c *Client) Targets() (data.Files, error) {
	// populate c.targets from local storage if not set
	if c.targets == nil {
		if err := c.getLocalMeta(); err != nil {
			return nil, err
		}
	}
	return c.targets, nil
}
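
// Editor's sketch, not part of the original file: the typical end-to-end
// client flow wired together. exampleUpdateFlow is a hypothetical helper and
// the URL is a placeholder; rootKeys must come from a trusted out-of-band
// source.
func exampleUpdateFlow(rootKeys []*data.Key) (data.Files, error) {
	remote, err := HTTPRemoteStore("http://localhost:8080/repository", nil)
	if err != nil {
		return nil, err
	}
	c := NewClient(MemoryLocalStore(), remote)
	// pin trust: at least 1 of the given root keys must sign root.json
	if err := c.Init(rootKeys, 1); err != nil {
		return nil, err
	}
	// fetch and verify the latest metadata; ErrLatestSnapshot just means
	// there is nothing new
	if _, err := c.Update(); err != nil && !IsLatestSnapshot(err) {
		return nil, err
	}
	return c.Targets()
}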
@ -1,838 +0,0 @@
package client

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/endophage/go-tuf"
	"github.com/endophage/go-tuf/data"
	"github.com/endophage/go-tuf/keys"
	"github.com/endophage/go-tuf/signed"
	"github.com/endophage/go-tuf/store"
	"github.com/endophage/go-tuf/util"
	. "gopkg.in/check.v1"
)

// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }

type ClientSuite struct {
	store       store.LocalStore
	repo        *tuf.Repo
	local       LocalStore
	remote      *fakeRemoteStore
	expiredTime time.Time
	keyIDs      map[string]string
}

var _ = Suite(&ClientSuite{})

func newFakeRemoteStore() *fakeRemoteStore {
	return &fakeRemoteStore{
		meta:    make(map[string]*fakeFile),
		targets: make(map[string]*fakeFile),
	}
}

type fakeRemoteStore struct {
	meta    map[string]*fakeFile
	targets map[string]*fakeFile
}

func (f *fakeRemoteStore) GetMeta(name string) (io.ReadCloser, int64, error) {
	return f.get(name, f.meta)
}

func (f *fakeRemoteStore) GetTarget(path string) (io.ReadCloser, int64, error) {
	return f.get(path, f.targets)
}

func (f *fakeRemoteStore) get(name string, store map[string]*fakeFile) (io.ReadCloser, int64, error) {
	file, ok := store[name]
	if !ok {
		return nil, 0, ErrNotFound{name}
	}
	return file, file.size, nil
}

func newFakeFile(b []byte) *fakeFile {
	return &fakeFile{buf: bytes.NewReader(b), size: int64(len(b))}
}

type fakeFile struct {
	buf       *bytes.Reader
	bytesRead int
	size      int64
}

func (f *fakeFile) Read(p []byte) (int, error) {
	n, err := f.buf.Read(p)
	f.bytesRead += n
	return n, err
}

func (f *fakeFile) Close() error {
	f.buf.Seek(0, os.SEEK_SET)
	return nil
}

var targetFiles = map[string][]byte{
	"/foo.txt": []byte("foo"),
	"/bar.txt": []byte("bar"),
	"/baz.txt": []byte("baz"),
}

func (s *ClientSuite) SetUpTest(c *C) {
	s.store = store.MemoryStore(nil, targetFiles)

	// create a valid repo containing foo.txt
	var err error
	signer := signed.NewEd25519()
	s.repo, err = tuf.NewRepo(signer, s.store, "sha256")
	c.Assert(err, IsNil)
	// don't use consistent snapshots to make testing easier (consistent
	// snapshots are tested explicitly elsewhere)
	c.Assert(s.repo.Init(false), IsNil)
	s.keyIDs = map[string]string{
		"root":      s.genKey(c, "root"),
		"targets":   s.genKey(c, "targets"),
		"snapshot":  s.genKey(c, "snapshot"),
		"timestamp": s.genKey(c, "timestamp"),
	}
	c.Assert(s.repo.AddTarget("foo.txt", nil), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)

	// create a remote store containing valid repo files
	s.remote = newFakeRemoteStore()
	s.syncRemote(c)
	for path, data := range targetFiles {
		s.remote.targets[path] = newFakeFile(data)
	}

	s.expiredTime = time.Now().Add(time.Hour)
}

func (s *ClientSuite) genKey(c *C, role string) string {
	id, err := s.repo.GenKey(role)
	c.Assert(err, IsNil)
	return id
}

func (s *ClientSuite) genKeyExpired(c *C, role string) string {
	id, err := s.repo.GenKeyWithExpires(role, s.expiredTime)
	c.Assert(err, IsNil)
	return id
}

// withMetaExpired sets signed.IsExpired throughout the invocation of f so that
// any metadata marked to expire at s.expiredTime will be expired (this avoids
// the need to sleep in the tests).
func (s *ClientSuite) withMetaExpired(f func()) {
	e := signed.IsExpired
	defer func() { signed.IsExpired = e }()
	signed.IsExpired = func(t time.Time) bool {
		return t.Unix() == s.expiredTime.Round(time.Second).Unix()
	}
	f()
}

func (s *ClientSuite) syncLocal(c *C) {
	meta, err := s.store.GetMeta()
	c.Assert(err, IsNil)
	for k, v := range meta {
		c.Assert(s.local.SetMeta(k, v), IsNil)
	}
}

func (s *ClientSuite) syncRemote(c *C) {
	meta, err := s.store.GetMeta()
	c.Assert(err, IsNil)
	for name, data := range meta {
		s.remote.meta[name] = newFakeFile(data)
	}
}

func (s *ClientSuite) addRemoteTarget(c *C, name string) {
	c.Assert(s.repo.AddTarget(name, nil), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)
}

func (s *ClientSuite) rootKeys(c *C) []*data.Key {
	rootKeys, err := s.repo.RootKeys()
	c.Assert(err, IsNil)
	c.Assert(rootKeys, HasLen, 1)
	return rootKeys
}

func (s *ClientSuite) newClient(c *C) *Client {
	s.local = MemoryLocalStore()
	client := NewClient(s.local, s.remote)
	c.Assert(client.Init(s.rootKeys(c), 1), IsNil)
	return client
}

func (s *ClientSuite) updatedClient(c *C) *Client {
	client := s.newClient(c)
	_, err := client.Update()
	c.Assert(err, IsNil)
	return client
}

func assertFiles(c *C, files data.Files, names []string) {
	c.Assert(files, HasLen, len(names))
	for _, name := range names {
		target, ok := targetFiles[name]
		if !ok {
			c.Fatalf("unknown target %s", name)
		}
		file, ok := files[name]
		if !ok {
			c.Fatalf("expected files to contain %s", name)
		}
		meta, err := util.GenerateFileMeta(bytes.NewReader(target), file.HashAlgorithms()...)
		c.Assert(err, IsNil)
		c.Assert(util.FileMetaEqual(file, meta), IsNil)
	}
}

func assertWrongHash(c *C, err error) {
	// just test the type of err rather than using DeepEquals, as it contains
	// hashes we don't necessarily need to check.
	e, ok := err.(ErrDownloadFailed)
	if !ok {
		c.Fatalf("expected err to have type ErrDownloadFailed, got %T", err)
	}
	if _, ok := e.Err.(util.ErrWrongHash); !ok {
		c.Fatalf("expected err.Err to have type util.ErrWrongHash, got %T", err)
	}
}

func (s *ClientSuite) assertErrExpired(c *C, err error, file string) {
	decodeErr, ok := err.(ErrDecodeFailed)
	if !ok {
		c.Fatalf("expected err to have type ErrDecodeFailed, got %T", err)
	}
	c.Assert(decodeErr.File, Equals, file)
	expiredErr, ok := decodeErr.Err.(signed.ErrExpired)
	if !ok {
		c.Fatalf("expected err.Err to have type signed.ErrExpired, got %T", err)
	}
	c.Assert(expiredErr.Expired.Unix(), Equals, s.expiredTime.Round(time.Second).Unix())
}

func (s *ClientSuite) TestInitRootTooLarge(c *C) {
	client := NewClient(MemoryLocalStore(), s.remote)
	s.remote.meta["root.json"] = newFakeFile(make([]byte, maxMetaSize+1))
	c.Assert(client.Init(s.rootKeys(c), 0), Equals, ErrMetaTooLarge{"root.json", maxMetaSize + 1})
}

func (s *ClientSuite) TestInitRootExpired(c *C) {
	s.genKeyExpired(c, "targets")
	s.syncRemote(c)
	client := NewClient(MemoryLocalStore(), s.remote)
	s.withMetaExpired(func() {
		s.assertErrExpired(c, client.Init(s.rootKeys(c), 1), "root.json")
	})
}

func (s *ClientSuite) TestInit(c *C) {
	client := NewClient(MemoryLocalStore(), s.remote)

	// check Init() returns keys.ErrInvalidThreshold with an invalid threshold
	c.Assert(client.Init(s.rootKeys(c), 0), Equals, keys.ErrInvalidThreshold)

	// check Init() returns ErrInsufficientKeys when there aren't enough keys
	// to meet the threshold
	c.Assert(client.Init(s.rootKeys(c), 2), Equals, ErrInsufficientKeys)

	// check Update() returns ErrNoRootKeys when uninitialized
	_, err := client.Update()
	c.Assert(err, Equals, ErrNoRootKeys)

	// check Update() does not return ErrNoRootKeys after initialization
	c.Assert(client.Init(s.rootKeys(c), 1), IsNil)
	_, err = client.Update()
	c.Assert(err, Not(Equals), ErrNoRootKeys)
}

func (s *ClientSuite) TestFirstUpdate(c *C) {
	files, err := s.newClient(c).Update()
	c.Assert(err, IsNil)
	c.Assert(files, HasLen, 1)
	assertFiles(c, files, []string{"/foo.txt"})
}

func (s *ClientSuite) TestMissingRemoteMetadata(c *C) {
	client := s.newClient(c)

	delete(s.remote.meta, "targets.json")
	_, err := client.Update()
	c.Assert(err, Equals, ErrMissingRemoteMetadata{"targets.json"})

	delete(s.remote.meta, "timestamp.json")
	_, err = client.Update()
	c.Assert(err, Equals, ErrMissingRemoteMetadata{"timestamp.json"})
}

func (s *ClientSuite) TestNoChangeUpdate(c *C) {
	client := s.newClient(c)
	_, err := client.Update()
	c.Assert(err, IsNil)
	_, err = client.Update()
	c.Assert(IsLatestSnapshot(err), Equals, true)
}

func (s *ClientSuite) TestNewTimestamp(c *C) {
	client := s.updatedClient(c)
	version := client.timestampVer
	c.Assert(version > 0, Equals, true)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)
	_, err := client.Update()
	c.Assert(IsLatestSnapshot(err), Equals, true)
	c.Assert(client.timestampVer > version, Equals, true)
}

func (s *ClientSuite) TestNewRoot(c *C) {
	client := s.newClient(c)

	// replace all keys
	newKeyIDs := make(map[string]string)
	for role, id := range s.keyIDs {
		c.Assert(s.repo.RevokeKey(role, id), IsNil)
		newKeyIDs[role] = s.genKey(c, role)
	}

	// update metadata
	c.Assert(s.repo.Sign("targets.json"), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)

	// check update gets new root version
	c.Assert(client.getLocalMeta(), IsNil)
	version := client.rootVer
	c.Assert(version > 0, Equals, true)
	_, err := client.Update()
	c.Assert(err, IsNil)
	c.Assert(client.rootVer > version, Equals, true)

	// check old keys are not in db
	for _, id := range s.keyIDs {
		c.Assert(client.db.GetKey(id), IsNil)
	}

	// check new keys are in db
	for name, id := range newKeyIDs {
		key := client.db.GetKey(id)
		c.Assert(key, NotNil)
		c.Assert(key.ID, Equals, id)
		role := client.db.GetRole(name)
		c.Assert(role, NotNil)
		c.Assert(role.KeyIDs, DeepEquals, []string{id})
	}
}

func (s *ClientSuite) TestNewTargets(c *C) {
	client := s.newClient(c)
	files, err := client.Update()
	c.Assert(err, IsNil)
	assertFiles(c, files, []string{"/foo.txt"})

	s.addRemoteTarget(c, "bar.txt")
	s.addRemoteTarget(c, "baz.txt")

	files, err = client.Update()
	c.Assert(err, IsNil)
	assertFiles(c, files, []string{"/bar.txt", "/baz.txt"})

	// Adding the exact same file should not lead to an update
	s.addRemoteTarget(c, "bar.txt")
	files, err = client.Update()
	c.Assert(err, IsNil)
	c.Assert(files, HasLen, 0)
}

func (s *ClientSuite) TestNewTimestampKey(c *C) {
	client := s.newClient(c)

	// replace key
	oldID := s.keyIDs["timestamp"]
	c.Assert(s.repo.RevokeKey("timestamp", oldID), IsNil)
	newID := s.genKey(c, "timestamp")

	// generate new snapshot (because root has changed) and timestamp
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)

	// check update gets new root and timestamp
	c.Assert(client.getLocalMeta(), IsNil)
	rootVer := client.rootVer
	timestampVer := client.timestampVer
	_, err := client.Update()
	c.Assert(err, IsNil)
	c.Assert(client.rootVer > rootVer, Equals, true)
	c.Assert(client.timestampVer > timestampVer, Equals, true)

	// check key has been replaced in db
	c.Assert(client.db.GetKey(oldID), IsNil)
	key := client.db.GetKey(newID)
	c.Assert(key, NotNil)
	c.Assert(key.ID, Equals, newID)
	role := client.db.GetRole("timestamp")
	c.Assert(role, NotNil)
	c.Assert(role.KeyIDs, DeepEquals, []string{newID})
}

func (s *ClientSuite) TestNewSnapshotKey(c *C) {
	client := s.newClient(c)

	// replace key
	oldID := s.keyIDs["snapshot"]
	c.Assert(s.repo.RevokeKey("snapshot", oldID), IsNil)
	newID := s.genKey(c, "snapshot")

	// generate new snapshot and timestamp
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)

	// check update gets new root, snapshot and timestamp
	c.Assert(client.getLocalMeta(), IsNil)
	rootVer := client.rootVer
	snapshotVer := client.snapshotVer
	timestampVer := client.timestampVer
	_, err := client.Update()
	c.Assert(err, IsNil)
	c.Assert(client.rootVer > rootVer, Equals, true)
	c.Assert(client.snapshotVer > snapshotVer, Equals, true)
	c.Assert(client.timestampVer > timestampVer, Equals, true)

	// check key has been replaced in db
	c.Assert(client.db.GetKey(oldID), IsNil)
	key := client.db.GetKey(newID)
	c.Assert(key, NotNil)
	c.Assert(key.ID, Equals, newID)
	role := client.db.GetRole("snapshot")
	c.Assert(role, NotNil)
	c.Assert(role.KeyIDs, DeepEquals, []string{newID})
}

func (s *ClientSuite) TestNewTargetsKey(c *C) {
	client := s.newClient(c)

	// replace key
	oldID := s.keyIDs["targets"]
	c.Assert(s.repo.RevokeKey("targets", oldID), IsNil)
	newID := s.genKey(c, "targets")

	// re-sign targets and generate new snapshot and timestamp
	c.Assert(s.repo.Sign("targets.json"), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)

	// check update gets new metadata
	c.Assert(client.getLocalMeta(), IsNil)
	rootVer := client.rootVer
	targetsVer := client.targetsVer
	snapshotVer := client.snapshotVer
	timestampVer := client.timestampVer
	_, err := client.Update()
	c.Assert(err, IsNil)
	c.Assert(client.rootVer > rootVer, Equals, true)
	c.Assert(client.targetsVer > targetsVer, Equals, true)
	c.Assert(client.snapshotVer > snapshotVer, Equals, true)
	c.Assert(client.timestampVer > timestampVer, Equals, true)

	// check key has been replaced in db
	c.Assert(client.db.GetKey(oldID), IsNil)
	key := client.db.GetKey(newID)
	c.Assert(key, NotNil)
	c.Assert(key.ID, Equals, newID)
	role := client.db.GetRole("targets")
	c.Assert(role, NotNil)
	c.Assert(role.KeyIDs, DeepEquals, []string{newID})
}

func (s *ClientSuite) TestLocalExpired(c *C) {
	client := s.newClient(c)

	// locally expired timestamp.json is ok
	version := client.timestampVer
	c.Assert(s.repo.TimestampWithExpires(s.expiredTime), IsNil)
	s.syncLocal(c)
	s.withMetaExpired(func() {
		c.Assert(client.getLocalMeta(), IsNil)
		c.Assert(client.timestampVer > version, Equals, true)
	})

	// locally expired snapshot.json is ok
	version = client.snapshotVer
	c.Assert(s.repo.SnapshotWithExpires(tuf.CompressionTypeNone, s.expiredTime), IsNil)
	s.syncLocal(c)
	s.withMetaExpired(func() {
		c.Assert(client.getLocalMeta(), IsNil)
		c.Assert(client.snapshotVer > version, Equals, true)
	})

	// locally expired targets.json is ok
	version = client.targetsVer
	c.Assert(s.repo.AddTargetWithExpires("foo.txt", nil, s.expiredTime), IsNil)
	s.syncLocal(c)
	s.withMetaExpired(func() {
		c.Assert(client.getLocalMeta(), IsNil)
		c.Assert(client.targetsVer > version, Equals, true)
	})

	// locally expired root.json is not ok
	version = client.rootVer
	s.genKeyExpired(c, "targets")
	s.syncLocal(c)
	s.withMetaExpired(func() {
		err := client.getLocalMeta()
		if _, ok := err.(signed.ErrExpired); !ok {
			c.Fatalf("expected err to have type signed.ErrExpired, got %T", err)
		}
		c.Assert(client.rootVer, Equals, version)
	})
}

func (s *ClientSuite) TestTimestampTooLarge(c *C) {
	s.remote.meta["timestamp.json"] = newFakeFile(make([]byte, maxMetaSize+1))
	_, err := s.newClient(c).Update()
	c.Assert(err, Equals, ErrMetaTooLarge{"timestamp.json", maxMetaSize + 1})
}

func (s *ClientSuite) TestUpdateLocalRootExpired(c *C) {
	client := s.newClient(c)

	// add a soon-to-expire root.json to local storage
	s.genKeyExpired(c, "timestamp")
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncLocal(c)

	// add a far-expiring root.json to remote storage
	s.genKey(c, "timestamp")
	s.addRemoteTarget(c, "bar.txt")
	s.syncRemote(c)

	// check the update downloads the non-expired remote root.json and
	// restarts itself, thus successfully updating
	s.withMetaExpired(func() {
		err := client.getLocalMeta()
		if _, ok := err.(signed.ErrExpired); !ok {
			c.Fatalf("expected err to have type signed.ErrExpired, got %T", err)
		}
		_, err = client.Update()
		c.Assert(err, IsNil)
	})
}

func (s *ClientSuite) TestUpdateRemoteExpired(c *C) {
	client := s.updatedClient(c)

	// expired remote metadata should always be rejected
	c.Assert(s.repo.TimestampWithExpires(s.expiredTime), IsNil)
	s.syncRemote(c)
	s.withMetaExpired(func() {
		_, err := client.Update()
		s.assertErrExpired(c, err, "timestamp.json")
	})

	c.Assert(s.repo.SnapshotWithExpires(tuf.CompressionTypeNone, s.expiredTime), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)
	s.withMetaExpired(func() {
		_, err := client.Update()
		s.assertErrExpired(c, err, "snapshot.json")
	})

	c.Assert(s.repo.AddTargetWithExpires("bar.txt", nil, s.expiredTime), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)
	s.withMetaExpired(func() {
		_, err := client.Update()
		s.assertErrExpired(c, err, "targets.json")
	})

	s.genKeyExpired(c, "timestamp")
	c.Assert(s.repo.RemoveTarget("bar.txt"), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)
	s.withMetaExpired(func() {
		_, err := client.Update()
		s.assertErrExpired(c, err, "root.json")
	})
}

func (s *ClientSuite) TestUpdateLocalRootExpiredKeyChange(c *C) {
	client := s.newClient(c)

	// add a soon-to-expire root.json to local storage
	s.genKeyExpired(c, "timestamp")
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncLocal(c)

	// replace all keys
	newKeyIDs := make(map[string]string)
	for role, id := range s.keyIDs {
		c.Assert(s.repo.RevokeKey(role, id), IsNil)
		newKeyIDs[role] = s.genKey(c, role)
	}

	// update metadata
	c.Assert(s.repo.Sign("targets.json"), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)

	// check the update downloads the non-expired remote root.json and
	// restarts itself, thus successfully updating
	s.withMetaExpired(func() {
		err := client.getLocalMeta()
		c.Assert(err, FitsTypeOf, signed.ErrExpired{})

		_, err = client.Update()
		c.Assert(err, IsNil)
	})
}

func (s *ClientSuite) TestUpdateMixAndMatchAttack(c *C) {
	// generate metadata with an explicit expiry so we can make predictable changes
	expires := time.Now().Add(time.Hour)
	c.Assert(s.repo.AddTargetWithExpires("foo.txt", nil, expires), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)
	client := s.updatedClient(c)

	// grab the remote targets.json
	oldTargets, ok := s.remote.meta["targets.json"]
	if !ok {
		c.Fatal("missing remote targets.json")
	}

	// generate new remote metadata, but replace targets.json with the old one
	c.Assert(s.repo.AddTargetWithExpires("bar.txt", nil, expires), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)
	newTargets, ok := s.remote.meta["targets.json"]
	if !ok {
		c.Fatal("missing remote targets.json")
	}
	s.remote.meta["targets.json"] = oldTargets

	// check update returns ErrWrongSize for targets.json
	_, err := client.Update()
	c.Assert(err, DeepEquals, ErrWrongSize{"targets.json", oldTargets.size, newTargets.size})

	// do the same but keep the size the same
	c.Assert(s.repo.RemoveTargetWithExpires("foo.txt", expires), IsNil)
	c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)
	s.remote.meta["targets.json"] = oldTargets

	// check update returns ErrWrongHash
	_, err = client.Update()
	assertWrongHash(c, err)
}

func (s *ClientSuite) TestUpdateReplayAttack(c *C) {
	client := s.updatedClient(c)

	// grab the remote timestamp.json
	oldTimestamp, ok := s.remote.meta["timestamp.json"]
	if !ok {
		c.Fatal("missing remote timestamp.json")
	}

	// generate a new timestamp and sync with the client
	version := client.timestampVer
	c.Assert(version > 0, Equals, true)
	c.Assert(s.repo.Timestamp(), IsNil)
	s.syncRemote(c)
	_, err := client.Update()
	c.Assert(IsLatestSnapshot(err), Equals, true)
	c.Assert(client.timestampVer > version, Equals, true)

	// replace remote timestamp.json with the old one
	s.remote.meta["timestamp.json"] = oldTimestamp

	// check update returns ErrLowVersion
	_, err = client.Update()
	c.Assert(err, DeepEquals, ErrDecodeFailed{"timestamp.json", signed.ErrLowVersion{version, client.timestampVer}})
}

func (s *ClientSuite) TestUpdateTamperedTargets(c *C) {
	client := s.newClient(c)

	// get local targets.json
	meta, err := s.store.GetMeta()
	c.Assert(err, IsNil)
	targetsJSON, ok := meta["targets.json"]
	if !ok {
		c.Fatal("missing targets.json")
	}
	targets := &data.Signed{}
	c.Assert(json.Unmarshal(targetsJSON, targets), IsNil)

	// update remote targets.json to have different content but the same size
	c.Assert(targets.Signatures, HasLen, 1)
	targets.Signatures[0].Method = "xxxxxxx"
	tamperedJSON, err := json.Marshal(targets)
	c.Assert(err, IsNil)
	s.store.SetMeta("targets.json", tamperedJSON)
	s.syncRemote(c)
	_, err = client.Update()
	assertWrongHash(c, err)

	// update remote targets.json to have the wrong size
	targets.Signatures[0].Method = "xxx"
	tamperedJSON, err = json.Marshal(targets)
	c.Assert(err, IsNil)
	s.store.SetMeta("targets.json", tamperedJSON)
	s.syncRemote(c)
	_, err = client.Update()
	c.Assert(err, DeepEquals, ErrWrongSize{"targets.json", int64(len(tamperedJSON)), int64(len(targetsJSON))})
}

func (s *ClientSuite) TestUpdateHTTP(c *C) {
	tmp := c.MkDir()

	// start file server
	addr, cleanup := startFileServer(c, tmp)
	defer cleanup()

	for _, consistentSnapshot := range []bool{false, true} {
		dir := fmt.Sprintf("consistent-snapshot-%t", consistentSnapshot)

		// generate repository
		repo := generateRepoFS(c, filepath.Join(tmp, dir), targetFiles, consistentSnapshot)

		// initialize a client
		remote, err := HTTPRemoteStore(fmt.Sprintf("http://%s/%s/repository", addr, dir), nil)
		c.Assert(err, IsNil)
		client := NewClient(MemoryLocalStore(), remote)
		rootKeys, err := repo.RootKeys()
		c.Assert(err, IsNil)
		c.Assert(rootKeys, HasLen, 1)
		c.Assert(client.Init(rootKeys, 1), IsNil)

		// check update is ok
		targets, err := client.Update()
		c.Assert(err, IsNil)
		assertFiles(c, targets, []string{"/foo.txt", "/bar.txt", "/baz.txt"})

		// check files can be downloaded
		for name, data := range targetFiles {
			var dest testDestination
			c.Assert(client.Download(name, &dest), IsNil)
			c.Assert(dest.deleted, Equals, false)
			c.Assert(dest.String(), Equals, string(data))
		}
	}
}

type testDestination struct {
	bytes.Buffer
	deleted bool
}

func (t *testDestination) Delete() error {
	t.deleted = true
	return nil
}

func (s *ClientSuite) TestDownloadUnknownTarget(c *C) {
	client := s.updatedClient(c)
	var dest testDestination
	c.Assert(client.Download("/nonexistent", &dest), Equals, ErrUnknownTarget{"/nonexistent"})
	c.Assert(dest.deleted, Equals, true)
}

func (s *ClientSuite) TestDownloadNoExist(c *C) {
	client := s.updatedClient(c)
	delete(s.remote.targets, "/foo.txt")
	var dest testDestination
	c.Assert(client.Download("/foo.txt", &dest), Equals, ErrNotFound{"/foo.txt"})
	c.Assert(dest.deleted, Equals, true)
}

func (s *ClientSuite) TestDownloadOK(c *C) {
	client := s.updatedClient(c)
	// the filename is normalized if necessary
	for _, name := range []string{"/foo.txt", "foo.txt"} {
		var dest testDestination
		c.Assert(client.Download(name, &dest), IsNil)
		c.Assert(dest.deleted, Equals, false)
		c.Assert(dest.String(), Equals, "foo")
	}
}

func (s *ClientSuite) TestDownloadWrongSize(c *C) {
	client := s.updatedClient(c)
	remoteFile := &fakeFile{buf: bytes.NewReader([]byte("wrong-size")), size: 10}
	s.remote.targets["/foo.txt"] = remoteFile
	var dest testDestination
	c.Assert(client.Download("/foo.txt", &dest), DeepEquals, ErrWrongSize{"/foo.txt", 10, 3})
	c.Assert(remoteFile.bytesRead, Equals, 0)
	c.Assert(dest.deleted, Equals, true)
}

func (s *ClientSuite) TestDownloadTargetTooLong(c *C) {
	client := s.updatedClient(c)
	remoteFile := s.remote.targets["/foo.txt"]
	remoteFile.buf = bytes.NewReader([]byte("foo-ooo"))
	var dest testDestination
	c.Assert(client.Download("/foo.txt", &dest), IsNil)
	c.Assert(remoteFile.bytesRead, Equals, 3)
	c.Assert(dest.deleted, Equals, false)
	c.Assert(dest.String(), Equals, "foo")
}

func (s *ClientSuite) TestDownloadTargetTooShort(c *C) {
	client := s.updatedClient(c)
	remoteFile := s.remote.targets["/foo.txt"]
	remoteFile.buf = bytes.NewReader([]byte("fo"))
	var dest testDestination
	c.Assert(client.Download("/foo.txt", &dest), DeepEquals, ErrWrongSize{"/foo.txt", 2, 3})
	c.Assert(dest.deleted, Equals, true)
}

func (s *ClientSuite) TestDownloadTargetCorruptData(c *C) {
	client := s.updatedClient(c)
	remoteFile := s.remote.targets["/foo.txt"]
	remoteFile.buf = bytes.NewReader([]byte("corrupt"))
	var dest testDestination
	assertWrongHash(c, client.Download("/foo.txt", &dest))
	c.Assert(dest.deleted, Equals, true)
}

func (s *ClientSuite) TestAvailableTargets(c *C) {
	client := s.updatedClient(c)
	files, err := client.Targets()
	c.Assert(err, IsNil)
	assertFiles(c, files, []string{"/foo.txt"})

	s.addRemoteTarget(c, "bar.txt")
	s.addRemoteTarget(c, "baz.txt")
	_, err = client.Update()
	c.Assert(err, IsNil)
	files, err = client.Targets()
	c.Assert(err, IsNil)
	assertFiles(c, files, []string{"/foo.txt", "/bar.txt", "/baz.txt"})
}
@ -1,106 +0,0 @@
package client

import (
	"errors"
	"fmt"
)

var (
	ErrNoRootKeys       = errors.New("tuf: no root keys found in local meta store")
	ErrInsufficientKeys = errors.New("tuf: insufficient keys to meet threshold")
)

type ErrMissingRemoteMetadata struct {
	Name string
}

func (e ErrMissingRemoteMetadata) Error() string {
	return fmt.Sprintf("tuf: missing remote metadata %s", e.Name)
}

type ErrDownloadFailed struct {
	File string
	Err  error
}

func (e ErrDownloadFailed) Error() string {
	return fmt.Sprintf("tuf: failed to download %s: %s", e.File, e.Err)
}

type ErrDecodeFailed struct {
	File string
	Err  error
}

func (e ErrDecodeFailed) Error() string {
	return fmt.Sprintf("tuf: failed to decode %s: %s", e.File, e.Err)
}

func isDecodeFailedWithErr(err, expected error) bool {
	e, ok := err.(ErrDecodeFailed)
	if !ok {
		return false
	}
	return e.Err == expected
}

type ErrNotFound struct {
	File string
}

func (e ErrNotFound) Error() string {
	return fmt.Sprintf("tuf: file not found: %s", e.File)
}

func IsNotFound(err error) bool {
	_, ok := err.(ErrNotFound)
	return ok
}

type ErrWrongSize struct {
	File     string
	Actual   int64
	Expected int64
}

func (e ErrWrongSize) Error() string {
	return fmt.Sprintf("tuf: unexpected file size: %s (expected %d bytes, got %d bytes)", e.File, e.Expected, e.Actual)
}

type ErrLatestSnapshot struct {
	Version int
}

func (e ErrLatestSnapshot) Error() string {
	return fmt.Sprintf("tuf: the local snapshot version (%d) is the latest", e.Version)
}

func IsLatestSnapshot(err error) bool {
	_, ok := err.(ErrLatestSnapshot)
	return ok
}

type ErrUnknownTarget struct {
	Name string
}

func (e ErrUnknownTarget) Error() string {
	return fmt.Sprintf("tuf: unknown target file: %s", e.Name)
}

type ErrMetaTooLarge struct {
	Name string
	Size int64
}

func (e ErrMetaTooLarge) Error() string {
	return fmt.Sprintf("tuf: %s size %d bytes greater than maximum %d bytes", e.Name, e.Size, maxMetaSize)
}

type ErrInvalidURL struct {
	URL string
}

func (e ErrInvalidURL) Error() string {
	return fmt.Sprintf("tuf: invalid repository URL %s", e.URL)
}
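
// Editor's sketch, not part of the original file: how a caller might branch
// on the error types defined above after calling Update. handleUpdateErr is
// a hypothetical helper name.
func handleUpdateErr(err error) {
	switch e := err.(type) {
	case nil:
		// metadata updated successfully
	case ErrLatestSnapshot:
		// not a failure: the local snapshot is already current
		fmt.Printf("already at snapshot version %d\n", e.Version)
	case ErrDecodeFailed:
		// decoding or verification of a metadata file failed
		fmt.Printf("bad metadata %s: %s\n", e.File, e.Err)
	default:
		fmt.Printf("update failed: %s\n", err)
	}
}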
@ -1,183 +0,0 @@
package client

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"github.com/agl/ed25519"
	"github.com/endophage/go-tuf"
	"github.com/endophage/go-tuf/data"
	"github.com/endophage/go-tuf/signed"
	"github.com/endophage/go-tuf/store"
	"github.com/endophage/go-tuf/util"
	. "gopkg.in/check.v1"
)

type InteropSuite struct{}

var _ = Suite(&InteropSuite{})

var pythonTargets = map[string][]byte{
	"/file1.txt":     []byte("file1.txt"),
	"/dir/file2.txt": []byte("file2.txt"),
}

func (InteropSuite) TestGoClientPythonGenerated(c *C) {
	// start file server
	cwd, err := os.Getwd()
	c.Assert(err, IsNil)
	testDataDir := filepath.Join(cwd, "testdata")
	addr, cleanup := startFileServer(c, testDataDir)
	defer cleanup()

	for _, dir := range []string{"with-consistent-snapshot", "without-consistent-snapshot"} {
		remote, err := HTTPRemoteStore(
			fmt.Sprintf("http://%s/%s/repository", addr, dir),
			&HTTPRemoteOptions{MetadataPath: "metadata", TargetsPath: "targets"},
		)
		c.Assert(err, IsNil)

		// initialize a client with the root keys
		f, err := os.Open(filepath.Join("testdata", dir, "keystore", "root_key.pub"))
		c.Assert(err, IsNil)
		key := &data.Key{}
		c.Assert(json.NewDecoder(f).Decode(key), IsNil)
		c.Assert(key.Type, Equals, "ed25519")
		c.Assert(key.Value.Public, HasLen, ed25519.PublicKeySize)
		client := NewClient(MemoryLocalStore(), remote)
		c.Assert(client.Init([]*data.Key{key}, 1), IsNil)

		// check update returns the correct updated targets
		files, err := client.Update()
		c.Assert(err, IsNil)
		c.Assert(files, HasLen, len(pythonTargets))
		for name, data := range pythonTargets {
			file, ok := files[name]
			if !ok {
				c.Fatalf("expected updated targets to contain %s", name)
			}
			meta, err := util.GenerateFileMeta(bytes.NewReader(data), file.HashAlgorithms()...)
			c.Assert(err, IsNil)
			c.Assert(util.FileMetaEqual(file, meta), IsNil)
		}

		// download the files and check they have the correct content
		for name, data := range pythonTargets {
			var dest testDestination
			c.Assert(client.Download(name, &dest), IsNil)
			c.Assert(dest.deleted, Equals, false)
			c.Assert(dest.String(), Equals, string(data))
		}
	}
}

func generateRepoFS(c *C, dir string, files map[string][]byte, consistentSnapshot bool) *tuf.Repo {
	signer := signed.NewEd25519()
	repo, err := tuf.NewRepo(signer, store.FileSystemStore(dir, nil), "sha256")
	c.Assert(err, IsNil)
	if !consistentSnapshot {
		c.Assert(repo.Init(false), IsNil)
	}
	for _, role := range []string{"root", "snapshot", "targets", "timestamp"} {
		_, err := repo.GenKey(role)
		c.Assert(err, IsNil)
	}
	for file, data := range files {
		path := filepath.Join(dir, "staged", "targets", file)
		c.Assert(os.MkdirAll(filepath.Dir(path), 0755), IsNil)
		c.Assert(ioutil.WriteFile(path, data, 0644), IsNil)
		c.Assert(repo.AddTarget(file, nil), IsNil)
	}
	c.Assert(repo.Snapshot(tuf.CompressionTypeNone), IsNil)
	c.Assert(repo.Timestamp(), IsNil)
	c.Assert(repo.Commit(), IsNil)
	return repo
}

func (InteropSuite) TestPythonClientGoGenerated(c *C) {
	// clone the Python client if necessary
	cwd, err := os.Getwd()
	c.Assert(err, IsNil)
	tufDir := filepath.Join(cwd, "testdata", "tuf")
	if _, err := os.Stat(tufDir); os.IsNotExist(err) {
		c.Assert(exec.Command(
			"git",
			"clone",
			"--quiet",
			"--branch=v0.9.9",
			"--depth=1",
			"https://github.com/theupdateframework/tuf.git",
			tufDir,
		).Run(), IsNil)
	}

	tmp := c.MkDir()
	files := map[string][]byte{
		"foo.txt":     []byte("foo"),
		"bar/baz.txt": []byte("baz"),
	}

	// start file server
	addr, cleanup := startFileServer(c, tmp)
	defer cleanup()

	// set up the Python environment
	environ := os.Environ()
	pythonEnv := make([]string, 0, len(environ)+1)
	// remove any existing PYTHONPATH from the environment
	for _, e := range environ {
		if strings.HasPrefix(e, "PYTHONPATH=") {
			continue
		}
		pythonEnv = append(pythonEnv, e)
	}
	pythonEnv = append(pythonEnv, "PYTHONPATH="+tufDir)

	for _, consistentSnapshot := range []bool{false, true} {
		// generate repository
		name := fmt.Sprintf("consistent-snapshot-%t", consistentSnapshot)
		dir := filepath.Join(tmp, name)
		generateRepoFS(c, dir, files, consistentSnapshot)

		// create initial files for the Python client
		clientDir := filepath.Join(dir, "client")
		currDir := filepath.Join(clientDir, "metadata", "current")
		prevDir := filepath.Join(clientDir, "metadata", "previous")
		c.Assert(os.MkdirAll(currDir, 0755), IsNil)
		c.Assert(os.MkdirAll(prevDir, 0755), IsNil)
		rootJSON, err := ioutil.ReadFile(filepath.Join(dir, "repository", "root.json"))
		c.Assert(err, IsNil)
		c.Assert(ioutil.WriteFile(filepath.Join(currDir, "root.json"), rootJSON, 0644), IsNil)

		// run the Python client update
		cmd := exec.Command("python", filepath.Join(cwd, "testdata", "client.py"), "--repo=http://"+addr+"/"+name)
		cmd.Env = pythonEnv
		cmd.Dir = clientDir
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		c.Assert(cmd.Run(), IsNil)

		// check the target files got downloaded
		for path, expected := range files {
			actual, err := ioutil.ReadFile(filepath.Join(clientDir, "targets", path))
			c.Assert(err, IsNil)
			c.Assert(actual, DeepEquals, expected)
		}
	}
}

func startFileServer(c *C, dir string) (string, func() error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	c.Assert(err, IsNil)
	addr := l.Addr().String()
	go http.Serve(l, http.FileServer(http.Dir(dir)))
	return addr, l.Close
}
@ -1,65 +0,0 @@
package client

import (
	"encoding/json"
	"time"

	"github.com/boltdb/bolt"
)

func MemoryLocalStore() LocalStore {
	return make(memoryLocalStore)
}

type memoryLocalStore map[string]json.RawMessage

func (m memoryLocalStore) GetMeta() (map[string]json.RawMessage, error) {
	return m, nil
}

func (m memoryLocalStore) SetMeta(name string, meta json.RawMessage) error {
	m[name] = meta
	return nil
}

const dbBucket = "tuf-client"

func FileLocalStore(path string) (LocalStore, error) {
	db, err := bolt.Open(path, 0600, &bolt.Options{Timeout: time.Second})
	if err != nil {
		return nil, err
	}
	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte(dbBucket))
		return err
	}); err != nil {
		return nil, err
	}
	return &fileLocalStore{db: db}, nil
}

type fileLocalStore struct {
	db *bolt.DB
}

func (f *fileLocalStore) GetMeta() (map[string]json.RawMessage, error) {
	meta := make(map[string]json.RawMessage)
	if err := f.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(dbBucket))
		b.ForEach(func(k, v []byte) error {
			meta[string(k)] = v
			return nil
		})
		return nil
	}); err != nil {
		return nil, err
	}
	return meta, nil
}

func (f *fileLocalStore) SetMeta(name string, meta json.RawMessage) error {
	return f.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(dbBucket))
		return b.Put([]byte(name), meta)
	})
}
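
// Editor's sketch, not part of the original file: seeding a persistent store
// with trusted root metadata so client state survives restarts. seedRoot and
// its arguments are hypothetical.
func seedRoot(path string, rootJSON json.RawMessage) (LocalStore, error) {
	local, err := FileLocalStore(path)
	if err != nil {
		return nil, err
	}
	// cached metadata is keyed by file name, e.g. "root.json"
	if err := local.SetMeta("root.json", rootJSON); err != nil {
		return nil, err
	}
	return local, nil
}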
@ -1,46 +0,0 @@
package client

import (
	"encoding/json"
	"path/filepath"

	. "gopkg.in/check.v1"
)

type LocalStoreSuite struct{}

var _ = Suite(&LocalStoreSuite{})

func (LocalStoreSuite) TestFileLocalStore(c *C) {
	tmp := c.MkDir()
	path := filepath.Join(tmp, "tuf.db")
	store, err := FileLocalStore(path)
	c.Assert(err, IsNil)

	type meta map[string]json.RawMessage

	assertGet := func(expected meta) {
		actual, err := store.GetMeta()
		c.Assert(err, IsNil)
		c.Assert(meta(actual), DeepEquals, expected)
	}

	// initial GetMeta should return empty meta
	assertGet(meta{})

	// SetMeta should persist
	rootJSON := []byte(`{"_type":"Root"}`)
	c.Assert(store.SetMeta("root.json", rootJSON), IsNil)
	assertGet(meta{"root.json": rootJSON})

	// SetMeta should add to existing meta
	targetsJSON := []byte(`{"_type":"Target"}`)
	c.Assert(store.SetMeta("targets.json", targetsJSON), IsNil)
	assertGet(meta{"root.json": rootJSON, "targets.json": targetsJSON})

	// a new store should get the same meta
	c.Assert(store.(*fileLocalStore).db.Close(), IsNil)
	store, err = FileLocalStore(path)
	c.Assert(err, IsNil)
	assertGet(meta{"root.json": rootJSON, "targets.json": targetsJSON})
}
@ -1,92 +0,0 @@
package client

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
)

type HTTPRemoteOptions struct {
	MetadataPath string
	TargetsPath  string
	UserAgent    string
}

func HTTPRemoteStore(baseURL string, opts *HTTPRemoteOptions) (RemoteStore, error) {
	if !strings.HasPrefix(baseURL, "http") {
		return nil, ErrInvalidURL{baseURL}
	}
	if opts == nil {
		opts = &HTTPRemoteOptions{}
	}
	if opts.TargetsPath == "" {
		opts.TargetsPath = "targets"
	}
	// note: InsecureSkipVerify disables TLS certificate verification, so
	// HTTPS connections to the remote store are not authenticated
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{Transport: tr}
	return &httpRemoteStore{baseURL, opts, client}, nil
}

type httpRemoteStore struct {
	baseURL string
	opts    *HTTPRemoteOptions
	client  *http.Client
}

func (h *httpRemoteStore) GetMeta(name string) (io.ReadCloser, int64, error) {
	return h.get(path.Join(h.opts.MetadataPath, name))
}

func (h *httpRemoteStore) GetTarget(name string) (io.ReadCloser, int64, error) {
	return h.get(path.Join(h.opts.TargetsPath, name))
}

func (h *httpRemoteStore) get(s string) (io.ReadCloser, int64, error) {
	u := h.url(s)
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, 0, err
	}
	if h.opts.UserAgent != "" {
		req.Header.Set("User-Agent", h.opts.UserAgent)
	}
	res, err := h.client.Do(req)
	if err != nil {
		return nil, 0, err
	}

	if res.StatusCode == http.StatusNotFound {
		res.Body.Close()
		return nil, 0, ErrNotFound{s}
	} else if res.StatusCode != http.StatusOK {
		res.Body.Close()
		return nil, 0, &url.Error{
			Op:  "GET",
			URL: u,
			Err: fmt.Errorf("unexpected HTTP status %d", res.StatusCode),
		}
	}

	size, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 0)
	if err != nil {
		return res.Body, -1, nil
	}
	return res.Body, size, nil
}

func (h *httpRemoteStore) url(path string) string {
	if !strings.HasPrefix(path, "/") {
		path = "/" + path
	}
	return h.baseURL + path
}
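
// Editor's sketch, not part of the original file: constructing a remote
// store with explicit metadata/targets paths and a user agent, then fetching
// timestamp metadata. exampleRemote is a hypothetical helper and the base
// URL is a placeholder.
func exampleRemote() error {
	remote, err := HTTPRemoteStore("https://example.com/repo", &HTTPRemoteOptions{
		MetadataPath: "metadata",
		TargetsPath:  "targets",
		UserAgent:    "tuf-client/0.1",
	})
	if err != nil {
		return err
	}
	r, size, err := remote.GetMeta("timestamp.json")
	if err != nil {
		return err
	}
	defer r.Close()
	// size is -1 when the server did not send a usable Content-Length
	fmt.Println("timestamp.json size:", size)
	return nil
}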
@ -1,2 +0,0 @@
tuf.log
tuf
@ -1,66 +0,0 @@
This file contains the license for TUF: The Update Framework.

It also lists license information for components and source
code used by TUF: The Update Framework.

If you got this file as a part of a larger bundle,
there may be other license terms that you should be aware of.

===============================================================================
TUF: The Update Framework is distributed under this license:

Copyright (c) 2010, Justin Samuel and Justin Cappos.

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and/or hardware specification (the “Work”) to deal in the Work
without restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies of the Work,
and to permit persons to whom the Work is furnished to do so, subject to the
following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Work.

THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER
DEALINGS IN THE WORK.
===============================================================================
Many files are modified from Thandy and are licensed under the
following license:

Thandy is distributed under this license:

Copyright (c) 2008, The Tor Project, Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

  * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

  * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.

  * Neither the names of the copyright owners nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
===============================================================================
@ -1,8 +0,0 @@
all:
	docker build -t tuf-gen ./generate
	docker run tuf-gen | tar x

clean:
	rm -rf with{,out}-consistent-snapshot

.PHONY: all clean
@ -1,47 +0,0 @@
# TUF testdata

TUF testdata generated by the Python implementation, used to test that the Go
client is compatible with files generated by the Python repository tool.

## Generate

The `generate` directory contains scripts and a Dockerfile for generating the
test data files.

Run `make` to regenerate the test files:

```
$ make clean
rm -rf keystore repository

$ make
docker build -t tuf-gen ./generate
...
Successfully built ac1fba1d0b3b
docker run tuf-gen | tar x
Files generated:
.
|-- keystore
|   |-- root_key
|   |-- root_key.pub
|   |-- snapshot_key
|   |-- snapshot_key.pub
|   |-- targets_key
|   |-- targets_key.pub
|   |-- timestamp_key
|   `-- timestamp_key.pub
|-- repository
|   |-- metadata
|   |   |-- root.json
|   |   |-- snapshot.json
|   |   |-- targets.json
|   |   |-- targets.json.gz
|   |   `-- timestamp.json
|   `-- targets
|       |-- dir
|       |   `-- file2.txt
|       `-- file1.txt
`-- tuf.log

5 directories, 16 files
```
@ -1,232 +0,0 @@
#!/usr/bin/env python
#
# A script to download updates from a remote TUF repository.
#
# A modification of basic_client.py from the Python implementation:
# https://github.com/theupdateframework/tuf/blob/v0.9.9/tuf/client/basic_client.py

"""
<Program Name>
  basic_client.py

<Author>
  Vladimir Diaz <vladimir.v.diaz@gmail.com>

<Started>
  September 2012

<Copyright>
  See LICENSE for licensing information.

<Purpose>
  Provide a basic TUF client that can update all of the metadata and target
  files provided by the user-specified repository mirror. Updated files are
  saved to the 'targets' directory in the current working directory. The
  repository mirror is specified by the user through the '--repo' command-
  line option.

  Normally, a software updater integrating TUF will develop their own custom
  client module by importing 'tuf.client.updater', instantiating the required
  object, and calling the desired methods to perform an update. This basic
  client is provided to users who wish to give TUF a quick test run without
  the hassle of writing client code. This module can also be used by updaters
  that do not need the customization and only require their clients to perform
  an update of all the files provided by their repository mirror(s).

  For software updaters that DO require customization, see the
  'example_client.py' script. The 'example_client.py' script provides an
  outline of the client code that software updaters may develop and then
  tailor to their specific software updater or package manager.

  Additional tools for clients running legacy applications will also be made
  available. These tools will allow secure software updates using The Update
  Framework without the need to modify the original application.

<Usage>
  $ python basic_client.py --repo http://localhost:8001
  $ python basic_client.py --repo http://localhost:8001 --verbose 3

<Options>
  --verbose:
    Set the verbosity level of logging messages. Accepts values 1-5.

  --repo:
    Set the repository mirror that will be responding to client requests.
    E.g., 'http://localhost:8001'.
"""

# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

import sys
import traceback
import optparse
import logging

import tuf
import tuf.formats
import tuf.client.updater
import tuf.log

# See 'log.py' to learn how logging is handled in TUF.
logger = logging.getLogger('tuf.basic_client')


def update_client(repository_mirror):
  """
  <Purpose>
    Perform an update of the metadata and target files located at
    'repository_mirror'. Target files are saved to the 'targets' directory
    in the current working directory. The current directory must already
    include a 'metadata' directory, which in turn must contain the 'current'
    and 'previous' directories. At a minimum, these two directories require
    the 'root.json' metadata file.

  <Arguments>
    repository_mirror:
      The URL to the repository mirror hosting the metadata and target
      files. E.g., 'http://localhost:8001'

  <Exceptions>
    tuf.RepositoryError, if 'repository_mirror' is improperly formatted.

  <Side Effects>
    Connects to a repository mirror and updates the metadata files and
    any target files. Obsolete targets are also removed locally.

  <Returns>
    None.
  """

  # Does 'repository_mirror' have the correct format?
  try:
    tuf.formats.URL_SCHEMA.check_match(repository_mirror)
  except tuf.FormatError as e:
    message = 'The repository mirror supplied is invalid.'
    raise tuf.RepositoryError(message)

  # Set the local repository directory containing all of the metadata files.
  tuf.conf.repository_directory = '.'

  # Set the repository mirrors. This dictionary is needed by the Updater
  # class of updater.py.
  repository_mirrors = {'mirror': {'url_prefix': repository_mirror,
                                   'metadata_path': 'repository',
                                   'targets_path': 'repository/targets',
                                   'confined_target_dirs': ['']}}

  # Create the repository object using the repository name 'repository'
  # and the repository mirrors defined above.
  updater = tuf.client.updater.Updater('repository', repository_mirrors)

  # The local destination directory to save the target files.
  destination_directory = './targets'

  # Refresh the repository's top-level roles, store the target information for
  # all the targets tracked, and determine which of these targets have been
  # updated.
  updater.refresh()
  all_targets = updater.all_targets()
  updated_targets = updater.updated_targets(all_targets, destination_directory)

  # Download each of these updated targets and save them locally.
  for target in updated_targets:
    try:
      updater.download_target(target, destination_directory)
    except tuf.DownloadError as e:
      pass

  # Remove any files from the destination directory that are no longer being
  # tracked.
  updater.remove_obsolete_targets(destination_directory)


def parse_options():
  """
  <Purpose>
    Parse the command-line options and set the logging level
    as specified by the user through the --verbose option.
    'basic_client' expects the '--repo' option to be set by the user.

    Example:
      $ python basic_client.py --repo http://localhost:8001

    If the required option is unset, a parser error is printed
    and the script exits.

  <Arguments>
    None.

  <Exceptions>
    None.

  <Side Effects>
    Sets the logging level for TUF logging.

  <Returns>
    The 'options.REPOSITORY_MIRROR' string.
  """

  parser = optparse.OptionParser()

  # Add the options supported by 'basic_client' to the option parser.
  parser.add_option('--verbose', dest='VERBOSE', type=int, default=2,
                    help='Set the verbosity level of logging messages. '
                    'The lower the setting, the greater the verbosity.')

  parser.add_option('--repo', dest='REPOSITORY_MIRROR', type='string',
                    help='Specify the repository mirror\'s URL prefix '
                    '(e.g., http://www.example.com:8001/tuf/).'
                    ' The client will download updates from this mirror.')

  options, args = parser.parse_args()

  # Set the logging level.
  if options.VERBOSE == 5:
    tuf.log.set_log_level(logging.CRITICAL)
  elif options.VERBOSE == 4:
    tuf.log.set_log_level(logging.ERROR)
  elif options.VERBOSE == 3:
    tuf.log.set_log_level(logging.WARNING)
  elif options.VERBOSE == 2:
    tuf.log.set_log_level(logging.INFO)
  elif options.VERBOSE == 1:
    tuf.log.set_log_level(logging.DEBUG)
  else:
    tuf.log.set_log_level(logging.NOTSET)

  # Ensure the '--repo' option was set by the user.
  if options.REPOSITORY_MIRROR is None:
    message = '"--repo" must be set on the command-line.'
    parser.error(message)

  # Return the repository mirror containing the metadata and target files.
  return options.REPOSITORY_MIRROR


if __name__ == '__main__':

  # Parse the options and set the logging level.
  repository_mirror = parse_options()

  # Perform an update of all the files in the 'targets' directory located in
  # the current directory.
try:
|
||||
update_client(repository_mirror)
|
||||
|
||||
except (tuf.NoWorkingMirrorError, tuf.RepositoryError) as e:
|
||||
traceback.print_exc()
|
||||
sys.stderr.write('Error: '+str(e)+'\n')
|
||||
sys.exit(1)
|
||||
|
||||
# Successfully updated the client's target files.
|
||||
sys.exit(0)
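For orientation, the Go client vendored alongside this test data exposes a comparable update flow. The sketch below is illustrative only, composed from the endophage/go-tuf client calls that appear later in this diff (FileLocalStore, HTTPRemoteStore, NewClient, Update, IsLatestSnapshot, Targets); the store path and mirror URL are placeholders:

```
package main

import (
	"fmt"
	"log"

	tuf "github.com/endophage/go-tuf/client"
)

func main() {
	// Local cache of trusted metadata (placeholder path).
	local, err := tuf.FileLocalStore("tuf.db")
	if err != nil {
		log.Fatalln(err)
	}
	// Remote repository mirror (placeholder URL).
	remote, err := tuf.HTTPRemoteStore("http://localhost:8001", nil)
	if err != nil {
		log.Fatalln(err)
	}
	client := tuf.NewClient(local, remote)

	// Refresh metadata; "already at latest snapshot" is not a fatal condition.
	if _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) {
		log.Fatalln(err)
	}

	// List the targets described by the refreshed metadata.
	targets, err := client.Targets()
	if err != nil {
		log.Fatalln(err)
	}
	for path, meta := range targets {
		fmt.Println(path, meta.Length)
	}
}
```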
12 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/Dockerfile generated vendored
@ -1,12 +0,0 @@
FROM ubuntu:trusty

RUN apt-get update
RUN apt-get install -y python python-dev python-pip libffi-dev tree

# Use the develop branch of tuf for the following fix:
# https://github.com/theupdateframework/tuf/commit/38005fe
RUN apt-get install -y git
RUN pip install --no-use-wheel git+https://github.com/theupdateframework/tuf.git@develop && pip install tuf[tools]

ADD generate.py generate.sh /
CMD /generate.sh
82 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.py generated vendored
@ -1,82 +0,0 @@
#!/usr/bin/env python
#
# A script to generate TUF repository files.
#
# A modification of generate.py from the Python implementation:
# https://github.com/theupdateframework/tuf/blob/v0.9.9/tests/repository_data/generate.py

import os
import shutil
import datetime
import optparse
import stat

from tuf.repository_tool import *
import tuf.util

parser = optparse.OptionParser()
parser.add_option("-c", "--consistent-snapshot", action='store_true',
                  dest="consistent_snapshot", default=False,
                  help="Generate consistent snapshot")
(options, args) = parser.parse_args()

repository = create_new_repository('repository')

root_key_file = 'keystore/root_key'
targets_key_file = 'keystore/targets_key'
snapshot_key_file = 'keystore/snapshot_key'
timestamp_key_file = 'keystore/timestamp_key'

generate_and_write_ed25519_keypair(root_key_file, password='password')
generate_and_write_ed25519_keypair(targets_key_file, password='password')
generate_and_write_ed25519_keypair(snapshot_key_file, password='password')
generate_and_write_ed25519_keypair(timestamp_key_file, password='password')

root_public = import_ed25519_publickey_from_file(root_key_file + '.pub')
targets_public = import_ed25519_publickey_from_file(targets_key_file + '.pub')
snapshot_public = import_ed25519_publickey_from_file(snapshot_key_file + '.pub')
timestamp_public = import_ed25519_publickey_from_file(timestamp_key_file + '.pub')

root_private = import_ed25519_privatekey_from_file(root_key_file, 'password')
targets_private = import_ed25519_privatekey_from_file(targets_key_file, 'password')
snapshot_private = import_ed25519_privatekey_from_file(snapshot_key_file, 'password')
timestamp_private = import_ed25519_privatekey_from_file(timestamp_key_file, 'password')

repository.root.add_verification_key(root_public)
repository.targets.add_verification_key(targets_public)
repository.snapshot.add_verification_key(snapshot_public)
repository.timestamp.add_verification_key(timestamp_public)

repository.root.load_signing_key(root_private)
repository.targets.load_signing_key(targets_private)
repository.snapshot.load_signing_key(snapshot_private)
repository.timestamp.load_signing_key(timestamp_private)

target1_filepath = 'repository/targets/file1.txt'
tuf.util.ensure_parent_dir(target1_filepath)
target2_filepath = 'repository/targets/dir/file2.txt'
tuf.util.ensure_parent_dir(target2_filepath)

with open(target1_filepath, 'wt') as file_object:
  file_object.write('file1.txt')

with open(target2_filepath, 'wt') as file_object:
  file_object.write('file2.txt')

octal_file_permissions = oct(os.stat(target1_filepath).st_mode)[4:]
file_permissions = {'file_permissions': octal_file_permissions}
repository.targets.add_target(target1_filepath, file_permissions)
repository.targets.add_target(target2_filepath)

repository.root.expiration = datetime.datetime(2030, 1, 1, 0, 0)
repository.targets.expiration = datetime.datetime(2030, 1, 1, 0, 0)
repository.snapshot.expiration = datetime.datetime(2030, 1, 1, 0, 0)
repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 0, 0)

repository.targets.compressions = ['gz']

if options.consistent_snapshot:
  repository.write(False, True)

else:
  repository.write()

shutil.move('repository/metadata.staged', 'repository/metadata')
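On the Go side of this vendored tree, the analogous step of producing per-file target metadata is handled by util.GenerateFileMeta, as used by the tuftools "meta" command later in this diff. A minimal sketch, with an illustrative file path:

```
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/endophage/go-tuf/util"
)

func main() {
	// Path is illustrative; any regular file works.
	f, err := os.Open("repository/targets/file1.txt")
	if err != nil {
		log.Fatalln(err)
	}
	defer f.Close()

	// Hash the file and record its length, the two fields a
	// targets.json entry carries for each target.
	meta, err := util.GenerateFileMeta(f, "sha256")
	if err != nil {
		log.Fatalln(err)
	}
	out, err := json.Marshal(meta)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(string(out))
}
```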
40 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.sh generated vendored
@ -1,40 +0,0 @@
#!/bin/bash
#
# A script to generate TUF repository files using the Python implementation.
#
# A list of generated files is printed to STDERR and a tar of the files to STDOUT.

set -e

main() {
  local dir="$(mktemp -d)"
  trap "rm -rf ${dir}" EXIT

  pushd "${dir}" >/dev/null
  generate_consistent
  generate_non_consistent
  list_files >&2
  tar c .
  popd >/dev/null
}

generate_consistent() {
  mkdir "with-consistent-snapshot"
  pushd "with-consistent-snapshot" >/dev/null
  /generate.py --consistent-snapshot
  popd >/dev/null
}

generate_non_consistent() {
  mkdir "without-consistent-snapshot"
  pushd "without-consistent-snapshot" >/dev/null
  /generate.py
  popd >/dev/null
}

list_files() {
  echo "Files generated:"
  tree
}

main "$@"
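Since generate.sh writes the file listing to STDERR and a tar stream to STDOUT, a consumer can inspect or unpack the fixtures straight from a pipe. A self-contained standard-library Go sketch (no go-tuf types involved) that lists the archive entries arriving on stdin:

```
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"
	"os"
)

// Illustrative usage: ./generate.sh 2>/dev/null | go run listtar.go
func main() {
	tr := tar.NewReader(os.Stdin)
	for {
		hdr, err := tr.Next() // Next skips any unread entry body.
		if err == io.EOF {
			break // end of archive
		}
		if err != nil {
			log.Fatalln(err)
		}
		fmt.Println(hdr.Name)
	}
}
```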
@ -1 +0,0 @@
4fc13ddb4979dbe54ff8ac93cab9b307@@@@100000@@@@e837119fd0a754046e1175445effdf8cdeda587fc94787e6ab27bc468dfb8bb0@@@@0335c7f8953301f91aa11d3be991096d@@@@d55c63737dbc5de581f6814fa37a341723465b8ea5157eca4302a2271b0cee93d48c4e48707a4ab34ecb649e5879577eb5e7bdf95627c8cbdf611fbc7cfa360d48b819525f20050ba7829ff016fc348916ce3154f031d7aed9cd91cbf89bc2d7e03ec4b5f98c4e4b2e4e576dbd9eefb3736aa94a6753875bf328727bbefb582ced865ff2512bd2c46b8c15d4a81ff244a296307949a8e58013588b47c65ae8334fd4df0c25d95778dc03a728969ce121d63f5dc34cd21d71b8ee6c05d85eeab4f4ff7c153b896f459304aa43ac9ef3a4b34701156e8cff3ddcaf91f6bef1dadba2f275cc1c8f675a8bc026023d25428b5a5a6730e76fb3d9a0630f1eb9662b06ef6523816f4e8b71966aa6e2

@ -1 +0,0 @@
{"keytype": "ed25519", "keyval": {"public": "4b9d6523d4fca15b88694b87f714c839f3b1db296884c6d235cfea617e5a3df0"}}

@ -1 +0,0 @@
412601f80d76e03115cdc5620d803bf6@@@@100000@@@@90e058b5b1db460fb28e81105228bfd2b0a4ea748b524e9e349f8e24c8564fad@@@@9922a40a7b682a052b20cea3043018b2@@@@e83737f842c4a847b0302eb8cfba70b790083ce588e8c1fedf1f2a27590ef3a656b4abd0c1bec83c46a907083447b84d64d4307b98d4bbc673f9a12ef05814f550540ca187dc210de3d4147f36700da721433efcde095c98dc8ef0bc39bd40785842b6541c678b5d77b14f9a1170fabcf21dc4c86980776a721d2ac5068fcaa0703d636a60f8f6575e23b2238dd2d603ccaaeb8d4d2ca5794c0036811f0dd09409f07c137361a84358e0eeeba8e7d870652a17a5891c4f7e830672b61bd73b56f04c5e694caf87ecd255c3d7ec263a7e72c13d2fb62d97ec07b4b981776c9cc767d778e38ba1f36964a744114acd081ef7c442086eadd03d3875ad5ce04d273e685547a14b73aff739128873

@ -1 +0,0 @@
{"keytype": "ed25519", "keyval": {"public": "a63faff1ac94ba5ceaf3bb6d73cf5552e75959cd562bef4f91884260720da313"}}

@ -1 +0,0 @@
6c8f2240657a414a478625eb6d86bb81@@@@100000@@@@bc6067a008c1c02592178927c8e6f561a3727827ea801291798b964bf31b2680@@@@384fb8f6e7e050f54ceffbd5e070581d@@@@4c5c8d7eeae6db767bd53e8dacfbd7d6db36584280b3cf3740fadb885442bf038650908ff9cb17f9d2c746d326a17ec7ce513ffb7e3c11abd875b17c92caa71ea556d71a69427741f0e80989df402e75ed23cfb85c79441a7cdf92e20e489abd09977d9028aae944ddc63116a5170dbdbd8607523a1338c61559fa106c164aee8c58d5961ed02195a46fcff615939c4c4adfc49d37b3cb2760b53dfe5961a63a29d2261310f23e568a58fcf71bf61db5816b00284bf15b7c89f1e9b929e1f3374119c0fd201b40c491e30542b5f547eb4a6828aae416bf6ea1c8b3c52ee0a98cc306f3725868e356143869bda527aee680b56abf660579b0a7539032b97b4266015a0ea6693904ef77002e39

@ -1 +0,0 @@
{"keytype": "ed25519", "keyval": {"public": "80891271acffbe8e901dafce378134b06ec2665d4bc59de26b50354e62c8c9c6"}}

@ -1 +0,0 @@
59dc296f796a2049c9205a473e05b332@@@@100000@@@@50cda771fbc7d9f2e7209e16565289d73ad43a0b3ee620a9553f911f5ba38ca1@@@@9546236c4cd50458127fc69278037f8c@@@@63170139aa164fa9cb8f03e4015bae2bdee27f05cf22e140d931cded959c51104912eb58df06d5bcc422c28e368e80c2fbaa20a0618841fe650c88b1fde72b7cef32e07aca0d963a293c6c6db7d8e0885c6a17450e8307fc92be36d80e5c168b0abdc214dfa9048b5c44a05f17899176a128c7b8307130e085530a07258ac5047b5f439245b0eceeb0e61bd96315b6386282d40b4977fccc04c6098b7390fb4d538c1f0650e62298b235e4a38840254d7033eff9dddce55c347659632c29cc49ed828d9eba5a8e5b4b75956006014a57c8fc5c7f54d232a8eb78bb49423dc54997e7768d07186b295a5be1518be6c76777e55fd2d227070fece6cf2530d7e40e42468da7cc7413fcdf4091ec2

@ -1 +0,0 @@
{"keytype": "ed25519", "keyval": {"public": "6107dea464cd596b87e53eda99f3f6245967a2b6f25a77c0f14bf2a49d79ae7e"}}
@ -1,35 +0,0 @@
{
  "signatures": [
    {
      "keyid": "daa395038e78a453990a9fe23c538172aa57022e21652f381635b08611c80c55",
      "method": "ed25519",
      "sig": "0b021b054508971b8636ea6aceca0aaef0a9245c278e2b05aa5c20cd7ff055ba45473ab47e4c8a2475a257c432fff52c4ed551f1b7362461669d85c8f7a67e04"
    }
  ],
  "signed": {
    "_type": "Targets",
    "delegations": {
      "keys": {},
      "roles": []
    },
    "expires": "2030-01-01T00:00:00Z",
    "targets": {
      "/dir/file2.txt": {
        "hashes": {
          "sha256": "04e2f59431a9d219321baf7d21b8cc797d7615dc3e9515c782c49d2075658701"
        },
        "length": 9
      },
      "/file1.txt": {
        "custom": {
          "file_permissions": "644"
        },
        "hashes": {
          "sha256": "55ae75d991c770d8f3ef07cbfde124ffce9c420da5db6203afab700b27e10cf9"
        },
        "length": 9
      }
    },
    "version": 1
  }
}
@ -1,67 +0,0 @@
{
  "signatures": [
    {
      "keyid": "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b",
      "method": "ed25519",
      "sig": "7c9f8155cd074a9666c76cde1a1f83a9b6d965e4e7f6afa95ece5bedf68ce5caea137099110e9ca16aba5b6fd4a554c0c42032a436c8ab37fd89e596144b230e"
    }
  ],
  "signed": {
    "_type": "Root",
    "consistent_snapshot": true,
    "expires": "2030-01-01T00:00:00Z",
    "keys": {
      "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1": {
        "keytype": "ed25519",
        "keyval": {
          "public": "6107dea464cd596b87e53eda99f3f6245967a2b6f25a77c0f14bf2a49d79ae7e"
        }
      },
      "bba8929df86dae6d9947d2c37eab9de1d0ee828967677fea13fd11a90967f6f5": {
        "keytype": "ed25519",
        "keyval": {
          "public": "a63faff1ac94ba5ceaf3bb6d73cf5552e75959cd562bef4f91884260720da313"
        }
      },
      "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b": {
        "keytype": "ed25519",
        "keyval": {
          "public": "4b9d6523d4fca15b88694b87f714c839f3b1db296884c6d235cfea617e5a3df0"
        }
      },
      "daa395038e78a453990a9fe23c538172aa57022e21652f381635b08611c80c55": {
        "keytype": "ed25519",
        "keyval": {
          "public": "80891271acffbe8e901dafce378134b06ec2665d4bc59de26b50354e62c8c9c6"
        }
      }
    },
    "roles": {
      "root": {
        "keyids": [
          "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b"
        ],
        "threshold": 1
      },
      "snapshot": {
        "keyids": [
          "bba8929df86dae6d9947d2c37eab9de1d0ee828967677fea13fd11a90967f6f5"
        ],
        "threshold": 1
      },
      "targets": {
        "keyids": [
          "daa395038e78a453990a9fe23c538172aa57022e21652f381635b08611c80c55"
        ],
        "threshold": 1
      },
      "timestamp": {
        "keyids": [
          "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1"
        ],
        "threshold": 1
      }
    },
    "version": 1
  }
}
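The "roles" map above is what drives signature verification: each top-level role lists the key IDs allowed to sign it and the threshold of signatures required. A small Go sketch decoding just that portion, with illustrative struct types (the vendored gotuf/data package defines its own):

```
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Illustrative types covering only the fields used here.
type role struct {
	KeyIDs    []string `json:"keyids"`
	Threshold int      `json:"threshold"`
}

type rootMeta struct {
	Signed struct {
		Roles map[string]role `json:"roles"`
	} `json:"signed"`
}

func main() {
	// Trimmed sample input; the keyid is taken from the root.json above.
	raw := []byte(`{"signed": {"roles": {"root": {"keyids": ["d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b"], "threshold": 1}}}}`)

	var r rootMeta
	if err := json.Unmarshal(raw, &r); err != nil {
		log.Fatalln(err)
	}
	for name, ro := range r.Signed.Roles {
		fmt.Printf("%s: %d key id(s), threshold %d\n", name, len(ro.KeyIDs), ro.Threshold)
	}
}
```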
Binary file not shown.
@ -1,28 +0,0 @@
{
  "signatures": [
    {
      "keyid": "bba8929df86dae6d9947d2c37eab9de1d0ee828967677fea13fd11a90967f6f5",
      "method": "ed25519",
      "sig": "006082dcb87a9f1c2d312d28886cad12eec8f9dcd7b3028b7ba83424261381c8323df331be9e0e54c5aca830cd2637dc9bfcfe6b7a01f312a9786d0e54c0a600"
    }
  ],
  "signed": {
    "_type": "Snapshot",
    "expires": "2030-01-01T00:00:00Z",
    "meta": {
      "root.json": {
        "hashes": {
          "sha256": "a68b4847c117ec84f3787b9adabd607785bf30d3a9a4646661761bddc1a11e62"
        },
        "length": 1808
      },
      "targets.json": {
        "hashes": {
          "sha256": "32f37ab8ba96d5a3b2d10cc716ce408c860d82b4ba00e6a7a479df6bcfee2864"
        },
        "length": 790
      }
    },
    "version": 1
  }
}
@ -1,22 +0,0 @@
{
  "signatures": [
    {
      "keyid": "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1",
      "method": "ed25519",
      "sig": "ca207b22bf344aac04c25a09cb5c78a416123ace5064599e13df925d63cce835477b37933c3d6a72af19bb547ce108df9154dcf9b3cb4df733fed5ac9d1cb60d"
    }
  ],
  "signed": {
    "_type": "Timestamp",
    "expires": "2030-01-01T00:00:00Z",
    "meta": {
      "snapshot.json": {
        "hashes": {
          "sha256": "d03b00f125367bcd2237c6a65c442f865b3aac0ba11864d64c0f69ced766e011"
        },
        "length": 682
      }
    },
    "version": 1
  }
}
@ -1,67 +0,0 @@
{
  "signatures": [
    {
      "keyid": "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b",
      "method": "ed25519",
      "sig": "7c9f8155cd074a9666c76cde1a1f83a9b6d965e4e7f6afa95ece5bedf68ce5caea137099110e9ca16aba5b6fd4a554c0c42032a436c8ab37fd89e596144b230e"
    }
  ],
  "signed": {
    "_type": "Root",
    "consistent_snapshot": true,
    "expires": "2030-01-01T00:00:00Z",
    "keys": {
      "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1": {
        "keytype": "ed25519",
        "keyval": {
          "public": "6107dea464cd596b87e53eda99f3f6245967a2b6f25a77c0f14bf2a49d79ae7e"
        }
      },
      "bba8929df86dae6d9947d2c37eab9de1d0ee828967677fea13fd11a90967f6f5": {
        "keytype": "ed25519",
        "keyval": {
          "public": "a63faff1ac94ba5ceaf3bb6d73cf5552e75959cd562bef4f91884260720da313"
        }
      },
      "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b": {
        "keytype": "ed25519",
        "keyval": {
          "public": "4b9d6523d4fca15b88694b87f714c839f3b1db296884c6d235cfea617e5a3df0"
        }
      },
      "daa395038e78a453990a9fe23c538172aa57022e21652f381635b08611c80c55": {
        "keytype": "ed25519",
        "keyval": {
          "public": "80891271acffbe8e901dafce378134b06ec2665d4bc59de26b50354e62c8c9c6"
        }
      }
    },
    "roles": {
      "root": {
        "keyids": [
          "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b"
        ],
        "threshold": 1
      },
      "snapshot": {
        "keyids": [
          "bba8929df86dae6d9947d2c37eab9de1d0ee828967677fea13fd11a90967f6f5"
        ],
        "threshold": 1
      },
      "targets": {
        "keyids": [
          "daa395038e78a453990a9fe23c538172aa57022e21652f381635b08611c80c55"
        ],
        "threshold": 1
      },
      "timestamp": {
        "keyids": [
          "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1"
        ],
        "threshold": 1
      }
    },
    "version": 1
  }
}
@ -1,22 +0,0 @@
{
  "signatures": [
    {
      "keyid": "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1",
      "method": "ed25519",
      "sig": "ca207b22bf344aac04c25a09cb5c78a416123ace5064599e13df925d63cce835477b37933c3d6a72af19bb547ce108df9154dcf9b3cb4df733fed5ac9d1cb60d"
    }
  ],
  "signed": {
    "_type": "Timestamp",
    "expires": "2030-01-01T00:00:00Z",
    "meta": {
      "snapshot.json": {
        "hashes": {
          "sha256": "d03b00f125367bcd2237c6a65c442f865b3aac0ba11864d64c0f69ced766e011"
        },
        "length": 682
      }
    },
    "version": 1
  }
}
@ -1 +0,0 @@
file1.txt

@ -1 +0,0 @@
file2.txt

@ -1 +0,0 @@
file2.txt

@ -1 +0,0 @@
file1.txt

@ -1 +0,0 @@
9d2587d3e9e964aa50595d39535c1cdc@@@@100000@@@@ea2bb63b57beb3a1802547f9b1e36742c9a59d651fdc44cdd60c1e1d9003a1ec@@@@731ffdec7d29dd53a17a8e35560fd117@@@@3af4ea3d8458d0652396f422e5fefbc08e7183526b1631eed4bec2f30c5c6847a17b25c3bb30482eb6df7448f8286abb6aad38d0bd9224679ffb5b9c2fc1ee4ed1b61167a04d2d3b31118cbd0a29737dcffe3a828b7f934ef7deecce72e5424da0eef199201ee1b351c7c91dc01a3f7aa6483ac8e58df245934681aa850ce16e8ad878fde8d20fcee3282bd01fb92050b361ed5d5bd1949232d075d43af87a0af0f37a5231c03f5a864da0f7e91dfe2a40a64b427405ad00c0cf344b1f712cecd005d31798b58b8e5f1708e2c30fd3000588cdfe5162733680046f3b73ce82827f6b10ee76971f790c407631858bfac860fc46c240b9a998724efc56fa1dfc9ef3fe5d344f168dc11f6e77fe

@ -1 +0,0 @@
{"keytype": "ed25519", "keyval": {"public": "12230f2d671ebaa1df573b83c8d9eafe643bc75dd69475314e658862b708f654"}}

@ -1 +0,0 @@
3201c015530f11df6f4805a825277245@@@@100000@@@@75faa825155045e47f461b3f3dbecea299e095e0b2eb326721859b6583f82648@@@@e045a7a8cfddd3b395f5085cea1c0f91@@@@b52c8e28543277aa692de6b04d88918cbb41157b7e6e7814bf3a1c9f8a82d93a0e708ef99e4183d8c768be1bc919af36e542b64511c2cd66a3ba4c8fd9715abb0257162ca35b0bfc4b03c71059f93d1a8e86c7d0dec84f5a05092f510c6cb65cee701a2f109b329c16adcb0cf4ea7d5e6b22ba794176882814e5a98b586c9dc9ed36c96929d3bc8b395af242229432691508a713aa961037f896548a3fa17213ec644553b94ca193f34ad7e281650f3ac105708f384ddf5167cd2a9eb38e42819e1a137d9a18369efa28dbc7b3b7abdc8e00243a8c8a67ae160efcec847ee4ff0f5c4f64e9528c40017ad100d33568bafdbca76e3d6023bf1ea1a2c7869dba40ed278ab10ed536ce6e6e5143

@ -1 +0,0 @@
{"keytype": "ed25519", "keyval": {"public": "17627894686328e24f3bca469cd5edc3a70b5cd58c76fdd7c7d582d3db3d6a8d"}}

@ -1 +0,0 @@
c37f71aa8441503da8e0cab4e69c4144@@@@100000@@@@d78abf885fd898ae7cb4fe359413ccbd362eb9aacf22ffaab11aeda84aa6e1ad@@@@6780cab4d7843366c06b057243dc1f0f@@@@bde05980a1f3b3675db97549aff5c235dfc4898f3dd060ee09799801114a1a6f9450d95dbc0c57a2dbd52a6264d92e5e58d4aeba9a6a4dc6d85d8bf62ba8e9bc2338525892609eb66575af0c838613a2c007bb483a3d254fee14aac210a43b419cf77794bc28f5fabccc81d20cd726d1f8b5914ea9caef432728447c54ce76cac87b2d6df5fc2b2ea606d0d6a1d83226452c11a8ea3a0a677c0131225707609818b2b1f10556393ee797da690bc526fca990dd6c7940052d1832dceda3c1c59ba9b5700ca39b9425af77b9726f5531bc9d47a5d1381f740d96a8119f4469a012a73156e353f173a86ca508e953bd569bd7c28a8eb8d7b49b515ecaeac1f64511fe9b3a0bfb345c5bb2dd0602

@ -1 +0,0 @@
{"keytype": "ed25519", "keyval": {"public": "7e344543b8cb1cae3f930b8a50bf47104a1f6fcf044105643b32ba98916c59f6"}}

@ -1 +0,0 @@
367ecd4b77a9c279871f62a31ea06982@@@@100000@@@@64c698fdc72778ef46ce4e93177e04c23e07b3dea315eb752003b9050964d94c@@@@5c67079066f6d258fa48a7b3df3fd0e4@@@@28cb044ce969cc57a51f5567f0e560525ada56cb44bbbc9858f998d289639e0103bc522bc304b2187647d3eef00d05da209f8e220e586504eaf6402faecbe1a584cd7c0f9369f9464ea4f5ca8fc68561c368f10b4eb70d841f76c730c9c8051ae56fbb90c1acca27d73f8e7c7374bc07eb1676cdb0755c015b29faf9d1a0188df9b782b8e15dba2ff2be987aa7dffd7d8f821183180076960a982e537e10cb13a613b8ccf9baa6aab3de2634af82ad9ee6a7b908483420d3900caea1bfdddde0eac89d5b4583352828c335a8b849d23ab53cc7ca7f43220a72f0e7d9df8bb07294f915ad294cdbe4936735515586ab788160d1c4d7d70e941efdc7a7ac5524e790d7c2606f001e2619ee0750

@ -1 +0,0 @@
{"keytype": "ed25519", "keyval": {"public": "776f245fe7c5626ca0dc3c24644f7345a35741bab9a25031a7042bfcb3c4356b"}}
@ -1,67 +0,0 @@
{
  "signatures": [
    {
      "keyid": "7bd4a38edbb838e77fd65857ce1b8da742a3ec143e285b1029e1091ab10a52fa",
      "method": "ed25519",
      "sig": "dd0d0dbb3be0c259562990582919d480ba41dd752c0c529c2989ed09fd09c99ddfabab21362d6f2b717ca736cd8827180b68dcf85715e80a6e1345d621d6dd08"
    }
  ],
  "signed": {
    "_type": "Root",
    "consistent_snapshot": false,
    "expires": "2030-01-01T00:00:00Z",
    "keys": {
      "253b33da0194b7a61f47d820ec069c26e160be74a19e9545ba6615fc9a28eb62": {
        "keytype": "ed25519",
        "keyval": {
          "public": "776f245fe7c5626ca0dc3c24644f7345a35741bab9a25031a7042bfcb3c4356b"
        }
      },
      "7bd4a38edbb838e77fd65857ce1b8da742a3ec143e285b1029e1091ab10a52fa": {
        "keytype": "ed25519",
        "keyval": {
          "public": "12230f2d671ebaa1df573b83c8d9eafe643bc75dd69475314e658862b708f654"
        }
      },
      "9117d84fd79e2f7db3044a447e3399dd58600af8fcc03369c86641b89a797906": {
        "keytype": "ed25519",
        "keyval": {
          "public": "17627894686328e24f3bca469cd5edc3a70b5cd58c76fdd7c7d582d3db3d6a8d"
        }
      },
      "ecc259a2126ff1c1f175a137fada7e2d5bb93fdafeb6b0577b3b5c68af184ff8": {
        "keytype": "ed25519",
        "keyval": {
          "public": "7e344543b8cb1cae3f930b8a50bf47104a1f6fcf044105643b32ba98916c59f6"
        }
      }
    },
    "roles": {
      "root": {
        "keyids": [
          "7bd4a38edbb838e77fd65857ce1b8da742a3ec143e285b1029e1091ab10a52fa"
        ],
        "threshold": 1
      },
      "snapshot": {
        "keyids": [
          "9117d84fd79e2f7db3044a447e3399dd58600af8fcc03369c86641b89a797906"
        ],
        "threshold": 1
      },
      "targets": {
        "keyids": [
          "ecc259a2126ff1c1f175a137fada7e2d5bb93fdafeb6b0577b3b5c68af184ff8"
        ],
        "threshold": 1
      },
      "timestamp": {
        "keyids": [
          "253b33da0194b7a61f47d820ec069c26e160be74a19e9545ba6615fc9a28eb62"
        ],
        "threshold": 1
      }
    },
    "version": 1
  }
}
@ -1,34 +0,0 @@
{
  "signatures": [
    {
      "keyid": "9117d84fd79e2f7db3044a447e3399dd58600af8fcc03369c86641b89a797906",
      "method": "ed25519",
      "sig": "ffa7e6ea81f87ec7b3c65b7501a25ecc27bd3b6c400b54b2da49a7a2992f5b0faac3126236b889dd2462a68d1dba6c82622480bb06dd52a429b8ce061fb86b04"
    }
  ],
  "signed": {
    "_type": "Snapshot",
    "expires": "2030-01-01T00:00:00Z",
    "meta": {
      "root.json": {
        "hashes": {
          "sha256": "0afc32c79ab3dbcd29477ef1749f859349d2f78bf6305f012bc7c6ca93143300"
        },
        "length": 1809
      },
      "targets.json": {
        "hashes": {
          "sha256": "b9a821a57d4d61a23ba70e1c7d1681497aaf31c86c3eb9dd9cda023a8057528b"
        },
        "length": 790
      },
      "targets.json.gz": {
        "hashes": {
          "sha256": "8964b7a9166a437d0b9db4b07f020a1afeab266d41af8d3599aea4b03913e092"
        },
        "length": 460
      }
    },
    "version": 1
  }
}
@ -1,35 +0,0 @@
{
  "signatures": [
    {
      "keyid": "ecc259a2126ff1c1f175a137fada7e2d5bb93fdafeb6b0577b3b5c68af184ff8",
      "method": "ed25519",
      "sig": "92ed3e32c061c87ddf41c9ee606fa88a320513458b634ac259c6f2383bffe2e983d53c00ab78991c6ed965f21284c24246907e79d96100d955087a517761c10d"
    }
  ],
  "signed": {
    "_type": "Targets",
    "delegations": {
      "keys": {},
      "roles": []
    },
    "expires": "2030-01-01T00:00:00Z",
    "targets": {
      "/dir/file2.txt": {
        "hashes": {
          "sha256": "04e2f59431a9d219321baf7d21b8cc797d7615dc3e9515c782c49d2075658701"
        },
        "length": 9
      },
      "/file1.txt": {
        "custom": {
          "file_permissions": "644"
        },
        "hashes": {
          "sha256": "55ae75d991c770d8f3ef07cbfde124ffce9c420da5db6203afab700b27e10cf9"
        },
        "length": 9
      }
    },
    "version": 1
  }
}
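Each target entry above pairs a length with a sha256 digest, which is all a client needs to verify a downloaded file. A self-contained Go sketch checking a local copy of /file1.txt against the values recorded above (the local path is illustrative):

```
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

func main() {
	// Hash and length copied from the /file1.txt entry in targets.json above.
	const wantHash = "55ae75d991c770d8f3ef07cbfde124ffce9c420da5db6203afab700b27e10cf9"
	const wantLen = int64(9)

	f, err := os.Open("targets/file1.txt") // illustrative local path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	// Stream the file through the hash while counting its bytes.
	h := sha256.New()
	n, err := io.Copy(h, f)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if n != wantLen || hex.EncodeToString(h.Sum(nil)) != wantHash {
		fmt.Println("MISMATCH")
		os.Exit(1)
	}
	fmt.Println("OK")
}
```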
Binary file not shown.
@ -1,22 +0,0 @@
{
  "signatures": [
    {
      "keyid": "253b33da0194b7a61f47d820ec069c26e160be74a19e9545ba6615fc9a28eb62",
      "method": "ed25519",
      "sig": "b5e3aad4caad2aef5b6ffbe4547e181a8c3c73382271ded933a6eed5754ff09890e826460e90d0032371def25a7c16ede4622758b91c87105f20f83864b4b601"
    }
  ],
  "signed": {
    "_type": "Timestamp",
    "expires": "2030-01-01T00:00:00Z",
    "meta": {
      "snapshot.json": {
        "hashes": {
          "sha256": "f56dd748c9c0a7dd3c81f575795d72d788b9743687a9fcc1c0e178296ebd2800"
        },
        "length": 835
      }
    },
    "version": 1
  }
}
@ -1 +0,0 @@
file2.txt

@ -1 +0,0 @@
file1.txt
@ -1,84 +0,0 @@
package main

import (
	"fmt"
	"log"

	"github.com/endophage/go-tuf"
	"github.com/endophage/go-tuf/signed"
	"github.com/endophage/go-tuf/store"
	"github.com/endophage/go-tuf/util"
	"github.com/flynn/go-docopt"
)

func main() {
	log.SetFlags(0)

	usage := `usage: tuftools [-h|--help] <command> [<args>...]

Options:
  -h, --help

Commands:
  help         Show usage for a specific command
  meta         Generate metadata from the given file path

See "tuf help <command>" for more information on a specific command
`

	args, _ := docopt.Parse(usage, nil, true, "", true)
	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `tuf help`
			fmt.Println(usage)
			return
		} else { // `tuf help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	if err := runCommand(cmd, cmdArgs); err != nil {
		log.Fatalln("ERROR:", err)
	}
}

type cmdFunc func(*docopt.Args, *tuf.Repo) error

type command struct {
	usage string
	f     cmdFunc
}

var commands = make(map[string]*command)

func register(name string, f cmdFunc, usage string) {
	commands[name] = &command{usage: usage, f: f}
}

func runCommand(name string, args []string) error {
	argv := make([]string, 1, 1+len(args))
	argv[0] = name
	argv = append(argv, args...)

	cmd, ok := commands[name]
	if !ok {
		return fmt.Errorf("%s is not a tuf command. See 'tuf help'", name)
	}

	parsedArgs, err := docopt.Parse(cmd.usage, argv, true, "", true)
	if err != nil {
		return err
	}

	db := util.GetSqliteDB()
	local := store.DBStore(db, "")
	signer := signed.Ed25519{}
	repo, err := tuf.NewRepo(&signer, local, "sha256")
	if err != nil {
		return err
	}
	return cmd.f(parsedArgs, repo)
}
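The register/runCommand pair above is the tool's whole extension surface: each command file adds itself to the commands map from init() and receives the shared *tuf.Repo. A hypothetical extra subcommand (not part of the vendored tool; "version" and its output string are invented for illustration) would follow the same shape:

```
package main

import (
	"fmt"

	"github.com/endophage/go-tuf"
	"github.com/flynn/go-docopt"
)

// Hypothetical example command, registered the same way as "meta".
func init() {
	register("version", cmdVersion, `
usage: tuftools version

Print a build identifier.
`)
}

func cmdVersion(args *docopt.Args, repo *tuf.Repo) error {
	fmt.Println("tuftools (example build)")
	return nil
}
```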
@ -1,39 +0,0 @@
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"

	"github.com/endophage/go-tuf"
	"github.com/endophage/go-tuf/util"
	"github.com/flynn/go-docopt"
)

func init() {
	register("meta", cmdMeta, `
usage: tuftools meta [<path>...]

Generate sample metadata for file(s) given by path.
`)
}

func cmdMeta(args *docopt.Args, repo *tuf.Repo) error {
	paths := args.All["<path>"].([]string)
	for _, file := range paths {
		reader, err := os.Open(file)
		if err != nil {
			return err
		}
		meta, err := util.GenerateFileMeta(reader, "sha256")
		reader.Close()
		if err != nil {
			return err
		}
		jsonBytes, err := json.Marshal(meta)
		if err != nil {
			return err
		}
		filename := fmt.Sprintf("%s.meta.json", file)
		err = ioutil.WriteFile(filename, jsonBytes, 0644)
		if err != nil {
			return err
		}
	}
	return nil
}
@ -1,48 +0,0 @@
# go-tuf client CLI

## Install

```
go get github.com/flynn/go-tuf/cmd/tuf-client
```

## Usage

The CLI provides three commands:

* `tuf-client init` - initialize a local file store using root keys (e.g. from
  the output of `tuf root-keys`)
* `tuf-client list` - list available targets and their file sizes
* `tuf-client get` - get a target file and write it to STDOUT

All commands require the base URL of the TUF repository as the first non-flag
argument, and accept an optional `--store` flag which is the path to the local
storage.

Run `tuf-client help` from the command line to get more detailed usage
information.

## Examples

```
# init
$ tuf-client init https://example.com/path/to/repo

# init with a custom store path
$ tuf-client init --store /tmp/tuf.db https://example.com/path/to/repo

# list available targets
$ tuf-client list https://example.com/path/to/repo
PATH      SIZE
/foo.txt  1.6KB
/bar.txt  336B
/baz.txt  1.5KB

# get a target
$ tuf-client get https://example.com/path/to/repo /foo.txt
the contents of foo.txt

# the prefixed / is optional
$ tuf-client get https://example.com/path/to/repo foo.txt
the contents of foo.txt
```
@ -1,52 +0,0 @@
package main

import (
	"io"
	"io/ioutil"
	"os"

	tuf "github.com/endophage/go-tuf/client"
	"github.com/endophage/go-tuf/util"
	"github.com/flynn/go-docopt"
)

func init() {
	register("get", cmdGet, `
usage: tuf-client get [-s|--store=<path>] <url> <target>

Options:
  -s <path>    The path to the local file store [default: tuf.db]

Get a target from the repository.
`)
}

type tmpFile struct {
	*os.File
}

func (t *tmpFile) Delete() error {
	t.Close()
	return os.Remove(t.Name())
}

func cmdGet(args *docopt.Args, client *tuf.Client) error {
	if _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) {
		return err
	}
	target := util.NormalizeTarget(args.String["<target>"])
	file, err := ioutil.TempFile("", "go-tuf")
	if err != nil {
		return err
	}
	tmp := tmpFile{file}
	// Clean up the temp file even if the download fails partway through.
	defer tmp.Delete()
	if err := client.Download(target, &tmp); err != nil {
		return err
	}
	if _, err := tmp.Seek(0, os.SEEK_SET); err != nil {
		return err
	}
	_, err = io.Copy(os.Stdout, file)
	return err
}
@ -1,41 +0,0 @@
package main

import (
	"encoding/json"
	"io"
	"os"

	tuf "github.com/endophage/go-tuf/client"
	"github.com/endophage/go-tuf/data"
	"github.com/flynn/go-docopt"
)

func init() {
	register("init", cmdInit, `
usage: tuf-client init [-s|--store=<path>] <url> [<root-keys-file>]

Options:
  -s <path>    The path to the local file store [default: tuf.db]

Initialize the local file store with root keys.
`)
}

func cmdInit(args *docopt.Args, client *tuf.Client) error {
	file := args.String["<root-keys-file>"]
	var in io.Reader
	if file == "" || file == "-" {
		in = os.Stdin
	} else {
		var err error
		in, err = os.Open(file)
		if err != nil {
			return err
		}
	}
	var rootKeys []*data.Key
	if err := json.NewDecoder(in).Decode(&rootKeys); err != nil {
		return err
	}
	return client.Init(rootKeys, len(rootKeys))
}
@ -1,39 +0,0 @@
package main

import (
	"fmt"
	"os"
	"text/tabwriter"

	"github.com/dustin/go-humanize"
	tuf "github.com/endophage/go-tuf/client"
	"github.com/flynn/go-docopt"
)

func init() {
	register("list", cmdList, `
usage: tuf-client list [-s|--store=<path>] <url>

Options:
  -s <path>    The path to the local file store [default: tuf.db]

List available target files.
`)
}

func cmdList(args *docopt.Args, client *tuf.Client) error {
	if _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) {
		return err
	}
	targets, err := client.Targets()
	if err != nil {
		return err
	}
	w := tabwriter.NewWriter(os.Stdout, 1, 2, 2, ' ', 0)
	defer w.Flush()
	fmt.Fprintln(w, "PATH\tSIZE")
	for path, meta := range targets {
		fmt.Fprintf(w, "%s\t%s\n", path, humanize.Bytes(uint64(meta.Length)))
	}
	return nil
}
@ -1,96 +0,0 @@
package main

import (
	"fmt"
	"log"

	tuf "github.com/endophage/go-tuf/client"
	"github.com/flynn/go-docopt"
)

func main() {
	log.SetFlags(0)

	usage := `usage: tuf-client [-h|--help] <command> [<args>...]

Options:
  -h, --help

Commands:
  help         Show usage for a specific command
  init         Initialize with root keys
  list         List available target files
  get          Get a target file

See "tuf-client help <command>" for more information on a specific command.
`

	args, _ := docopt.Parse(usage, nil, true, "", true)
	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `tuf-client help`
			fmt.Println(usage)
			return
		} else { // `tuf-client help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	if err := runCommand(cmd, cmdArgs); err != nil {
		log.Fatalln("ERROR:", err)
	}
}

type cmdFunc func(*docopt.Args, *tuf.Client) error

type command struct {
	usage string
	f     cmdFunc
}

var commands = make(map[string]*command)

func register(name string, f cmdFunc, usage string) {
	commands[name] = &command{usage: usage, f: f}
}

func runCommand(name string, args []string) error {
	argv := make([]string, 1, 1+len(args))
	argv[0] = name
	argv = append(argv, args...)

	cmd, ok := commands[name]
	if !ok {
		return fmt.Errorf("%s is not a tuf-client command. See 'tuf-client help'", name)
	}

	parsedArgs, err := docopt.Parse(cmd.usage, argv, true, "", true)
	if err != nil {
		return err
	}

	client, err := tufClient(parsedArgs)
	if err != nil {
		return err
	}
	return cmd.f(parsedArgs, client)
}

func tufClient(args *docopt.Args) (*tuf.Client, error) {
	store, ok := args.String["--store"]
	if !ok {
		store = args.String["-s"]
	}
	local, err := tuf.FileLocalStore(store)
	if err != nil {
		return nil, err
	}
	remote, err := tuf.HTTPRemoteStore(args.String["<url>"], nil)
	if err != nil {
		return nil, err
	}
	return tuf.NewClient(local, remote), nil
}
@ -1,36 +0,0 @@
package main

import (
	// "encoding/json"

	"github.com/endophage/go-tuf"
	"github.com/flynn/go-docopt"
)

func init() {
	register("add", cmdAdd, `
usage: tuf add [--expires=<days>] [--custom=<data>] [<path>...]

Add target file(s).

Options:
  --expires=<days>   Set the targets manifest to expire <days> days from now.
  --custom=<data>    Set custom JSON data for the target(s).
`)
}

func cmdAdd(args *docopt.Args, repo *tuf.Repo) error {
	// var custom json.RawMessage
	// if c := args.String["--custom"]; c != "" {
	// 	custom = json.RawMessage(c)
	// }
	paths := args.All["<path>"].([]string)
	if arg := args.String["--expires"]; arg != "" {
		expires, err := parseExpires(arg)
		if err != nil {
			return err
		}
		return repo.AddTargetsWithExpires(paths, nil, expires)
	}
	return repo.AddTargets(paths, nil)
}
@ -1,18 +0,0 @@
package main

import (
	"github.com/endophage/go-tuf"
	"github.com/flynn/go-docopt"
)

func init() {
	register("clean", cmdClean, `
usage: tuf clean

Remove all staged manifests.
`)
}

func cmdClean(args *docopt.Args, repo *tuf.Repo) error {
	return repo.Clean()
}
@ -1,18 +0,0 @@
package main

import (
	"github.com/endophage/go-tuf"
	"github.com/flynn/go-docopt"
)

func init() {
	register("commit", cmdCommit, `
usage: tuf commit

Commit staged files to the repository.
`)
}

func cmdCommit(args *docopt.Args, repo *tuf.Repo) error {
	return repo.Commit()
}
@ -1,43 +0,0 @@
package main

import (
	"fmt"

	"github.com/endophage/go-tuf"
	"github.com/flynn/go-docopt"
)

func init() {
	register("gen-key", cmdGenKey, `
usage: tuf gen-key [--expires=<days>] <role>

Generate a new signing key for the given role.

The key will be serialized to JSON and written to the "keys" directory with
filename pattern "ROLE-KEYID.json". The root manifest will also be staged
with the addition of the key's ID to the role's list of key IDs.

Options:
  --expires=<days>   Set the root manifest to expire <days> days from now.
`)
}

func cmdGenKey(args *docopt.Args, repo *tuf.Repo) error {
	role := args.String["<role>"]
	var id string
	var err error
	if arg := args.String["--expires"]; arg != "" {
		// Use a separately named error here so the assignment to 'id' below
		// does not shadow 'err' and silently drop key-generation failures.
		expires, perr := parseExpires(arg)
		if perr != nil {
			return perr
		}
		id, err = repo.GenKeyWithExpires(role, expires)
	} else {
		id, err = repo.GenKey(role)
	}
	if err != nil {
		return err
	}
	fmt.Println("Generated", role, "key with ID", id)
	return nil
}
Some files were not shown because too many files have changed in this diff.