From cc0782d3d2ba309cb4c72480c342cac69fad1ea8 Mon Sep 17 00:00:00 2001 From: David Lawrence Date: Thu, 11 Jun 2015 13:54:49 -0700 Subject: [PATCH] rewriting imports to 'gotuf', adding config to set log level, making restart/shutdown cleanup connections after timeout, updating godeps --- Godeps/Godeps.json | 65 +- .../p/go-uuid/uuid}/LICENSE | 6 +- .../src/code.google.com/p/go-uuid/uuid/dce.go | 84 + .../src/code.google.com/p/go-uuid/uuid/doc.go | 8 + .../code.google.com/p/go-uuid/uuid/hash.go | 53 + .../code.google.com/p/go-uuid/uuid/json.go | 30 + .../p/go-uuid/uuid/json_test.go | 32 + .../code.google.com/p/go-uuid/uuid/node.go | 101 ++ .../p/go-uuid/uuid/seq_test.go | 66 + .../code.google.com/p/go-uuid/uuid/time.go | 132 ++ .../code.google.com/p/go-uuid/uuid/util.go | 43 + .../code.google.com/p/go-uuid/uuid/uuid.go | 163 ++ .../p/go-uuid/uuid/uuid_test.go | 390 +++++ .../p/go-uuid/uuid/version1.go | 41 + .../p/go-uuid/uuid/version4.go | 25 + .../p/gosqlite/sqlite3/driver.go | 498 ------ .../docker/distribution/context/context.go | 76 + .../docker/distribution/context/doc.go | 76 + .../docker/distribution/context/http.go | 336 ++++ .../docker/distribution/context/http_test.go | 285 +++ .../docker/distribution/context/logger.go | 108 ++ .../docker/distribution/context/trace.go | 104 ++ .../docker/distribution/context/trace_test.go | 85 + .../docker/distribution/context/util.go | 32 + .../docker/distribution/registry/auth/auth.go | 142 ++ .../registry/auth/silly/access.go | 96 ++ .../registry/auth/silly/access_test.go | 70 + .../registry/auth/token/accesscontroller.go | 274 +++ .../registry/auth}/token/stringset.go | 0 .../registry/auth}/token/token.go | 66 +- .../registry/auth}/token/token_test.go | 91 +- .../distribution/registry/auth}/token/util.go | 0 .../docker/distribution/uuid/uuid.go | 112 ++ .../docker/distribution/uuid/uuid_test.go | 48 + .../github.com/endophage/go-tuf/.gitignore | 1 - .../github.com/endophage/go-tuf/.travis.yml | 30 - 
.../endophage/go-tuf/Godeps/Godeps.json | 60 - .../github.com/endophage/go-tuf/Godeps/Readme | 5 - .../github.com/endophage/go-tuf/MAINTAINERS | 2 - .../src/github.com/endophage/go-tuf/README.md | 511 ------ .../endophage/go-tuf/client/client.go | 627 ------- .../endophage/go-tuf/client/client_test.go | 838 --------- .../endophage/go-tuf/client/errors.go | 106 -- .../endophage/go-tuf/client/interop_test.go | 183 -- .../endophage/go-tuf/client/local_store.go | 65 - .../go-tuf/client/local_store_test.go | 46 - .../endophage/go-tuf/client/remote_store.go | 92 - .../go-tuf/client/testdata/.gitignore | 2 - .../go-tuf/client/testdata/LICENSE.txt | 66 - .../endophage/go-tuf/client/testdata/Makefile | 8 - .../go-tuf/client/testdata/README.md | 47 - .../go-tuf/client/testdata/client.py | 232 --- .../client/testdata/generate/Dockerfile | 12 - .../client/testdata/generate/generate.py | 82 - .../client/testdata/generate/generate.sh | 40 - .../keystore/root_key | 1 - .../keystore/root_key.pub | 1 - .../keystore/snapshot_key | 1 - .../keystore/snapshot_key.pub | 1 - .../keystore/targets_key | 1 - .../keystore/targets_key.pub | 1 - .../keystore/timestamp_key | 1 - .../keystore/timestamp_key.pub | 1 - ...d82b4ba00e6a7a479df6bcfee2864.targets.json | 35 - ...85bf30d3a9a4646661761bddc1a11e62.root.json | 67 - ...a4fd19c35d1ccca92ce9a6a6da.targets.json.gz | Bin 459 -> 0 bytes ...ac0ba11864d64c0f69ced766e011.snapshot.json | 28 - ...4b86c50db88bf001e942ef625ad.timestamp.json | 22 - .../repository/metadata/root.json | 67 - .../repository/metadata/timestamp.json | 22 - ...ce9c420da5db6203afab700b27e10cf9.file1.txt | 1 - ...7d7615dc3e9515c782c49d2075658701.file2.txt | 1 - .../repository/targets/dir/file2.txt | 1 - .../repository/targets/file1.txt | 1 - .../keystore/root_key | 1 - .../keystore/root_key.pub | 1 - .../keystore/snapshot_key | 1 - .../keystore/snapshot_key.pub | 1 - .../keystore/targets_key | 1 - .../keystore/targets_key.pub | 1 - .../keystore/timestamp_key | 1 - 
.../keystore/timestamp_key.pub | 1 - .../repository/metadata/root.json | 67 - .../repository/metadata/snapshot.json | 34 - .../repository/metadata/targets.json | 35 - .../repository/metadata/targets.json.gz | Bin 460 -> 0 bytes .../repository/metadata/timestamp.json | 22 - .../repository/targets/dir/file2.txt | 1 - .../repository/targets/file1.txt | 1 - .../endophage/go-tuf/cmd/tools/main.go | 84 - .../endophage/go-tuf/cmd/tools/meta.go | 39 - .../endophage/go-tuf/cmd/tuf-client/README.md | 48 - .../endophage/go-tuf/cmd/tuf-client/get.go | 52 - .../endophage/go-tuf/cmd/tuf-client/init.go | 41 - .../endophage/go-tuf/cmd/tuf-client/list.go | 39 - .../endophage/go-tuf/cmd/tuf-client/main.go | 96 -- .../endophage/go-tuf/cmd/tuf/add.go | 36 - .../endophage/go-tuf/cmd/tuf/clean.go | 18 - .../endophage/go-tuf/cmd/tuf/commit.go | 18 - .../endophage/go-tuf/cmd/tuf/gen_key.go | 43 - .../endophage/go-tuf/cmd/tuf/init.go | 23 - .../endophage/go-tuf/cmd/tuf/main.go | 167 -- .../endophage/go-tuf/cmd/tuf/regenerate.go | 22 - .../endophage/go-tuf/cmd/tuf/remove.go | 35 - .../endophage/go-tuf/cmd/tuf/revoke_key.go | 31 - .../endophage/go-tuf/cmd/tuf/root_keys.go | 27 - .../endophage/go-tuf/cmd/tuf/sign.go | 18 - .../endophage/go-tuf/cmd/tuf/snapshot.go | 29 - .../endophage/go-tuf/cmd/tuf/timestamp.go | 28 - .../github.com/endophage/go-tuf/data/types.go | 157 -- .../endophage/go-tuf/encrypted/encrypted.go | 226 --- .../go-tuf/encrypted/encrypted_test.go | 64 - .../github.com/endophage/go-tuf/keys/db.go | 115 -- .../src/github.com/endophage/go-tuf/repo.go | 680 -------- .../github.com/endophage/go-tuf/repo_test.go | 936 ---------- .../endophage/go-tuf/signed/interface.go | 20 - .../endophage/go-tuf/store/dbstore.go | 265 --- .../endophage/go-tuf/store/dbstore_test.go | 155 -- .../endophage/go-tuf/store/filestore.go | 355 ---- .../endophage/go-tuf/store/interfaces.go | 24 - .../endophage/go-tuf/store/memorystore.go | 90 - .../endophage/go-tuf/util/testutils.go | 46 - 
.../github.com/endophage/go-tuf/util/util.go | 118 -- .../endophage/go-tuf/util/util_test.go | 133 -- .../{go-tuf => gotuf}/data/hex_bytes.go | 0 .../{go-tuf => gotuf}/data/hex_bytes_test.go | 0 .../github.com/endophage/gotuf/data/keys.go | 105 ++ .../github.com/endophage/gotuf/data/roles.go | 117 ++ .../github.com/endophage/gotuf/data/root.go | 69 + .../endophage/gotuf/data/snapshot.go | 93 + .../endophage/gotuf/data/targets.go | 117 ++ .../endophage/gotuf/data/timestamp.go | 75 + .../github.com/endophage/gotuf/data/types.go | 136 ++ .../{go-tuf => gotuf}/errors/errors.go | 0 .../src/github.com/endophage/gotuf/keys/db.go | 60 + .../trust.go => gotuf/signed/ed25519.go} | 58 +- .../{go-tuf => gotuf}/signed/errors.go | 3 +- .../endophage/gotuf/signed/interface.go | 45 + .../{go-tuf => gotuf}/signed/sign.go | 27 +- .../{go-tuf => gotuf}/signed/sign_test.go | 24 +- .../endophage/gotuf/signed/verifiers.go | 119 ++ .../endophage/gotuf/signed/verifiers_test.go | 13 + .../{go-tuf => gotuf}/signed/verify.go | 107 +- .../{go-tuf => gotuf}/signed/verify_test.go | 35 +- .../x/crypto/nacl/secretbox/secretbox.go | 149 -- .../x/crypto/nacl/secretbox/secretbox_test.go | 91 - .../src/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 - .../golang.org/x/crypto/pbkdf2/pbkdf2_test.go | 157 -- .../x/crypto/poly1305/const_amd64.s | 45 - .../golang.org/x/crypto/poly1305/poly1305.go | 32 - .../x/crypto/poly1305/poly1305_amd64.s | 497 ------ .../x/crypto/poly1305/poly1305_test.go | 74 - .../golang.org/x/crypto/poly1305/sum_amd64.go | 24 - .../golang.org/x/crypto/poly1305/sum_ref.go | 1531 ----------------- .../x/crypto/salsa20/salsa/hsalsa20.go | 144 -- .../x/crypto/salsa20/salsa/salsa2020_amd64.s | 902 ---------- .../x/crypto/salsa20/salsa/salsa208.go | 199 --- .../x/crypto/salsa20/salsa/salsa20_amd64.go | 23 - .../x/crypto/salsa20/salsa/salsa20_ref.go | 234 --- .../x/crypto/salsa20/salsa/salsa_test.go | 35 - .../src/golang.org/x/crypto/scrypt/scrypt.go | 243 --- 
.../golang.org/x/crypto/scrypt/scrypt_test.go | 160 -- README.md | 31 +- auth/auth.go | 115 -- auth/auth_test.go | 36 - auth/token/authorizer.go | 225 --- auth/token/errors.go | 13 - cmd/vetinari-server/config.json | 3 + cmd/vetinari-server/main.go | 17 +- config/config.go | 5 + server/handlers/default.go | 183 +- server/handlers/default_test.go | 2 +- server/server.go | 95 +- server/server_test.go | 2 +- signer/rufus_trust.go | 4 +- utils/auth.go | 103 -- utils/auth_test.go | 39 - utils/context.go | 71 - utils/context_test.go | 28 - utils/http.go | 57 +- utils/http_test.go | 2 +- 181 files changed, 4350 insertions(+), 13993 deletions(-) rename Godeps/_workspace/src/{github.com/endophage/go-tuf => code.google.com/p/go-uuid/uuid}/LICENSE (88%) create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/seq_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go delete mode 100644 Godeps/_workspace/src/code.google.com/p/gosqlite/sqlite3/driver.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/context.go create 
mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/doc.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/http.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/logger.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/trace.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/util.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go rename {auth => Godeps/_workspace/src/github.com/docker/distribution/registry/auth}/token/stringset.go (100%) rename {auth => Godeps/_workspace/src/github.com/docker/distribution/registry/auth}/token/token.go (86%) rename {auth => Godeps/_workspace/src/github.com/docker/distribution/registry/auth}/token/token_test.go (79%) rename {auth => Godeps/_workspace/src/github.com/docker/distribution/registry/auth}/token/util.go (100%) create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/Godeps/Godeps.json delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/Godeps/Readme delete mode 100644 
Godeps/_workspace/src/github.com/endophage/go-tuf/MAINTAINERS delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/README.md delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/client.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/client_test.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/errors.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/interop_test.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/local_store.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/local_store_test.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/remote_store.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/LICENSE.txt delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/Makefile delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/README.md delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/client.py delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/Dockerfile delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.py delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.sh delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/root_key delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/root_key.pub delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/snapshot_key delete mode 100644 
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/snapshot_key.pub delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/targets_key delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/targets_key.pub delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/timestamp_key delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/timestamp_key.pub delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/32f37ab8ba96d5a3b2d10cc716ce408c860d82b4ba00e6a7a479df6bcfee2864.targets.json delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/a68b4847c117ec84f3787b9adabd607785bf30d3a9a4646661761bddc1a11e62.root.json delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/cae20eb655ea7f5b4ac4638e98dc9ad53dfb87a4fd19c35d1ccca92ce9a6a6da.targets.json.gz delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/d03b00f125367bcd2237c6a65c442f865b3aac0ba11864d64c0f69ced766e011.snapshot.json delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/f32cb268ca4f11f89364e9becf091ad410dc74b86c50db88bf001e942ef625ad.timestamp.json delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/root.json delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/timestamp.json delete mode 100644 
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/targets/55ae75d991c770d8f3ef07cbfde124ffce9c420da5db6203afab700b27e10cf9.file1.txt delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/targets/dir/04e2f59431a9d219321baf7d21b8cc797d7615dc3e9515c782c49d2075658701.file2.txt delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/targets/dir/file2.txt delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/targets/file1.txt delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/root_key delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/root_key.pub delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/snapshot_key delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/snapshot_key.pub delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/targets_key delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/targets_key.pub delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/timestamp_key delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/keystore/timestamp_key.pub delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/metadata/root.json delete mode 100644 
Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/metadata/snapshot.json delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/metadata/targets.json delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/metadata/targets.json.gz delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/metadata/timestamp.json delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/targets/dir/file2.txt delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/targets/file1.txt delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tools/main.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tools/meta.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/README.md delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/get.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/init.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/list.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/main.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/add.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/clean.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/commit.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/gen_key.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/init.go delete mode 100644 
Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/main.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/regenerate.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/remove.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/revoke_key.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/root_keys.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/sign.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/snapshot.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/timestamp.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/data/types.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/encrypted/encrypted.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/encrypted/encrypted_test.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/keys/db.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/repo.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/repo_test.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/signed/interface.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/store/dbstore.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/store/dbstore_test.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/store/filestore.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/store/interfaces.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/store/memorystore.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/util/testutils.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/util/util.go delete mode 100644 Godeps/_workspace/src/github.com/endophage/go-tuf/util/util_test.go rename 
Godeps/_workspace/src/github.com/endophage/{go-tuf => gotuf}/data/hex_bytes.go (100%) rename Godeps/_workspace/src/github.com/endophage/{go-tuf => gotuf}/data/hex_bytes_test.go (100%) create mode 100644 Godeps/_workspace/src/github.com/endophage/gotuf/data/keys.go create mode 100644 Godeps/_workspace/src/github.com/endophage/gotuf/data/roles.go create mode 100644 Godeps/_workspace/src/github.com/endophage/gotuf/data/root.go create mode 100644 Godeps/_workspace/src/github.com/endophage/gotuf/data/snapshot.go create mode 100644 Godeps/_workspace/src/github.com/endophage/gotuf/data/targets.go create mode 100644 Godeps/_workspace/src/github.com/endophage/gotuf/data/timestamp.go create mode 100644 Godeps/_workspace/src/github.com/endophage/gotuf/data/types.go rename Godeps/_workspace/src/github.com/endophage/{go-tuf => gotuf}/errors/errors.go (100%) create mode 100644 Godeps/_workspace/src/github.com/endophage/gotuf/keys/db.go rename Godeps/_workspace/src/github.com/endophage/{go-tuf/signed/trust.go => gotuf/signed/ed25519.go} (50%) rename Godeps/_workspace/src/github.com/endophage/{go-tuf => gotuf}/signed/errors.go (92%) create mode 100644 Godeps/_workspace/src/github.com/endophage/gotuf/signed/interface.go rename Godeps/_workspace/src/github.com/endophage/{go-tuf => gotuf}/signed/sign.go (57%) rename Godeps/_workspace/src/github.com/endophage/{go-tuf => gotuf}/signed/sign_test.go (77%) create mode 100644 Godeps/_workspace/src/github.com/endophage/gotuf/signed/verifiers.go create mode 100644 Godeps/_workspace/src/github.com/endophage/gotuf/signed/verifiers_test.go rename Godeps/_workspace/src/github.com/endophage/{go-tuf => gotuf}/signed/verify.go (52%) rename Godeps/_workspace/src/github.com/endophage/{go-tuf => gotuf}/signed/verify_test.go (87%) delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox.go delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox_test.go delete mode 100644 
Godeps/_workspace/src/golang.org/x/crypto/pbkdf2/pbkdf2.go delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/pbkdf2/pbkdf2_test.go delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/poly1305/const_amd64.s delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305.go delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_amd64.s delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_test.go delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_amd64.go delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_ref.go delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/hsalsa20.go delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa208.go delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa_test.go delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/scrypt/scrypt.go delete mode 100644 Godeps/_workspace/src/golang.org/x/crypto/scrypt/scrypt_test.go delete mode 100644 auth/auth.go delete mode 100644 auth/auth_test.go delete mode 100644 auth/token/authorizer.go delete mode 100644 auth/token/errors.go delete mode 100644 utils/auth.go delete mode 100644 utils/auth_test.go delete mode 100644 utils/context.go delete mode 100644 utils/context_test.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 4658c279ea..524bc5fa2b 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -6,9 +6,13 @@ ], "Deps": [ { - "ImportPath": "code.google.com/p/gosqlite/sqlite3", - "Comment": "null-16", - "Rev": "74691fb6f83716190870cde1b658538dd4b18eb0" + "ImportPath": "code.google.com/p/go-uuid/uuid", + 
"Comment": "null-15", + "Rev": "35bc42037350f0078e3c974c6ea690f1926603ab" + }, + { + "ImportPath": "github.com/Sirupsen/logrus", + "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" }, { "ImportPath": "github.com/agl/ed25519", @@ -18,6 +22,21 @@ "ImportPath": "github.com/bradfitz/http2", "Rev": "97124afb234048ae0c91b8883c59fcd890bf8145" }, + { + "ImportPath": "github.com/docker/distribution/context", + "Comment": "v2.0.0-228-gb230183", + "Rev": "b230183b0fe8b8ed3c9ae2898c47c8c8618dc80f" + }, + { + "ImportPath": "github.com/docker/distribution/registry/auth", + "Comment": "v2.0.0-228-gb230183", + "Rev": "b230183b0fe8b8ed3c9ae2898c47c8c8618dc80f" + }, + { + "ImportPath": "github.com/docker/distribution/uuid", + "Comment": "v2.0.0-228-gb230183", + "Rev": "b230183b0fe8b8ed3c9ae2898c47c8c8618dc80f" + }, { "ImportPath": "github.com/docker/libtrust", "Rev": "fa567046d9b14f6aa788882a950d69651d230b21" @@ -27,8 +46,20 @@ "Rev": "61b53384b24bfa83e8e0a5f11f28ae83457fd80c" }, { - "ImportPath": "github.com/endophage/go-tuf", - "Rev": "73f8528774f93cfe707851ab2ff7503a8756ff4b" + "ImportPath": "github.com/endophage/gotuf/data", + "Rev": "930a4e1cc71f866a412aea60c960ee4345f0c76a" + }, + { + "ImportPath": "github.com/endophage/gotuf/errors", + "Rev": "930a4e1cc71f866a412aea60c960ee4345f0c76a" + }, + { + "ImportPath": "github.com/endophage/gotuf/keys", + "Rev": "930a4e1cc71f866a412aea60c960ee4345f0c76a" + }, + { + "ImportPath": "github.com/endophage/gotuf/signed", + "Rev": "930a4e1cc71f866a412aea60c960ee4345f0c76a" }, { "ImportPath": "github.com/golang/protobuf/proto", @@ -42,34 +73,10 @@ "ImportPath": "github.com/gorilla/mux", "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" }, - { - "ImportPath": "github.com/Sirupsen/logrus", - "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" - }, { "ImportPath": "github.com/tent/canonical-json-go", "Rev": "96e4ba3a7613a1216cbd1badca4efe382adea337" }, - { - "ImportPath": "golang.org/x/crypto/nacl/secretbox", - "Rev": 
"bfc286917c5fcb7420d7e3092b50bbfd31b38a98" - }, - { - "ImportPath": "golang.org/x/crypto/pbkdf2", - "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" - }, - { - "ImportPath": "golang.org/x/crypto/poly1305", - "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" - }, - { - "ImportPath": "golang.org/x/crypto/salsa20/salsa", - "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" - }, - { - "ImportPath": "golang.org/x/crypto/scrypt", - "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" - }, { "ImportPath": "golang.org/x/net/context", "Rev": "1dfe7915deaf3f80b962c163b918868d8a6d8974" diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/LICENSE b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE similarity index 88% rename from Godeps/_workspace/src/github.com/endophage/go-tuf/LICENSE rename to Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE index 3671e7cb52..5dc68268d9 100644 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/LICENSE +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE @@ -1,6 +1,4 @@ -Flynn is a trademark of Prime Directive, Inc. - -Copyright (c) 2014-2015 Prime Directive, Inc. All rights reserved. +Copyright (c) 2009,2014 Google Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -12,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Prime Directive, Inc. nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go new file mode 100644 index 0000000000..50a0f2d099 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go @@ -0,0 +1,84 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) UUID { + uuid := NewUUID() + if uuid != nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCEPerson(Person, uint32(os.Getuid())) +func NewDCEPerson() UUID { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCEGroup(Group, uint32(os.Getgid())) +func NewDCEGroup() UUID { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID or false. 
+func (uuid UUID) Domain() (Domain, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return Domain(uuid[9]), true +} + +// Id returns the id for a Version 2 UUID or false. +func (uuid UUID) Id() (uint32, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return binary.BigEndian.Uint32(uuid[0:4]), true +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go new file mode 100644 index 0000000000..d8bd013e68 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go @@ -0,0 +1,8 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The uuid package generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services. +package uuid diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go new file mode 100644 index 0000000000..cdd4192fd9 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known Name Space IDs and UUIDs +var ( + NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") + NIL = Parse("00000000-0000-0000-0000-000000000000") +) + +// NewHash returns a new UUID dervied from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space) + h.Write([]byte(data)) + s := h.Sum(nil) + uuid := make([]byte, 16) + copy(uuid, s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json.go new file mode 100644 index 0000000000..760580a504 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json.go @@ -0,0 +1,30 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import "errors" + +func (u UUID) MarshalJSON() ([]byte, error) { + if len(u) == 0 { + return []byte(`""`), nil + } + return []byte(`"` + u.String() + `"`), nil +} + +func (u *UUID) UnmarshalJSON(data []byte) error { + if len(data) == 0 || string(data) == `""` { + return nil + } + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return errors.New("invalid UUID format") + } + data = data[1 : len(data)-1] + uu := Parse(string(data)) + if uu == nil { + return errors.New("invalid UUID format") + } + *u = uu + return nil +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json_test.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json_test.go new file mode 100644 index 0000000000..b5eae09247 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/json_test.go @@ -0,0 +1,32 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/json" + "reflect" + "testing" +) + +var testUUID = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + +func TestJSON(t *testing.T) { + type S struct { + ID1 UUID + ID2 UUID + } + s1 := S{ID1: testUUID} + data, err := json.Marshal(&s1) + if err != nil { + t.Fatal(err) + } + var s2 S + if err := json.Unmarshal(data, &s2); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(&s1, &s2) { + t.Errorf("got %#v, want %#v", s2, s1) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go new file mode 100644 index 0000000000..dd0a8ac189 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go @@ -0,0 +1,101 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import "net" + +var ( + interfaces []net.Interface // cached list of interfaces + ifname string // name of interface being used + nodeID []byte // hardware for version 1 UUIDs +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil && name != "" { + return false + } + } + + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + if setNodeID(ifs.HardwareAddr) { + ifname = ifs.Name + return true + } + } + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + if nodeID == nil { + nodeID = make([]byte, 6) + } + randomBits(nodeID) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + if nodeID == nil { + SetNodeInterface("") + } + nid := make([]byte, 6) + copy(nid, nodeID) + return nid +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. 
+func SetNodeID(id []byte) bool { + if setNodeID(id) { + ifname = "user" + return true + } + return false +} + +func setNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + if nodeID == nil { + nodeID = make([]byte, 6) + } + copy(nodeID, id) + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + if len(uuid) != 16 { + return nil + } + node := make([]byte, 6) + copy(node, uuid[10:]) + return node +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/seq_test.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/seq_test.go new file mode 100644 index 0000000000..3b3d1430d5 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/seq_test.go @@ -0,0 +1,66 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "flag" + "runtime" + "testing" + "time" +) + +// This test is only run when --regressions is passed on the go test line. +var regressions = flag.Bool("regressions", false, "run uuid regression tests") + +// TestClockSeqRace tests for a particular race condition of returning two +// identical Version1 UUIDs. The duration of 1 minute was chosen as the race +// condition, before being fixed, nearly always occured in under 30 seconds. +func TestClockSeqRace(t *testing.T) { + if !*regressions { + t.Skip("skipping regression tests") + } + duration := time.Minute + + done := make(chan struct{}) + defer close(done) + + ch := make(chan UUID, 10000) + ncpu := runtime.NumCPU() + switch ncpu { + case 0, 1: + // We can't run the test effectively. 
+ t.Skip("skipping race test, only one CPU detected") + return + default: + runtime.GOMAXPROCS(ncpu) + } + for i := 0; i < ncpu; i++ { + go func() { + for { + select { + case <-done: + return + case ch <- NewUUID(): + } + } + }() + } + + uuids := make(map[string]bool) + cnt := 0 + start := time.Now() + for u := range ch { + s := u.String() + if uuids[s] { + t.Errorf("duplicate uuid after %d in %v: %s", cnt, time.Since(start), s) + return + } + uuids[s] = true + if time.Since(start) > duration { + return + } + cnt++ + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go new file mode 100644 index 0000000000..7ebc9bef10 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go @@ -0,0 +1,132 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + mu sync.Mutex + lasttime uint64 // last time we returned + clock_seq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. 
An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer mu.Unlock() + mu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clock_seq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clock_seq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence a new random +// clock sequence is generated the first time a clock sequence is requested by +// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated +// for +func ClockSequence() int { + defer mu.Unlock() + mu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clock_seq == 0 { + setClockSequence(-1) + } + return int(clock_seq & 0x3fff) +} + +// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer mu.Unlock() + mu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + old_seq := clock_seq + clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if old_seq != clock_seq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. It returns false if uuid is not valid. The time is only well defined +// for version 1 and 2 UUIDs. 
+func (uuid UUID) Time() (Time, bool) { + if len(uuid) != 16 { + return 0, false + } + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time), true +} + +// ClockSequence returns the clock sequence encoded in uuid. It returns false +// if uuid is not valid. The clock sequence is only well defined for version 1 +// and 2 UUIDs. +func (uuid UUID) ClockSequence() (int, bool) { + if len(uuid) != 16 { + return 0, false + } + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go new file mode 100644 index 0000000000..de40b102c4 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. 
+var xvalues = []byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts the the first two hex bytes of x into a byte. +func xtob(x string) (byte, bool) { + b1 := xvalues[x[0]] + b2 := xvalues[x[1]] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go new file mode 100644 index 0000000000..2920fae632 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go @@ -0,0 +1,163 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID []byte + +// A Version represents a UUIDs version. +type Version byte + +// A Variant represents a UUIDs variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// New returns a new random (version 4) UUID as a string. It is a convenience +// function for NewRandom().String(). +func New() string { + return NewRandom().String() +} + +// Parse decodes s into a UUID or returns nil. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. +func Parse(s string) UUID { + if len(s) == 36+9 { + if strings.ToLower(s[:9]) != "urn:uuid:" { + return nil + } + s = s[9:] + } else if len(s) != 36 { + return nil + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return nil + } + uuid := make([]byte, 16) + for i, x := range []int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + if v, ok := xtob(s[x:]); !ok { + return nil + } else { + uuid[i] = v + } + } + return uuid +} + +// Equal returns true if uuid1 and uuid2 are equal. +func Equal(uuid1, uuid2 UUID) bool { + return bytes.Equal(uuid1, uuid2) +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. 
+func (uuid UUID) String() string { + if uuid == nil || len(uuid) != 16 { + return "" + } + b := []byte(uuid) + return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", + b[:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + if uuid == nil || len(uuid) != 16 { + return "" + } + b := []byte(uuid) + return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x", + b[:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +// Variant returns the variant encoded in uuid. It returns Invalid if +// uuid is invalid. +func (uuid UUID) Variant() Variant { + if len(uuid) != 16 { + return Invalid + } + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } + panic("unreachable") +} + +// Version returns the verison of uuid. It returns false if uuid is not +// valid. +func (uuid UUID) Version() (Version, bool) { + if len(uuid) != 16 { + return 0, false + } + return Version(uuid[6] >> 4), true +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implents io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. 
+func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go new file mode 100644 index 0000000000..417ebeb26a --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go @@ -0,0 +1,390 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "fmt" + "os" + "strings" + "testing" + "time" +) + +type test struct { + in string + version Version + variant Variant + isuuid bool +} + +var tests = []test{ + {"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true}, + {"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true}, + {"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true}, + {"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true}, + {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true}, + {"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true}, + {"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true}, + {"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true}, + {"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true}, + {"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true}, + {"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true}, + {"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true}, + {"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true}, + {"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true}, + {"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true}, + + {"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, + {"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, + 
{"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true}, + {"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true}, + {"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true}, + {"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true}, + + {"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false}, + {"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false}, +} + +var constants = []struct { + c interface{} + name string +}{ + {Person, "Person"}, + {Group, "Group"}, + {Org, "Org"}, + {Invalid, "Invalid"}, + {RFC4122, "RFC4122"}, + {Reserved, "Reserved"}, + {Microsoft, "Microsoft"}, + {Future, "Future"}, + {Domain(17), "Domain17"}, + {Variant(42), "BadVariant42"}, +} + +func testTest(t *testing.T, in string, tt test) { + uuid := Parse(in) + if ok := (uuid != nil); ok != tt.isuuid { + t.Errorf("Parse(%s) got %v expected %v\b", in, ok, tt.isuuid) + } + if uuid == nil { + return + } + + if v := uuid.Variant(); v != tt.variant { + t.Errorf("Variant(%s) got %d expected %d\b", in, v, tt.variant) + } + if v, _ := uuid.Version(); v != 
tt.version { + t.Errorf("Version(%s) got %d expected %d\b", in, v, tt.version) + } +} + +func TestUUID(t *testing.T) { + for _, tt := range tests { + testTest(t, tt.in, tt) + testTest(t, strings.ToUpper(tt.in), tt) + } +} + +func TestConstants(t *testing.T) { + for x, tt := range constants { + v, ok := tt.c.(fmt.Stringer) + if !ok { + t.Errorf("%x: %v: not a stringer", x, v) + } else if s := v.String(); s != tt.name { + v, _ := tt.c.(int) + t.Errorf("%x: Constant %T:%d gives %q, expected %q\n", x, tt.c, v, s, tt.name) + } + } +} + +func TestRandomUUID(t *testing.T) { + m := make(map[string]bool) + for x := 1; x < 32; x++ { + uuid := NewRandom() + s := uuid.String() + if m[s] { + t.Errorf("NewRandom returned duplicated UUID %s\n", s) + } + m[s] = true + if v, _ := uuid.Version(); v != 4 { + t.Errorf("Random UUID of version %s\n", v) + } + if uuid.Variant() != RFC4122 { + t.Errorf("Random UUID is variant %d\n", uuid.Variant()) + } + } +} + +func TestNew(t *testing.T) { + m := make(map[string]bool) + for x := 1; x < 32; x++ { + s := New() + if m[s] { + t.Errorf("New returned duplicated UUID %s\n", s) + } + m[s] = true + uuid := Parse(s) + if uuid == nil { + t.Errorf("New returned %q which does not decode\n", s) + continue + } + if v, _ := uuid.Version(); v != 4 { + t.Errorf("Random UUID of version %s\n", v) + } + if uuid.Variant() != RFC4122 { + t.Errorf("Random UUID is variant %d\n", uuid.Variant()) + } + } +} + +func clockSeq(t *testing.T, uuid UUID) int { + seq, ok := uuid.ClockSequence() + if !ok { + t.Fatalf("%s: invalid clock sequence\n", uuid) + } + return seq +} + +func TestClockSeq(t *testing.T) { + // Fake time.Now for this test to return a monotonically advancing time; restore it at end. 
+ defer func(orig func() time.Time) { timeNow = orig }(timeNow) + monTime := time.Now() + timeNow = func() time.Time { + monTime = monTime.Add(1 * time.Second) + return monTime + } + + SetClockSequence(-1) + uuid1 := NewUUID() + uuid2 := NewUUID() + + if clockSeq(t, uuid1) != clockSeq(t, uuid2) { + t.Errorf("clock sequence %d != %d\n", clockSeq(t, uuid1), clockSeq(t, uuid2)) + } + + SetClockSequence(-1) + uuid2 = NewUUID() + + // Just on the very off chance we generated the same sequence + // two times we try again. + if clockSeq(t, uuid1) == clockSeq(t, uuid2) { + SetClockSequence(-1) + uuid2 = NewUUID() + } + if clockSeq(t, uuid1) == clockSeq(t, uuid2) { + t.Errorf("Duplicate clock sequence %d\n", clockSeq(t, uuid1)) + } + + SetClockSequence(0x1234) + uuid1 = NewUUID() + if seq := clockSeq(t, uuid1); seq != 0x1234 { + t.Errorf("%s: expected seq 0x1234 got 0x%04x\n", uuid1, seq) + } +} + +func TestCoding(t *testing.T) { + text := "7d444840-9dc0-11d1-b245-5ffdce74fad2" + urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2" + data := UUID{ + 0x7d, 0x44, 0x48, 0x40, + 0x9d, 0xc0, + 0x11, 0xd1, + 0xb2, 0x45, + 0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2, + } + if v := data.String(); v != text { + t.Errorf("%x: encoded to %s, expected %s\n", data, v, text) + } + if v := data.URN(); v != urn { + t.Errorf("%x: urn is %s, expected %s\n", data, v, urn) + } + + uuid := Parse(text) + if !Equal(uuid, data) { + t.Errorf("%s: decoded to %s, expected %s\n", text, uuid, data) + } +} + +func TestVersion1(t *testing.T) { + uuid1 := NewUUID() + uuid2 := NewUUID() + + if Equal(uuid1, uuid2) { + t.Errorf("%s:duplicate uuid\n", uuid1) + } + if v, _ := uuid1.Version(); v != 1 { + t.Errorf("%s: version %s expected 1\n", uuid1, v) + } + if v, _ := uuid2.Version(); v != 1 { + t.Errorf("%s: version %s expected 1\n", uuid2, v) + } + n1 := uuid1.NodeID() + n2 := uuid2.NodeID() + if !bytes.Equal(n1, n2) { + t.Errorf("Different nodes %x != %x\n", n1, n2) + } + t1, ok := uuid1.Time() + if !ok { + 
t.Errorf("%s: invalid time\n", uuid1) + } + t2, ok := uuid2.Time() + if !ok { + t.Errorf("%s: invalid time\n", uuid2) + } + q1, ok := uuid1.ClockSequence() + if !ok { + t.Errorf("%s: invalid clock sequence\n", uuid1) + } + q2, ok := uuid2.ClockSequence() + if !ok { + t.Errorf("%s: invalid clock sequence", uuid2) + } + + switch { + case t1 == t2 && q1 == q2: + t.Errorf("time stopped\n") + case t1 > t2 && q1 == q2: + t.Errorf("time reversed\n") + case t1 < t2 && q1 != q2: + t.Errorf("clock sequence chaned unexpectedly\n") + } +} + +func TestNodeAndTime(t *testing.T) { + // Time is February 5, 1998 12:30:23.136364800 AM GMT + + uuid := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2") + node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2} + + ts, ok := uuid.Time() + if ok { + c := time.Unix(ts.UnixTime()) + want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC) + if !c.Equal(want) { + t.Errorf("Got time %v, want %v", c, want) + } + } else { + t.Errorf("%s: bad time\n", uuid) + } + if !bytes.Equal(node, uuid.NodeID()) { + t.Errorf("Expected node %v got %v\n", node, uuid.NodeID()) + } +} + +func TestMD5(t *testing.T) { + uuid := NewMD5(NameSpace_DNS, []byte("python.org")).String() + want := "6fa459ea-ee8a-3ca4-894e-db77e160355e" + if uuid != want { + t.Errorf("MD5: got %q expected %q\n", uuid, want) + } +} + +func TestSHA1(t *testing.T) { + uuid := NewSHA1(NameSpace_DNS, []byte("python.org")).String() + want := "886313e1-3b8a-5372-9b90-0c9aee199e5d" + if uuid != want { + t.Errorf("SHA1: got %q expected %q\n", uuid, want) + } +} + +func TestNodeID(t *testing.T) { + nid := []byte{1, 2, 3, 4, 5, 6} + SetNodeInterface("") + s := NodeInterface() + if s == "" || s == "user" { + t.Errorf("NodeInterface %q after SetInteface\n", s) + } + node1 := NodeID() + if node1 == nil { + t.Errorf("NodeID nil after SetNodeInterface\n", s) + } + SetNodeID(nid) + s = NodeInterface() + if s != "user" { + t.Errorf("Expected NodeInterface %q got %q\n", "user", s) + } + node2 := NodeID() + if 
node2 == nil { + t.Errorf("NodeID nil after SetNodeID\n", s) + } + if bytes.Equal(node1, node2) { + t.Errorf("NodeID not changed after SetNodeID\n", s) + } else if !bytes.Equal(nid, node2) { + t.Errorf("NodeID is %x, expected %x\n", node2, nid) + } +} + +func testDCE(t *testing.T, name string, uuid UUID, domain Domain, id uint32) { + if uuid == nil { + t.Errorf("%s failed\n", name) + return + } + if v, _ := uuid.Version(); v != 2 { + t.Errorf("%s: %s: expected version 2, got %s\n", name, uuid, v) + return + } + if v, ok := uuid.Domain(); !ok || v != domain { + if !ok { + t.Errorf("%s: %d: Domain failed\n", name, uuid) + } else { + t.Errorf("%s: %s: expected domain %d, got %d\n", name, uuid, domain, v) + } + } + if v, ok := uuid.Id(); !ok || v != id { + if !ok { + t.Errorf("%s: %d: Id failed\n", name, uuid) + } else { + t.Errorf("%s: %s: expected id %d, got %d\n", name, uuid, id, v) + } + } +} + +func TestDCE(t *testing.T) { + testDCE(t, "NewDCESecurity", NewDCESecurity(42, 12345678), 42, 12345678) + testDCE(t, "NewDCEPerson", NewDCEPerson(), Person, uint32(os.Getuid())) + testDCE(t, "NewDCEGroup", NewDCEGroup(), Group, uint32(os.Getgid())) +} + +type badRand struct{} + +func (r badRand) Read(buf []byte) (int, error) { + for i, _ := range buf { + buf[i] = byte(i) + } + return len(buf), nil +} + +func TestBadRand(t *testing.T) { + SetRand(badRand{}) + uuid1 := New() + uuid2 := New() + if uuid1 != uuid2 { + t.Errorf("execpted duplicates, got %q and %q\n", uuid1, uuid2) + } + SetRand(nil) + uuid1 = New() + uuid2 = New() + if uuid1 == uuid2 { + t.Errorf("unexecpted duplicates, got %q\n", uuid1) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go new file mode 100644 index 0000000000..0127eacfab --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go @@ -0,0 +1,41 @@ +// Copyright 2011 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil. +func NewUUID() UUID { + if nodeID == nil { + SetNodeInterface("") + } + + now, seq, err := GetTime() + if err != nil { + return nil + } + + uuid := make([]byte, 16) + + time_low := uint32(now & 0xffffffff) + time_mid := uint16((now >> 32) & 0xffff) + time_hi := uint16((now >> 48) & 0x0fff) + time_hi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], time_low) + binary.BigEndian.PutUint16(uuid[4:], time_mid) + binary.BigEndian.PutUint16(uuid[6:], time_hi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID) + + return uuid +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go new file mode 100644 index 0000000000..b3d4a368dd --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go @@ -0,0 +1,25 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +// Random returns a Random (Version 4) UUID or panics. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. 
One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() UUID { + uuid := make([]byte, 16) + randomBits([]byte(uuid)) + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid +} diff --git a/Godeps/_workspace/src/code.google.com/p/gosqlite/sqlite3/driver.go b/Godeps/_workspace/src/code.google.com/p/gosqlite/sqlite3/driver.go deleted file mode 100644 index 982e08ec04..0000000000 --- a/Godeps/_workspace/src/code.google.com/p/gosqlite/sqlite3/driver.go +++ /dev/null @@ -1,498 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package sqlite3 provides access to the SQLite library, version 3. -// -// The package has no exported API. -// It registers a driver for the standard Go database/sql package. -// -// import _ "code.google.com/p/gosqlite/sqlite3" -// -// (For an alternate, earlier API, see the code.google.com/p/gosqlite/sqlite package.) -package sqlite - -/* -#cgo LDFLAGS: -lsqlite3 - -#include -#include - -// These wrappers are necessary because SQLITE_TRANSIENT -// is a pointer constant, and cgo doesn't translate them correctly. 
-// The definition in sqlite3.h is: -// -// typedef void (*sqlite3_destructor_type)(void*); -// #define SQLITE_STATIC ((sqlite3_destructor_type)0) -// #define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1) - -static int my_bind_text(sqlite3_stmt *stmt, int n, char *p, int np) { - return sqlite3_bind_text(stmt, n, p, np, SQLITE_TRANSIENT); -} -static int my_bind_blob(sqlite3_stmt *stmt, int n, void *p, int np) { - return sqlite3_bind_blob(stmt, n, p, np, SQLITE_TRANSIENT); -} - -*/ -import "C" - -import ( - "database/sql" - "database/sql/driver" - "errors" - "fmt" - "io" - "strings" - "time" - "unsafe" -) - -func init() { - sql.Register("sqlite3", impl{}) -} - -type errno int - -func (e errno) Error() string { - s := errText[e] - if s == "" { - return fmt.Sprintf("errno %d", int(e)) - } - return s -} - -var ( - errError error = errno(1) // /* SQL error or missing database */ - errInternal error = errno(2) // /* Internal logic error in SQLite */ - errPerm error = errno(3) // /* Access permission denied */ - errAbort error = errno(4) // /* Callback routine requested an abort */ - errBusy error = errno(5) // /* The database file is locked */ - errLocked error = errno(6) // /* A table in the database is locked */ - errNoMem error = errno(7) // /* A malloc() failed */ - errReadOnly error = errno(8) // /* Attempt to write a readonly database */ - errInterrupt error = errno(9) // /* Operation terminated by sqlite3_interrupt()*/ - errIOErr error = errno(10) // /* Some kind of disk I/O error occurred */ - errCorrupt error = errno(11) // /* The database disk image is malformed */ - errFull error = errno(13) // /* Insertion failed because database is full */ - errCantOpen error = errno(14) // /* Unable to open the database file */ - errEmpty error = errno(16) // /* Database is empty */ - errSchema error = errno(17) // /* The database schema changed */ - errTooBig error = errno(18) // /* String or BLOB exceeds size limit */ - errConstraint error = errno(19) // /* Abort due to 
constraint violation */ - errMismatch error = errno(20) // /* Data type mismatch */ - errMisuse error = errno(21) // /* Library used incorrectly */ - errNolfs error = errno(22) // /* Uses OS features not supported on host */ - errAuth error = errno(23) // /* Authorization denied */ - errFormat error = errno(24) // /* Auxiliary database format error */ - errRange error = errno(25) // /* 2nd parameter to sqlite3_bind out of range */ - errNotDB error = errno(26) // /* File opened that is not a database file */ - stepRow = errno(100) // /* sqlite3_step() has another row ready */ - stepDone = errno(101) // /* sqlite3_step() has finished executing */ -) - -var errText = map[errno]string{ - 1: "SQL error or missing database", - 2: "Internal logic error in SQLite", - 3: "Access permission denied", - 4: "Callback routine requested an abort", - 5: "The database file is locked", - 6: "A table in the database is locked", - 7: "A malloc() failed", - 8: "Attempt to write a readonly database", - 9: "Operation terminated by sqlite3_interrupt()*/", - 10: "Some kind of disk I/O error occurred", - 11: "The database disk image is malformed", - 12: "NOT USED. Table or record not found", - 13: "Insertion failed because database is full", - 14: "Unable to open the database file", - 15: "NOT USED. 
Database lock protocol error", - 16: "Database is empty", - 17: "The database schema changed", - 18: "String or BLOB exceeds size limit", - 19: "Abort due to constraint violation", - 20: "Data type mismatch", - 21: "Library used incorrectly", - 22: "Uses OS features not supported on host", - 23: "Authorization denied", - 24: "Auxiliary database format error", - 25: "2nd parameter to sqlite3_bind out of range", - 26: "File opened that is not a database file", - 100: "sqlite3_step() has another row ready", - 101: "sqlite3_step() has finished executing", -} - -type impl struct{} - -func (impl) Open(name string) (driver.Conn, error) { - if C.sqlite3_threadsafe() == 0 { - return nil, errors.New("sqlite library was not compiled for thread-safe operation") - } - - var db *C.sqlite3 - cname := C.CString(name) - defer C.free(unsafe.Pointer(cname)) - rv := C.sqlite3_open_v2(cname, &db, - C.SQLITE_OPEN_FULLMUTEX| - C.SQLITE_OPEN_READWRITE| - C.SQLITE_OPEN_CREATE, - nil) - if rv != 0 { - return nil, errno(rv) - } - if db == nil { - return nil, errors.New("sqlite succeeded without returning a database") - } - return &conn{db: db}, nil -} - -type conn struct { - db *C.sqlite3 - closed bool - tx bool -} - -func (c *conn) error(rv C.int) error { - if rv == 0 { - return nil - } - if rv == 21 || c.closed { - return errno(rv) - } - return errors.New(errno(rv).Error() + ": " + C.GoString(C.sqlite3_errmsg(c.db))) -} - -func (c *conn) Prepare(cmd string) (driver.Stmt, error) { - if c.closed { - panic("database/sql/driver: misuse of sqlite driver: Prepare after Close") - } - cmdstr := C.CString(cmd) - defer C.free(unsafe.Pointer(cmdstr)) - var s *C.sqlite3_stmt - var tail *C.char - rv := C.sqlite3_prepare_v2(c.db, cmdstr, C.int(len(cmd)+1), &s, &tail) - if rv != 0 { - return nil, c.error(rv) - } - return &stmt{c: c, stmt: s, sql: cmd, t0: time.Now()}, nil -} - -func (c *conn) Close() error { - if c.closed { - panic("database/sql/driver: misuse of sqlite driver: multiple Close") - } - 
c.closed = true - rv := C.sqlite3_close(c.db) - c.db = nil - return c.error(rv) -} - -func (c *conn) exec(cmd string) error { - cstring := C.CString(cmd) - defer C.free(unsafe.Pointer(cstring)) - rv := C.sqlite3_exec(c.db, cstring, nil, nil, nil) - return c.error(rv) -} - -func (c *conn) Begin() (driver.Tx, error) { - if c.tx { - panic("database/sql/driver: misuse of sqlite driver: multiple Tx") - } - if err := c.exec("BEGIN TRANSACTION"); err != nil { - return nil, err - } - c.tx = true - return &tx{c}, nil -} - -type tx struct { - c *conn -} - -func (t *tx) Commit() error { - if t.c == nil || !t.c.tx { - panic("database/sql/driver: misuse of sqlite driver: extra Commit") - } - t.c.tx = false - err := t.c.exec("COMMIT TRANSACTION") - t.c = nil - return err -} - -func (t *tx) Rollback() error { - if t.c == nil || !t.c.tx { - panic("database/sql/driver: misuse of sqlite driver: extra Rollback") - } - t.c.tx = false - err := t.c.exec("ROLLBACK") - t.c = nil - return err -} - -type stmt struct { - c *conn - stmt *C.sqlite3_stmt - err error - t0 time.Time - sql string - args string - closed bool - rows bool - colnames []string - coltypes []string -} - -func (s *stmt) Close() error { - if s.rows { - panic("database/sql/driver: misuse of sqlite driver: Close with active Rows") - } - if s.closed { - panic("database/sql/driver: misuse of sqlite driver: double Close of Stmt") - } - s.closed = true - rv := C.sqlite3_finalize(s.stmt) - if rv != 0 { - return s.c.error(rv) - } - return nil -} - -func (s *stmt) NumInput() int { - if s.closed { - panic("database/sql/driver: misuse of sqlite driver: NumInput after Close") - } - return int(C.sqlite3_bind_parameter_count(s.stmt)) -} - -func (s *stmt) reset() error { - return s.c.error(C.sqlite3_reset(s.stmt)) -} - -func (s *stmt) start(args []driver.Value) error { - if err := s.reset(); err != nil { - return err - } - - n := int(C.sqlite3_bind_parameter_count(s.stmt)) - if n != len(args) { - return fmt.Errorf("incorrect argument 
count for command: have %d want %d", len(args), n) - } - - for i, v := range args { - var str string - switch v := v.(type) { - case nil: - if rv := C.sqlite3_bind_null(s.stmt, C.int(i+1)); rv != 0 { - return s.c.error(rv) - } - continue - - case float64: - if rv := C.sqlite3_bind_double(s.stmt, C.int(i+1), C.double(v)); rv != 0 { - return s.c.error(rv) - } - continue - - case int64: - if rv := C.sqlite3_bind_int64(s.stmt, C.int(i+1), C.sqlite3_int64(v)); rv != 0 { - return s.c.error(rv) - } - continue - - case []byte: - var p *byte - if len(v) > 0 { - p = &v[0] - } - if rv := C.my_bind_blob(s.stmt, C.int(i+1), unsafe.Pointer(p), C.int(len(v))); rv != 0 { - return s.c.error(rv) - } - continue - - case bool: - var vi int64 - if v { - vi = 1 - } - if rv := C.sqlite3_bind_int64(s.stmt, C.int(i+1), C.sqlite3_int64(vi)); rv != 0 { - return s.c.error(rv) - } - continue - - case time.Time: - str = v.UTC().Format(timefmt[0]) - - case string: - str = v - - default: - str = fmt.Sprint(v) - } - - cstr := C.CString(str) - rv := C.my_bind_text(s.stmt, C.int(i+1), cstr, C.int(len(str))) - C.free(unsafe.Pointer(cstr)) - if rv != 0 { - return s.c.error(rv) - } - } - - return nil -} - -func (s *stmt) Exec(args []driver.Value) (driver.Result, error) { - if s.closed { - panic("database/sql/driver: misuse of sqlite driver: Exec after Close") - } - if s.rows { - panic("database/sql/driver: misuse of sqlite driver: Exec with active Rows") - } - - err := s.start(args) - if err != nil { - return nil, err - } - - rv := C.sqlite3_step(s.stmt) - if errno(rv) != stepDone { - if rv == 0 { - rv = 21 // errMisuse - } - return nil, s.c.error(rv) - } - - id := int64(C.sqlite3_last_insert_rowid(s.c.db)) - rows := int64(C.sqlite3_changes(s.c.db)) - return &result{id, rows}, nil -} - -func (s *stmt) Query(args []driver.Value) (driver.Rows, error) { - if s.closed { - panic("database/sql/driver: misuse of sqlite driver: Query after Close") - } - if s.rows { - panic("database/sql/driver: misuse of 
sqlite driver: Query with active Rows") - } - - err := s.start(args) - if err != nil { - return nil, err - } - - s.rows = true - if s.colnames == nil { - n := int64(C.sqlite3_column_count(s.stmt)) - s.colnames = make([]string, n) - s.coltypes = make([]string, n) - for i := range s.colnames { - s.colnames[i] = C.GoString(C.sqlite3_column_name(s.stmt, C.int(i))) - s.coltypes[i] = strings.ToLower(C.GoString(C.sqlite3_column_decltype(s.stmt, C.int(i)))) - } - } - return &rows{s}, nil -} - -type rows struct { - s *stmt -} - -func (r *rows) Columns() []string { - if r.s == nil { - panic("database/sql/driver: misuse of sqlite driver: Columns of closed Rows") - } - return r.s.colnames -} - -const maxslice = 1<<31 - 1 - -var timefmt = []string{ - "2006-01-02 15:04:05.999999999", - "2006-01-02T15:04:05.999999999", - "2006-01-02 15:04:05", - "2006-01-02T15:04:05", - "2006-01-02 15:04", - "2006-01-02T15:04", - "2006-01-02", -} - -func (r *rows) Next(dst []driver.Value) error { - if r.s == nil { - panic("database/sql/driver: misuse of sqlite driver: Next of closed Rows") - } - - rv := C.sqlite3_step(r.s.stmt) - if errno(rv) != stepRow { - if errno(rv) == stepDone { - return io.EOF - } - if rv == 0 { - rv = 21 - } - return r.s.c.error(rv) - } - - for i := range dst { - switch typ := C.sqlite3_column_type(r.s.stmt, C.int(i)); typ { - default: - return fmt.Errorf("unexpected sqlite3 column type %d", typ) - case C.SQLITE_INTEGER: - val := int64(C.sqlite3_column_int64(r.s.stmt, C.int(i))) - switch r.s.coltypes[i] { - case "timestamp", "datetime": - dst[i] = time.Unix(val, 0).UTC() - case "boolean": - dst[i] = val > 0 - default: - dst[i] = val - } - - case C.SQLITE_FLOAT: - dst[i] = float64(C.sqlite3_column_double(r.s.stmt, C.int(i))) - - case C.SQLITE_BLOB, C.SQLITE_TEXT: - n := int(C.sqlite3_column_bytes(r.s.stmt, C.int(i))) - var b []byte - if n > 0 { - p := C.sqlite3_column_blob(r.s.stmt, C.int(i)) - b = (*[maxslice]byte)(unsafe.Pointer(p))[:n] - } - dst[i] = b - switch 
r.s.coltypes[i] { - case "timestamp", "datetime": - dst[i] = time.Time{} - s := string(b) - for _, f := range timefmt { - if t, err := time.Parse(f, s); err == nil { - dst[i] = t - break - } - } - } - - case C.SQLITE_NULL: - dst[i] = nil - } - } - return nil -} - -func (r *rows) Close() error { - if r.s == nil { - panic("database/sql/driver: misuse of sqlite driver: Close of closed Rows") - } - r.s.rows = false - r.s = nil - return nil -} - -type result struct { - id int64 - rows int64 -} - -func (r *result) LastInsertId() (int64, error) { - return r.id, nil -} - -func (r *result) RowsAffected() (int64, error) { - return r.rows, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/context.go b/Godeps/_workspace/src/github.com/docker/distribution/context/context.go new file mode 100644 index 0000000000..7a3a70e00d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/context.go @@ -0,0 +1,76 @@ +package context + +import ( + "github.com/docker/distribution/uuid" + "golang.org/x/net/context" +) + +// Context is a copy of Context from the golang.org/x/net/context package. +type Context interface { + context.Context +} + +// instanceContext is a context that provides only an instance id. It is +// provided as the main background context. +type instanceContext struct { + Context + id string // id of context, logged as "instance.id" +} + +func (ic *instanceContext) Value(key interface{}) interface{} { + if key == "instance.id" { + return ic.id + } + + return ic.Context.Value(key) +} + +var background = &instanceContext{ + Context: context.Background(), + id: uuid.Generate().String(), +} + +// Background returns a non-nil, empty Context. The background context +// provides a single key, "instance.id" that is globally unique to the +// process. +func Background() Context { + return background +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. 
Use context Values only for request-scoped data that transits processes +// and APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} + +// stringMapContext is a simple context implementation that checks a map for a +// key, falling back to a parent if not present. +type stringMapContext struct { + context.Context + m map[string]interface{} +} + +// WithValues returns a context that proxies lookups through a map. Only +// supports string keys. +func WithValues(ctx context.Context, m map[string]interface{}) context.Context { + mo := make(map[string]interface{}, len(m)) // make our own copy. + for k, v := range m { + mo[k] = v + } + + return stringMapContext{ + Context: ctx, + m: mo, + } +} + +func (smc stringMapContext) Value(key interface{}) interface{} { + if ks, ok := key.(string); ok { + if v, ok := smc.m[ks]; ok { + return v + } + } + + return smc.Context.Value(key) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go new file mode 100644 index 0000000000..a63989e54d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go @@ -0,0 +1,76 @@ +// Package context provides several utilities for working with +// golang.org/x/net/context in http requests. Primarily, the focus is on +// logging relevent request information but this package is not limited to +// that purpose. +// +// Logging +// +// The most useful aspect of this package is GetLogger. This function takes +// any context.Context interface and returns the current logger from the +// context. Canonical usage looks like this: +// +// GetLogger(ctx).Infof("something interesting happened") +// +// GetLogger also takes optional key arguments. The keys will be looked up in +// the context and reported with the logger. 
The following example would +// return a logger that prints the version with each log message: +// +// ctx := context.Context(context.Background(), "version", version) +// GetLogger(ctx, "version").Infof("this log message has a version field") +// +// The above would print out a log message like this: +// +// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m +// +// When used with WithLogger, we gain the ability to decorate the context with +// loggers that have information from disparate parts of the call stack. +// Following from the version example, we can build a new context with the +// configured logger such that we always print the version field: +// +// ctx = WithLogger(ctx, GetLogger(ctx, "version")) +// +// Since the logger has been pushed to the context, we can now get the version +// field for free with our log messages. Future calls to GetLogger on the new +// context will have the version field: +// +// GetLogger(ctx).Infof("this log message has a version field") +// +// This becomes more powerful when we start stacking loggers. Let's say we +// have the version logger from above but also want a request id. Using the +// context above, in our request scoped function, we place another logger in +// the context: +// +// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context +// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id")) +// +// When GetLogger is called on the new context, "http.request.id" will be +// included as a logger field, along with the original "version" field: +// +// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m +// +// Note that this only affects the new context, the previous context, with the +// version field, can be used independently. Put another way, the new logger, +// added to the request context, is unique to that context and can have +// request scoped varaibles. 
+// +// HTTP Requests +// +// This package also contains several methods for working with http requests. +// The concepts are very similar to those described above. We simply place the +// request in the context using WithRequest. This makes the request variables +// available. GetRequestLogger can then be called to get request specific +// variables in a log line: +// +// ctx = WithRequest(ctx, req) +// GetRequestLogger(ctx).Infof("request variables") +// +// Like above, if we want to include the request data in all log messages in +// the context, we push the logger to a new context and use that one: +// +// ctx = WithLogger(ctx, GetRequestLogger(ctx)) +// +// The concept is fairly powerful and ensures that calls throughout the stack +// can be traced in log messages. Using the fields like "http.request.id", one +// can analyze call flow for a particular request with a simple grep of the +// logs. +package context diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/http.go b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go new file mode 100644 index 0000000000..8413a14622 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go @@ -0,0 +1,336 @@ +package context + +import ( + "errors" + "net" + "net/http" + "strings" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + "github.com/gorilla/mux" +) + +// Common errors used with this package. +var ( + ErrNoRequestContext = errors.New("no http request in context") + ErrNoResponseWriterContext = errors.New("no http response in context") +) + +func parseIP(ipStr string) net.IP { + ip := net.ParseIP(ipStr) + if ip == nil { + log.Warnf("invalid remote IP address: %q", ipStr) + } + return ip +} + +// RemoteAddr extracts the remote address of the request, taking into +// account proxy headers. 
+func RemoteAddr(r *http.Request) string { + if prior := r.Header.Get("X-Forwarded-For"); prior != "" { + proxies := strings.Split(prior, ",") + if len(proxies) > 0 { + remoteAddr := strings.Trim(proxies[0], " ") + if parseIP(remoteAddr) != nil { + return remoteAddr + } + } + } + // X-Real-Ip is less supported, but worth checking in the + // absence of X-Forwarded-For + if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { + if parseIP(realIP) != nil { + return realIP + } + } + + return r.RemoteAddr +} + +// RemoteIP extracts the remote IP of the request, taking into +// account proxy headers. +func RemoteIP(r *http.Request) string { + addr := RemoteAddr(r) + + // Try parsing it as "IP:port" + if ip, _, err := net.SplitHostPort(addr); err == nil { + return ip + } + + return addr +} + +// WithRequest places the request on the context. The context of the request +// is assigned a unique id, available at "http.request.id". The request itself +// is available at "http.request". Other common attributes are available under +// the prefix "http.request.". If a request is already present on the context, +// this method will panic. +func WithRequest(ctx Context, r *http.Request) Context { + if ctx.Value("http.request") != nil { + // NOTE(stevvooe): This needs to be considered a programming error. It + // is unlikely that we'd want to have more than one request in + // context. + panic("only one request per context") + } + + return &httpRequestContext{ + Context: ctx, + startedAt: time.Now(), + id: uuid.Generate().String(), + r: r, + } +} + +// GetRequest returns the http request in the given context. Returns +// ErrNoRequestContext if the context does not have an http request associated +// with it. +func GetRequest(ctx Context) (*http.Request, error) { + if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { + return r, nil + } + return nil, ErrNoRequestContext +} + +// GetRequestID attempts to resolve the current request id, if possible. 
An +// error is return if it is not available on the context. +func GetRequestID(ctx Context) string { + return GetStringValue(ctx, "http.request.id") +} + +// WithResponseWriter returns a new context and response writer that makes +// interesting response statistics available within the context. +func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { + irw := &instrumentedResponseWriter{ + ResponseWriter: w, + Context: ctx, + } + + return irw, irw +} + +// GetResponseWriter returns the http.ResponseWriter from the provided +// context. If not present, ErrNoResponseWriterContext is returned. The +// returned instance provides instrumentation in the context. +func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { + v := ctx.Value("http.response") + + rw, ok := v.(http.ResponseWriter) + if !ok || rw == nil { + return nil, ErrNoResponseWriterContext + } + + return rw, nil +} + +// getVarsFromRequest let's us change request vars implementation for testing +// and maybe future changes. +var getVarsFromRequest = mux.Vars + +// WithVars extracts gorilla/mux vars and makes them available on the returned +// context. Variables are available at keys with the prefix "vars.". For +// example, if looking for the variable "name", it can be accessed as +// "vars.name". Implementations that are accessing values need not know that +// the underlying context is implemented with gorilla/mux vars. +func WithVars(ctx Context, r *http.Request) Context { + return &muxVarsContext{ + Context: ctx, + vars: getVarsFromRequest(r), + } +} + +// GetRequestLogger returns a logger that contains fields from the request in +// the current context. If the request is not available in the context, no +// fields will display. Request loggers can safely be pushed onto the context. 
+func GetRequestLogger(ctx Context) Logger { + return GetLogger(ctx, + "http.request.id", + "http.request.method", + "http.request.host", + "http.request.uri", + "http.request.referer", + "http.request.useragent", + "http.request.remoteaddr", + "http.request.contenttype") +} + +// GetResponseLogger reads the current response stats and builds a logger. +// Because the values are read at call time, pushing a logger returned from +// this function on the context will lead to missing or invalid data. Only +// call this at the end of a request, after the response has been written. +func GetResponseLogger(ctx Context) Logger { + l := getLogrusLogger(ctx, + "http.response.written", + "http.response.status", + "http.response.contenttype") + + duration := Since(ctx, "http.request.startedat") + + if duration > 0 { + l = l.WithField("http.response.duration", duration.String()) + } + + return l +} + +// httpRequestContext makes information about a request available to context. +type httpRequestContext struct { + Context + + startedAt time.Time + id string + r *http.Request +} + +// Value returns a keyed element of the request for use in the context. To get +// the request itself, query "request". For other components, access them as +// "request.". 
// Value returns a keyed element of the request for use in the context. To get
// the request itself, query "http.request". Other components are resolved
// under the "http.request." prefix, e.g. "http.request.uri" yields
// r.RequestURI. Unknown or empty values fall through to the parent context.
func (ctx *httpRequestContext) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "http.request" {
			return ctx.r
		}

		// Anything outside the "http.request." namespace belongs to the
		// parent context.
		if !strings.HasPrefix(keyStr, "http.request.") {
			goto fallback
		}

		parts := strings.Split(keyStr, ".")

		// Only single-component keys ("http.request.<attr>") are handled here.
		if len(parts) != 3 {
			goto fallback
		}

		switch parts[2] {
		case "uri":
			return ctx.r.RequestURI
		case "remoteaddr":
			return RemoteAddr(ctx.r)
		case "method":
			return ctx.r.Method
		case "host":
			return ctx.r.Host
		case "referer":
			referer := ctx.r.Referer()
			// An empty referer falls through to the parent context rather
			// than returning "".
			if referer != "" {
				return referer
			}
		case "useragent":
			return ctx.r.UserAgent()
		case "id":
			return ctx.id
		case "startedat":
			return ctx.startedAt
		case "contenttype":
			ct := ctx.r.Header.Get("Content-Type")
			if ct != "" {
				return ct
			}
		}
	}

fallback:
	return ctx.Context.Value(key)
}

// muxVarsContext exposes gorilla/mux route variables through the context.
type muxVarsContext struct {
	Context
	vars map[string]string
}

// Value returns the whole vars map for the key "vars", a single variable for
// "vars.<name>", and otherwise defers to the parent context.
func (ctx *muxVarsContext) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "vars" {
			return ctx.vars
		}

		if strings.HasPrefix(keyStr, "vars.") {
			keyStr = strings.TrimPrefix(keyStr, "vars.")
		}

		if v, ok := ctx.vars[keyStr]; ok {
			return v
		}
	}

	return ctx.Context.Value(key)
}

// instrumentedResponseWriter provides response writer information in a
// context.
type instrumentedResponseWriter struct {
	http.ResponseWriter
	Context

	mu      sync.Mutex // guards status and written
	status  int        // last status code written; 0 until first write
	written int64      // total bytes written to the response
}
+ if irw.status == 0 { + irw.status = http.StatusOK + } + + irw.mu.Unlock() + + return +} + +func (irw *instrumentedResponseWriter) WriteHeader(status int) { + irw.ResponseWriter.WriteHeader(status) + + irw.mu.Lock() + irw.status = status + irw.mu.Unlock() +} + +func (irw *instrumentedResponseWriter) Flush() { + if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + + if !strings.HasPrefix(keyStr, "http.response.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + irw.mu.Lock() + defer irw.mu.Unlock() + + switch parts[2] { + case "written": + return irw.written + case "status": + return irw.status + case "contenttype": + contentType := irw.Header().Get("Content-Type") + if contentType != "" { + return contentType + } + } + } + +fallback: + return irw.Context.Value(key) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go b/Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go new file mode 100644 index 0000000000..3d4b3c8eb1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go @@ -0,0 +1,285 @@ +package context + +import ( + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "reflect" + "testing" + "time" +) + +func TestWithRequest(t *testing.T) { + var req http.Request + + start := time.Now() + req.Method = "GET" + req.Host = "example.com" + req.RequestURI = "/test-test" + req.Header = make(http.Header) + req.Header.Set("Referer", "foo.com/referer") + req.Header.Set("User-Agent", "test/0.1") + + ctx := WithRequest(Background(), &req) + for _, testcase := range []struct { + key string + expected interface{} + }{ + { + key: "http.request", + expected: &req, + }, + { + key: "http.request.id", + }, + 
{ + key: "http.request.method", + expected: req.Method, + }, + { + key: "http.request.host", + expected: req.Host, + }, + { + key: "http.request.uri", + expected: req.RequestURI, + }, + { + key: "http.request.referer", + expected: req.Referer(), + }, + { + key: "http.request.useragent", + expected: req.UserAgent(), + }, + { + key: "http.request.remoteaddr", + expected: req.RemoteAddr, + }, + { + key: "http.request.startedat", + }, + } { + v := ctx.Value(testcase.key) + + if v == nil { + t.Fatalf("value not found for %q", testcase.key) + } + + if testcase.expected != nil && v != testcase.expected { + t.Fatalf("%s: %v != %v", testcase.key, v, testcase.expected) + } + + // Key specific checks! + switch testcase.key { + case "http.request.id": + if _, ok := v.(string); !ok { + t.Fatalf("request id not a string: %v", v) + } + case "http.request.startedat": + vt, ok := v.(time.Time) + if !ok { + t.Fatalf("value not a time: %v", v) + } + + now := time.Now() + if vt.After(now) { + t.Fatalf("time generated too late: %v > %v", vt, now) + } + + if vt.Before(start) { + t.Fatalf("time generated too early: %v < %v", vt, start) + } + } + } +} + +type testResponseWriter struct { + flushed bool + status int + written int64 + header http.Header +} + +func (trw *testResponseWriter) Header() http.Header { + if trw.header == nil { + trw.header = make(http.Header) + } + + return trw.header +} + +func (trw *testResponseWriter) Write(p []byte) (n int, err error) { + if trw.status == 0 { + trw.status = http.StatusOK + } + + n = len(p) + trw.written += int64(n) + return +} + +func (trw *testResponseWriter) WriteHeader(status int) { + trw.status = status +} + +func (trw *testResponseWriter) Flush() { + trw.flushed = true +} + +func TestWithResponseWriter(t *testing.T) { + trw := testResponseWriter{} + ctx, rw := WithResponseWriter(Background(), &trw) + + if ctx.Value("http.response") != rw { + t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw) + } + + 
grw, err := GetResponseWriter(ctx) + if err != nil { + t.Fatalf("error getting response writer: %v", err) + } + + if grw != rw { + t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw) + } + + if ctx.Value("http.response.status") != 0 { + t.Fatalf("response status should always be a number and should be zero here: %v != 0", ctx.Value("http.response.status")) + } + + if n, err := rw.Write(make([]byte, 1024)); err != nil { + t.Fatalf("unexpected error writing: %v", err) + } else if n != 1024 { + t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024) + } + + if ctx.Value("http.response.status") != http.StatusOK { + t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusOK) + } + + if ctx.Value("http.response.written") != int64(1024) { + t.Fatalf("unexpected number reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024) + } + + // Make sure flush propagates + rw.(http.Flusher).Flush() + + if !trw.flushed { + t.Fatalf("response writer not flushed") + } + + // Write another status and make sure context is correct. This normally + // wouldn't work except for in this contrived testcase. 
+ rw.WriteHeader(http.StatusBadRequest) + + if ctx.Value("http.response.status") != http.StatusBadRequest { + t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusBadRequest) + } +} + +func TestWithVars(t *testing.T) { + var req http.Request + vars := map[string]string{ + "foo": "asdf", + "bar": "qwer", + } + + getVarsFromRequest = func(r *http.Request) map[string]string { + if r != &req { + t.Fatalf("unexpected request: %v != %v", r, req) + } + + return vars + } + + ctx := WithVars(Background(), &req) + for _, testcase := range []struct { + key string + expected interface{} + }{ + { + key: "vars", + expected: vars, + }, + { + key: "vars.foo", + expected: "asdf", + }, + { + key: "vars.bar", + expected: "qwer", + }, + } { + v := ctx.Value(testcase.key) + + if !reflect.DeepEqual(v, testcase.expected) { + t.Fatalf("%q: %v != %v", testcase.key, v, testcase.expected) + } + } +} + +// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test +// RemoteAddr(). A fake RemoteAddr cannot be set on the HTTP request - it is overwritten +// at the transport layer to 127.0.0.1: . However, as the X-Forwarded-For header +// just contains the IP address, it is different enough for testing. 
+func TestRemoteAddr(t *testing.T) { + var expectedRemote string + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + if r.RemoteAddr == expectedRemote { + t.Errorf("Unexpected matching remote addresses") + } + + actualRemote := RemoteAddr(r) + if expectedRemote != actualRemote { + t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote) + } + + w.WriteHeader(200) + })) + + defer backend.Close() + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + + proxy := httputil.NewSingleHostReverseProxy(backendURL) + frontend := httptest.NewServer(proxy) + defer frontend.Close() + + // X-Forwarded-For set by proxy + expectedRemote = "127.0.0.1" + proxyReq, err := http.NewRequest("GET", frontend.URL, nil) + if err != nil { + t.Fatal(err) + } + + _, err = http.DefaultClient.Do(proxyReq) + if err != nil { + t.Fatal(err) + } + + // RemoteAddr in X-Real-Ip + getReq, err := http.NewRequest("GET", backend.URL, nil) + if err != nil { + t.Fatal(err) + } + + expectedRemote = "1.2.3.4" + getReq.Header["X-Real-ip"] = []string{expectedRemote} + _, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } + + // Valid X-Real-Ip and invalid X-Forwarded-For + getReq.Header["X-forwarded-for"] = []string{"1.2.3"} + _, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go b/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go new file mode 100644 index 0000000000..b0f0c50845 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go @@ -0,0 +1,108 @@ +package context + +import ( + "fmt" + + "github.com/docker/distribution/uuid" + + "github.com/Sirupsen/logrus" +) + +// Logger provides a leveled-logging interface. 
+type Logger interface { + // standard logger methods + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) + + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + + Panic(args ...interface{}) + Panicf(format string, args ...interface{}) + Panicln(args ...interface{}) + + // Leveled methods, from logrus + Debug(args ...interface{}) + Debugf(format string, args ...interface{}) + Debugln(args ...interface{}) + + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Errorln(args ...interface{}) + + Info(args ...interface{}) + Infof(format string, args ...interface{}) + Infoln(args ...interface{}) + + Warn(args ...interface{}) + Warnf(format string, args ...interface{}) + Warnln(args ...interface{}) +} + +// WithLogger creates a new context with provided logger. +func WithLogger(ctx Context, logger Logger) Context { + return WithValue(ctx, "logger", logger) +} + +// GetLoggerWithField returns a logger instance with the specified field key +// and value without affecting the context. Extra specified keys will be +// resolved from the context. +func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) +} + +// GetLoggerWithFields returns a logger instance with the specified fields +// without affecting the context. Extra specified keys will be resolved from +// the context. +func GetLoggerWithFields(ctx Context, fields map[string]interface{}, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...).WithFields(logrus.Fields(fields)) +} + +// GetLogger returns the logger from the current context, if present. If one +// or more keys are provided, they will be resolved on the context and +// included in the logger. 
While context.Value takes an interface, any key +// argument passed to GetLogger will be passed to fmt.Sprint when expanded as +// a logging key field. If context keys are integer constants, for example, +// it's recommended that a String method is implemented. +func GetLogger(ctx Context, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...) +} + +// getLogrusLogger returns the logrus logger for the context. If one or more keys +// are provided, they will be resolved on the context and included in the +// logger. Only use this function if specific logrus functionality is +// required. +func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { + var logger *logrus.Entry + + // Get a logger, if it is present. + loggerInterface := ctx.Value("logger") + if loggerInterface != nil { + if lgr, ok := loggerInterface.(*logrus.Entry); ok { + logger = lgr + } + } + + if logger == nil { + // If no logger is found, just return the standard logger. + logger = logrus.NewEntry(logrus.StandardLogger()) + } + + fields := logrus.Fields{} + + for _, key := range keys { + v := ctx.Value(key) + if v != nil { + fields[fmt.Sprint(key)] = v + } + } + + return logger.WithFields(fields) +} + +func init() { + // inject a logger into the uuid library. + uuid.Loggerf = GetLogger(Background()).Warnf +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go new file mode 100644 index 0000000000..af4f1351e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go @@ -0,0 +1,104 @@ +package context + +import ( + "runtime" + "time" + + "github.com/docker/distribution/uuid" +) + +// WithTrace allocates a traced timing span in a new context. This allows a +// caller to track the time between calling WithTrace and the returned done +// function. 
When the done function is called, a log message is emitted with a +// "trace.duration" field, corresponding to the elapased time and a +// "trace.func" field, corresponding to the function that called WithTrace. +// +// The logging keys "trace.id" and "trace.parent.id" are provided to implement +// dapper-like tracing. This function should be complemented with a WithSpan +// method that could be used for tracing distributed RPC calls. +// +// The main benefit of this function is to post-process log messages or +// intercept them in a hook to provide timing data. Trace ids and parent ids +// can also be linked to provide call tracing, if so required. +// +// Here is an example of the usage: +// +// func timedOperation(ctx Context) { +// ctx, done := WithTrace(ctx) +// defer done("this will be the log message") +// // ... function body ... +// } +// +// If the function ran for roughly 1s, such a usage would emit a log message +// as follows: +// +// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... +// +// Notice that the function name is automatically resolved, along with the +// package and a trace id is emitted that can be linked with parent ids. +func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { + if ctx == nil { + ctx = Background() + } + + pc, file, line, _ := runtime.Caller(1) + f := runtime.FuncForPC(pc) + ctx = &traced{ + Context: ctx, + id: uuid.Generate().String(), + start: time.Now(), + parent: GetStringValue(ctx, "trace.id"), + fnname: f.Name(), + file: file, + line: line, + } + + return ctx, func(format string, a ...interface{}) { + GetLogger(ctx, + "trace.duration", + "trace.id", + "trace.parent.id", + "trace.func", + "trace.file", + "trace.line"). + Debugf(format, a...) + } +} + +// traced represents a context that is traced for function call timing. 
It +// also provides fast lookup for the various attributes that are available on +// the trace. +type traced struct { + Context + id string + parent string + start time.Time + fnname string + file string + line int +} + +func (ts *traced) Value(key interface{}) interface{} { + switch key { + case "trace.start": + return ts.start + case "trace.duration": + return time.Since(ts.start) + case "trace.id": + return ts.id + case "trace.parent.id": + if ts.parent == "" { + return nil // must return nil to signal no parent. + } + + return ts.parent + case "trace.func": + return ts.fnname + case "trace.file": + return ts.file + case "trace.line": + return ts.line + } + + return ts.Context.Value(key) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go b/Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go new file mode 100644 index 0000000000..4b969fbb0d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go @@ -0,0 +1,85 @@ +package context + +import ( + "runtime" + "testing" + "time" +) + +// TestWithTrace ensures that tracing has the expected values in the context. +func TestWithTrace(t *testing.T) { + pc, file, _, _ := runtime.Caller(0) // get current caller. + f := runtime.FuncForPC(pc) + + base := []valueTestCase{ + { + key: "trace.id", + notnilorempty: true, + }, + + { + key: "trace.file", + expected: file, + notnilorempty: true, + }, + { + key: "trace.line", + notnilorempty: true, + }, + { + key: "trace.start", + notnilorempty: true, + }, + } + + ctx, done := WithTrace(Background()) + defer done("this will be emitted at end of test") + + checkContextForValues(t, ctx, append(base, valueTestCase{ + key: "trace.func", + expected: f.Name(), + })) + + traced := func() { + parentID := ctx.Value("trace.id") // ensure the parent trace id is correct. + + pc, _, _, _ := runtime.Caller(0) // get current caller. 
+ f := runtime.FuncForPC(pc) + ctx, done := WithTrace(ctx) + defer done("this should be subordinate to the other trace") + time.Sleep(time.Second) + checkContextForValues(t, ctx, append(base, valueTestCase{ + key: "trace.func", + expected: f.Name(), + }, valueTestCase{ + key: "trace.parent.id", + expected: parentID, + })) + } + traced() + + time.Sleep(time.Second) +} + +type valueTestCase struct { + key string + expected interface{} + notnilorempty bool // just check not empty/not nil +} + +func checkContextForValues(t *testing.T, ctx Context, values []valueTestCase) { + + for _, testcase := range values { + v := ctx.Value(testcase.key) + if testcase.notnilorempty { + if v == nil || v == "" { + t.Fatalf("value was nil or empty for %q: %#v", testcase.key, v) + } + continue + } + + if v != testcase.expected { + t.Fatalf("unexpected value for key %q: %v != %v", testcase.key, v, testcase.expected) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/util.go b/Godeps/_workspace/src/github.com/docker/distribution/context/util.go new file mode 100644 index 0000000000..c0aff00d28 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/util.go @@ -0,0 +1,32 @@ +package context + +import ( + "time" +) + +// Since looks up key, which should be a time.Time, and returns the duration +// since that time. If the key is not found, the value returned will be zero. +// This is helpful when inferring metrics related to context execution times. +func Since(ctx Context, key interface{}) time.Duration { + startedAtI := ctx.Value(key) + if startedAtI != nil { + if startedAt, ok := startedAtI.(time.Time); ok { + return time.Since(startedAt) + } + } + + return 0 +} + +// GetStringValue returns a string value from the context. The empty string +// will be returned if not found. 
+func GetStringValue(ctx Context, key string) (value string) { + stringi := ctx.Value(key) + if stringi != nil { + if valuev, ok := stringi.(string); ok { + value = valuev + } + } + + return value +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go new file mode 100644 index 0000000000..ec82b46977 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go @@ -0,0 +1,142 @@ +// Package auth defines a standard interface for request access controllers. +// +// An access controller has a simple interface with a single `Authorized` +// method which checks that a given request is authorized to perform one or +// more actions on one or more resources. This method should return a non-nil +// error if the request is not authorized. +// +// An implementation registers its access controller by name with a constructor +// which accepts an options map for configuring the access controller. +// +// options := map[string]interface{}{"sillySecret": "whysosilly?"} +// accessController, _ := auth.GetAccessController("silly", options) +// +// This `accessController` can then be used in a request handler like so: +// +// func updateOrder(w http.ResponseWriter, r *http.Request) { +// orderNumber := r.FormValue("orderNumber") +// resource := auth.Resource{Type: "customerOrder", Name: orderNumber} +// access := auth.Access{Resource: resource, Action: "update"} +// +// if ctx, err := accessController.Authorized(ctx, access); err != nil { +// if challenge, ok := err.(auth.Challenge); ok { +// // Let the challenge write the response. +// challenge.ServeHTTP(w, r) +// } else { +// // Some other error. +// } +// } +// } +// +package auth + +import ( + "fmt" + "net/http" + + "golang.org/x/net/context" +) + +// UserInfo carries information about +// an authenticated/authorized client. 
+type UserInfo struct { + Name string +} + +// Resource describes a resource by type and name. +type Resource struct { + Type string + Name string +} + +// Access describes a specific action that is +// requested or allowed for a given resource. +type Access struct { + Resource + Action string +} + +// Challenge is a special error type which is used for HTTP 401 Unauthorized +// responses and is able to write the response with WWW-Authenticate challenge +// header values based on the error. +type Challenge interface { + error + // ServeHTTP prepares the request to conduct the appropriate challenge + // response. For most implementations, simply calling ServeHTTP should be + // sufficient. Because no body is written, users may write a custom body after + // calling ServeHTTP, but any headers must be written before the call and may + // be overwritten. + ServeHTTP(w http.ResponseWriter, r *http.Request) +} + +// AccessController controls access to registry resources based on a request +// and required access levels for a request. Implementations can support both +// complete denial and http authorization challenges. +type AccessController interface { + // Authorized returns a non-nil error if the context is granted access and + // returns a new authorized context. If one or more Access structs are + // provided, the requested access will be compared with what is available + // to the context. The given context will contain a "http.request" key with + // a `*http.Request` value. If the error is non-nil, access should always + // be denied. The error may be of type Challenge, in which case the caller + // may have the Challenge handle the request or choose what action to take + // based on the Challenge header or response status. The returned context + // object should have a "auth.user" value set to a UserInfo struct. + Authorized(ctx context.Context, access ...Access) (context.Context, error) +} + +// WithUser returns a context with the authorized user info. 
+func WithUser(ctx context.Context, user UserInfo) context.Context { + return userInfoContext{ + Context: ctx, + user: user, + } +} + +type userInfoContext struct { + context.Context + user UserInfo +} + +func (uic userInfoContext) Value(key interface{}) interface{} { + switch key { + case "auth.user": + return uic.user + case "auth.user.name": + return uic.user.Name + } + + return uic.Context.Value(key) +} + +// InitFunc is the type of an AccessController factory function and is used +// to register the constructor for different AccessController backends. +type InitFunc func(options map[string]interface{}) (AccessController, error) + +var accessControllers map[string]InitFunc + +func init() { + accessControllers = make(map[string]InitFunc) +} + +// Register is used to register an InitFunc for +// an AccessController backend with the given name. +func Register(name string, initFunc InitFunc) error { + if _, exists := accessControllers[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + accessControllers[name] = initFunc + + return nil +} + +// GetAccessController constructs an AccessController +// with the given options using the named backend. +func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { + if initFunc, exists := accessControllers[name]; exists { + return initFunc(options) + } + + return nil, fmt.Errorf("no access controller registered with name: %s", name) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go new file mode 100644 index 0000000000..39318d1a39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go @@ -0,0 +1,96 @@ +// Package silly provides a simple authentication scheme that checks for the +// existence of an Authorization header and issues access if it is present and +// non-empty. 
+// +// This package is present as an example implementation of a minimal +// auth.AccessController and for testing. This is not suitable for any kind of +// production security. +package silly + +import ( + "fmt" + "net/http" + "strings" + + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "golang.org/x/net/context" +) + +// accessController provides a simple implementation of auth.AccessController +// that simply checks for a non-empty Authorization header. It is useful for +// demonstration and testing. +type accessController struct { + realm string + service string +} + +var _ auth.AccessController = &accessController{} + +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + realm, present := options["realm"] + if _, ok := realm.(string); !present || !ok { + return nil, fmt.Errorf(`"realm" must be set for silly access controller`) + } + + service, present := options["service"] + if _, ok := service.(string); !present || !ok { + return nil, fmt.Errorf(`"service" must be set for silly access controller`) + } + + return &accessController{realm: realm.(string), service: service.(string)}, nil +} + +// Authorized simply checks for the existence of the authorization header, +// responding with a bearer challenge if it doesn't exist. 
+func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { + req, err := ctxu.GetRequest(ctx) + if err != nil { + return nil, err + } + + if req.Header.Get("Authorization") == "" { + challenge := challenge{ + realm: ac.realm, + service: ac.service, + } + + if len(accessRecords) > 0 { + var scopes []string + for _, access := range accessRecords { + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action)) + } + challenge.scope = strings.Join(scopes, " ") + } + + return nil, &challenge + } + + return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil +} + +type challenge struct { + realm string + service string + scope string +} + +func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { + header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) + + if ch.scope != "" { + header = fmt.Sprintf("%s,scope=%q", header, ch.scope) + } + + w.Header().Set("WWW-Authenticate", header) + w.WriteHeader(http.StatusUnauthorized) +} + +func (ch *challenge) Error() string { + return fmt.Sprintf("silly authentication challenge: %#v", ch) +} + +// init registers the silly auth backend. 
+func init() { + auth.Register("silly", auth.InitFunc(newAccessController)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go new file mode 100644 index 0000000000..d579e87803 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go @@ -0,0 +1,70 @@ +package silly + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/registry/auth" + "golang.org/x/net/context" +) + +func TestSillyAccessController(t *testing.T) { + ac := &accessController{ + realm: "test-realm", + service: "test-service", + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithValue(nil, "http.request", r) + authCtx, err := ac.Authorized(ctx) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + err.ServeHTTP(w, r) + return + default: + t.Fatalf("unexpected error authorizing request: %v", err) + } + } + + userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + if !ok { + t.Fatal("silly accessController did not set auth.user context") + } + + if userInfo.Name != "silly" { + t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name) + } + + w.WriteHeader(http.StatusNoContent) + })) + + resp, err := http.Get(server.URL) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) + } + + req, err := http.NewRequest("GET", server.URL, nil) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + req.Header.Set("Authorization", "seriously, anything") + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected 
error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go new file mode 100644 index 0000000000..4547336a45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go @@ -0,0 +1,274 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +// accessSet maps a typed, named resource to +// a set of actions requested or authorized. +type accessSet map[auth.Resource]actionSet + +// newAccessSet constructs an accessSet from +// a variable number of auth.Access items. +func newAccessSet(accessItems ...auth.Access) accessSet { + accessSet := make(accessSet, len(accessItems)) + + for _, access := range accessItems { + resource := auth.Resource{ + Type: access.Type, + Name: access.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + set.add(access.Action) + } + + return accessSet +} + +// contains returns whether or not the given access is in this accessSet. +func (s accessSet) contains(access auth.Access) bool { + actionSet, ok := s[access.Resource] + if ok { + return actionSet.contains(access.Action) + } + + return false +} + +// scopeParam returns a collection of scopes which can +// be used for a WWW-Authenticate challenge parameter. 
+// See https://tools.ietf.org/html/rfc6750#section-3 +func (s accessSet) scopeParam() string { + scopes := make([]string, 0, len(s)) + + for resource, actionSet := range s { + actions := strings.Join(actionSet.keys(), ",") + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) + } + + return strings.Join(scopes, " ") +} + +// Errors used and exported by this package. +var ( + ErrInsufficientScope = errors.New("insufficient scope") + ErrTokenRequired = errors.New("authorization token required") +) + +// authChallenge implements the auth.Challenge interface. +type authChallenge struct { + err error + realm string + service string + accessSet accessSet +} + +// Error returns the internal error string for this authChallenge. +func (ac *authChallenge) Error() string { + return ac.err.Error() +} + +// Status returns the HTTP Response Status Code for this authChallenge. +func (ac *authChallenge) Status() int { + return http.StatusUnauthorized +} + +// challengeParams constructs the value to be used in +// the WWW-Authenticate response challenge header. +// See https://tools.ietf.org/html/rfc6750#section-3 +func (ac *authChallenge) challengeParams() string { + str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) + + if scope := ac.accessSet.scopeParam(); scope != "" { + str = fmt.Sprintf("%s,scope=%q", str, scope) + } + + if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { + str = fmt.Sprintf("%s,error=%q", str, "invalid_token") + } else if ac.err == ErrInsufficientScope { + str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") + } + + return str +} + +// SetHeader sets the WWW-Authenticate value for the given header. +func (ac *authChallenge) SetHeader(header http.Header) { + header.Add("WWW-Authenticate", ac.challengeParams()) +} + +// ServeHttp handles writing the challenge response +// by setting the challenge header and status code. 
+func (ac *authChallenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ac.SetHeader(w.Header()) + w.WriteHeader(ac.Status()) +} + +// accessController implements the auth.AccessController interface. +type accessController struct { + realm string + issuer string + service string + rootCerts *x509.CertPool + trustedKeys map[string]libtrust.PublicKey +} + +// tokenAccessOptions is a convenience type for handling +// options to the constructor of an accessController. +type tokenAccessOptions struct { + realm string + issuer string + service string + rootCertBundle string +} + +// checkOptions gathers the necessary options +// for an accessController from the given map. +func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { + var opts tokenAccessOptions + + keys := []string{"realm", "issuer", "service", "rootcertbundle"} + vals := make([]string, 0, len(keys)) + for _, key := range keys { + val, ok := options[key].(string) + if !ok { + return opts, fmt.Errorf("token auth requires a valid option string: %q", key) + } + vals = append(vals, val) + } + + opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] + + return opts, nil +} + +// newAccessController creates an accessController using the given options. 
+func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + config, err := checkOptions(options) + if err != nil { + return nil, err + } + + fp, err := os.Open(config.rootCertBundle) + if err != nil { + return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + defer fp.Close() + + rawCertBundle, err := ioutil.ReadAll(fp) + if err != nil { + return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + + var rootCerts []*x509.Certificate + pemBlock, rawCertBundle := pem.Decode(rawCertBundle) + for pemBlock != nil { + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) + } + + rootCerts = append(rootCerts, cert) + + pemBlock, rawCertBundle = pem.Decode(rawCertBundle) + } + + if len(rootCerts) == 0 { + return nil, errors.New("token auth requires at least one token signing root certificate") + } + + rootPool := x509.NewCertPool() + trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) + if err != nil { + return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) + } + trustedKeys[pubKey.KeyID()] = pubKey + } + + return &accessController{ + realm: config.realm, + issuer: config.issuer, + service: config.service, + rootCerts: rootPool, + trustedKeys: trustedKeys, + }, nil +} + +// Authorized handles checking whether the given request is authorized +// for actions on resources described by the given access items. 
+func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { + challenge := &authChallenge{ + realm: ac.realm, + service: ac.service, + accessSet: newAccessSet(accessItems...), + } + + req, err := ctxu.GetRequest(ctx) + if err != nil { + return nil, err + } + + parts := strings.Split(req.Header.Get("Authorization"), " ") + + if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { + challenge.err = ErrTokenRequired + return nil, challenge + } + + rawToken := parts[1] + + token, err := NewToken(rawToken) + if err != nil { + challenge.err = err + return nil, challenge + } + + verifyOpts := VerifyOptions{ + TrustedIssuers: []string{ac.issuer}, + AcceptedAudiences: []string{ac.service}, + Roots: ac.rootCerts, + TrustedKeys: ac.trustedKeys, + } + + if err = token.Verify(verifyOpts); err != nil { + challenge.err = err + return nil, challenge + } + + accessSet := token.accessSet() + for _, access := range accessItems { + if !accessSet.contains(access) { + challenge.err = ErrInsufficientScope + return nil, challenge + } + } + + return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil +} + +// init handles registering the token auth backend. 
+func init() { + auth.Register("token", auth.InitFunc(newAccessController)) +} diff --git a/auth/token/stringset.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go similarity index 100% rename from auth/token/stringset.go rename to Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go diff --git a/auth/token/token.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go similarity index 86% rename from auth/token/token.go rename to Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go index b941ec0d93..166816eeaa 100644 --- a/auth/token/token.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go @@ -7,13 +7,13 @@ import ( "encoding/json" "errors" "fmt" - "log" "strings" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/libtrust" - "github.com/docker/vetinari/auth" + "github.com/docker/distribution/registry/auth" ) const ( @@ -22,10 +22,17 @@ const ( TokenSeparator = "." ) +// Errors used by token parsing and verification. +var ( + ErrMalformedToken = errors.New("malformed token") + ErrInvalidToken = errors.New("invalid token") +) + // ResourceActions stores allowed actions on a named and typed resource. type ResourceActions struct { - auth.Resource - Actions []auth.SimpleScope `json:"actions"` + Type string `json:"type"` + Name string `json:"name"` + Actions []string `json:"actions"` } // ClaimSet describes the main section of a JSON Web Token. 
@@ -78,14 +85,14 @@ func NewToken(rawToken string) (*Token, error) { } var ( + rawHeader, rawClaims = parts[0], parts[1] headerJSON, claimsJSON []byte err error ) - rawHeader, rawClaims := parts[0], parts[1] defer func() { if err != nil { - log.Printf("error while unmarshalling raw token: %s", err) + log.Errorf("error while unmarshalling raw token: %s", err) } }() @@ -125,39 +132,39 @@ func NewToken(rawToken string) (*Token, error) { func (t *Token) Verify(verifyOpts VerifyOptions) error { // Verify that the Issuer claim is a trusted authority. if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) { - log.Printf("token from untrusted issuer: %q", t.Claims.Issuer) + log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer) return ErrInvalidToken } // Verify that the Audience claim is allowed. if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) { - log.Printf("token intended for another audience: %q", t.Claims.Audience) + log.Errorf("token intended for another audience: %q", t.Claims.Audience) return ErrInvalidToken } // Verify that the token is currently usable and not expired. currentUnixTime := time.Now().Unix() if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) { - log.Printf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime) + log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime) return ErrInvalidToken } // Verify the token signature. if len(t.Signature) == 0 { - log.Println("token has no signature") + log.Error("token has no signature") return ErrInvalidToken } // Verify that the signing key is trusted. - signingKey, err := t.verifySigningKey(verifyOpts) + signingKey, err := t.VerifySigningKey(verifyOpts) if err != nil { - log.Println(err) + log.Error(err) return ErrInvalidToken } // Finally, verify the signature of the token using the key which signed it. 
if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil { - log.Printf("unable to verify token signature: %s", err) + log.Errorf("unable to verify token signature: %s", err) return ErrInvalidToken } @@ -175,7 +182,7 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error { // the trustedKeys field of the given verify options. // Each of these methods are tried in that order of preference until the // signing key is found or an error is returned. -func (t *Token) verifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { +func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { // First attempt to get an x509 certificate chain from the header. var ( x5c = t.Header.X5c @@ -304,22 +311,31 @@ func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pub // accessSet returns a set of actions available for the resource // actions listed in the `access` section of this token. 
-func (t *Token) scopes(resource auth.Resource) []auth.Scope { - scopes := make([]auth.Scope, 0, 1) +func (t *Token) accessSet() accessSet { + if t.Claims == nil { + return nil + } - if t.Claims != nil { - for _, resourceActions := range t.Claims.Access { - if resourceActions.Type != resource.Type || resourceActions.Name != resource.Name { - continue - } - for _, act := range resourceActions.Actions { - scopes = append(scopes, &act) - } + accessSet := make(accessSet, len(t.Claims.Access)) + for _, resourceActions := range t.Claims.Access { + resource := auth.Resource{ + Type: resourceActions.Type, + Name: resourceActions.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + for _, action := range resourceActions.Actions { + set.add(action) } } - return scopes + return accessSet } func (t *Token) compactRaw() string { diff --git a/auth/token/token_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go similarity index 79% rename from auth/token/token_test.go rename to Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go index db7e39f088..9d84d4efb2 100644 --- a/auth/token/token_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go @@ -15,8 +15,9 @@ import ( "testing" "time" + "github.com/docker/distribution/registry/auth" "github.com/docker/libtrust" - "github.com/docker/vetinari/auth" + "golang.org/x/net/context" ) func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) { @@ -149,11 +150,6 @@ func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey l return NewToken(tokenString) } -// Some people put this in the non-test files... I prefer to have it as a test. 
-func TestAuthorizerInterface(t *testing.T) { - var _ auth.Authorizer = &tokenAuthorizer{} -} - // This test makes 4 tokens with a varying number of intermediate // certificates ranging from no intermediate chain to a length of 3 // intermediates. @@ -164,11 +160,9 @@ func TestTokenVerify(t *testing.T) { audience = "test-audience" access = []*ResourceActions{ { - Resource: auth.Resource{ - Type: "repository", - Name: "foo/bar", - }, - Actions: []auth.SimpleScope{auth.SimpleScope("pull"), auth.SimpleScope("push")}, + Type: "repository", + Name: "foo/bar", + Actions: []string{"pull", "push"}, }, } ) @@ -263,51 +257,54 @@ func TestAccessController(t *testing.T) { issuer := "test-issuer.example.com" service := "test-service.example.com" - jsonConf := fmt.Sprintf( - "{\"realm\":\"%s\", \"issuer\":\"%s\", \"service\":\"%s\", \"root_cert_bundle\":\"%s\"}", - realm, - issuer, - service, - rootCertBundleFilename, - ) - options := json.RawMessage{} - options.UnmarshalJSON([]byte(jsonConf)) + options := map[string]interface{}{ + "realm": realm, + "issuer": issuer, + "service": service, + "rootcertbundle": rootCertBundleFilename, + } - accessController, err := NewTokenAuthorizer(options) + accessController, err := newAccessController(options) if err != nil { t.Fatal(err) } // 1. Make a mock http.Request with no token. 
- req, err := http.NewRequest("GET", "http://example.com/foo/bar/init", nil) + req, err := http.NewRequest("GET", "http://example.com/foo", nil) if err != nil { t.Fatal(err) } - testAccess := auth.SimpleScope("baz") - testResource := auth.Resource{Type: "repo", Name: "foo/bar"} + testAccess := auth.Access{ + Resource: auth.Resource{ + Type: "foo", + Name: "bar", + }, + Action: "baz", + } - //ctx := context.WithValue(nil, "http.request", req) - userInfo, err := accessController.Authorize(req, testAccess) - challenge, ok := err.(*authChallenge) + ctx := context.WithValue(nil, "http.request", req) + authCtx, err := accessController.Authorized(ctx, testAccess) + challenge, ok := err.(auth.Challenge) if !ok { - t.Fatalf("accessController did not return a challenge") + t.Fatal("accessController did not return a challenge") } if challenge.Error() != ErrTokenRequired.Error() { t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) } - if userInfo != nil { - t.Fatalf("expected nil User but got %s", userInfo.Name) + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) } // 2. Supply an invalid token. token, err := makeTestToken( issuer, service, []*ResourceActions{{ - Resource: testResource, - Actions: []auth.SimpleScope{testAccess}, + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{testAccess.Action}, }}, rootKeys[1], 1, // Everything is valid except the key which signed it. 
) @@ -317,8 +314,8 @@ func TestAccessController(t *testing.T) { req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) - userInfo, err = accessController.Authorize(req, testAccess) - challenge, ok = err.(*authChallenge) + authCtx, err = accessController.Authorized(ctx, testAccess) + challenge, ok = err.(auth.Challenge) if !ok { t.Fatal("accessController did not return a challenge") } @@ -327,8 +324,8 @@ func TestAccessController(t *testing.T) { t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) } - if userInfo != nil { - t.Fatalf("expected nil User but got %s", userInfo.Name) + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) } // 3. Supply a token with insufficient access. @@ -343,8 +340,8 @@ func TestAccessController(t *testing.T) { req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) - userInfo, err = accessController.Authorize(req, testAccess) - challenge, ok = err.(*authChallenge) + authCtx, err = accessController.Authorized(ctx, testAccess) + challenge, ok = err.(auth.Challenge) if !ok { t.Fatal("accessController did not return a challenge") } @@ -353,16 +350,17 @@ func TestAccessController(t *testing.T) { t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope) } - if userInfo != nil { - t.Fatalf("expected nil User but got %s", userInfo.Name) + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) } // 4. Supply the token we need, or deserve, or whatever. token, err = makeTestToken( issuer, service, []*ResourceActions{{ - Resource: testResource, - Actions: []auth.SimpleScope{testAccess}, + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{testAccess.Action}, }}, rootKeys[0], 1, ) @@ -372,16 +370,13 @@ func TestAccessController(t *testing.T) { req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) - // 5. 
We've tested the various failure cases, we now have to treat accessController specifically as - // a tokenAuthorizer to test the success case via the .authorize method. This lets us inject the - // correct parsed resource as mux will not parse the resource correctly from the test req object. - auther, _ := accessController.(*tokenAuthorizer) - userInfo, err = auther.authorize(token, testResource, testAccess) + authCtx, err = accessController.Authorized(ctx, testAccess) if err != nil { t.Fatalf("accessController returned unexpected error: %s", err) } - if userInfo == nil { + userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + if !ok { t.Fatal("token accessController did not set auth.user context") } diff --git a/auth/token/util.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go similarity index 100% rename from auth/token/util.go rename to Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go diff --git a/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go new file mode 100644 index 0000000000..af1913cd0a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go @@ -0,0 +1,112 @@ +// Package uuid provides simple UUID generation. Only version 4 style UUIDs +// can be generated. +// +// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs. +package uuid + +import ( + "crypto/rand" + "fmt" + "io" + "log" + "syscall" + "time" +) + +const ( + // Bits is the number of bits in a UUID + Bits = 128 + + // Size is the number of bytes in a UUID + Size = Bits / 8 + + format = "%08x-%04x-%04x-%04x-%012x" +) + +var ( + // ErrUUIDInvalid indicates a parsed string is not a valid uuid. + ErrUUIDInvalid = fmt.Errorf("invalid uuid") + + // Loggerf can be used to override the default logging destination. Such + // log messages in this library should be logged at warning or higher. 
+ Loggerf = log.Printf +) + +// UUID represents a UUID value. UUIDs can be compared and set to other values +// and accessed by byte. +type UUID [Size]byte + +// Generate creates a new, version 4 uuid. +func Generate() (u UUID) { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + retries int + ) + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + _, err := io.ReadFull(rand.Reader, u[:]) + if err != nil { + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + if retries < maxretries { + retries++ + Loggerf("error generating version 4 uuid, retrying: %v", err) + continue + } + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff, err)) + } + + break + } + + u[6] = (u[6] & 0x0f) | 0x40 // set version byte + u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} + + return u +} + +// Parse attempts to extract a uuid from the string or returns an error. +func Parse(s string) (u UUID, err error) { + if len(s) != 36 { + return UUID{}, ErrUUIDInvalid + } + + // create stack addresses for each section of the uuid. 
+ p := make([][]byte, 5) + + if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { + return u, err + } + + copy(u[0:4], p[0]) + copy(u[4:6], p[1]) + copy(u[6:8], p[2]) + copy(u[8:10], p[3]) + copy(u[10:16], p[4]) + + return +} + +func (u UUID) String() string { + return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go new file mode 100644 index 0000000000..09c3a7bb4d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go @@ -0,0 +1,48 @@ +package uuid + +import ( + "testing" +) + +const iterations = 1000 + +func TestUUID4Generation(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + if u[6]&0xf0 != 0x40 { + t.Fatalf("version byte not correctly set: %v, %08b %08b", u, u[6], u[6]&0xf0) + } + + if u[8]&0xc0 != 0x80 { + t.Fatalf("top order 8th byte not correctly set: %v, %b", u, u[8]) + } + } +} + +func TestParseAndEquality(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + parsed, err := Parse(u.String()) + if err != nil { + t.Fatalf("error parsing uuid %v: %v", u, err) + } + + if parsed != u { + t.Fatalf("parsing round trip failed: %v != %v", parsed, u) + } + } + + for _, c := range []string{ + "bad", + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // correct length, incorrect format + " 20cc7775-2671-43c7-8742-51d1cfa23258", // leading space + "20cc7775-2671-43c7-8742-51d1cfa23258 ", // trailing space + "00000000-0000-0000-0000-x00000000000", // out of range character + } { + if _, err := Parse(c); err == nil { + t.Fatalf("parsing %q should have failed", c) + } + } +} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/.gitignore b/Godeps/_workspace/src/github.com/endophage/go-tuf/.gitignore deleted file mode 100644 index 6c911bf928..0000000000 --- 
a/Godeps/_workspace/src/github.com/endophage/go-tuf/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/db/ diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/.travis.yml b/Godeps/_workspace/src/github.com/endophage/go-tuf/.travis.yml deleted file mode 100644 index b0822a6c99..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -language: go -go: - - 1.4 - - tip - -sudo: false - -before_install: - - go get golang.org/x/tools/cmd/cover - -script: - - go test -race -cover ./... - -notifications: - irc: - channels: - - "chat.freenode.net#flynn" - use_notice: true - skip_join: true - on_success: change - on_failure: always - template: - - "%{repository}/%{branch} - %{commit}: %{message} %{build_url}" - email: - on_success: never - on_failure: always - -matrix: - allow_failures: - - go: tip diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/endophage/go-tuf/Godeps/Godeps.json deleted file mode 100644 index e785405994..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/Godeps/Godeps.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "ImportPath": "github.com/flynn/go-tuf", - "GoVersion": "go1.4.1", - "Packages": [ - "./..." 
- ], - "Deps": [ - { - "ImportPath": "github.com/agl/ed25519", - "Rev": "d2b94fd789ea21d12fac1a4443dd3a3f79cda72c" - }, - { - "ImportPath": "github.com/boltdb/bolt", - "Comment": "v1.0-19-g00c6357", - "Rev": "00c635718fa0592764453e60194451889876eea0" - }, - { - "ImportPath": "github.com/docker/docker/pkg/term", - "Comment": "v1.4.1-775-g70fbd45", - "Rev": "70fbd45a5c88f6f39a07b04f81a07721bf5f3eed" - }, - { - "ImportPath": "github.com/dustin/go-humanize", - "Rev": "145fabdb1ab757076a70a886d092a3af27f66f4c" - }, - { - "ImportPath": "github.com/flynn/go-docopt", - "Comment": "0.6.1-rc2-26-gf6dd2eb", - "Rev": "f6dd2ebbb31e9721c860cf1faf5c944aa73e3844" - }, - { - "ImportPath": "github.com/tent/canonical-json-go", - "Rev": "96e4ba3a7613a1216cbd1badca4efe382adea337" - }, - { - "ImportPath": "golang.org/x/crypto/nacl/secretbox", - "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" - }, - { - "ImportPath": "golang.org/x/crypto/pbkdf2", - "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" - }, - { - "ImportPath": "golang.org/x/crypto/poly1305", - "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" - }, - { - "ImportPath": "golang.org/x/crypto/salsa20/salsa", - "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" - }, - { - "ImportPath": "golang.org/x/crypto/scrypt", - "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" - }, - { - "ImportPath": "gopkg.in/check.v1", - "Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673" - } - ] -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/Godeps/Readme b/Godeps/_workspace/src/github.com/endophage/go-tuf/Godeps/Readme deleted file mode 100644 index 4cdaa53d56..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. 
diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/MAINTAINERS b/Godeps/_workspace/src/github.com/endophage/go-tuf/MAINTAINERS deleted file mode 100644 index 76853cc7f1..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Jonathan Rudenberg (github: titanous) -Lewis Marshall (github: lmars) diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/README.md b/Godeps/_workspace/src/github.com/endophage/go-tuf/README.md deleted file mode 100644 index 7f52b06392..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/README.md +++ /dev/null @@ -1,511 +0,0 @@ -# go-tuf [![Build Status](https://travis-ci.org/flynn/go-tuf.svg?branch=master)](https://travis-ci.org/flynn/go-tuf) - -This is a Go implementation of [The Update Framework (TUF)](http://theupdateframework.com/), -a framework for securing software update systems. - -## Directory layout - -A TUF repository has the following directory layout: - -``` -. -├── keys -├── repository -│   └── targets -└── staged -    └── targets -``` - -The directories contain the following files: - -* `keys/` - signing keys (optionally encrypted) with filename pattern `ROLE.json` -* `repository/` - signed manifests -* `repository/targets/` - hashed target files -* `staged/` - either signed, unsigned or partially signed manifests -* `staged/targets/` - unhashed target files - -## CLI - -`go-tuf` provides a CLI for managing a local TUF repository. - -### Install - -``` -go get github.com/flynn/go-tuf/cmd/tuf -``` - -### Commands - -#### `tuf init [--consistent-snapshot=false]` - -Initializes a new repository. - -This is only required if the repository should not generate consistent -snapshots (i.e. by passing `--consistent-snapshot=false`). If consistent -snapshots should be generated, the repository will be implicitly -initialized to do so when generating keys. 
- -#### `tuf gen-key ` - -Prompts the user for an encryption passphrase (unless the -`--insecure-plaintext` flag is set), then generates a new signing key and -writes it to the relevant key file in the `keys` directory. It also stages -the addition of the new key to the `root` manifest. - -#### `tuf add [...]` - -Hashes files in the `staged/targets` directory at the given path(s), then -updates and stages the `targets` manifest. Specifying no paths hashes all -files in the `staged/targets` directory. - -#### `tuf remove [...]` - -Stages the removal of files with the given path(s) from the `targets` manifest -(they get removed from the filesystem when the change is committed). Specifying -no paths removes all files from the `targets` manifest. - -#### `tuf snapshot [--compression=]` - -Expects a staged, fully signed `targets` manifest and stages an appropriate -`snapshot` manifest. It optionally compresses the staged `targets` manifest. - -#### `tuf timestamp` - -Stages an appropriate `timestamp` manifest. If a `snapshot` manifest is staged, -it must be fully signed. - -#### `tuf sign ROLE` - -Signs the given role's staged manifest with all keys present in the `keys` -directory for that role. - -#### `tuf commit` - -Verifies that all staged changes contain the correct information and are signed -to the correct threshold, then moves the staged files into the `repository` -directory. It also removes any target files which are not in the `targets` -manifest. - -#### `tuf regenerate [--consistent-snapshot=false]` - -Recreates the `targets` manifest based on the files in `repository/targets`. - -#### `tuf clean` - -Removes all staged manifests and targets. - -#### `tuf root-keys` - -Outputs a JSON serialized array of root keys to STDOUT. The resulting JSON -should be distributed to clients for performing initial updates. - -For a list of supported commands, run `tuf help` from the command line. 
- -### Examples - -The following are example workflows for managing a TUF repository with the CLI. - -The `tree` commands do not need to be run, but their output serve as an -illustration of what files should exist after performing certain commands. - -Although only two machines are referenced (i.e. the "root" and "repo" boxes), -the workflows can be trivially extended to many signing machines by copying -staged changes and signing on each machine in turn before finally committing. - -Some key IDs are truncated for illustrative purposes. - -#### Create signed root manifest - -Generate a root key on the root box: - -``` -$ tuf gen-key root -Enter root keys passphrase: -Repeat root keys passphrase: -Generated root key with ID 184b133f - -$ tree . -. -├── keys -│   └── root.json -├── repository -└── staged - ├── root.json - └── targets -``` - -Copy `staged/root.json` from the root box to the repo box and generate targets, -snapshot and timestamp keys: - -``` -$ tree . -. -├── keys -├── repository -└── staged - ├── root.json - └── targets - -$ tuf gen-key targets -Enter targets keys passphrase: -Repeat targets keys passphrase: -Generated targets key with ID 8cf4810c - -$ tuf gen-key snapshot -Enter snapshot keys passphrase: -Repeat snapshot keys passphrase: -Generated snapshot key with ID 3e070e53 - -$ tuf gen-key timestamp -Enter timestamp keys passphrase: -Repeat timestamp keys passphrase: -Generated timestamp key with ID a3768063 - -$ tree . -. -├── keys -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── repository -└── staged - ├── root.json - └── targets -``` - -Copy `staged/root.json` from the repo box back to the root box and sign it: - -``` -$ tree . -. -├── keys -│   ├── root.json -├── repository -└── staged - ├── root.json - └── targets - -$ tuf sign root.json -Enter root keys passphrase: -``` - -The staged `root.json` can now be copied back to the repo box ready to be -committed alongside other manifests. 
- -#### Add a target file - -Assuming a staged, signed `root` manifest and the file to add exists at -`staged/targets/foo/bar/baz.txt`: - -``` -$ tree . -. -├── keys -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── repository -└── staged -    ├── root.json - └── targets - └── foo - └── bar - └── baz.txt - -$ tuf add foo/bar/baz.txt -Enter targets keys passphrase: - -$ tree . -. -├── keys -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── repository -└── staged -    ├── root.json - ├── targets - │   └── foo - │   └── bar - │   └── baz.txt - └── targets.json - -$ tuf snapshot -Enter snapshot keys passphrase: - -$ tuf timestamp -Enter timestamp keys passphrase: - -$ tree . -. -├── keys -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── repository -└── staged -    ├── root.json -    ├── snapshot.json - ├── targets - │   └── foo - │   └── bar - │   └── baz.txt -    ├── targets.json -    └── timestamp.json - -$ tuf commit - -$ tree . -. -├── keys -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── repository -│   ├── root.json -│   ├── snapshot.json -│   ├── targets -│   │   └── foo -│   │   └── bar -│   │   └── baz.txt -│   ├── targets.json -│   └── timestamp.json -└── staged -``` - -#### Remove a target file - -Assuming the file to remove is at `repository/targets/foo/bar/baz.txt`: - -``` -$ tree . -. -├── keys -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── repository -│   ├── root.json -│   ├── snapshot.json -│   ├── targets -│   │   └── foo -│   │   └── bar -│   │   └── baz.txt -│   ├── targets.json -│   └── timestamp.json -└── staged - -$ tuf remove foo/bar/baz.txt -Enter targets keys passphrase: - -$ tree . -. 
-├── keys -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── repository -│   ├── root.json -│   ├── snapshot.json -│   ├── targets -│   │   └── foo -│   │   └── bar -│   │   └── baz.txt -│   ├── targets.json -│   └── timestamp.json -└── staged - └── targets.json - -$ tuf snapshot -Enter snapshot keys passphrase: - -$ tuf timestamp -Enter timestamp keys passphrase: - -$ tree . -. -├── keys -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── repository -│   ├── root.json -│   ├── snapshot.json -│   ├── targets -│   │   └── foo -│   │   └── bar -│   │   └── baz.txt -│   ├── targets.json -│   └── timestamp.json -└── staged -    ├── snapshot.json -    ├── targets.json -    └── timestamp.json - -$ tuf commit - -$ tree . -. -├── keys -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── repository -│   ├── root.json -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -└── staged -``` - -#### Regenerate manifests based on targets tree - -``` -$ tree . -. -├── keys -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── repository -│   ├── root.json -│   ├── snapshot.json -│   ├── targets -│   │   └── foo -│   │   └── bar -│   │   └── baz.txt -│   ├── targets.json -│   └── timestamp.json -└── staged - -$ tuf regenerate -Enter targets keys passphrase: - -$ tree . -. -├── keys -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── repository -│   ├── root.json -│   ├── snapshot.json -│   ├── targets -│   │   └── foo -│   │   └── bar -│   │   └── baz.txt -│   ├── targets.json -│   └── timestamp.json -└── staged - └── targets.json - -$ tuf snapshot -Enter snapshot keys passphrase: - -$ tuf timestamp -Enter timestamp keys passphrase: - -$ tree . -. 
-├── keys -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── repository -│   ├── root.json -│   ├── snapshot.json -│   ├── targets -│   │   └── foo -│   │   └── bar -│   │   └── baz.txt -│   ├── targets.json -│   └── timestamp.json -└── staged -    ├── snapshot.json -    ├── targets.json -    └── timestamp.json - -$ tuf commit - -$ tree . -. -├── keys -│   ├── snapshot.json -│   ├── targets.json -│   └── timestamp.json -├── repository -│   ├── root.json -│   ├── snapshot.json -│   ├── targets -│   │   └── foo -│   │   └── bar -│   │   └── baz.txt -│   ├── targets.json -│   └── timestamp.json -└── staged -``` - -#### Update timestamp.json - -``` -$ tree . -. -├── keys -│   └── timestamp.json -├── repository -│   ├── root.json -│   ├── snapshot.json -│   ├── targets -│   │   └── foo -│   │   └── bar -│   │   └── baz.txt -│   ├── targets.json -│   └── timestamp.json -└── staged - -$ tuf timestamp -Enter timestamp keys passphrase: - -$ tree . -. -├── keys -│   └── timestamp.json -├── repository -│   ├── root.json -│   ├── snapshot.json -│   ├── targets -│   │   └── foo -│   │   └── bar -│   │   └── baz.txt -│   ├── targets.json -│   └── timestamp.json -└── staged - └── timestamp.json - -$ tuf commit - -$ tree . -. -├── keys -│   └── timestamp.json -├── repository -│   ├── root.json -│   ├── snapshot.json -│   ├── targets -│   │   └── foo -│   │   └── bar -│   │   └── baz.txt -│   ├── targets.json -│   └── timestamp.json -└── staged -``` - -#### Modify key thresholds - -TODO - -## Client - -For the client package, see https://godoc.org/github.com/flynn/go-tuf/client. - -For the client CLI, see https://github.com/flynn/go-tuf/tree/master/cmd/tuf-client. 
diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/client.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/client.go deleted file mode 100644 index d916728ce6..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/client.go +++ /dev/null @@ -1,627 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/keys" - "github.com/endophage/go-tuf/signed" - "github.com/endophage/go-tuf/util" -) - -// LocalStore is local storage for downloaded top-level metadata. -type LocalStore interface { - // GetMeta returns top-level metadata from local storage. The keys are - // in the form `ROLE.json`, with ROLE being a valid top-level role. - GetMeta() (map[string]json.RawMessage, error) - - // SetMeta persists the given top-level metadata in local storage, the - // name taking the same format as the keys returned by GetMeta. - SetMeta(name string, meta json.RawMessage) error -} - -// RemoteStore downloads top-level metadata and target files from a remote -// repository. -type RemoteStore interface { - // GetMeta downloads the given metadata from remote storage. - // - // `name` is the filename of the metadata (e.g. "root.json") - // - // `err` is ErrNotFound if the given file does not exist. - // - // `size` is the size of the stream, -1 indicating an unknown length. - GetMeta(name string) (stream io.ReadCloser, size int64, err error) - - // GetTarget downloads the given target file from remote storage. - // - // `path` is the path of the file relative to the root of the remote - // targets directory (e.g. "/path/to/file.txt"). - // - // `err` is ErrNotFound if the given file does not exist. - // - // `size` is the size of the stream, -1 indicating an unknown length. 
- GetTarget(path string) (stream io.ReadCloser, size int64, err error) -} - -// Client provides methods for fetching updates from a remote repository and -// downloading remote target files. -type Client struct { - local LocalStore - remote RemoteStore - - // The following four fields represent the versions of metatdata either - // from local storage or from recently downloaded metadata - rootVer int - targetsVer int - snapshotVer int - timestampVer int - - // targets is the list of available targets, either from local storage - // or from recently downloaded targets metadata - targets data.Files - - // localMeta is the raw metadata from local storage and is used to - // check whether remote metadata is present locally - localMeta map[string]json.RawMessage - - // db is a key DB used for verifying metadata - db *keys.DB - - // consistentSnapshot indicates whether the remote storage is using - // consistent snapshots (as specified in root.json) - consistentSnapshot bool -} - -func NewClient(local LocalStore, remote RemoteStore) *Client { - return &Client{ - local: local, - remote: remote, - } -} - -// Init initializes a local repository. -// -// The latest root.json is fetched from remote storage, verified using rootKeys -// and threshold, and then saved in local storage. It is expected that rootKeys -// were securely distributed with the software being updated. 
-func (c *Client) Init(rootKeys []*data.Key, threshold int) error { - if len(rootKeys) < threshold { - return ErrInsufficientKeys - } - rootJSON, err := c.downloadMetaUnsafe("root.json") - if err != nil { - return err - } - - c.db = keys.NewDB() - rootKeyIDs := make([]string, len(rootKeys)) - for i, key := range rootKeys { - id := key.ID() - pk := keys.PublicKey{*key, id} - rootKeyIDs[i] = id - if err := c.db.AddKey(&pk); err != nil { - return err - } - } - role := &data.Role{Threshold: threshold, KeyIDs: rootKeyIDs} - if err := c.db.AddRole("root", role); err != nil { - fmt.Println("Error adding role:", err.Error()) - return err - } - - if err := c.decodeRoot(rootJSON); err != nil { - return err - } - - return c.local.SetMeta("root.json", rootJSON) -} - -// Update downloads and verifies remote metadata and returns updated targets. -// -// It performs the update part of "The client application" workflow from -// section 5.1 of the TUF spec: -// -// https://github.com/theupdateframework/tuf/blob/v0.9.9/docs/tuf-spec.txt#L714 -func (c *Client) Update() (data.Files, error) { - return c.update(false) -} - -func (c *Client) update(latestRoot bool) (data.Files, error) { - // Always start the update using local metadata - fmt.Println("tuf client: update()") - if err := c.getLocalMeta(); err != nil { - fmt.Println("tuf client: error on getLocalMeta", err.Error()) - if !latestRoot { - fmt.Println("tuf client: latestRoot is false, calling updateWithlatestRoot()") - return c.updateWithLatestRoot(nil) - } else if latestRoot && err == signed.ErrRoleThreshold { - fmt.Println("tuf client: have latest root and err is signing threshold") - // Root was updated with new keys, so our local metadata is no - // longer validating. Read only the versions from the local metadata - // and re-download everything. 
- if err := c.getRootAndLocalVersionsUnsafe(); err != nil { - fmt.Println("tuf client: err on getRootAndLocalVersionUnsafe") - return nil, err - } - } else { - fmt.Println("tuf client: got other err: ", err.Error()) - return nil, err - } - } - - // Get timestamp.json, extract snapshot.json file meta and save the - // timestamp.json locally - fmt.Println("tuf client: downloading timestamp") - timestampJSON, err := c.downloadMetaUnsafe("timestamp.json") - if err != nil { - return nil, err - } - snapshotMeta, err := c.decodeTimestamp(timestampJSON) - if err != nil { - // ErrRoleThreshold could indicate timestamp keys have been - // revoked, so retry with the latest root.json - if isDecodeFailedWithErr(err, signed.ErrRoleThreshold) && !latestRoot { - return c.updateWithLatestRoot(nil) - } - return nil, err - } - if err := c.local.SetMeta("timestamp.json", timestampJSON); err != nil { - return nil, err - } - - // Return ErrLatestSnapshot if we already have the latest snapshot.json - if c.hasMeta("snapshot.json", snapshotMeta) { - return nil, ErrLatestSnapshot{c.snapshotVer} - } - - // Get snapshot.json, then extract root.json and targets.json file meta. - // - // The snapshot.json is only saved locally after checking root.json and - // targets.json so that it will be re-downloaded on subsequent updates - // if this update fails. 
- fmt.Println("tuf client: downloading snapshot") - snapshotJSON, err := c.downloadMeta("snapshot.json", snapshotMeta) - if err != nil { - return nil, err - } - rootMeta, targetsMeta, err := c.decodeSnapshot(snapshotJSON) - if err != nil { - // ErrRoleThreshold could indicate snapshot keys have been - // revoked, so retry with the latest root.json - if isDecodeFailedWithErr(err, signed.ErrRoleThreshold) && !latestRoot { - return c.updateWithLatestRoot(nil) - } - return nil, err - } - - // If we don't have the root.json, download it, save it in local - // storage and restart the update - if !c.hasMeta("root.json", rootMeta) { - return c.updateWithLatestRoot(&rootMeta) - } - - // If we don't have the targets.json, download it, determine updated - // targets and save targets.json in local storage - var updatedTargets data.Files - if !c.hasMeta("targets.json", targetsMeta) { - fmt.Println("tuf client: downloading targets") - targetsJSON, err := c.downloadMeta("targets.json", targetsMeta) - if err != nil { - return nil, err - } - updatedTargets, err = c.decodeTargets(targetsJSON) - if err != nil { - return nil, err - } - if err := c.local.SetMeta("targets.json", targetsJSON); err != nil { - return nil, err - } - } - - // Save the snapshot.json now it has been processed successfully - if err := c.local.SetMeta("snapshot.json", snapshotJSON); err != nil { - return nil, err - } - - return updatedTargets, nil -} - -func (c *Client) updateWithLatestRoot(m *data.FileMeta) (data.Files, error) { - var rootJSON json.RawMessage - var err error - if m == nil { - rootJSON, err = c.downloadMetaUnsafe("root.json") - } else { - rootJSON, err = c.downloadMeta("root.json", *m) - } - fmt.Println("Root JSON") - fmt.Println(string(rootJSON)) - fmt.Println("End root JSON") - if err != nil { - return nil, err - } - if err := c.decodeRoot(rootJSON); err != nil { - return nil, err - } - if err := c.local.SetMeta("root.json", rootJSON); err != nil { - return nil, err - } - return c.update(true) 
-} - -// getLocalMeta decodes and verifies metadata from local storage. -// -// The verification of local files is purely for consistency, if an attacker -// has compromised the local storage, there is no guarantee it can be trusted. -func (c *Client) getLocalMeta() error { - meta, err := c.local.GetMeta() - if err != nil { - return err - } - - if rootJSON, ok := meta["root.json"]; ok { - // unmarshal root.json without verifying as we need the root - // keys first - s := &data.Signed{} - if err := json.Unmarshal(rootJSON, s); err != nil { - return err - } - root := &data.Root{} - if err := json.Unmarshal(s.Signed, root); err != nil { - return err - } - db := keys.NewDB() - for _, k := range root.Keys { - pk := keys.PublicKey{*k, k.ID()} - if err := db.AddKey(&pk); err != nil { - return err - } - } - for name, role := range root.Roles { - fmt.Println("Adding Role:", name) - if err := db.AddRole(name, role); err != nil { - return err - } - } - if err := signed.Verify(s, "root", 0, db); err != nil { - return err - } - c.consistentSnapshot = root.ConsistentSnapshot - c.db = db - } else { - return ErrNoRootKeys - } - - if snapshotJSON, ok := meta["snapshot.json"]; ok { - snapshot := &data.Snapshot{} - if err := signed.UnmarshalTrusted(snapshotJSON, snapshot, "snapshot", c.db); err != nil { - return err - } - c.snapshotVer = snapshot.Version - } - - if targetsJSON, ok := meta["targets.json"]; ok { - targets := &data.Targets{} - if err := signed.UnmarshalTrusted(targetsJSON, targets, "targets", c.db); err != nil { - return err - } - c.targetsVer = targets.Version - c.targets = targets.Targets - } - - if timestampJSON, ok := meta["timestamp.json"]; ok { - timestamp := &data.Timestamp{} - if err := signed.UnmarshalTrusted(timestampJSON, timestamp, "timestamp", c.db); err != nil { - return err - } - c.timestampVer = timestamp.Version - } - - c.localMeta = meta - return nil -} - -// maxMetaSize is the maximum number of bytes that will be downloaded when -// getting remote 
metadata without knowing it's length. -const maxMetaSize = 50 * 1024 - -// downloadMetaUnsafe downloads top-level metadata from remote storage without -// verifying it's length and hashes (used for example to download timestamp.json -// which has unknown size). It will download at most maxMetaSize bytes. -func (c *Client) downloadMetaUnsafe(name string) ([]byte, error) { - r, size, err := c.remote.GetMeta(name) - if err != nil { - if IsNotFound(err) { - return nil, ErrMissingRemoteMetadata{name} - } - return nil, ErrDownloadFailed{name, err} - } - defer r.Close() - - // return ErrMetaTooLarge if the reported size is greater than maxMetaSize - if size > maxMetaSize { - return nil, ErrMetaTooLarge{name, size} - } - - // although the size has been checked above, use a LimitReader in case - // the reported size is inaccurate, or size is -1 which indicates an - // unknown length - return ioutil.ReadAll(io.LimitReader(r, maxMetaSize)) -} - -// getRootAndLocalVersionsUnsafe decodes the versions stored in the local -// metadata without verifying signatures to protect against downgrade attacks -// when the root is replaced and contains new keys. It also sets the local meta -// cache to only contain the local root metadata. 
-func (c *Client) getRootAndLocalVersionsUnsafe() error { - type versionData struct { - Signed struct { - Version int - } - } - - meta, err := c.local.GetMeta() - if err != nil { - return err - } - - getVersion := func(name string) (int, error) { - m, ok := meta[name] - if !ok { - return 0, nil - } - var data versionData - if err := json.Unmarshal(m, &data); err != nil { - return 0, err - } - return data.Signed.Version, nil - } - - c.timestampVer, err = getVersion("timestamp.json") - if err != nil { - return err - } - c.snapshotVer, err = getVersion("snapshot.json") - if err != nil { - return err - } - c.targetsVer, err = getVersion("targets.json") - if err != nil { - return err - } - - root, ok := meta["root.json"] - if !ok { - return errors.New("tuf: missing local root after downloading, this should not be possible") - } - c.localMeta = map[string]json.RawMessage{"root.json": root} - - return nil -} - -// remoteGetFunc is the type of function the download method uses to download -// remote files -type remoteGetFunc func(string) (io.ReadCloser, int64, error) - -// download downloads the given file from remote storage using the get function, -// adding hashes to the path if consistent snapshots are in use -func (c *Client) download(file string, get remoteGetFunc, hashes data.Hashes) (io.ReadCloser, int64, error) { - if c.consistentSnapshot { - // try each hashed path in turn, and either return the contents, - // try the next one if a 404 is returned, or return an error - for _, path := range util.HashedPaths(file, hashes) { - r, size, err := get(path) - if err != nil { - if IsNotFound(err) { - continue - } - return nil, 0, err - } - return r, size, nil - } - return nil, 0, ErrNotFound{file} - } else { - return get(file) - } -} - -// downloadMeta downloads top-level metadata from remote storage and verifies -// it using the given file metadata. 
-func (c *Client) downloadMeta(name string, m data.FileMeta) ([]byte, error) { - r, size, err := c.download(name, c.remote.GetMeta, m.Hashes) - if err != nil { - if IsNotFound(err) { - return nil, ErrMissingRemoteMetadata{name} - } - return nil, err - } - defer r.Close() - - // return ErrWrongSize if the reported size is known and incorrect - if size >= 0 && size != m.Length { - return nil, ErrWrongSize{name, size, m.Length} - } - - // wrap the data in a LimitReader so we download at most m.Length bytes - stream := io.LimitReader(r, m.Length) - - // read the data, simultaneously writing it to buf and generating metadata - var buf bytes.Buffer - meta, err := util.GenerateFileMeta(io.TeeReader(stream, &buf), m.HashAlgorithms()...) - if err != nil { - return nil, err - } - if err := util.FileMetaEqual(meta, m); err != nil { - return nil, ErrDownloadFailed{name, err} - } - return buf.Bytes(), nil -} - -// decodeRoot decodes and verifies root metadata. -func (c *Client) decodeRoot(b json.RawMessage) error { - root := &data.Root{} - fmt.Println("tuf client: db:", c.db) - if err := signed.Unmarshal(b, root, "root", c.rootVer, c.db); err != nil { - return ErrDecodeFailed{"root.json", err} - } - c.rootVer = root.Version - c.consistentSnapshot = root.ConsistentSnapshot - return nil -} - -// decodeSnapshot decodes and verifies snapshot metadata, and returns the new -// root and targets file meta. -func (c *Client) decodeSnapshot(b json.RawMessage) (data.FileMeta, data.FileMeta, error) { - snapshot := &data.Snapshot{} - if err := signed.Unmarshal(b, snapshot, "snapshot", c.snapshotVer, c.db); err != nil { - return data.FileMeta{}, data.FileMeta{}, ErrDecodeFailed{"snapshot.json", err} - } - c.snapshotVer = snapshot.Version - return snapshot.Meta["root.json"], snapshot.Meta["targets.json"], nil -} - -// decodeTargets decodes and verifies targets metadata, sets c.targets and -// returns updated targets. 
-func (c *Client) decodeTargets(b json.RawMessage) (data.Files, error) { - targets := &data.Targets{} - if err := signed.Unmarshal(b, targets, "targets", c.targetsVer, c.db); err != nil { - return nil, ErrDecodeFailed{"targets.json", err} - } - updatedTargets := make(data.Files) - for path, meta := range targets.Targets { - if local, ok := c.targets[path]; ok { - if err := util.FileMetaEqual(local, meta); err == nil { - continue - } - } - updatedTargets[path] = meta - } - c.targetsVer = targets.Version - c.targets = targets.Targets - return updatedTargets, nil -} - -// decodeTimestamp decodes and verifies timestamp metadata, and returns the -// new snapshot file meta. -func (c *Client) decodeTimestamp(b json.RawMessage) (data.FileMeta, error) { - timestamp := &data.Timestamp{} - if err := signed.Unmarshal(b, timestamp, "timestamp", c.timestampVer, c.db); err != nil { - return data.FileMeta{}, ErrDecodeFailed{"timestamp.json", err} - } - c.timestampVer = timestamp.Version - return timestamp.Meta["snapshot.json"], nil -} - -// hasMeta checks whether local metadata has the given file meta -func (c *Client) hasMeta(name string, m data.FileMeta) bool { - b, ok := c.localMeta[name] - if !ok { - return false - } - meta, err := util.GenerateFileMeta(bytes.NewReader(b), m.HashAlgorithms()...) - if err != nil { - return false - } - err = util.FileMetaEqual(meta, m) - return err == nil -} - -type Destination interface { - io.Writer - Delete() error -} - -// Download downloads the given target file from remote storage into dest. 
-// -// dest will be deleted and an error returned in the following situations: -// -// * The target does not exist in the local targets.json -// * The target does not exist in remote storage -// * Metadata cannot be generated for the downloaded data -// * Generated metadata does not match local metadata for the given file -func (c *Client) Download(name string, dest Destination) (err error) { - // delete dest if there is an error - defer func() { - if err != nil { - dest.Delete() - } - }() - - // populate c.targets from local storage if not set - if c.targets == nil { - if err := c.getLocalMeta(); err != nil { - return err - } - } - - // return ErrUnknownTarget if the file is not in the local targets.json - normalizedName := util.NormalizeTarget(name) - localMeta, ok := c.targets[normalizedName] - if !ok { - return ErrUnknownTarget{name} - } - - // get the data from remote storage - r, size, err := c.download(normalizedName, c.remote.GetTarget, localMeta.Hashes) - if err != nil { - return err - } - defer r.Close() - - return c.Verify(name, r, size, dest) -} - -func (c *Client) Verify(name string, r io.Reader, size int64, dest Destination) error { - normalizedName := util.NormalizeTarget(name) - if c.targets == nil { - return ErrUnknownTarget{name} - } - localMeta, ok := c.targets[normalizedName] - if !ok { - return ErrUnknownTarget{name} - } - - // return ErrWrongSize if the reported size is known and incorrect - if size >= 0 && size != localMeta.Length { - return ErrWrongSize{name, size, localMeta.Length} - } - - // wrap the data in a LimitReader so we download at most localMeta.Length bytes - stream := io.LimitReader(r, localMeta.Length) - - // read the data, simultaneously writing it to dest and generating metadata - actual, err := util.GenerateFileMeta(io.TeeReader(stream, dest), localMeta.HashAlgorithms()...) 
- for algo, hash := range actual.Hashes { - fmt.Println("Actual hash", algo, hash.String()) - } - if err != nil { - return ErrDownloadFailed{name, err} - } - - // check the data has the correct length and hashes - if err := util.FileMetaEqual(actual, localMeta); err != nil { - if err == util.ErrWrongLength { - return ErrWrongSize{name, actual.Length, localMeta.Length} - } - return ErrDownloadFailed{name, err} - } - - return nil - -} - -// Targets returns the complete list of available targets. -func (c *Client) Targets() (data.Files, error) { - // populate c.targets from local storage if not set - if c.targets == nil { - if err := c.getLocalMeta(); err != nil { - return nil, err - } - } - return c.targets, nil -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/client_test.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/client_test.go deleted file mode 100644 index 5fa5b5ac00..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/client_test.go +++ /dev/null @@ -1,838 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "testing" - "time" - - "github.com/endophage/go-tuf" - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/keys" - "github.com/endophage/go-tuf/signed" - "github.com/endophage/go-tuf/store" - "github.com/endophage/go-tuf/util" - . "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } - -type ClientSuite struct { - store store.LocalStore - repo *tuf.Repo - local LocalStore - remote *fakeRemoteStore - expiredTime time.Time - keyIDs map[string]string -} - -var _ = Suite(&ClientSuite{}) - -func newFakeRemoteStore() *fakeRemoteStore { - return &fakeRemoteStore{ - meta: make(map[string]*fakeFile), - targets: make(map[string]*fakeFile), - } -} - -type fakeRemoteStore struct { - meta map[string]*fakeFile - targets map[string]*fakeFile -} - -func (f *fakeRemoteStore) GetMeta(name string) (io.ReadCloser, int64, error) { - return f.get(name, f.meta) -} - -func (f *fakeRemoteStore) GetTarget(path string) (io.ReadCloser, int64, error) { - return f.get(path, f.targets) -} - -func (f *fakeRemoteStore) get(name string, store map[string]*fakeFile) (io.ReadCloser, int64, error) { - file, ok := store[name] - if !ok { - return nil, 0, ErrNotFound{name} - } - return file, file.size, nil -} - -func newFakeFile(b []byte) *fakeFile { - return &fakeFile{buf: bytes.NewReader(b), size: int64(len(b))} -} - -type fakeFile struct { - buf *bytes.Reader - bytesRead int - size int64 -} - -func (f *fakeFile) Read(p []byte) (int, error) { - n, err := f.buf.Read(p) - f.bytesRead += n - return n, err -} - -func (f *fakeFile) Close() error { - f.buf.Seek(0, os.SEEK_SET) - return nil -} - -var targetFiles = map[string][]byte{ - "/foo.txt": []byte("foo"), - "/bar.txt": []byte("bar"), - "/baz.txt": []byte("baz"), -} - -func (s *ClientSuite) SetUpTest(c *C) { - s.store = store.MemoryStore(nil, targetFiles) - - // create a valid repo containing foo.txt - var err error - signer := signed.NewEd25519() - s.repo, err = tuf.NewRepo(signer, s.store, "sha256") - c.Assert(err, IsNil) - // don't use consistent snapshots to make testing easier (consistent - // snapshots are tested explicitly elsewhere) - c.Assert(s.repo.Init(false), IsNil) - s.keyIDs = map[string]string{ - "root": s.genKey(c, "root"), - "targets": s.genKey(c, "targets"), - "snapshot": 
s.genKey(c, "snapshot"), - "timestamp": s.genKey(c, "timestamp"), - } - c.Assert(s.repo.AddTarget("foo.txt", nil), IsNil) - c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil) - c.Assert(s.repo.Timestamp(), IsNil) - - // create a remote store containing valid repo files - s.remote = newFakeRemoteStore() - s.syncRemote(c) - for path, data := range targetFiles { - s.remote.targets[path] = newFakeFile(data) - } - - s.expiredTime = time.Now().Add(time.Hour) -} - -func (s *ClientSuite) genKey(c *C, role string) string { - id, err := s.repo.GenKey(role) - c.Assert(err, IsNil) - return id -} - -func (s *ClientSuite) genKeyExpired(c *C, role string) string { - id, err := s.repo.GenKeyWithExpires(role, s.expiredTime) - c.Assert(err, IsNil) - return id -} - -// withMetaExpired sets signed.IsExpired throughout the invocation of f so that -// any metadata marked to expire at s.expiredTime will be expired (this avoids -// the need to sleep in the tests). -func (s *ClientSuite) withMetaExpired(f func()) { - e := signed.IsExpired - defer func() { signed.IsExpired = e }() - signed.IsExpired = func(t time.Time) bool { - return t.Unix() == s.expiredTime.Round(time.Second).Unix() - } - f() -} - -func (s *ClientSuite) syncLocal(c *C) { - meta, err := s.store.GetMeta() - c.Assert(err, IsNil) - for k, v := range meta { - c.Assert(s.local.SetMeta(k, v), IsNil) - } -} - -func (s *ClientSuite) syncRemote(c *C) { - meta, err := s.store.GetMeta() - c.Assert(err, IsNil) - for name, data := range meta { - s.remote.meta[name] = newFakeFile(data) - } -} - -func (s *ClientSuite) addRemoteTarget(c *C, name string) { - c.Assert(s.repo.AddTarget(name, nil), IsNil) - c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) -} - -func (s *ClientSuite) rootKeys(c *C) []*data.Key { - rootKeys, err := s.repo.RootKeys() - c.Assert(err, IsNil) - c.Assert(rootKeys, HasLen, 1) - return rootKeys -} - -func (s *ClientSuite) newClient(c *C) *Client 
{ - s.local = MemoryLocalStore() - client := NewClient(s.local, s.remote) - c.Assert(client.Init(s.rootKeys(c), 1), IsNil) - return client -} - -func (s *ClientSuite) updatedClient(c *C) *Client { - client := s.newClient(c) - _, err := client.Update() - c.Assert(err, IsNil) - return client -} - -func assertFiles(c *C, files data.Files, names []string) { - c.Assert(files, HasLen, len(names)) - for _, name := range names { - target, ok := targetFiles[name] - if !ok { - c.Fatalf("unknown target %s", name) - } - file, ok := files[name] - if !ok { - c.Fatalf("expected files to contain %s", name) - } - meta, err := util.GenerateFileMeta(bytes.NewReader(target), file.HashAlgorithms()...) - c.Assert(err, IsNil) - c.Assert(util.FileMetaEqual(file, meta), IsNil) - } -} - -func assertWrongHash(c *C, err error) { - // just test the type of err rather using DeepEquals as it contains - // hashes we don't necessarily need to check. - e, ok := err.(ErrDownloadFailed) - if !ok { - c.Fatalf("expected err to have type ErrDownloadFailed, got %T", err) - } - if _, ok := e.Err.(util.ErrWrongHash); !ok { - c.Fatalf("expected err.Err to have type util.ErrWrongHash, got %T", err) - } -} - -func (s *ClientSuite) assertErrExpired(c *C, err error, file string) { - decodeErr, ok := err.(ErrDecodeFailed) - if !ok { - c.Fatalf("expected err to have type ErrDecodeFailed, got %T", err) - } - c.Assert(decodeErr.File, Equals, file) - expiredErr, ok := decodeErr.Err.(signed.ErrExpired) - if !ok { - c.Fatalf("expected err.Err to have type signed.ErrExpired, got %T", err) - } - c.Assert(expiredErr.Expired.Unix(), Equals, s.expiredTime.Round(time.Second).Unix()) -} - -func (s *ClientSuite) TestInitRootTooLarge(c *C) { - client := NewClient(MemoryLocalStore(), s.remote) - s.remote.meta["root.json"] = newFakeFile(make([]byte, maxMetaSize+1)) - c.Assert(client.Init(s.rootKeys(c), 0), Equals, ErrMetaTooLarge{"root.json", maxMetaSize + 1}) -} - -func (s *ClientSuite) TestInitRootExpired(c *C) { - 
s.genKeyExpired(c, "targets") - s.syncRemote(c) - client := NewClient(MemoryLocalStore(), s.remote) - s.withMetaExpired(func() { - s.assertErrExpired(c, client.Init(s.rootKeys(c), 1), "root.json") - }) -} - -func (s *ClientSuite) TestInit(c *C) { - client := NewClient(MemoryLocalStore(), s.remote) - - // check Init() returns keys.ErrInvalidThreshold with an invalid threshold - c.Assert(client.Init(s.rootKeys(c), 0), Equals, keys.ErrInvalidThreshold) - - // check Init() returns signed.ErrRoleThreshold when not enough keys - c.Assert(client.Init(s.rootKeys(c), 2), Equals, ErrInsufficientKeys) - - // check Update() returns ErrNoRootKeys when uninitialized - _, err := client.Update() - c.Assert(err, Equals, ErrNoRootKeys) - - // check Update() does not return ErrNoRootKeys after initialization - c.Assert(client.Init(s.rootKeys(c), 1), IsNil) - _, err = client.Update() - c.Assert(err, Not(Equals), ErrNoRootKeys) -} - -func (s *ClientSuite) TestFirstUpdate(c *C) { - files, err := s.newClient(c).Update() - c.Assert(err, IsNil) - c.Assert(files, HasLen, 1) - assertFiles(c, files, []string{"/foo.txt"}) -} - -func (s *ClientSuite) TestMissingRemoteMetadata(c *C) { - client := s.newClient(c) - - delete(s.remote.meta, "targets.json") - _, err := client.Update() - c.Assert(err, Equals, ErrMissingRemoteMetadata{"targets.json"}) - - delete(s.remote.meta, "timestamp.json") - _, err = client.Update() - c.Assert(err, Equals, ErrMissingRemoteMetadata{"timestamp.json"}) -} - -func (s *ClientSuite) TestNoChangeUpdate(c *C) { - client := s.newClient(c) - _, err := client.Update() - c.Assert(err, IsNil) - _, err = client.Update() - c.Assert(IsLatestSnapshot(err), Equals, true) -} - -func (s *ClientSuite) TestNewTimestamp(c *C) { - client := s.updatedClient(c) - version := client.timestampVer - c.Assert(version > 0, Equals, true) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) - _, err := client.Update() - c.Assert(IsLatestSnapshot(err), Equals, true) - 
c.Assert(client.timestampVer > version, Equals, true) -} - -func (s *ClientSuite) TestNewRoot(c *C) { - client := s.newClient(c) - - // replace all keys - newKeyIDs := make(map[string]string) - for role, id := range s.keyIDs { - c.Assert(s.repo.RevokeKey(role, id), IsNil) - newKeyIDs[role] = s.genKey(c, role) - } - - // update metadata - c.Assert(s.repo.Sign("targets.json"), IsNil) - c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) - - // check update gets new root version - c.Assert(client.getLocalMeta(), IsNil) - version := client.rootVer - c.Assert(version > 0, Equals, true) - _, err := client.Update() - c.Assert(err, IsNil) - c.Assert(client.rootVer > version, Equals, true) - - // check old keys are not in db - for _, id := range s.keyIDs { - c.Assert(client.db.GetKey(id), IsNil) - } - - // check new keys are in db - for name, id := range newKeyIDs { - key := client.db.GetKey(id) - c.Assert(key, NotNil) - c.Assert(key.ID, Equals, id) - role := client.db.GetRole(name) - c.Assert(role, NotNil) - c.Assert(role.KeyIDs, DeepEquals, []string{id}) - } -} - -func (s *ClientSuite) TestNewTargets(c *C) { - client := s.newClient(c) - files, err := client.Update() - c.Assert(err, IsNil) - assertFiles(c, files, []string{"/foo.txt"}) - - s.addRemoteTarget(c, "bar.txt") - s.addRemoteTarget(c, "baz.txt") - - files, err = client.Update() - c.Assert(err, IsNil) - assertFiles(c, files, []string{"/bar.txt", "/baz.txt"}) - - // Adding the same exact file should not lead to an update - s.addRemoteTarget(c, "bar.txt") - files, err = client.Update() - c.Assert(err, IsNil) - c.Assert(files, HasLen, 0) -} - -func (s *ClientSuite) TestNewTimestampKey(c *C) { - client := s.newClient(c) - - // replace key - oldID := s.keyIDs["timestamp"] - c.Assert(s.repo.RevokeKey("timestamp", oldID), IsNil) - newID := s.genKey(c, "timestamp") - - // generate new snapshot (because root has changed) and timestamp - 
c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) - - // check update gets new root and timestamp - c.Assert(client.getLocalMeta(), IsNil) - rootVer := client.rootVer - timestampVer := client.timestampVer - _, err := client.Update() - c.Assert(err, IsNil) - c.Assert(client.rootVer > rootVer, Equals, true) - c.Assert(client.timestampVer > timestampVer, Equals, true) - - // check key has been replaced in db - c.Assert(client.db.GetKey(oldID), IsNil) - key := client.db.GetKey(newID) - c.Assert(key, NotNil) - c.Assert(key.ID, Equals, newID) - role := client.db.GetRole("timestamp") - c.Assert(role, NotNil) - c.Assert(role.KeyIDs, DeepEquals, []string{newID}) -} - -func (s *ClientSuite) TestNewSnapshotKey(c *C) { - client := s.newClient(c) - - // replace key - oldID := s.keyIDs["snapshot"] - c.Assert(s.repo.RevokeKey("snapshot", oldID), IsNil) - newID := s.genKey(c, "snapshot") - - // generate new snapshot and timestamp - c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) - - // check update gets new root, snapshot and timestamp - c.Assert(client.getLocalMeta(), IsNil) - rootVer := client.rootVer - snapshotVer := client.snapshotVer - timestampVer := client.timestampVer - _, err := client.Update() - c.Assert(err, IsNil) - c.Assert(client.rootVer > rootVer, Equals, true) - c.Assert(client.snapshotVer > snapshotVer, Equals, true) - c.Assert(client.timestampVer > timestampVer, Equals, true) - - // check key has been replaced in db - c.Assert(client.db.GetKey(oldID), IsNil) - key := client.db.GetKey(newID) - c.Assert(key, NotNil) - c.Assert(key.ID, Equals, newID) - role := client.db.GetRole("snapshot") - c.Assert(role, NotNil) - c.Assert(role.KeyIDs, DeepEquals, []string{newID}) -} - -func (s *ClientSuite) TestNewTargetsKey(c *C) { - client := s.newClient(c) - - // replace key - oldID := s.keyIDs["targets"] - c.Assert(s.repo.RevokeKey("targets", 
oldID), IsNil) - newID := s.genKey(c, "targets") - - // re-sign targets and generate new snapshot and timestamp - c.Assert(s.repo.Sign("targets.json"), IsNil) - c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) - - // check update gets new metadata - c.Assert(client.getLocalMeta(), IsNil) - rootVer := client.rootVer - targetsVer := client.targetsVer - snapshotVer := client.snapshotVer - timestampVer := client.timestampVer - _, err := client.Update() - c.Assert(err, IsNil) - c.Assert(client.rootVer > rootVer, Equals, true) - c.Assert(client.targetsVer > targetsVer, Equals, true) - c.Assert(client.snapshotVer > snapshotVer, Equals, true) - c.Assert(client.timestampVer > timestampVer, Equals, true) - - // check key has been replaced in db - c.Assert(client.db.GetKey(oldID), IsNil) - key := client.db.GetKey(newID) - c.Assert(key, NotNil) - c.Assert(key.ID, Equals, newID) - role := client.db.GetRole("targets") - c.Assert(role, NotNil) - c.Assert(role.KeyIDs, DeepEquals, []string{newID}) -} - -func (s *ClientSuite) TestLocalExpired(c *C) { - client := s.newClient(c) - - // locally expired timestamp.json is ok - version := client.timestampVer - c.Assert(s.repo.TimestampWithExpires(s.expiredTime), IsNil) - s.syncLocal(c) - s.withMetaExpired(func() { - c.Assert(client.getLocalMeta(), IsNil) - c.Assert(client.timestampVer > version, Equals, true) - }) - - // locally expired snapshot.json is ok - version = client.snapshotVer - c.Assert(s.repo.SnapshotWithExpires(tuf.CompressionTypeNone, s.expiredTime), IsNil) - s.syncLocal(c) - s.withMetaExpired(func() { - c.Assert(client.getLocalMeta(), IsNil) - c.Assert(client.snapshotVer > version, Equals, true) - }) - - // locally expired targets.json is ok - version = client.targetsVer - c.Assert(s.repo.AddTargetWithExpires("foo.txt", nil, s.expiredTime), IsNil) - s.syncLocal(c) - s.withMetaExpired(func() { - c.Assert(client.getLocalMeta(), IsNil) - 
c.Assert(client.targetsVer > version, Equals, true) - }) - - // locally expired root.json is not ok - version = client.rootVer - s.genKeyExpired(c, "targets") - s.syncLocal(c) - s.withMetaExpired(func() { - err := client.getLocalMeta() - if _, ok := err.(signed.ErrExpired); !ok { - c.Fatalf("expected err to have type signed.ErrExpired, got %T", err) - } - c.Assert(client.rootVer, Equals, version) - }) -} - -func (s *ClientSuite) TestTimestampTooLarge(c *C) { - s.remote.meta["timestamp.json"] = newFakeFile(make([]byte, maxMetaSize+1)) - _, err := s.newClient(c).Update() - c.Assert(err, Equals, ErrMetaTooLarge{"timestamp.json", maxMetaSize + 1}) -} - -func (s *ClientSuite) TestUpdateLocalRootExpired(c *C) { - client := s.newClient(c) - - // add soon to expire root.json to local storage - s.genKeyExpired(c, "timestamp") - c.Assert(s.repo.Timestamp(), IsNil) - s.syncLocal(c) - - // add far expiring root.json to remote storage - s.genKey(c, "timestamp") - s.addRemoteTarget(c, "bar.txt") - s.syncRemote(c) - - // check the update downloads the non expired remote root.json and - // restarts itself, thus successfully updating - s.withMetaExpired(func() { - err := client.getLocalMeta() - if _, ok := err.(signed.ErrExpired); !ok { - c.Fatalf("expected err to have type signed.ErrExpired, got %T", err) - } - _, err = client.Update() - c.Assert(err, IsNil) - }) -} - -func (s *ClientSuite) TestUpdateRemoteExpired(c *C) { - client := s.updatedClient(c) - - // expired remote metadata should always be rejected - c.Assert(s.repo.TimestampWithExpires(s.expiredTime), IsNil) - s.syncRemote(c) - s.withMetaExpired(func() { - _, err := client.Update() - s.assertErrExpired(c, err, "timestamp.json") - }) - - c.Assert(s.repo.SnapshotWithExpires(tuf.CompressionTypeNone, s.expiredTime), IsNil) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) - s.withMetaExpired(func() { - _, err := client.Update() - s.assertErrExpired(c, err, "snapshot.json") - }) - - 
c.Assert(s.repo.AddTargetWithExpires("bar.txt", nil, s.expiredTime), IsNil) - c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) - s.withMetaExpired(func() { - _, err := client.Update() - s.assertErrExpired(c, err, "targets.json") - }) - - s.genKeyExpired(c, "timestamp") - c.Assert(s.repo.RemoveTarget("bar.txt"), IsNil) - c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) - s.withMetaExpired(func() { - _, err := client.Update() - s.assertErrExpired(c, err, "root.json") - }) -} - -func (s *ClientSuite) TestUpdateLocalRootExpiredKeyChange(c *C) { - client := s.newClient(c) - - // add soon to expire root.json to local storage - s.genKeyExpired(c, "timestamp") - c.Assert(s.repo.Timestamp(), IsNil) - s.syncLocal(c) - - // replace all keys - newKeyIDs := make(map[string]string) - for role, id := range s.keyIDs { - c.Assert(s.repo.RevokeKey(role, id), IsNil) - newKeyIDs[role] = s.genKey(c, role) - } - - // update metadata - c.Assert(s.repo.Sign("targets.json"), IsNil) - c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) - - // check the update downloads the non expired remote root.json and - // restarts itself, thus successfully updating - s.withMetaExpired(func() { - err := client.getLocalMeta() - c.Assert(err, FitsTypeOf, signed.ErrExpired{}) - - _, err = client.Update() - c.Assert(err, IsNil) - }) -} - -func (s *ClientSuite) TestUpdateMixAndMatchAttack(c *C) { - // generate metadata with an explicit expires so we can make predictable changes - expires := time.Now().Add(time.Hour) - c.Assert(s.repo.AddTargetWithExpires("foo.txt", nil, expires), IsNil) - c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) - client := s.updatedClient(c) - - // grab the remote targets.json - oldTargets, ok := s.remote.meta["targets.json"] - if 
!ok { - c.Fatal("missing remote targets.json") - } - - // generate new remote metadata, but replace targets.json with the old one - c.Assert(s.repo.AddTargetWithExpires("bar.txt", nil, expires), IsNil) - c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) - newTargets, ok := s.remote.meta["targets.json"] - if !ok { - c.Fatal("missing remote targets.json") - } - s.remote.meta["targets.json"] = oldTargets - - // check update returns ErrWrongSize for targets.json - _, err := client.Update() - c.Assert(err, DeepEquals, ErrWrongSize{"targets.json", oldTargets.size, newTargets.size}) - - // do the same but keep the size the same - c.Assert(s.repo.RemoveTargetWithExpires("foo.txt", expires), IsNil) - c.Assert(s.repo.Snapshot(tuf.CompressionTypeNone), IsNil) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) - s.remote.meta["targets.json"] = oldTargets - - // check update returns ErrWrongHash - _, err = client.Update() - assertWrongHash(c, err) -} - -func (s *ClientSuite) TestUpdateReplayAttack(c *C) { - client := s.updatedClient(c) - - // grab the remote timestamp.json - oldTimestamp, ok := s.remote.meta["timestamp.json"] - if !ok { - c.Fatal("missing remote timestamp.json") - } - - // generate a new timestamp and sync with the client - version := client.timestampVer - c.Assert(version > 0, Equals, true) - c.Assert(s.repo.Timestamp(), IsNil) - s.syncRemote(c) - _, err := client.Update() - c.Assert(IsLatestSnapshot(err), Equals, true) - c.Assert(client.timestampVer > version, Equals, true) - - // replace remote timestamp.json with the old one - s.remote.meta["timestamp.json"] = oldTimestamp - - // check update returns ErrLowVersion - _, err = client.Update() - c.Assert(err, DeepEquals, ErrDecodeFailed{"timestamp.json", signed.ErrLowVersion{version, client.timestampVer}}) -} - -func (s *ClientSuite) TestUpdateTamperedTargets(c *C) { - client := s.newClient(c) - - // get local targets.json - meta, err := 
s.store.GetMeta() - c.Assert(err, IsNil) - targetsJSON, ok := meta["targets.json"] - if !ok { - c.Fatal("missing targets.json") - } - targets := &data.Signed{} - c.Assert(json.Unmarshal(targetsJSON, targets), IsNil) - - // update remote targets.json to have different content but same size - c.Assert(targets.Signatures, HasLen, 1) - targets.Signatures[0].Method = "xxxxxxx" - tamperedJSON, err := json.Marshal(targets) - c.Assert(err, IsNil) - s.store.SetMeta("targets.json", tamperedJSON) - s.syncRemote(c) - _, err = client.Update() - assertWrongHash(c, err) - - // update remote targets.json to have the wrong size - targets.Signatures[0].Method = "xxx" - tamperedJSON, err = json.Marshal(targets) - c.Assert(err, IsNil) - s.store.SetMeta("targets.json", tamperedJSON) - s.syncRemote(c) - _, err = client.Update() - c.Assert(err, DeepEquals, ErrWrongSize{"targets.json", int64(len(tamperedJSON)), int64(len(targetsJSON))}) -} - -func (s *ClientSuite) TestUpdateHTTP(c *C) { - tmp := c.MkDir() - - // start file server - addr, cleanup := startFileServer(c, tmp) - defer cleanup() - - for _, consistentSnapshot := range []bool{false, true} { - dir := fmt.Sprintf("consistent-snapshot-%t", consistentSnapshot) - - // generate repository - repo := generateRepoFS(c, filepath.Join(tmp, dir), targetFiles, consistentSnapshot) - - // initialize a client - remote, err := HTTPRemoteStore(fmt.Sprintf("http://%s/%s/repository", addr, dir), nil) - c.Assert(err, IsNil) - client := NewClient(MemoryLocalStore(), remote) - rootKeys, err := repo.RootKeys() - c.Assert(err, IsNil) - c.Assert(rootKeys, HasLen, 1) - c.Assert(client.Init(rootKeys, 1), IsNil) - - // check update is ok - targets, err := client.Update() - c.Assert(err, IsNil) - assertFiles(c, targets, []string{"/foo.txt", "/bar.txt", "/baz.txt"}) - - // check can download files - for name, data := range targetFiles { - var dest testDestination - c.Assert(client.Download(name, &dest), IsNil) - c.Assert(dest.deleted, Equals, false) - 
c.Assert(dest.String(), Equals, string(data)) - } - } -} - -type testDestination struct { - bytes.Buffer - deleted bool -} - -func (t *testDestination) Delete() error { - t.deleted = true - return nil -} - -func (s *ClientSuite) TestDownloadUnknownTarget(c *C) { - client := s.updatedClient(c) - var dest testDestination - c.Assert(client.Download("/nonexistent", &dest), Equals, ErrUnknownTarget{"/nonexistent"}) - c.Assert(dest.deleted, Equals, true) -} - -func (s *ClientSuite) TestDownloadNoExist(c *C) { - client := s.updatedClient(c) - delete(s.remote.targets, "/foo.txt") - var dest testDestination - c.Assert(client.Download("/foo.txt", &dest), Equals, ErrNotFound{"/foo.txt"}) - c.Assert(dest.deleted, Equals, true) -} - -func (s *ClientSuite) TestDownloadOK(c *C) { - client := s.updatedClient(c) - // the filename is normalized if necessary - for _, name := range []string{"/foo.txt", "foo.txt"} { - var dest testDestination - c.Assert(client.Download(name, &dest), IsNil) - c.Assert(dest.deleted, Equals, false) - c.Assert(dest.String(), Equals, "foo") - } -} - -func (s *ClientSuite) TestDownloadWrongSize(c *C) { - client := s.updatedClient(c) - remoteFile := &fakeFile{buf: bytes.NewReader([]byte("wrong-size")), size: 10} - s.remote.targets["/foo.txt"] = remoteFile - var dest testDestination - c.Assert(client.Download("/foo.txt", &dest), DeepEquals, ErrWrongSize{"/foo.txt", 10, 3}) - c.Assert(remoteFile.bytesRead, Equals, 0) - c.Assert(dest.deleted, Equals, true) -} - -func (s *ClientSuite) TestDownloadTargetTooLong(c *C) { - client := s.updatedClient(c) - remoteFile := s.remote.targets["/foo.txt"] - remoteFile.buf = bytes.NewReader([]byte("foo-ooo")) - var dest testDestination - c.Assert(client.Download("/foo.txt", &dest), IsNil) - c.Assert(remoteFile.bytesRead, Equals, 3) - c.Assert(dest.deleted, Equals, false) - c.Assert(dest.String(), Equals, "foo") -} - -func (s *ClientSuite) TestDownloadTargetTooShort(c *C) { - client := s.updatedClient(c) - remoteFile := 
s.remote.targets["/foo.txt"] - remoteFile.buf = bytes.NewReader([]byte("fo")) - var dest testDestination - c.Assert(client.Download("/foo.txt", &dest), DeepEquals, ErrWrongSize{"/foo.txt", 2, 3}) - c.Assert(dest.deleted, Equals, true) -} - -func (s *ClientSuite) TestDownloadTargetCorruptData(c *C) { - client := s.updatedClient(c) - remoteFile := s.remote.targets["/foo.txt"] - remoteFile.buf = bytes.NewReader([]byte("corrupt")) - var dest testDestination - assertWrongHash(c, client.Download("/foo.txt", &dest)) - c.Assert(dest.deleted, Equals, true) -} - -func (s *ClientSuite) TestAvailableTargets(c *C) { - client := s.updatedClient(c) - files, err := client.Targets() - c.Assert(err, IsNil) - assertFiles(c, files, []string{"/foo.txt"}) - - s.addRemoteTarget(c, "bar.txt") - s.addRemoteTarget(c, "baz.txt") - _, err = client.Update() - c.Assert(err, IsNil) - files, err = client.Targets() - c.Assert(err, IsNil) - assertFiles(c, files, []string{"/foo.txt", "/bar.txt", "/baz.txt"}) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/errors.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/errors.go deleted file mode 100644 index c769d15e96..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/errors.go +++ /dev/null @@ -1,106 +0,0 @@ -package client - -import ( - "errors" - "fmt" -) - -var ( - ErrNoRootKeys = errors.New("tuf: no root keys found in local meta store") - ErrInsufficientKeys = errors.New("tuf: insufficient keys to meet threshold") -) - -type ErrMissingRemoteMetadata struct { - Name string -} - -func (e ErrMissingRemoteMetadata) Error() string { - return fmt.Sprintf("tuf: missing remote metadata %s", e.Name) -} - -type ErrDownloadFailed struct { - File string - Err error -} - -func (e ErrDownloadFailed) Error() string { - return fmt.Sprintf("tuf: failed to download %s: %s", e.File, e.Err) -} - -type ErrDecodeFailed struct { - File string - Err error -} - -func (e ErrDecodeFailed) Error() string { - return 
fmt.Sprintf("tuf: failed to decode %s: %s", e.File, e.Err) -} - -func isDecodeFailedWithErr(err, expected error) bool { - e, ok := err.(ErrDecodeFailed) - if !ok { - return false - } - return e.Err == expected -} - -type ErrNotFound struct { - File string -} - -func (e ErrNotFound) Error() string { - return fmt.Sprintf("tuf: file not found: %s", e.File) -} - -func IsNotFound(err error) bool { - _, ok := err.(ErrNotFound) - return ok -} - -type ErrWrongSize struct { - File string - Actual int64 - Expected int64 -} - -func (e ErrWrongSize) Error() string { - return fmt.Sprintf("tuf: unexpected file size: %s (expected %d bytes, got %d bytes)", e.File, e.Expected, e.Actual) -} - -type ErrLatestSnapshot struct { - Version int -} - -func (e ErrLatestSnapshot) Error() string { - return fmt.Sprintf("tuf: the local snapshot version (%d) is the latest", e.Version) -} - -func IsLatestSnapshot(err error) bool { - _, ok := err.(ErrLatestSnapshot) - return ok -} - -type ErrUnknownTarget struct { - Name string -} - -func (e ErrUnknownTarget) Error() string { - return fmt.Sprintf("tuf: unknown target file: %s", e.Name) -} - -type ErrMetaTooLarge struct { - Name string - Size int64 -} - -func (e ErrMetaTooLarge) Error() string { - return fmt.Sprintf("tuf: %s size %d bytes greater than maximum %d bytes", e.Name, e.Size, maxMetaSize) -} - -type ErrInvalidURL struct { - URL string -} - -func (e ErrInvalidURL) Error() string { - return fmt.Sprintf("tuf: invalid repository URL %s", e.URL) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/interop_test.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/interop_test.go deleted file mode 100644 index a3f529d511..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/interop_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/agl/ed25519" - 
"github.com/endophage/go-tuf" - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/signed" - "github.com/endophage/go-tuf/store" - "github.com/endophage/go-tuf/util" - . "gopkg.in/check.v1" -) - -type InteropSuite struct{} - -var _ = Suite(&InteropSuite{}) - -var pythonTargets = map[string][]byte{ - "/file1.txt": []byte("file1.txt"), - "/dir/file2.txt": []byte("file2.txt"), -} - -func (InteropSuite) TestGoClientPythonGenerated(c *C) { - // start file server - cwd, err := os.Getwd() - c.Assert(err, IsNil) - testDataDir := filepath.Join(cwd, "testdata") - addr, cleanup := startFileServer(c, testDataDir) - defer cleanup() - - for _, dir := range []string{"with-consistent-snapshot", "without-consistent-snapshot"} { - remote, err := HTTPRemoteStore( - fmt.Sprintf("http://%s/%s/repository", addr, dir), - &HTTPRemoteOptions{MetadataPath: "metadata", TargetsPath: "targets"}, - ) - c.Assert(err, IsNil) - - // initiate a client with the root keys - f, err := os.Open(filepath.Join("testdata", dir, "keystore", "root_key.pub")) - c.Assert(err, IsNil) - key := &data.Key{} - c.Assert(json.NewDecoder(f).Decode(key), IsNil) - c.Assert(key.Type, Equals, "ed25519") - c.Assert(key.Value.Public, HasLen, ed25519.PublicKeySize) - client := NewClient(MemoryLocalStore(), remote) - c.Assert(client.Init([]*data.Key{key}, 1), IsNil) - - // check update returns the correct updated targets - files, err := client.Update() - c.Assert(err, IsNil) - c.Assert(files, HasLen, len(pythonTargets)) - for name, data := range pythonTargets { - file, ok := files[name] - if !ok { - c.Fatalf("expected updated targets to contain %s", name) - } - meta, err := util.GenerateFileMeta(bytes.NewReader(data), file.HashAlgorithms()...) 
- c.Assert(err, IsNil) - c.Assert(util.FileMetaEqual(file, meta), IsNil) - } - - // download the files and check they have the correct content - for name, data := range pythonTargets { - var dest testDestination - c.Assert(client.Download(name, &dest), IsNil) - c.Assert(dest.deleted, Equals, false) - c.Assert(dest.String(), Equals, string(data)) - } - } -} - -func generateRepoFS(c *C, dir string, files map[string][]byte, consistentSnapshot bool) *tuf.Repo { - signer := signed.NewEd25519() - repo, err := tuf.NewRepo(signer, store.FileSystemStore(dir, nil), "sha256") - c.Assert(err, IsNil) - if !consistentSnapshot { - c.Assert(repo.Init(false), IsNil) - } - for _, role := range []string{"root", "snapshot", "targets", "timestamp"} { - _, err := repo.GenKey(role) - c.Assert(err, IsNil) - } - for file, data := range files { - path := filepath.Join(dir, "staged", "targets", file) - c.Assert(os.MkdirAll(filepath.Dir(path), 0755), IsNil) - c.Assert(ioutil.WriteFile(path, data, 0644), IsNil) - c.Assert(repo.AddTarget(file, nil), IsNil) - } - c.Assert(repo.Snapshot(tuf.CompressionTypeNone), IsNil) - c.Assert(repo.Timestamp(), IsNil) - c.Assert(repo.Commit(), IsNil) - return repo -} - -func (InteropSuite) TestPythonClientGoGenerated(c *C) { - // clone the Python client if necessary - cwd, err := os.Getwd() - c.Assert(err, IsNil) - tufDir := filepath.Join(cwd, "testdata", "tuf") - if _, err := os.Stat(tufDir); os.IsNotExist(err) { - c.Assert(exec.Command( - "git", - "clone", - "--quiet", - "--branch=v0.9.9", - "--depth=1", - "https://github.com/theupdateframework/tuf.git", - tufDir, - ).Run(), IsNil) - } - - tmp := c.MkDir() - files := map[string][]byte{ - "foo.txt": []byte("foo"), - "bar/baz.txt": []byte("baz"), - } - - // start file server - addr, cleanup := startFileServer(c, tmp) - defer cleanup() - - // setup Python env - environ := os.Environ() - pythonEnv := make([]string, 0, len(environ)+1) - // remove any existing PYTHONPATH from the environment - for _, e := range 
environ { - if strings.HasPrefix(e, "PYTHONPATH=") { - continue - } - pythonEnv = append(pythonEnv, e) - } - pythonEnv = append(pythonEnv, "PYTHONPATH="+tufDir) - - for _, consistentSnapshot := range []bool{false, true} { - // generate repository - name := fmt.Sprintf("consistent-snapshot-%t", consistentSnapshot) - dir := filepath.Join(tmp, name) - generateRepoFS(c, dir, files, consistentSnapshot) - - // create initial files for Python client - clientDir := filepath.Join(dir, "client") - currDir := filepath.Join(clientDir, "metadata", "current") - prevDir := filepath.Join(clientDir, "metadata", "previous") - c.Assert(os.MkdirAll(currDir, 0755), IsNil) - c.Assert(os.MkdirAll(prevDir, 0755), IsNil) - rootJSON, err := ioutil.ReadFile(filepath.Join(dir, "repository", "root.json")) - c.Assert(err, IsNil) - c.Assert(ioutil.WriteFile(filepath.Join(currDir, "root.json"), rootJSON, 0644), IsNil) - - // run Python client update - cmd := exec.Command("python", filepath.Join(cwd, "testdata", "client.py"), "--repo=http://"+addr+"/"+name) - cmd.Env = pythonEnv - cmd.Dir = clientDir - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - c.Assert(cmd.Run(), IsNil) - - // check the target files got downloaded - for path, expected := range files { - actual, err := ioutil.ReadFile(filepath.Join(clientDir, "targets", path)) - c.Assert(err, IsNil) - c.Assert(actual, DeepEquals, expected) - } - } -} - -func startFileServer(c *C, dir string) (string, func() error) { - l, err := net.Listen("tcp", "127.0.0.1:0") - c.Assert(err, IsNil) - addr := l.Addr().String() - go http.Serve(l, http.FileServer(http.Dir(dir))) - return addr, l.Close -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/local_store.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/local_store.go deleted file mode 100644 index 76fc864eb6..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/local_store.go +++ /dev/null @@ -1,65 +0,0 @@ -package client - -import ( - 
"encoding/json" - "time" - - "github.com/boltdb/bolt" -) - -func MemoryLocalStore() LocalStore { - return make(memoryLocalStore) -} - -type memoryLocalStore map[string]json.RawMessage - -func (m memoryLocalStore) GetMeta() (map[string]json.RawMessage, error) { - return m, nil -} - -func (m memoryLocalStore) SetMeta(name string, meta json.RawMessage) error { - m[name] = meta - return nil -} - -const dbBucket = "tuf-client" - -func FileLocalStore(path string) (LocalStore, error) { - db, err := bolt.Open(path, 0600, &bolt.Options{Timeout: time.Second}) - if err != nil { - return nil, err - } - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucketIfNotExists([]byte(dbBucket)) - return err - }); err != nil { - return nil, err - } - return &fileLocalStore{db: db}, nil -} - -type fileLocalStore struct { - db *bolt.DB -} - -func (f *fileLocalStore) GetMeta() (map[string]json.RawMessage, error) { - meta := make(map[string]json.RawMessage) - if err := f.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(dbBucket)) - b.ForEach(func(k, v []byte) error { - meta[string(k)] = v - return nil - }) - return nil - }); err != nil { - return nil, err - } - return meta, nil -} - -func (f *fileLocalStore) SetMeta(name string, meta json.RawMessage) error { - return f.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(dbBucket)) - return b.Put([]byte(name), meta) - }) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/local_store_test.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/local_store_test.go deleted file mode 100644 index c784dc128f..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/local_store_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "encoding/json" - "path/filepath" - - . 
"gopkg.in/check.v1" -) - -type LocalStoreSuite struct{} - -var _ = Suite(&LocalStoreSuite{}) - -func (LocalStoreSuite) TestFileLocalStore(c *C) { - tmp := c.MkDir() - path := filepath.Join(tmp, "tuf.db") - store, err := FileLocalStore(path) - c.Assert(err, IsNil) - - type meta map[string]json.RawMessage - - assertGet := func(expected meta) { - actual, err := store.GetMeta() - c.Assert(err, IsNil) - c.Assert(meta(actual), DeepEquals, expected) - } - - // initial GetMeta should return empty meta - assertGet(meta{}) - - // SetMeta should persist - rootJSON := []byte(`{"_type":"Root"}`) - c.Assert(store.SetMeta("root.json", rootJSON), IsNil) - assertGet(meta{"root.json": rootJSON}) - - // SetMeta should add to existing meta - targetsJSON := []byte(`{"_type":"Target"}`) - c.Assert(store.SetMeta("targets.json", targetsJSON), IsNil) - assertGet(meta{"root.json": rootJSON, "targets.json": targetsJSON}) - - // a new store should get the same meta - c.Assert(store.(*fileLocalStore).db.Close(), IsNil) - store, err = FileLocalStore(path) - c.Assert(err, IsNil) - assertGet(meta{"root.json": rootJSON, "targets.json": targetsJSON}) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/remote_store.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/remote_store.go deleted file mode 100644 index dc98193348..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/remote_store.go +++ /dev/null @@ -1,92 +0,0 @@ -package client - -import ( - "crypto/tls" - "fmt" - "io" - "net/http" - "net/url" - "path" - "strconv" - "strings" -) - -type HTTPRemoteOptions struct { - MetadataPath string - TargetsPath string - UserAgent string -} - -func HTTPRemoteStore(baseURL string, opts *HTTPRemoteOptions) (RemoteStore, error) { - if !strings.HasPrefix(baseURL, "http") { - return nil, ErrInvalidURL{baseURL} - } - if opts == nil { - opts = &HTTPRemoteOptions{} - } - if opts.TargetsPath == "" { - opts.TargetsPath = "targets" - } - tr := &http.Transport{ - 
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - client := &http.Client{Transport: tr} - return &httpRemoteStore{baseURL, opts, client}, nil -} - -type httpRemoteStore struct { - baseURL string - opts *HTTPRemoteOptions - client *http.Client -} - -func (h *httpRemoteStore) GetMeta(name string) (io.ReadCloser, int64, error) { - return h.get(path.Join(h.opts.MetadataPath, name)) -} - -func (h *httpRemoteStore) GetTarget(name string) (io.ReadCloser, int64, error) { - return h.get(path.Join(h.opts.TargetsPath, name)) -} - -func (h *httpRemoteStore) get(s string) (io.ReadCloser, int64, error) { - u := h.url(s) - fmt.Println("###########") - fmt.Println(u) - fmt.Println("###########") - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, 0, err - } - if h.opts.UserAgent != "" { - req.Header.Set("User-Agent", h.opts.UserAgent) - } - res, err := h.client.Do(req) - if err != nil { - return nil, 0, err - } - - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return nil, 0, ErrNotFound{s} - } else if res.StatusCode != http.StatusOK { - res.Body.Close() - return nil, 0, &url.Error{ - Op: "GET", - URL: u, - Err: fmt.Errorf("unexpected HTTP status %d", res.StatusCode), - } - } - - size, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 0) - if err != nil { - return res.Body, -1, nil - } - return res.Body, size, nil -} - -func (h *httpRemoteStore) url(path string) string { - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - return h.baseURL + path -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/.gitignore b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/.gitignore deleted file mode 100644 index 67443ee78f..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -tuf.log -tuf diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/LICENSE.txt 
b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/LICENSE.txt deleted file mode 100644 index 544f53dc45..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/LICENSE.txt +++ /dev/null @@ -1,66 +0,0 @@ - This file contains the license for TUF: The Update Framework. - - It also lists license information for components and source - code used by TUF: The Update Framework. - - If you got this file as a part of a larger bundle, - there may be other license terms that you should be aware of. - -=============================================================================== -TUF: The Update Framework is distributed under this license: - -Copyright (c) 2010, Justin Samuel and Justin Cappos. - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and/or hardware specification (the “Work”) to deal in the Work -without restriction, including without limitation the rights to use, copy, -modify, merge, publish, distribute, sublicense, and/or sell copies of the Work, -and to permit persons to whom the Work is furnished to do so, subject to the -following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Work. - -THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER -DEALINGS IN THE WORK. -=============================================================================== -Many files are modified from Thandy and are licensed under the -following license: - -Thandy is distributed under this license: - -Copyright (c) 2008, The Tor Project, Inc. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - - * Neither the names of the copyright owners nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-=============================================================================== diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/Makefile b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/Makefile deleted file mode 100644 index 84b0e003c0..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -all: - docker build -t tuf-gen ./generate - docker run tuf-gen | tar x - -clean: - rm -rf with{,out}-consistent-snapshot - -.PHONY: all clean diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/README.md b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/README.md deleted file mode 100644 index 011b998fe3..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# TUF testdata - -TUF testdata generated by the Python implementation which is used to test that -the Go client is compatible with files generated by the Python repository tool. - -## Generate - -The `generate` directory contains scripts and a Dockerfile for generating the -test data files. - -Run `make` to regenerate the test files: - -``` -$ make clean -rm -rf keystore repository - -$ make -docker build -t tuf-gen ./generate -... -Successfully built ac1fba1d0b3b -docker run tuf-gen | tar x -Files generated: -. 
-|-- keystore -| |-- root_key -| |-- root_key.pub -| |-- snapshot_key -| |-- snapshot_key.pub -| |-- targets_key -| |-- targets_key.pub -| |-- timestamp_key -| `-- timestamp_key.pub -|-- repository -| |-- metadata -| | |-- root.json -| | |-- snapshot.json -| | |-- targets.json -| | |-- targets.json.gz -| | `-- timestamp.json -| `-- targets -| |-- dir -| | `-- file2.txt -| `-- file1.txt -`-- tuf.log - -5 directories, 16 files -``` diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/client.py b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/client.py deleted file mode 100644 index 5d142e6b90..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/client.py +++ /dev/null @@ -1,232 +0,0 @@ -#!/usr/bin/env python -# -# A script to download updates from a remote TUF repository. -# -# A modification of basic_client.py from the Python implementation: -# https://github.com/theupdateframework/tuf/blob/v0.9.9/tuf/client/basic_client.py - -""" - - basic_client.py - - - Vladimir Diaz - - - September 2012 - - - See LICENSE for licensing information. - - - Provide a basic TUF client that can update all of the metatada and target - files provided by the user-specified repository mirror. Updated files are - saved to the 'targets' directory in the current working directory. The - repository mirror is specified by the user through the '--repo' command- - line option. - - Normally, a software updater integrating TUF will develop their own costum - client module by importing 'tuf.client.updater', instantiating the required - object, and calling the desired methods to perform an update. This basic - client is provided to users who wish to give TUF a quick test run without - the hassle of writing client code. This module can also used by updaters that - do not need the customization and only require their clients to perform an - update of all the files provided by their repository mirror(s). 
- - For software updaters that DO require customization, see the 'example_client.py' - script. The 'example_client.py' script provides an outline of the client code - that software updaters may develop and then tailor to their specific software - updater or package manager. - - Additional tools for clients running legacy applications will also be made - available. These tools will allow secure software updates using The Update - Framework without the need to modify the original application. - - - $ python basic_client.py --repo http://localhost:8001 - $ python basic_client.py --repo http://localhost:8001 --verbose 3 - - - --verbose: - Set the verbosity level of logging messages. Accepts values 1-5. - - --repo: - Set the repository mirror that will be responding to client requests. - E.g., 'http://locahost:8001'. -""" - -# Help with Python 3 compatibility, where the print statement is a function, an -# implicit relative import is invalid, and the '/' operator performs true -# division. Example: print 'hello world' raises a 'SyntaxError' exception. -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division -from __future__ import unicode_literals - -import sys -import traceback -import optparse -import logging - -import tuf -import tuf.formats -import tuf.client.updater -import tuf.log - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger('tuf.basic_client') - - -def update_client(repository_mirror): - """ - - Perform an update of the metadata and target files located at - 'repository_mirror'. Target files are saved to the 'targets' directory - in the current working directory. The current directory must already - include a 'metadata' directory, which in turn must contain the 'current' - and 'previous' directories. At a minimum, these two directories require - the 'root.json' metadata file. 
- - - repository_mirror: - The URL to the repository mirror hosting the metadata and target - files. E.g., 'http://localhost:8001' - - - tuf.RepositoryError, if 'repository_mirror' is improperly formatted. - - - Connects to a repository mirror and updates the metadata files and - any target files. Obsolete targets are also removed locally. - - - None. - """ - - # Does 'repository_mirror' have the correct format? - try: - tuf.formats.URL_SCHEMA.check_match(repository_mirror) - except tuf.FormatError as e: - message = 'The repository mirror supplied is invalid.' - raise tuf.RepositoryError(message) - - # Set the local repository directory containing all of the metadata files. - tuf.conf.repository_directory = '.' - - # Set the repository mirrors. This dictionary is needed by the Updater - # class of updater.py. - repository_mirrors = {'mirror': {'url_prefix': repository_mirror, - 'metadata_path': 'repository', - 'targets_path': 'repository/targets', - 'confined_target_dirs': ['']}} - - # Create the repository object using the repository name 'repository' - # and the repository mirrors defined above. - updater = tuf.client.updater.Updater('repository', repository_mirrors) - - # The local destination directory to save the target files. - destination_directory = './targets' - - # Refresh the repository's top-level roles, store the target information for - # all the targets tracked, and determine which of these targets have been - # updated. - updater.refresh() - all_targets = updater.all_targets() - updated_targets = updater.updated_targets(all_targets, destination_directory) - - # Download each of these updated targets and save them locally. - for target in updated_targets: - try: - updater.download_target(target, destination_directory) - except tuf.DownloadError as e: - pass - - # Remove any files from the destination directory that are no longer being - # tracked. 
- updater.remove_obsolete_targets(destination_directory) - - - - - -def parse_options(): - """ - - Parse the command-line options and set the logging level - as specified by the user through the --verbose option. - 'basic_client' expects the '--repo' to be set by the user. - - Example: - $ python basic_client.py --repo http://localhost:8001 - - If the required option is unset, a parser error is printed - and the scripts exits. - - - None. - - - None. - - - Sets the logging level for TUF logging. - - - The 'options.REPOSITORY_MIRROR' string. - """ - - parser = optparse.OptionParser() - - # Add the options supported by 'basic_client' to the option parser. - parser.add_option('--verbose', dest='VERBOSE', type=int, default=2, - help='Set the verbosity level of logging messages.' - 'The lower the setting, the greater the verbosity.') - - parser.add_option('--repo', dest='REPOSITORY_MIRROR', type='string', - help='Specifiy the repository mirror\'s URL prefix ' - '(e.g., http://www.example.com:8001/tuf/).' - ' The client will download updates from this mirror.') - - options, args = parser.parse_args() - - # Set the logging level. - if options.VERBOSE == 5: - tuf.log.set_log_level(logging.CRITICAL) - elif options.VERBOSE == 4: - tuf.log.set_log_level(logging.ERROR) - elif options.VERBOSE == 3: - tuf.log.set_log_level(logging.WARNING) - elif options.VERBOSE == 2: - tuf.log.set_log_level(logging.INFO) - elif options.VERBOSE == 1: - tuf.log.set_log_level(logging.DEBUG) - else: - tuf.log.set_log_level(logging.NOTSET) - - # Ensure the '--repo' option was set by the user. - if options.REPOSITORY_MIRROR is None: - message = '"--repo" must be set on the command-line.' - parser.error(message) - - # Return the repository mirror containing the metadata and target files. - return options.REPOSITORY_MIRROR - - - -if __name__ == '__main__': - - # Parse the options and set the logging level. 
- repository_mirror = parse_options() - - # Perform an update of all the files in the 'targets' directory located in - # the current directory. - try: - update_client(repository_mirror) - - except (tuf.NoWorkingMirrorError, tuf.RepositoryError) as e: - traceback.print_exc() - sys.stderr.write('Error: '+str(e)+'\n') - sys.exit(1) - - # Successfully updated the client's target files. - sys.exit(0) diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/Dockerfile b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/Dockerfile deleted file mode 100644 index c6b249d0c1..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM ubuntu:trusty - -RUN apt-get update -RUN apt-get install -y python python-dev python-pip libffi-dev tree - -# Use the develop branch of tuf for the following fix: -# https://github.com/theupdateframework/tuf/commit/38005fe -RUN apt-get install -y git -RUN pip install --no-use-wheel git+https://github.com/theupdateframework/tuf.git@develop && pip install tuf[tools] - -ADD generate.py generate.sh / -CMD /generate.sh diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.py b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.py deleted file mode 100644 index 055f2f960f..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.py +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python -# -# A script to generate TUF repository files. 
-# -# A modification of generate.py from the Python implementation: -# https://github.com/theupdateframework/tuf/blob/v0.9.9/tests/repository_data/generate.py - -import shutil -import datetime -import optparse -import stat - -from tuf.repository_tool import * -import tuf.util - -parser = optparse.OptionParser() -parser.add_option("-c","--consistent-snapshot", action='store_true', dest="consistent_snapshot", - help="Generate consistent snapshot", default=False) -(options, args) = parser.parse_args() - -repository = create_new_repository('repository') - -root_key_file = 'keystore/root_key' -targets_key_file = 'keystore/targets_key' -snapshot_key_file = 'keystore/snapshot_key' -timestamp_key_file = 'keystore/timestamp_key' - -generate_and_write_ed25519_keypair(root_key_file, password='password') -generate_and_write_ed25519_keypair(targets_key_file, password='password') -generate_and_write_ed25519_keypair(snapshot_key_file, password='password') -generate_and_write_ed25519_keypair(timestamp_key_file, password='password') - -root_public = import_ed25519_publickey_from_file(root_key_file+'.pub') -targets_public = import_ed25519_publickey_from_file(targets_key_file+'.pub') -snapshot_public = import_ed25519_publickey_from_file(snapshot_key_file+'.pub') -timestamp_public = import_ed25519_publickey_from_file(timestamp_key_file+'.pub') - -root_private = import_ed25519_privatekey_from_file(root_key_file, 'password') -targets_private = import_ed25519_privatekey_from_file(targets_key_file, 'password') -snapshot_private = import_ed25519_privatekey_from_file(snapshot_key_file, 'password') -timestamp_private = import_ed25519_privatekey_from_file(timestamp_key_file, 'password') - -repository.root.add_verification_key(root_public) -repository.targets.add_verification_key(targets_public) -repository.snapshot.add_verification_key(snapshot_public) -repository.timestamp.add_verification_key(timestamp_public) - -repository.root.load_signing_key(root_private) 
-repository.targets.load_signing_key(targets_private) -repository.snapshot.load_signing_key(snapshot_private) -repository.timestamp.load_signing_key(timestamp_private) - -target1_filepath = 'repository/targets/file1.txt' -tuf.util.ensure_parent_dir(target1_filepath) -target2_filepath = 'repository/targets/dir/file2.txt' -tuf.util.ensure_parent_dir(target2_filepath) - -with open(target1_filepath, 'wt') as file_object: - file_object.write('file1.txt') - -with open(target2_filepath, 'wt') as file_object: - file_object.write('file2.txt') - -octal_file_permissions = oct(os.stat(target1_filepath).st_mode)[4:] -file_permissions = {'file_permissions': octal_file_permissions} -repository.targets.add_target(target1_filepath, file_permissions) -repository.targets.add_target(target2_filepath) - -repository.root.expiration = datetime.datetime(2030, 1, 1, 0, 0) -repository.targets.expiration = datetime.datetime(2030, 1, 1, 0, 0) -repository.snapshot.expiration = datetime.datetime(2030, 1, 1, 0, 0) -repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 0, 0) - -repository.targets.compressions = ['gz'] - -if options.consistent_snapshot: - repository.write(False, True) - -else: - repository.write() - -shutil.move('repository/metadata.staged', 'repository/metadata') diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.sh b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.sh deleted file mode 100644 index 0307599ca1..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/generate/generate.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# -# A script to generate TUF repository files using the Python implementation. -# -# A list of generated files is printed to STDERR and a tar of the files to STDOUT. 
- -set -e - -main() { - local dir="$(mktemp -d)" - trap "rm -rf ${dir}" EXIT - - pushd "${dir}" >/dev/null - generate_consistent - generate_non_consistent - list_files >&2 - tar c . - popd >/dev/null -} - -generate_consistent() { - mkdir "with-consistent-snapshot" - pushd "with-consistent-snapshot" >/dev/null - /generate.py --consistent-snapshot - popd >/dev/null -} - -generate_non_consistent() { - mkdir "without-consistent-snapshot" - pushd "without-consistent-snapshot" >/dev/null - /generate.py - popd >/dev/null -} - -list_files() { - echo "Files generated:" - tree -} - -main $@ diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/root_key b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/root_key deleted file mode 100644 index f4d02e94dd..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/root_key +++ /dev/null @@ -1 +0,0 @@ -4fc13ddb4979dbe54ff8ac93cab9b307@@@@100000@@@@e837119fd0a754046e1175445effdf8cdeda587fc94787e6ab27bc468dfb8bb0@@@@0335c7f8953301f91aa11d3be991096d@@@@d55c63737dbc5de581f6814fa37a341723465b8ea5157eca4302a2271b0cee93d48c4e48707a4ab34ecb649e5879577eb5e7bdf95627c8cbdf611fbc7cfa360d48b819525f20050ba7829ff016fc348916ce3154f031d7aed9cd91cbf89bc2d7e03ec4b5f98c4e4b2e4e576dbd9eefb3736aa94a6753875bf328727bbefb582ced865ff2512bd2c46b8c15d4a81ff244a296307949a8e58013588b47c65ae8334fd4df0c25d95778dc03a728969ce121d63f5dc34cd21d71b8ee6c05d85eeab4f4ff7c153b896f459304aa43ac9ef3a4b34701156e8cff3ddcaf91f6bef1dadba2f275cc1c8f675a8bc026023d25428b5a5a6730e76fb3d9a0630f1eb9662b06ef6523816f4e8b71966aa6e2 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/root_key.pub b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/root_key.pub deleted file mode 100644 
index 4737bc7faa..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/root_key.pub +++ /dev/null @@ -1 +0,0 @@ -{"keytype": "ed25519", "keyval": {"public": "4b9d6523d4fca15b88694b87f714c839f3b1db296884c6d235cfea617e5a3df0"}} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/snapshot_key b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/snapshot_key deleted file mode 100644 index f36a21d28f..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/snapshot_key +++ /dev/null @@ -1 +0,0 @@ -412601f80d76e03115cdc5620d803bf6@@@@100000@@@@90e058b5b1db460fb28e81105228bfd2b0a4ea748b524e9e349f8e24c8564fad@@@@9922a40a7b682a052b20cea3043018b2@@@@e83737f842c4a847b0302eb8cfba70b790083ce588e8c1fedf1f2a27590ef3a656b4abd0c1bec83c46a907083447b84d64d4307b98d4bbc673f9a12ef05814f550540ca187dc210de3d4147f36700da721433efcde095c98dc8ef0bc39bd40785842b6541c678b5d77b14f9a1170fabcf21dc4c86980776a721d2ac5068fcaa0703d636a60f8f6575e23b2238dd2d603ccaaeb8d4d2ca5794c0036811f0dd09409f07c137361a84358e0eeeba8e7d870652a17a5891c4f7e830672b61bd73b56f04c5e694caf87ecd255c3d7ec263a7e72c13d2fb62d97ec07b4b981776c9cc767d778e38ba1f36964a744114acd081ef7c442086eadd03d3875ad5ce04d273e685547a14b73aff739128873 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/snapshot_key.pub b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/snapshot_key.pub deleted file mode 100644 index d6ea89c682..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/snapshot_key.pub +++ /dev/null @@ -1 +0,0 @@ -{"keytype": "ed25519", "keyval": {"public": 
"a63faff1ac94ba5ceaf3bb6d73cf5552e75959cd562bef4f91884260720da313"}} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/targets_key b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/targets_key deleted file mode 100644 index 9de1b5ac5e..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/targets_key +++ /dev/null @@ -1 +0,0 @@ -6c8f2240657a414a478625eb6d86bb81@@@@100000@@@@bc6067a008c1c02592178927c8e6f561a3727827ea801291798b964bf31b2680@@@@384fb8f6e7e050f54ceffbd5e070581d@@@@4c5c8d7eeae6db767bd53e8dacfbd7d6db36584280b3cf3740fadb885442bf038650908ff9cb17f9d2c746d326a17ec7ce513ffb7e3c11abd875b17c92caa71ea556d71a69427741f0e80989df402e75ed23cfb85c79441a7cdf92e20e489abd09977d9028aae944ddc63116a5170dbdbd8607523a1338c61559fa106c164aee8c58d5961ed02195a46fcff615939c4c4adfc49d37b3cb2760b53dfe5961a63a29d2261310f23e568a58fcf71bf61db5816b00284bf15b7c89f1e9b929e1f3374119c0fd201b40c491e30542b5f547eb4a6828aae416bf6ea1c8b3c52ee0a98cc306f3725868e356143869bda527aee680b56abf660579b0a7539032b97b4266015a0ea6693904ef77002e39 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/targets_key.pub b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/targets_key.pub deleted file mode 100644 index 1e117637e9..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/targets_key.pub +++ /dev/null @@ -1 +0,0 @@ -{"keytype": "ed25519", "keyval": {"public": "80891271acffbe8e901dafce378134b06ec2665d4bc59de26b50354e62c8c9c6"}} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/timestamp_key 
b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/timestamp_key deleted file mode 100644 index 89b7d6be42..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/timestamp_key +++ /dev/null @@ -1 +0,0 @@ -59dc296f796a2049c9205a473e05b332@@@@100000@@@@50cda771fbc7d9f2e7209e16565289d73ad43a0b3ee620a9553f911f5ba38ca1@@@@9546236c4cd50458127fc69278037f8c@@@@63170139aa164fa9cb8f03e4015bae2bdee27f05cf22e140d931cded959c51104912eb58df06d5bcc422c28e368e80c2fbaa20a0618841fe650c88b1fde72b7cef32e07aca0d963a293c6c6db7d8e0885c6a17450e8307fc92be36d80e5c168b0abdc214dfa9048b5c44a05f17899176a128c7b8307130e085530a07258ac5047b5f439245b0eceeb0e61bd96315b6386282d40b4977fcc04c6098b7390fb4d538c1f0650e62298b235e4a38840254d7033eff9dddce55c347659632c29cc49ed828d9eba5a8e5b4b75956006014a57c8fc5c7f54d232a8eb78bb49423dc54997e7768d07186b295a5be1518be6c76777e55fd2d227070fece6cf2530d7e40e42468da7cc7413fcdf4091ec2 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/timestamp_key.pub b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/timestamp_key.pub deleted file mode 100644 index 23ea00f9ac..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/keystore/timestamp_key.pub +++ /dev/null @@ -1 +0,0 @@ -{"keytype": "ed25519", "keyval": {"public": "6107dea464cd596b87e53eda99f3f6245967a2b6f25a77c0f14bf2a49d79ae7e"}} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/32f37ab8ba96d5a3b2d10cc716ce408c860d82b4ba00e6a7a479df6bcfee2864.targets.json 
b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/32f37ab8ba96d5a3b2d10cc716ce408c860d82b4ba00e6a7a479df6bcfee2864.targets.json deleted file mode 100644 index 8fd365209d..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/32f37ab8ba96d5a3b2d10cc716ce408c860d82b4ba00e6a7a479df6bcfee2864.targets.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "signatures": [ - { - "keyid": "daa395038e78a453990a9fe23c538172aa57022e21652f381635b08611c80c55", - "method": "ed25519", - "sig": "0b021b054508971b8636ea6aceca0aaef0a9245c278e2b05aa5c20cd7ff055ba45473ab47e4c8a2475a257c432fff52c4ed551f1b7362461669d85c8f7a67e04" - } - ], - "signed": { - "_type": "Targets", - "delegations": { - "keys": {}, - "roles": [] - }, - "expires": "2030-01-01T00:00:00Z", - "targets": { - "/dir/file2.txt": { - "hashes": { - "sha256": "04e2f59431a9d219321baf7d21b8cc797d7615dc3e9515c782c49d2075658701" - }, - "length": 9 - }, - "/file1.txt": { - "custom": { - "file_permissions": "644" - }, - "hashes": { - "sha256": "55ae75d991c770d8f3ef07cbfde124ffce9c420da5db6203afab700b27e10cf9" - }, - "length": 9 - } - }, - "version": 1 - } -} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/a68b4847c117ec84f3787b9adabd607785bf30d3a9a4646661761bddc1a11e62.root.json b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/a68b4847c117ec84f3787b9adabd607785bf30d3a9a4646661761bddc1a11e62.root.json deleted file mode 100644 index 1d9980e87c..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/a68b4847c117ec84f3787b9adabd607785bf30d3a9a4646661761bddc1a11e62.root.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "signatures": [ - { - "keyid": 
"d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b", - "method": "ed25519", - "sig": "7c9f8155cd074a9666c76cde1a1f83a9b6d965e4e7f6afa95ece5bedf68ce5caea137099110e9ca16aba5b6fd4a554c0c42032a436c8ab37fd89e596144b230e" - } - ], - "signed": { - "_type": "Root", - "consistent_snapshot": true, - "expires": "2030-01-01T00:00:00Z", - "keys": { - "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1": { - "keytype": "ed25519", - "keyval": { - "public": "6107dea464cd596b87e53eda99f3f6245967a2b6f25a77c0f14bf2a49d79ae7e" - } - }, - "bba8929df86dae6d9947d2c37eab9de1d0ee828967677fea13fd11a90967f6f5": { - "keytype": "ed25519", - "keyval": { - "public": "a63faff1ac94ba5ceaf3bb6d73cf5552e75959cd562bef4f91884260720da313" - } - }, - "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b": { - "keytype": "ed25519", - "keyval": { - "public": "4b9d6523d4fca15b88694b87f714c839f3b1db296884c6d235cfea617e5a3df0" - } - }, - "daa395038e78a453990a9fe23c538172aa57022e21652f381635b08611c80c55": { - "keytype": "ed25519", - "keyval": { - "public": "80891271acffbe8e901dafce378134b06ec2665d4bc59de26b50354e62c8c9c6" - } - } - }, - "roles": { - "root": { - "keyids": [ - "d275e75592c71464bcacb94a62e617bfce7b7684d93d5726a032e9550efc576b" - ], - "threshold": 1 - }, - "snapshot": { - "keyids": [ - "bba8929df86dae6d9947d2c37eab9de1d0ee828967677fea13fd11a90967f6f5" - ], - "threshold": 1 - }, - "targets": { - "keyids": [ - "daa395038e78a453990a9fe23c538172aa57022e21652f381635b08611c80c55" - ], - "threshold": 1 - }, - "timestamp": { - "keyids": [ - "b1f19cf98e6c333b31133621ff357ac25b248312a6911c2b907f29640d954ee1" - ], - "threshold": 1 - } - }, - "version": 1 - } -} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/cae20eb655ea7f5b4ac4638e98dc9ad53dfb87a4fd19c35d1ccca92ce9a6a6da.targets.json.gz 
b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/with-consistent-snapshot/repository/metadata/cae20eb655ea7f5b4ac4638e98dc9ad53dfb87a4fd19c35d1ccca92ce9a6a6da.targets.json.gz deleted file mode 100644 index c0c6d901db23d2d73f2b10cf9d4d718f9a55dd18..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 459 zcmV;+0W|&}iwFRcqrFrD|9z58Zrm^oh4*s`L+iAmC{dJhj;vY)0WDE8M&}Qo21 zlB%N1EXwz}3eBjm!h0#Va-b69D5#Y)LLE2*GOuCbNM?X{0NbK=6mQprveS;`z6pjHS>fg*!7tK)T! z&g}KLop;SQZ9Y}ACbC}><(Fwb9sWzS)I6VcJPgxxLuG>ZU+uq{gGiLAq+~NQlvrK+ z5jgj?s6}6E)|5R$5h{IXM5Rj406nr=$TfXC$LFp5)p1GJHteR!mw(QboTC;4008O0 BRAnYd?!ob2V&q0f!~{ zTE$|C%nqES5R%bAXe@>7j7>99aiUEyS%fvmU@Dmckg^XkVsTufBWcvZ7Hze45j^|Y zR0ODz2!c<9-nV`TRW+E)rhC4-Cc-o7oP>IrKF&NNKBi&I)3|V}%6s0XsXraZYvB^v z@^D!v8&3N*)92ZH`HJ7q{Tj5=VBi;EJC6Vl%lWffPuD(Qw)eFk?xo+e{yn`sl5G`ZOvENh`?4W?K%Mx!w&@$EK;QhfBajZL)}j)YWv(8abw@Z>0XK(#oo zb98FY$L+LhzHxI?)k1{diOTCZoeuvI%{4D)9uECDUZYaZ+VA#1%t4gcQ=J#;J=7qE z1$@a;8Fl-)3ddqK)I>GAmS~c;e}Jq#BNT~0&T-qy8xJ#Gdx4uKU;Y6r)`9aD0{{S2 C5#rte diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/metadata/timestamp.json b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/metadata/timestamp.json deleted file mode 100644 index 415002ecfe..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/metadata/timestamp.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "signatures": [ - { - "keyid": "253b33da0194b7a61f47d820ec069c26e160be74a19e9545ba6615fc9a28eb62", - "method": "ed25519", - "sig": "b5e3aad4caad2aef5b6ffbe4547e181a8c3c73382271ded933a6eed5754ff09890e826460e90d0032371def25a7c16ede4622758b91c87105f20f83864b4b601" - } - ], - "signed": { - "_type": "Timestamp", - "expires": "2030-01-01T00:00:00Z", - "meta": { - "snapshot.json": { - "hashes": { - "sha256": 
"f56dd748c9c0a7dd3c81f575795d72d788b9743687a9fcc1c0e178296ebd2800" - }, - "length": 835 - } - }, - "version": 1 - } -} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/targets/dir/file2.txt b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/targets/dir/file2.txt deleted file mode 100644 index c3ee11c8b3..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/targets/dir/file2.txt +++ /dev/null @@ -1 +0,0 @@ -file2.txt \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/targets/file1.txt b/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/targets/file1.txt deleted file mode 100644 index 39cd5762dc..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/client/testdata/without-consistent-snapshot/repository/targets/file1.txt +++ /dev/null @@ -1 +0,0 @@ -file1.txt \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tools/main.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tools/main.go deleted file mode 100644 index 1ecd2705ea..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tools/main.go +++ /dev/null @@ -1,84 +0,0 @@ -package main - -import ( - "fmt" - "log" - - "github.com/endophage/go-tuf" - "github.com/endophage/go-tuf/signed" - "github.com/endophage/go-tuf/store" - "github.com/endophage/go-tuf/util" - "github.com/flynn/go-docopt" -) - -func main() { - log.SetFlags(0) - - usage := `usage: tuftools [-h|--help] [...] 
- -Options: - -h, --help - -Commands: - help Show usage for a specific command - meta Generate metadata from the given file path - -See "tuf help " for more information on a specific command -` - - args, _ := docopt.Parse(usage, nil, true, "", true) - cmd := args.String[""] - cmdArgs := args.All[""].([]string) - - if cmd == "help" { - if len(cmdArgs) == 0 { // `tuf help` - fmt.Println(usage) - return - } else { // `tuf help ` - cmd = cmdArgs[0] - cmdArgs = []string{"--help"} - } - } - - if err := runCommand(cmd, cmdArgs); err != nil { - log.Fatalln("ERROR:", err) - } -} - -type cmdFunc func(*docopt.Args, *tuf.Repo) error - -type command struct { - usage string - f cmdFunc -} - -var commands = make(map[string]*command) - -func register(name string, f cmdFunc, usage string) { - commands[name] = &command{usage: usage, f: f} -} - -func runCommand(name string, args []string) error { - argv := make([]string, 1, 1+len(args)) - argv[0] = name - argv = append(argv, args...) - - cmd, ok := commands[name] - if !ok { - return fmt.Errorf("%s is not a tuf command. See 'tuf help'", name) - } - - parsedArgs, err := docopt.Parse(cmd.usage, argv, true, "", true) - if err != nil { - return err - } - - db := util.GetSqliteDB() - local := store.DBStore(db, "") - signer := signed.Ed25519{} - repo, err := tuf.NewRepo(&signer, local, "sha256") - if err != nil { - return err - } - return cmd.f(parsedArgs, repo) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tools/meta.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tools/meta.go deleted file mode 100644 index d1a8fc049d..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tools/meta.go +++ /dev/null @@ -1,39 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - - "github.com/endophage/go-tuf" - "github.com/flynn/go-docopt" - "github.com/endophage/go-tuf/util" -) - -func init() { - register("meta", cmdMeta, ` -usage: tuftools meta [...] 
- -Generate sample metadata for file(s) given by path. - -`) -} - -func cmdMeta(args *docopt.Args, repo *tuf.Repo) error { - paths := args.All[""].([]string) - for _, file := range paths { - reader, _ := os.Open(file) - meta, _ := util.GenerateFileMeta(reader, "sha256") - jsonBytes, err := json.Marshal(meta) - if err != nil { - return err - } - filename := fmt.Sprintf("%s.meta.json", file) - err = ioutil.WriteFile(filename, jsonBytes, 0644) - if err != nil { - return err - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/README.md b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/README.md deleted file mode 100644 index 0bafb6a3a2..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# go-tuf client CLI - -## Install - -``` -go get github.com/flynn/go-tuf/cmd/tuf-client -``` - -## Usage - -The CLI provides three commands: - -* `tuf-client init` - initialize a local file store using root keys (e.g. from - the output of `tuf root-keys`) -* `tuf-client list` - list available targets and their file sizes -* `tuf-client get` - get a target file and write to STDOUT - -All commands require the base URL of the TUF repository as the first non-flag -argument, and accept an optional `--store` flag which is the path to the local -storage. - -Run `tuf-client help` from the command line to get more detailed usage -information. 
- -## Examples - -``` -# init -$ tuf-client init https://example.com/path/to/repo - -# init with a custom store path -$ tuf-client init --store /tmp/tuf.db https://example.com/path/to/repo - -# list available targets -$ tuf-client list https://example.com/path/to/repo -PATH SIZE -/foo.txt 1.6KB -/bar.txt 336B -/baz.txt 1.5KB - -# get a target -$ tuf-client get https://example.com/path/to/repo /foo.txt -the contents of foo.txt - -# the prefixed / is optional -$ tuf-client get https://example.com/path/to/repo foo.txt -the contents of foo.txt -``` diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/get.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/get.go deleted file mode 100644 index df65fa72ea..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/get.go +++ /dev/null @@ -1,52 +0,0 @@ -package main - -import ( - "io" - "io/ioutil" - "os" - - tuf "github.com/endophage/go-tuf/client" - "github.com/endophage/go-tuf/util" - "github.com/flynn/go-docopt" -) - -func init() { - register("get", cmdGet, ` -usage: tuf-client get [-s|--store=] - -Options: - -s The path to the local file store [default: tuf.db] - -Get a target from the repository. 
- `) -} - -type tmpFile struct { - *os.File -} - -func (t *tmpFile) Delete() error { - t.Close() - return os.Remove(t.Name()) -} - -func cmdGet(args *docopt.Args, client *tuf.Client) error { - if _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) { - return err - } - target := util.NormalizeTarget(args.String[""]) - file, err := ioutil.TempFile("", "go-tuf") - if err != nil { - return err - } - tmp := tmpFile{file} - if err := client.Download(target, &tmp); err != nil { - return err - } - defer tmp.Delete() - if _, err := tmp.Seek(0, os.SEEK_SET); err != nil { - return err - } - _, err = io.Copy(os.Stdout, file) - return err -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/init.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/init.go deleted file mode 100644 index 71c6bd8baf..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/init.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "encoding/json" - "io" - "os" - - tuf "github.com/endophage/go-tuf/client" - "github.com/endophage/go-tuf/data" - "github.com/flynn/go-docopt" -) - -func init() { - register("init", cmdInit, ` -usage: tuf-client init [-s|--store=] [] - -Options: - -s The path to the local file store [default: tuf.db] - -Initialize the local file store with root keys. 
- `) -} - -func cmdInit(args *docopt.Args, client *tuf.Client) error { - file := args.String[""] - var in io.Reader - if file == "" || file == "-" { - in = os.Stdin - } else { - var err error - in, err = os.Open(file) - if err != nil { - return err - } - } - var rootKeys []*data.Key - if err := json.NewDecoder(in).Decode(&rootKeys); err != nil { - return err - } - return client.Init(rootKeys, len(rootKeys)) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/list.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/list.go deleted file mode 100644 index 3e91b82200..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/list.go +++ /dev/null @@ -1,39 +0,0 @@ -package main - -import ( - "fmt" - "os" - "text/tabwriter" - - "github.com/dustin/go-humanize" - tuf "github.com/endophage/go-tuf/client" - "github.com/flynn/go-docopt" -) - -func init() { - register("list", cmdList, ` -usage: tuf-client list [-s|--store=] - -Options: - -s The path to the local file store [default: tuf.db] - -List available target files. 
- `) -} - -func cmdList(args *docopt.Args, client *tuf.Client) error { - if _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) { - return err - } - targets, err := client.Targets() - if err != nil { - return err - } - w := tabwriter.NewWriter(os.Stdout, 1, 2, 2, ' ', 0) - defer w.Flush() - fmt.Fprintln(w, "PATH\tSIZE") - for path, meta := range targets { - fmt.Fprintf(w, "%s\t%s\n", path, humanize.Bytes(uint64(meta.Length))) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/main.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/main.go deleted file mode 100644 index a4fe3b307d..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf-client/main.go +++ /dev/null @@ -1,96 +0,0 @@ -package main - -import ( - "fmt" - "log" - - tuf "github.com/endophage/go-tuf/client" - "github.com/flynn/go-docopt" -) - -func main() { - log.SetFlags(0) - - usage := `usage: tuf-client [-h|--help] [...] - -Options: - -h, --help - -Commands: - help Show usage for a specific command - init Initialize with root keys - list List available target files - get Get a target file - -See "tuf-client help " for more information on a specific command. 
-` - - args, _ := docopt.Parse(usage, nil, true, "", true) - cmd := args.String[""] - cmdArgs := args.All[""].([]string) - - if cmd == "help" { - if len(cmdArgs) == 0 { // `tuf-client help` - fmt.Println(usage) - return - } else { // `tuf-client help ` - cmd = cmdArgs[0] - cmdArgs = []string{"--help"} - } - } - - if err := runCommand(cmd, cmdArgs); err != nil { - log.Fatalln("ERROR:", err) - } -} - -type cmdFunc func(*docopt.Args, *tuf.Client) error - -type command struct { - usage string - f cmdFunc -} - -var commands = make(map[string]*command) - -func register(name string, f cmdFunc, usage string) { - commands[name] = &command{usage: usage, f: f} -} - -func runCommand(name string, args []string) error { - argv := make([]string, 1, 1+len(args)) - argv[0] = name - argv = append(argv, args...) - - cmd, ok := commands[name] - if !ok { - return fmt.Errorf("%s is not a tuf-client command. See 'tuf-client help'", name) - } - - parsedArgs, err := docopt.Parse(cmd.usage, argv, true, "", true) - if err != nil { - return err - } - - client, err := tufClient(parsedArgs) - if err != nil { - return err - } - return cmd.f(parsedArgs, client) -} - -func tufClient(args *docopt.Args) (*tuf.Client, error) { - store, ok := args.String["--store"] - if !ok { - store = args.String["-s"] - } - local, err := tuf.FileLocalStore(store) - if err != nil { - return nil, err - } - remote, err := tuf.HTTPRemoteStore(args.String[""], nil) - if err != nil { - return nil, err - } - return tuf.NewClient(local, remote), nil -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/add.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/add.go deleted file mode 100644 index e69ca64038..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/add.go +++ /dev/null @@ -1,36 +0,0 @@ -package main - -import ( - // "encoding/json" - - "github.com/endophage/go-tuf" - "github.com/flynn/go-docopt" -) - -func init() { - register("add", cmdAdd, ` -usage: tuf add 
[--expires=] [--custom=] [...] - -Add target file(s). - -Options: - --expires= Set the targets manifest to expire days from now. - --custom= Set custom JSON data for the target(s). -`) -} - -func cmdAdd(args *docopt.Args, repo *tuf.Repo) error { - // var custom json.RawMessage - // if c := args.String["--custom"]; c != "" { - // custom = json.RawMessage(c) - // } - paths := args.All[""].([]string) - if arg := args.String["--expires"]; arg != "" { - expires, err := parseExpires(arg) - if err != nil { - return err - } - return repo.AddTargetsWithExpires(paths, nil, expires) - } - return repo.AddTargets(paths, nil) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/clean.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/clean.go deleted file mode 100644 index 5655983cd9..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/clean.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "github.com/endophage/go-tuf" - "github.com/flynn/go-docopt" -) - -func init() { - register("clean", cmdClean, ` -usage: tuf clean - -Remove all staged manifests. - `) -} - -func cmdClean(args *docopt.Args, repo *tuf.Repo) error { - return repo.Clean() -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/commit.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/commit.go deleted file mode 100644 index 9e37626844..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/commit.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "github.com/endophage/go-tuf" - "github.com/flynn/go-docopt" -) - -func init() { - register("commit", cmdCommit, ` -usage: tuf commit - -Commit staged files to the repository. 
-`) -} - -func cmdCommit(args *docopt.Args, repo *tuf.Repo) error { - return repo.Commit() -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/gen_key.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/gen_key.go deleted file mode 100644 index cf28809628..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/gen_key.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/endophage/go-tuf" - "github.com/flynn/go-docopt" -) - -func init() { - register("gen-key", cmdGenKey, ` -usage: tuf gen-key [--expires=] - -Generate a new signing key for the given role. - -The key will be serialized to JSON and written to the "keys" directory with -filename pattern "ROLE-KEYID.json". The root manifest will also be staged -with the addition of the key's ID to the role's list of key IDs. - -Options: - --expires= Set the root manifest to expire days from now. -`) -} - -func cmdGenKey(args *docopt.Args, repo *tuf.Repo) error { - role := args.String[""] - var id string - var err error - if arg := args.String["--expires"]; arg != "" { - expires, err := parseExpires(arg) - if err != nil { - return err - } - id, err = repo.GenKeyWithExpires(role, expires) - } else { - id, err = repo.GenKey(role) - } - if err != nil { - return err - } - fmt.Println("Generated", role, "key with ID", id) - return nil -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/init.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/init.go deleted file mode 100644 index de9e31081f..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/init.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "github.com/endophage/go-tuf" - "github.com/flynn/go-docopt" -) - -func init() { - register("init", cmdInit, ` -usage: tuf init [--consistent-snapshot=false] - -Initialize a new repository. - -This is only required if the repository should not generate consistent -snapshots (i.e. 
by passing "--consistent-snapshot=false"). If consistent -snapshots should be generated, the repository will be implicitly -initialized to do so when generating keys. - `) -} - -func cmdInit(args *docopt.Args, repo *tuf.Repo) error { - return repo.Init(args.String["--consistent-snapshot"] != "false") -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/main.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/main.go deleted file mode 100644 index 06ddde1140..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/main.go +++ /dev/null @@ -1,167 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "log" - "os" - "strconv" - "strings" - "time" - - "github.com/docker/docker/pkg/term" - "github.com/flynn/go-docopt" - - "github.com/endophage/go-tuf" - "github.com/endophage/go-tuf/signed" - "github.com/endophage/go-tuf/store" - "github.com/endophage/go-tuf/util" -) - -func main() { - log.SetFlags(0) - - usage := `usage: tuf [-h|--help] [-d|--dir=] [--insecure-plaintext] [...] 
- -Options: - -h, --help - -d The path to the repository (defaults to the current working directory) - --insecure-plaintext Don't encrypt signing keys - -Commands: - help Show usage for a specific command - gen-key Generate a new signing key for a specific manifest - revoke-key Revoke a signing key - add Add target file(s) - remove Remove a target file - snapshot Update the snapshot manifest - timestamp Update the timestamp manifest - sign Sign a manifest - commit Commit staged files to the repository - regenerate Recreate the targets manifest - clean Remove all staged manifests - root-keys Output a JSON serialized array of root keys to STDOUT - -See "tuf help " for more information on a specific command -` - - args, _ := docopt.Parse(usage, nil, true, "", true) - cmd := args.String[""] - cmdArgs := args.All[""].([]string) - - if cmd == "help" { - if len(cmdArgs) == 0 { // `tuf help` - fmt.Println(usage) - return - } else { // `tuf help ` - cmd = cmdArgs[0] - cmdArgs = []string{"--help"} - } - } - - dir, ok := args.String["-d"] - if !ok { - dir = args.String["--dir"] - } - if dir == "" { - var err error - dir, err = os.Getwd() - if err != nil { - log.Fatal(err) - } - } - - if err := runCommand(cmd, cmdArgs, dir, args.Bool["--insecure-plaintext"]); err != nil { - log.Fatalln("ERROR:", err) - } -} - -type cmdFunc func(*docopt.Args, *tuf.Repo) error - -type command struct { - usage string - f cmdFunc -} - -var commands = make(map[string]*command) - -func register(name string, f cmdFunc, usage string) { - commands[name] = &command{usage: usage, f: f} -} - -func runCommand(name string, args []string, dir string, insecure bool) error { - argv := make([]string, 1, 1+len(args)) - argv[0] = name - argv = append(argv, args...) - - cmd, ok := commands[name] - if !ok { - return fmt.Errorf("%s is not a tuf command. 
See 'tuf help'", name) - } - - parsedArgs, err := docopt.Parse(cmd.usage, argv, true, "", true) - if err != nil { - return err - } - - var p util.PassphraseFunc - if !insecure { - p = getPassphrase - } - signer := signed.Ed25519{} - repo, err := tuf.NewRepo(&signer, store.FileSystemStore(dir, p), "sha256") - if err != nil { - return err - } - return cmd.f(parsedArgs, repo) -} - -func parseExpires(arg string) (time.Time, error) { - days, err := strconv.Atoi(arg) - if err != nil { - return time.Time{}, fmt.Errorf("failed to parse --expires arg: %s", err) - } - return time.Now().AddDate(0, 0, days).UTC(), nil -} - -func getPassphrase(role string, confirm bool) ([]byte, error) { - if pass := os.Getenv(fmt.Sprintf("TUF_%s_PASSPHRASE", strings.ToUpper(role))); pass != "" { - return []byte(pass), nil - } - - state, err := term.SaveState(0) - if err != nil { - return nil, err - } - term.DisableEcho(0, state) - defer term.RestoreTerminal(0, state) - - stdin := bufio.NewReader(os.Stdin) - - fmt.Printf("Enter %s keys passphrase: ", role) - passphrase, err := stdin.ReadBytes('\n') - fmt.Println() - if err != nil { - return nil, err - } - passphrase = passphrase[0 : len(passphrase)-1] - - if !confirm { - return passphrase, nil - } - - fmt.Printf("Repeat %s keys passphrase: ", role) - confirmation, err := stdin.ReadBytes('\n') - fmt.Println() - if err != nil { - return nil, err - } - confirmation = confirmation[0 : len(confirmation)-1] - - if !bytes.Equal(passphrase, confirmation) { - return nil, errors.New("The entered passphrases do not match") - } - return passphrase, nil -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/regenerate.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/regenerate.go deleted file mode 100644 index 42a27c0616..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/regenerate.go +++ /dev/null @@ -1,22 +0,0 @@ -package main - -import ( - "log" - - "github.com/endophage/go-tuf" - 
"github.com/flynn/go-docopt" -) - -func init() { - register("regenerate", cmdRegenerate, ` -usage: tuf regenerate [--consistent-snapshot=false] - -Recreate the targets manifest. - `) -} - -func cmdRegenerate(args *docopt.Args, repo *tuf.Repo) error { - // TODO: implement this - log.Println("not implemented") - return nil -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/remove.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/remove.go deleted file mode 100644 index 32d607617e..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/remove.go +++ /dev/null @@ -1,35 +0,0 @@ -package main - -import ( - "errors" - - "github.com/endophage/go-tuf" - "github.com/flynn/go-docopt" -) - -func init() { - register("remove", cmdRemove, ` -usage: tuf remove [--expires=] [--all] [...] - -Remove target file(s). - -Options: - --all Remove all target files. - --expires= Set the targets manifest to expire days from now. -`) -} - -func cmdRemove(args *docopt.Args, repo *tuf.Repo) error { - paths := args.All[""].([]string) - if len(paths) == 0 && !args.Bool["--all"] { - return errors.New("either specify some paths or set the --all flag to remove all targets") - } - if arg := args.String["--expires"]; arg != "" { - expires, err := parseExpires(arg) - if err != nil { - return err - } - return repo.RemoveTargetsWithExpires(paths, expires) - } - return repo.RemoveTargets(paths) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/revoke_key.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/revoke_key.go deleted file mode 100644 index 1ef7c4058e..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/revoke_key.go +++ /dev/null @@ -1,31 +0,0 @@ -package main - -import ( - "github.com/endophage/go-tuf" - "github.com/flynn/go-docopt" -) - -func init() { - register("revoke-key", cmdRevokeKey, ` -usage: tuf revoke-key [--expires=] - -Revoke a signing key - -The key will be removed from 
the root manifest, but the key will remain in the -"keys" directory if present. - -Options: - --expires= Set the root manifest to expire days from now. -`) -} - -func cmdRevokeKey(args *docopt.Args, repo *tuf.Repo) error { - if arg := args.String["--expires"]; arg != "" { - expires, err := parseExpires(arg) - if err != nil { - return err - } - return repo.RevokeKeyWithExpires(args.String[""], args.String[""], expires) - } - return repo.RevokeKey(args.String[""], args.String[""]) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/root_keys.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/root_keys.go deleted file mode 100644 index 4b8115333f..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/root_keys.go +++ /dev/null @@ -1,27 +0,0 @@ -package main - -import ( - "encoding/json" - "os" - - "github.com/endophage/go-tuf" - "github.com/flynn/go-docopt" -) - -func init() { - register("root-keys", cmdRootKeys, ` -usage: tuf root-keys - -Outputs a JSON serialized array of root keys to STDOUT. - -The resulting JSON should be distributed to clients for performing initial updates. -`) -} - -func cmdRootKeys(args *docopt.Args, repo *tuf.Repo) error { - keys, err := repo.RootKeys() - if err != nil { - return err - } - return json.NewEncoder(os.Stdout).Encode(keys) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/sign.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/sign.go deleted file mode 100644 index 08cd915a34..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/sign.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "github.com/endophage/go-tuf" - "github.com/flynn/go-docopt" -) - -func init() { - register("sign", cmdSign, ` -usage: tuf sign - -Sign a manifest. 
-`) -} - -func cmdSign(args *docopt.Args, repo *tuf.Repo) error { - return repo.Sign(args.String[""]) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/snapshot.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/snapshot.go deleted file mode 100644 index 4894cde618..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/snapshot.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -import ( - "github.com/endophage/go-tuf" - "github.com/flynn/go-docopt" -) - -func init() { - register("snapshot", cmdSnapshot, ` -usage: tuf snapshot [--expires=] [--compression=] - -Update the snapshot manifest. - -Options: - --expires= Set the snapshot manifest to expire days from now. -`) -} - -func cmdSnapshot(args *docopt.Args, repo *tuf.Repo) error { - // TODO: parse --compression - if arg := args.String["--expires"]; arg != "" { - expires, err := parseExpires(arg) - if err != nil { - return err - } - return repo.SnapshotWithExpires(tuf.CompressionTypeNone, expires) - } - return repo.Snapshot(tuf.CompressionTypeNone) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/timestamp.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/timestamp.go deleted file mode 100644 index a62a252deb..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/cmd/tuf/timestamp.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "github.com/endophage/go-tuf" - "github.com/flynn/go-docopt" -) - -func init() { - register("timestamp", cmdTimestamp, ` -usage: tuf timestamp [--expires=] - -Update the timestamp manifest. - -Options: - --expires= Set the timestamp manifest to expire days from now. 
-`) -} - -func cmdTimestamp(args *docopt.Args, repo *tuf.Repo) error { - if arg := args.String["--expires"]; arg != "" { - expires, err := parseExpires(arg) - if err != nil { - return err - } - return repo.TimestampWithExpires(expires) - } - return repo.Timestamp() -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/data/types.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/data/types.go deleted file mode 100644 index 286403a2ef..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/data/types.go +++ /dev/null @@ -1,157 +0,0 @@ -package data - -import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" - "time" - - //cjson "github.com/tent/canonical-json-go" -) - -const KeyIDLength = sha256.Size * 2 - -type KeyValue struct { - Public HexBytes `json:"public"` - // Private HexBytes `json:"private,omitempty"` -} - -type Key struct { - Type string `json:"keytype"` - Value KeyValue `json:"keyval"` -} - -func (k *Key) ID() string { - // create a copy so the private key is not included - //data, _ := cjson.Marshal(&Key{ - // Type: k.Type, - // Value: KeyValue{Public: k.Value.Public}, - //}) - //digest := sha256.Sum256(data) - //TODO(mccauley): Bring rufus/go-tuf in line on canonicalization - digest := sha256.Sum256(k.Value.Public) - return hex.EncodeToString(digest[:]) -} - -type Signed struct { - Signed json.RawMessage `json:"signed"` - Signatures []Signature `json:"signatures"` -} - -type Signature struct { - KeyID string `json:"keyid"` - Method string `json:"method"` - Signature HexBytes `json:"sig"` -} - -func DefaultExpires(role string) time.Time { - var t time.Time - switch role { - case "root": - t = time.Now().AddDate(1, 0, 0) - case "targets": - t = time.Now().AddDate(0, 3, 0) - case "snapshot": - t = time.Now().AddDate(0, 0, 7) - case "timestamp": - t = time.Now().AddDate(0, 0, 1) - } - return t.UTC().Round(time.Second) -} - -type Root struct { - Type string `json:"_type"` - Version int `json:"version"` - Expires time.Time 
`json:"expires"` - Keys map[string]*Key `json:"keys"` - Roles map[string]*Role `json:"roles"` - - ConsistentSnapshot bool `json:"consistent_snapshot"` -} - -func NewRoot() *Root { - return &Root{ - Type: "Root", - Expires: DefaultExpires("root"), - Keys: make(map[string]*Key), - Roles: make(map[string]*Role), - ConsistentSnapshot: true, - } -} - -type Role struct { - KeyIDs []string `json:"keyids"` - Threshold int `json:"threshold"` -} - -func (r *Role) ValidKey(id string) bool { - for _, key := range r.KeyIDs { - if key == id { - return true - } - } - return false -} - -type Files map[string]FileMeta - -type Snapshot struct { - Type string `json:"_type"` - Version int `json:"version"` - Expires time.Time `json:"expires"` - Meta Files `json:"meta"` -} - -func NewSnapshot() *Snapshot { - return &Snapshot{ - Type: "Snapshot", - Expires: DefaultExpires("snapshot"), - Meta: make(Files), - } -} - -type Hashes map[string]HexBytes - -type FileMeta struct { - Length int64 `json:"length"` - Hashes Hashes `json:"hashes"` - Custom *json.RawMessage `json:"custom,omitempty"` -} - -func (f FileMeta) HashAlgorithms() []string { - funcs := make([]string, 0, len(f.Hashes)) - for name := range f.Hashes { - funcs = append(funcs, name) - } - return funcs -} - -type Targets struct { - Type string `json:"_type"` - Version int `json:"version"` - Expires time.Time `json:"expires"` - Targets Files `json:"targets"` -} - -func NewTargets() *Targets { - return &Targets{ - Type: "Targets", - Expires: DefaultExpires("targets"), - Targets: make(Files), - } -} - -type Timestamp struct { - Type string `json:"_type"` - Version int `json:"version"` - Expires time.Time `json:"expires"` - Meta Files `json:"meta"` -} - -func NewTimestamp() *Timestamp { - return &Timestamp{ - Type: "Timestamp", - Expires: DefaultExpires("timestamp"), - Meta: make(Files), - } -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/encrypted/encrypted.go 
b/Godeps/_workspace/src/github.com/endophage/go-tuf/encrypted/encrypted.go deleted file mode 100644 index 4d174d61f9..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/encrypted/encrypted.go +++ /dev/null @@ -1,226 +0,0 @@ -// Package encrypted provides a simple, secure system for encrypting data -// symmetrically with a passphrase. -// -// It uses scrypt derive a key from the passphrase and the NaCl secret box -// cipher for authenticated encryption. -package encrypted - -import ( - "crypto/rand" - "encoding/json" - "errors" - "fmt" - "io" - - "golang.org/x/crypto/nacl/secretbox" - "golang.org/x/crypto/scrypt" -) - -const saltSize = 32 - -const ( - boxKeySize = 32 - boxNonceSize = 24 -) - -const ( - // N parameter was chosen to be ~100ms of work using the default implementation - // on the 2.3GHz Core i7 Haswell processor in a late-2013 Apple Retina Macbook - // Pro (it takes ~113ms). - scryptN = 32768 - scryptR = 8 - scryptP = 1 -) - -const ( - nameScrypt = "scrypt" - nameSecretBox = "nacl/secretbox" -) - -type data struct { - KDF scryptKDF `json:"kdf"` - Cipher secretBoxCipher `json:"cipher"` - Ciphertext []byte `json:"ciphertext"` -} - -type scryptParams struct { - N int `json:"N"` - R int `json:"r"` - P int `json:"p"` -} - -func newScryptKDF() (scryptKDF, error) { - salt := make([]byte, saltSize) - if err := fillRandom(salt); err != nil { - return scryptKDF{}, err - } - return scryptKDF{ - Name: nameScrypt, - Params: scryptParams{ - N: scryptN, - R: scryptR, - P: scryptP, - }, - Salt: salt, - }, nil -} - -type scryptKDF struct { - Name string `json:"name"` - Params scryptParams `json:"params"` - Salt []byte `json:"salt"` -} - -func (s *scryptKDF) Key(passphrase []byte) ([]byte, error) { - return scrypt.Key(passphrase, s.Salt, s.Params.N, s.Params.R, s.Params.P, boxKeySize) -} - -// CheckParams checks that the encoded KDF parameters are what we expect them to -// be. 
If we do not do this, an attacker could cause a DoS by tampering with -// them. -func (s *scryptKDF) CheckParams() error { - if s.Params.N != scryptN || s.Params.R != scryptR || s.Params.P != scryptP { - return errors.New("encrypted: unexpected kdf parameters") - } - return nil -} - -func newSecretBoxCipher() (secretBoxCipher, error) { - nonce := make([]byte, boxNonceSize) - if err := fillRandom(nonce); err != nil { - return secretBoxCipher{}, err - } - return secretBoxCipher{ - Name: nameSecretBox, - Nonce: nonce, - }, nil -} - -type secretBoxCipher struct { - Name string `json:"name"` - Nonce []byte `json:"nonce"` - - encrypted bool -} - -func (s *secretBoxCipher) Encrypt(plaintext, key []byte) []byte { - var keyBytes [boxKeySize]byte - var nonceBytes [boxNonceSize]byte - - if len(key) != len(keyBytes) { - panic("incorrect key size") - } - if len(s.Nonce) != len(nonceBytes) { - panic("incorrect nonce size") - } - - copy(keyBytes[:], key) - copy(nonceBytes[:], s.Nonce) - - // ensure that we don't re-use nonces - if s.encrypted { - panic("Encrypt must only be called once for each cipher instance") - } - s.encrypted = true - - return secretbox.Seal(nil, plaintext, &nonceBytes, &keyBytes) -} - -func (s *secretBoxCipher) Decrypt(ciphertext, key []byte) ([]byte, error) { - var keyBytes [boxKeySize]byte - var nonceBytes [boxNonceSize]byte - - if len(key) != len(keyBytes) { - panic("incorrect key size") - } - if len(s.Nonce) != len(nonceBytes) { - // return an error instead of panicking since the nonce is user input - return nil, errors.New("encrypted: incorrect nonce size") - } - - copy(keyBytes[:], key) - copy(nonceBytes[:], s.Nonce) - - res, ok := secretbox.Open(nil, ciphertext, &nonceBytes, &keyBytes) - if !ok { - return nil, errors.New("encrypted: decryption failed") - } - return res, nil -} - -// Encrypt takes a passphrase and plaintext, and returns a JSON object -// containing ciphertext and the details necessary to decrypt it. 
-func Encrypt(plaintext, passphrase []byte) ([]byte, error) { - k, err := newScryptKDF() - if err != nil { - return nil, err - } - key, err := k.Key(passphrase) - if err != nil { - return nil, err - } - - c, err := newSecretBoxCipher() - if err != nil { - return nil, err - } - - data := &data{ - KDF: k, - Cipher: c, - } - data.Ciphertext = c.Encrypt(plaintext, key) - - return json.Marshal(data) -} - -// Marshal encrypts the JSON encoding of v using passphrase. -func Marshal(v interface{}, passphrase []byte) ([]byte, error) { - data, err := json.MarshalIndent(v, "", "\t") - if err != nil { - return nil, err - } - return Encrypt(data, passphrase) -} - -// Decrypt takes a JSON-encoded ciphertext object encrypted using Encrypt and -// tries to decrypt it using passphrase. If successful, it returns the -// plaintext. -func Decrypt(ciphertext, passphrase []byte) ([]byte, error) { - data := &data{} - if err := json.Unmarshal(ciphertext, data); err != nil { - return nil, err - } - - if data.KDF.Name != nameScrypt { - return nil, fmt.Errorf("encrypted: unknown kdf name %q", data.KDF.Name) - } - if data.Cipher.Name != nameSecretBox { - return nil, fmt.Errorf("encrypted: unknown cipher name %q", data.Cipher.Name) - } - if err := data.KDF.CheckParams(); err != nil { - return nil, err - } - - key, err := data.KDF.Key(passphrase) - if err != nil { - return nil, err - } - - return data.Cipher.Decrypt(data.Ciphertext, key) -} - -// Unmarshal decrypts the data using passphrase and unmarshals the resulting -// plaintext into the value pointed to by v. 
-func Unmarshal(data []byte, v interface{}, passphrase []byte) error { - decrypted, err := Decrypt(data, passphrase) - if err != nil { - return err - } - return json.Unmarshal(decrypted, v) -} - -func fillRandom(b []byte) error { - _, err := io.ReadFull(rand.Reader, b) - return err -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/encrypted/encrypted_test.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/encrypted/encrypted_test.go deleted file mode 100644 index 31b058252f..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/encrypted/encrypted_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package encrypted - -import ( - "encoding/json" - "testing" - - . "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } - -type EncryptedSuite struct{} - -var _ = Suite(&EncryptedSuite{}) - -var plaintext = []byte("reallyimportant") - -func (EncryptedSuite) TestRoundtrip(c *C) { - passphrase := []byte("supersecret") - - enc, err := Encrypt(plaintext, passphrase) - c.Assert(err, IsNil) - - // successful decrypt - dec, err := Decrypt(enc, passphrase) - c.Assert(err, IsNil) - c.Assert(dec, DeepEquals, plaintext) - - // wrong passphrase - passphrase[0] = 0 - dec, err = Decrypt(enc, passphrase) - c.Assert(err, NotNil) - c.Assert(dec, IsNil) -} - -func (EncryptedSuite) TestTamperedRoundtrip(c *C) { - passphrase := []byte("supersecret") - - enc, err := Encrypt(plaintext, passphrase) - c.Assert(err, IsNil) - - data := &data{} - err = json.Unmarshal(enc, data) - c.Assert(err, IsNil) - - data.Ciphertext[0] = 0 - data.Ciphertext[1] = 0 - - enc, _ = json.Marshal(data) - - dec, err := Decrypt(enc, passphrase) - c.Assert(err, NotNil) - c.Assert(dec, IsNil) -} - -func (EncryptedSuite) TestDecrypt(c *C) { - enc := 
[]byte(`{"kdf":{"name":"scrypt","params":{"N":32768,"r":8,"p":1},"salt":"N9a7x5JFGbrtB2uBR81jPwp0eiLR4A7FV3mjVAQrg1g="},"cipher":{"name":"nacl/secretbox","nonce":"2h8HxMmgRfuYdpswZBQaU3xJ1nkA/5Ik"},"ciphertext":"SEW6sUh0jf2wfdjJGPNS9+bkk2uB+Cxamf32zR8XkQ=="}`) - passphrase := []byte("supersecret") - - dec, err := Decrypt(enc, passphrase) - c.Assert(err, IsNil) - c.Assert(dec, DeepEquals, plaintext) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/keys/db.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/keys/db.go deleted file mode 100644 index 0f2a6da9b6..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/keys/db.go +++ /dev/null @@ -1,115 +0,0 @@ -package keys - -import ( - "errors" - "fmt" - - "github.com/endophage/go-tuf/data" -) - -var ( - ErrWrongType = errors.New("tuf: invalid key type") - ErrExists = errors.New("tuf: key already in db") - ErrWrongID = errors.New("tuf: key id mismatch") - ErrInvalidKey = errors.New("tuf: invalid key") - ErrInvalidRole = errors.New("tuf: invalid role") - ErrInvalidKeyID = errors.New("tuf: invalid key id") - ErrInvalidThreshold = errors.New("tuf: invalid role threshold") -) - -type PublicKey struct { - data.Key - ID string -} - -func NewPublicKey(keyType string, public []byte) *PublicKey { - // create a copy so the private key is not included - key := data.Key{ - Type: keyType, - Value: data.KeyValue{Public: public}, - } - return &PublicKey{key, key.ID()} -} - -type PrivateKey struct { - PublicKey - Private []byte -} - -type DB struct { - types map[string]int - roles map[string]*data.Role - keys map[string]*PublicKey -} - -func NewDB() *DB { - return &DB{ - roles: make(map[string]*data.Role), - keys: make(map[string]*PublicKey), - } -} - -func (db *DB) AddKey(k *PublicKey) error { - //if _, ok := db.types[k.Type]; !ok { - // return ErrWrongType - //} - //if len(k.Value.Public) != ed25519.PublicKeySize { - // return ErrInvalidKey - //} - fmt.Println("Adding Key", k.ID) - - key := PublicKey{ 
- Key: data.Key{ - Type: k.Type, - Value: data.KeyValue{ - Public: make([]byte, len(k.Value.Public)), - }, - }, - ID: k.ID, - } - - copy(key.Value.Public, k.Value.Public) - - db.keys[k.ID] = &key - return nil -} - -var validRoles = map[string]struct{}{ - "root": {}, - "targets": {}, - "snapshot": {}, - "timestamp": {}, -} - -func ValidRole(name string) bool { - _, ok := validRoles[name] - return ok -} - -func (db *DB) AddRole(name string, r *data.Role) error { - fmt.Println("Adding Role", name) - if !ValidRole(name) { - return ErrInvalidRole - } - if r.Threshold < 1 { - return ErrInvalidThreshold - } - - // validate all key ids have the correct length - for _, id := range r.KeyIDs { - if len(id) != data.KeyIDLength { - return ErrInvalidKeyID - } - } - - db.roles[name] = r - return nil -} - -func (db *DB) GetKey(id string) *PublicKey { - return db.keys[id] -} - -func (db *DB) GetRole(name string) *data.Role { - return db.roles[name] -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/repo.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/repo.go deleted file mode 100644 index 341712ee65..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/repo.go +++ /dev/null @@ -1,680 +0,0 @@ -package tuf - -import ( - "bytes" - "encoding/json" - "fmt" - //"io" - "path" - "strings" - "time" - - cjson "github.com/tent/canonical-json-go" - - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/errors" - "github.com/endophage/go-tuf/keys" - "github.com/endophage/go-tuf/signed" - "github.com/endophage/go-tuf/store" - "github.com/endophage/go-tuf/util" -) - -type CompressionType uint8 - -const ( - CompressionTypeNone CompressionType = iota - CompressionTypeGzip -) - -// topLevelManifests determines the order signatures are verified when committing. 
-var topLevelManifests = []string{ - "root.json", - "targets.json", - "snapshot.json", - "timestamp.json", -} - -// snapshotManifests is the list of default filenames that should be included in the -// snapshots.json. If using delegated targets, additional, dynamic files should also -// be included in snapshots. -var snapshotManifests = []string{ - "root.json", - "targets.json", -} - -// Repo represents an instance of a TUF repo -type Repo struct { - trust *signed.Signer - local store.LocalStore - hashAlgorithms []string - meta map[string]json.RawMessage -} - -// NewRepo is a factory function for instantiating new TUF repos objects. -// If the local store is already populated, local.GetMeta() will initialise -// the Repo with the appropriate state. -func NewRepo(trust signed.TrustService, local store.LocalStore, hashAlgorithms ...string) (*Repo, error) { - r := &Repo{trust: signed.NewSigner(trust), local: local, hashAlgorithms: hashAlgorithms} - - var err error - r.meta, err = local.GetMeta() - if err != nil { - return nil, err - } - return r, nil -} - -// Init attempts to initialize a brand new TUF repo. It will fail if -// an existing targets file is detected. 
-func (r *Repo) Init(consistentSnapshot bool) error { - t, err := r.targets() - if err != nil { - return err - } - if len(t.Targets) > 0 { - return errors.ErrInitNotAllowed - } - root := data.NewRoot() - root.ConsistentSnapshot = consistentSnapshot - err = r.setMeta("root.json", root) - if err != nil { - return err - } - _, err = r.GenKey("root") - if err != nil { - return err - } - _, err = r.GenKey("targets") - if err != nil { - return err - } - _, err = r.GenKey("snapshot") - if err != nil { - return err - } - _, err = r.GenKey("timestamp") - if err != nil { - return err - } - t.Expires = data.DefaultExpires("targets").Round(time.Second) - t.Version++ - err = r.setMeta("targets.json", t) - if err != nil { - return err - } - err = r.Snapshot(CompressionTypeNone) - if err != nil { - return err - } - err = r.Timestamp() - if err != nil { - return err - } - return nil -} - -func (r *Repo) db() (*keys.DB, error) { - db := keys.NewDB() - root, err := r.root() - if err != nil { - return nil, err - } - for _, k := range root.Keys { - if err := db.AddKey(&keys.PublicKey{*k, k.ID()}); err != nil { - return nil, err - } - } - for name, role := range root.Roles { - if err := db.AddRole(name, role); err != nil { - return nil, err - } - } - return db, nil -} - -func (r *Repo) root() (*data.Root, error) { - rootJSON, ok := r.meta["root.json"] - if !ok { - return data.NewRoot(), nil - } - s := &data.Signed{} - if err := json.Unmarshal(rootJSON, s); err != nil { - return nil, err - } - root := data.NewRoot() - if err := json.Unmarshal(s.Signed, root); err != nil { - return nil, err - } - return root, nil -} - -func (r *Repo) snapshot() (*data.Snapshot, error) { - snapshotJSON, ok := r.meta["snapshot.json"] - if !ok { - return data.NewSnapshot(), nil - } - s := &data.Signed{} - if err := json.Unmarshal(snapshotJSON, s); err != nil { - return nil, err - } - snapshot := data.NewSnapshot() - if err := json.Unmarshal(s.Signed, snapshot); err != nil { - return nil, err - } - return 
snapshot, nil -} - -func (r *Repo) targets() (*data.Targets, error) { - targetsJSON, ok := r.meta["targets.json"] - if !ok { - return data.NewTargets(), nil - } - s := &data.Signed{} - if err := json.Unmarshal(targetsJSON, s); err != nil { - return nil, err - } - targets := data.NewTargets() - if err := json.Unmarshal(s.Signed, targets); err != nil { - return nil, err - } - return targets, nil -} - -func (r *Repo) timestamp() (*data.Timestamp, error) { - timestampJSON, ok := r.meta["timestamp.json"] - if !ok { - return data.NewTimestamp(), nil - } - s := &data.Signed{} - if err := json.Unmarshal(timestampJSON, s); err != nil { - return nil, err - } - timestamp := data.NewTimestamp() - if err := json.Unmarshal(s.Signed, timestamp); err != nil { - return nil, err - } - return timestamp, nil -} - -func (r *Repo) GenKey(role string) (string, error) { - return r.GenKeyWithExpires(role, data.DefaultExpires("root")) -} - -func (r *Repo) GenKeyWithExpires(keyRole string, expires time.Time) (string, error) { - if !keys.ValidRole(keyRole) { - return "", errors.ErrInvalidRole{keyRole} - } - - if !validExpires(expires) { - return "", errors.ErrInvalidExpires{expires} - } - - root, err := r.root() - if err != nil { - return "", err - } - - key, err := r.trust.Create() - if err != nil { - return "", err - } - if err := r.local.SaveKey(keyRole, &key.Key); err != nil { - return "", err - } - - role, ok := root.Roles[keyRole] - if !ok { - role = &data.Role{KeyIDs: []string{}, Threshold: 1} - root.Roles[keyRole] = role - } - role.KeyIDs = append(role.KeyIDs, key.ID) - - root.Keys[key.ID] = &key.Key - root.Expires = expires.Round(time.Second) - root.Version++ - - return key.ID, r.setMeta("root.json", root) -} - -func validExpires(expires time.Time) bool { - return expires.Sub(time.Now()) > 0 -} - -func (r *Repo) RootKeys() ([]*data.Key, error) { - root, err := r.root() - if err != nil { - return nil, err - } - role, ok := root.Roles["root"] - if !ok { - return nil, nil - } - rootKeys 
:= make([]*data.Key, len(role.KeyIDs)) - for i, id := range role.KeyIDs { - key, ok := root.Keys[id] - if !ok { - return nil, fmt.Errorf("tuf: invalid root metadata") - } - rootKeys[i] = key - } - return rootKeys, nil -} - -func (r *Repo) RevokeKey(role, id string) error { - return r.RevokeKeyWithExpires(role, id, data.DefaultExpires("root")) -} - -func (r *Repo) RevokeKeyWithExpires(keyRole, id string, expires time.Time) error { - if !keys.ValidRole(keyRole) { - return errors.ErrInvalidRole{keyRole} - } - - if !validExpires(expires) { - return errors.ErrInvalidExpires{expires} - } - - root, err := r.root() - if err != nil { - return err - } - - if _, ok := root.Keys[id]; !ok { - return errors.ErrKeyNotFound{keyRole, id} - } - - role, ok := root.Roles[keyRole] - if !ok { - return errors.ErrKeyNotFound{keyRole, id} - } - - keyIDs := make([]string, 0, len(role.KeyIDs)) - for _, keyID := range role.KeyIDs { - if keyID == id { - continue - } - keyIDs = append(keyIDs, keyID) - } - if len(keyIDs) == len(role.KeyIDs) { - return errors.ErrKeyNotFound{keyRole, id} - } - role.KeyIDs = keyIDs - - delete(root.Keys, id) - root.Roles[keyRole] = role - root.Expires = expires.Round(time.Second) - root.Version++ - - return r.setMeta("root.json", root) -} - -func (r *Repo) setMeta(name string, meta interface{}) error { - keys, err := r.getKeys(strings.TrimSuffix(name, ".json")) - if err != nil { - return err - } - b, err := cjson.Marshal(meta) - if err != nil { - return err - } - s := &data.Signed{Signed: b} - err = r.trust.Sign(s, keys...) 
- if err != nil { - return err - } - b, err = json.Marshal(s) - if err != nil { - return err - } - r.meta[name] = b - return r.local.SetMeta(name, b) -} - -func (r *Repo) Sign(name string) error { - role := strings.TrimSuffix(name, ".json") - if !keys.ValidRole(role) { - return errors.ErrInvalidRole{role} - } - - s, err := r.signedMeta(name) - if err != nil { - return err - } - - keys, err := r.getKeys(role) - if err != nil { - return err - } - if len(keys) == 0 { - return errors.ErrInsufficientKeys{name} - } - - r.trust.Sign(s, keys...) - - b, err := json.Marshal(s) - if err != nil { - return err - } - r.meta[name] = b - return r.local.SetMeta(name, b) -} - -// getKeys returns signing keys from local storage. -// -// Only keys contained in the keys db are returned (i.e. local keys which have -// been revoked are omitted), except for the root role in which case all local -// keys are returned (revoked root keys still need to sign new root metadata so -// clients can verify the new root.json and update their keys db accordingly). 
-func (r *Repo) getKeys(name string) ([]*keys.PublicKey, error) { - localKeys, err := r.local.GetKeys(name) - if err != nil { - return nil, err - } - if name == "root" { - rootkeys := make([]*keys.PublicKey, 0, len(localKeys)) - for _, key := range localKeys { - rootkeys = append(rootkeys, &keys.PublicKey{*key, key.ID()}) - } - return rootkeys, nil - } - db, err := r.db() - if err != nil { - return nil, err - } - role := db.GetRole(name) - if role == nil { - return nil, nil - } - if len(role.KeyIDs) == 0 { - return nil, nil - } - rolekeys := make([]*keys.PublicKey, 0, len(role.KeyIDs)) - for _, key := range localKeys { - if role.ValidKey(key.ID()) { - rolekeys = append(rolekeys, &keys.PublicKey{*key, key.ID()}) - } - } - return rolekeys, nil -} - -func (r *Repo) signedMeta(name string) (*data.Signed, error) { - b, ok := r.meta[name] - if !ok { - return nil, errors.ErrMissingMetadata{name} - } - s := &data.Signed{} - if err := json.Unmarshal(b, s); err != nil { - return nil, err - } - return s, nil -} - -func validManifest(name string) bool { - for _, m := range topLevelManifests { - if m == name { - return true - } - } - return false -} - -func (r *Repo) AddTarget(path string, custom json.RawMessage) error { - return r.AddTargets([]string{path}, custom) -} - -func (r *Repo) AddTargets(paths []string, custom json.RawMessage) error { - return r.AddTargetsWithExpires(paths, custom, data.DefaultExpires("targets")) -} - -func (r *Repo) AddTargetWithExpires(path string, custom json.RawMessage, expires time.Time) error { - return r.AddTargetsWithExpires([]string{path}, custom, expires) -} - -func (r *Repo) AddTargetsWithExpires(paths []string, custom json.RawMessage, expires time.Time) error { - if !validExpires(expires) { - return errors.ErrInvalidExpires{expires} - } - - t, err := r.targets() - if err != nil { - return err - } - normalizedPaths := make([]string, len(paths)) - for i, path := range paths { - normalizedPaths[i] = util.NormalizeTarget(path) - } - if err := 
r.local.WalkStagedTargets(normalizedPaths, func(path string, meta data.FileMeta) (err error) { - t.Targets[util.NormalizeTarget(path)] = meta - return nil - }); err != nil { - return err - } - t.Expires = expires.Round(time.Second) - t.Version++ - return r.setMeta("targets.json", t) -} - -func (r *Repo) RemoveTarget(path string) error { - return r.RemoveTargets([]string{path}) -} - -func (r *Repo) RemoveTargets(paths []string) error { - return r.RemoveTargetsWithExpires(paths, data.DefaultExpires("targets")) -} - -func (r *Repo) RemoveTargetWithExpires(path string, expires time.Time) error { - return r.RemoveTargetsWithExpires([]string{path}, expires) -} - -// If paths is empty, all targets will be removed. -func (r *Repo) RemoveTargetsWithExpires(paths []string, expires time.Time) error { - if !validExpires(expires) { - return errors.ErrInvalidExpires{expires} - } - - t, err := r.targets() - if err != nil { - return err - } - if len(paths) == 0 { - t.Targets = make(data.Files) - } else { - removed := false - for _, path := range paths { - path = util.NormalizeTarget(path) - if _, ok := t.Targets[path]; !ok { - continue - } - removed = true - delete(t.Targets, path) - } - if !removed { - return nil - } - } - t.Expires = expires.Round(time.Second) - t.Version++ - return r.setMeta("targets.json", t) -} - -func (r *Repo) Snapshot(t CompressionType) error { - return r.SnapshotWithExpires(t, data.DefaultExpires("snapshot")) -} - -func (r *Repo) SnapshotWithExpires(t CompressionType, expires time.Time) error { - if !validExpires(expires) { - return errors.ErrInvalidExpires{expires} - } - - snapshot, err := r.snapshot() - if err != nil { - return err - } - db, err := r.db() - if err != nil { - return err - } - // TODO: generate compressed manifests - for _, name := range snapshotManifests { - if err := r.verifySignature(name, db); err != nil { - return err - } - var err error - snapshot.Meta[name], err = r.fileMeta(name) - if err != nil { - return err - } - } - 
snapshot.Expires = expires.Round(time.Second) - snapshot.Version++ - return r.setMeta("snapshot.json", snapshot) -} - -func (r *Repo) Timestamp() error { - return r.TimestampWithExpires(data.DefaultExpires("timestamp")) -} - -func (r *Repo) TimestampWithExpires(expires time.Time) error { - if !validExpires(expires) { - return errors.ErrInvalidExpires{expires} - } - - db, err := r.db() - if err != nil { - return err - } - if err := r.verifySignature("snapshot.json", db); err != nil { - return err - } - timestamp, err := r.timestamp() - if err != nil { - return err - } - timestamp.Meta["snapshot.json"], err = r.fileMeta("snapshot.json") - if err != nil { - return err - } - timestamp.Expires = expires.Round(time.Second) - timestamp.Version++ - return r.setMeta("timestamp.json", timestamp) -} - -func (r *Repo) fileHashes() (map[string]data.Hashes, error) { - hashes := make(map[string]data.Hashes) - addHashes := func(name string, meta data.Files) { - if m, ok := meta[name]; ok { - hashes[name] = m.Hashes - } - } - timestamp, err := r.timestamp() - if err != nil { - return nil, err - } - snapshot, err := r.snapshot() - if err != nil { - return nil, err - } - addHashes("root.json", snapshot.Meta) - addHashes("targets.json", snapshot.Meta) - addHashes("snapshot.json", timestamp.Meta) - t, err := r.targets() - if err != nil { - return nil, err - } - for name, meta := range t.Targets { - hashes[path.Join("targets", name)] = meta.Hashes - } - return hashes, nil -} - -func (r *Repo) Commit() error { - // check we have all the metadata - for _, name := range topLevelManifests { - if _, ok := r.meta[name]; !ok { - return errors.ErrMissingMetadata{name} - } - } - - // check roles are valid - root, err := r.root() - if err != nil { - return err - } - for name, role := range root.Roles { - if len(role.KeyIDs) < role.Threshold { - return errors.ErrNotEnoughKeys{name, len(role.KeyIDs), role.Threshold} - } - } - - // verify hashes in snapshot.json are up to date - snapshot, err := 
r.snapshot() - if err != nil { - return err - } - for _, name := range snapshotManifests { - expected, ok := snapshot.Meta[name] - if !ok { - return fmt.Errorf("tuf: snapshot.json missing hash for %s", name) - } - actual, err := r.fileMeta(name) - if err != nil { - return err - } - if err := util.FileMetaEqual(actual, expected); err != nil { - return fmt.Errorf("tuf: invalid %s in snapshot.json: %s", name, err) - } - } - - // verify hashes in timestamp.json are up to date - timestamp, err := r.timestamp() - if err != nil { - return err - } - snapshotMeta, err := r.fileMeta("snapshot.json") - if err != nil { - return err - } - if err := util.FileMetaEqual(snapshotMeta, timestamp.Meta["snapshot.json"]); err != nil { - return fmt.Errorf("tuf: invalid snapshot.json in timestamp.json: %s", err) - } - - // verify all signatures are correct - db, err := r.db() - if err != nil { - return err - } - for _, name := range topLevelManifests { - if err := r.verifySignature(name, db); err != nil { - return err - } - } - - hashes, err := r.fileHashes() - if err != nil { - return err - } - return r.local.Commit(r.meta, root.ConsistentSnapshot, hashes) -} - -func (r *Repo) Clean() error { - return r.local.Clean() -} - -func (r *Repo) verifySignature(name string, db *keys.DB) error { - s, err := r.signedMeta(name) - if err != nil { - return err - } - role := strings.TrimSuffix(name, ".json") - if err := signed.Verify(s, role, 0, db); err != nil { - return errors.ErrInsufficientSignatures{name, err} - } - return nil -} - -func (r *Repo) fileMeta(name string) (data.FileMeta, error) { - b, ok := r.meta[name] - if !ok { - return data.FileMeta{}, errors.ErrMissingMetadata{name} - } - return util.GenerateFileMeta(bytes.NewReader(b), r.hashAlgorithms...) 
-} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/repo_test.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/repo_test.go deleted file mode 100644 index 0a80efff7c..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/repo_test.go +++ /dev/null @@ -1,936 +0,0 @@ -package tuf - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" - - "github.com/agl/ed25519" - . "gopkg.in/check.v1" - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/store" - // "github.com/endophage/go-tuf/encrypted" - tuferr "github.com/endophage/go-tuf/errors" - "github.com/endophage/go-tuf/signed" - "github.com/endophage/go-tuf/util" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } - -type RepoSuite struct{} - -var _ = Suite(&RepoSuite{}) - -func (RepoSuite) TestNewRepo(c *C) { - trust := signed.NewEd25519() - - meta := map[string]json.RawMessage{ - "root.json": []byte(`{ - "signed": { - "_type": "root", - "version": 1, - "expires": "2015-12-26T03:26:55.821520874Z", - "keys": {}, - "roles": {} - }, - "signatures": [] - }`), - "targets.json": []byte(`{ - "signed": { - "_type": "targets", - "version": 1, - "expires": "2015-03-26T03:26:55.82155686Z", - "targets": {} - }, - "signatures": [] - }`), - "snapshot.json": []byte(`{ - "signed": { - "_type": "snapshot", - "version": 1, - "expires": "2015-01-02T03:26:55.821585981Z", - "meta": {} - }, - "signatures": [] - }`), - "timestamp.json": []byte(`{ - "signed": { - "_type": "timestamp", - "version": 1, - "expires": "2014-12-27T03:26:55.821599702Z", - "meta": {} - }, - "signatures": [] - }`), - } - db := util.GetSqliteDB() - defer util.FlushDB(db) - local := store.DBStore(db, "") - - for k, v := range meta { - local.SetMeta(k, v) - } - - r, err := NewRepo(trust, local, "sha256") - c.Assert(err, IsNil) - - root, err := r.root() - c.Assert(err, IsNil) - c.Assert(root.Type, Equals, "root") - 
c.Assert(root.Version, Equals, 1) - c.Assert(root.Keys, NotNil) - c.Assert(root.Keys, HasLen, 0) - - targets, err := r.targets() - c.Assert(err, IsNil) - c.Assert(targets.Type, Equals, "targets") - c.Assert(targets.Version, Equals, 1) - c.Assert(targets.Targets, NotNil) - c.Assert(targets.Targets, HasLen, 0) - - snapshot, err := r.snapshot() - c.Assert(err, IsNil) - c.Assert(snapshot.Type, Equals, "snapshot") - c.Assert(snapshot.Version, Equals, 1) - c.Assert(snapshot.Meta, NotNil) - c.Assert(snapshot.Meta, HasLen, 0) - - timestamp, err := r.timestamp() - c.Assert(err, IsNil) - c.Assert(timestamp.Type, Equals, "timestamp") - c.Assert(timestamp.Version, Equals, 1) - c.Assert(timestamp.Meta, NotNil) - c.Assert(timestamp.Meta, HasLen, 0) -} - -func (RepoSuite) TestInit(c *C) { - trust := signed.NewEd25519() - - db := util.GetSqliteDB() - defer util.FlushDB(db) - local := store.DBStore( - db, - "", - //map[string][]byte{"/foo.txt": []byte("foo")}, - ) - local.AddBlob("/foo.txt", util.SampleMeta()) - - r, err := NewRepo(trust, local, "sha256") - c.Assert(err, IsNil) - - // Init() sets root.ConsistentSnapshot - for _, v := range []bool{true, false} { - c.Assert(r.Init(v), IsNil) - root, err := r.root() - c.Assert(err, IsNil) - c.Assert(root.ConsistentSnapshot, Equals, v) - } - - // Init() fails if targets have been added - c.Assert(r.AddTarget("foo.txt", nil), IsNil) - c.Assert(r.Init(true), Equals, tuferr.ErrInitNotAllowed) -} - -func genKey(c *C, r *Repo, role string) string { - id, err := r.GenKey(role) - c.Assert(err, IsNil) - return id -} - -func (RepoSuite) TestGenKey(c *C) { - trust := signed.NewEd25519() - - sqldb := util.GetSqliteDB() - defer util.FlushDB(sqldb) - local := store.DBStore(sqldb, "") - r, err := NewRepo(trust, local, "sha256") - c.Assert(err, IsNil) - - // generate a key for an unknown role - _, err = r.GenKey("foo") - c.Assert(err, Equals, tuferr.ErrInvalidRole{"foo"}) - - // generate a root key - id := genKey(c, r, "root") - - // check root 
metadata is correct - root, err := r.root() - c.Assert(err, IsNil) - c.Assert(root.Roles, NotNil) - c.Assert(root.Roles, HasLen, 1) - c.Assert(root.Keys, NotNil) - c.Assert(root.Keys, HasLen, 1) - rootRole, ok := root.Roles["root"] - if !ok { - c.Fatal("missing root role") - } - c.Assert(rootRole.KeyIDs, HasLen, 1) - keyID := rootRole.KeyIDs[0] - c.Assert(keyID, Equals, id) - k, ok := root.Keys[keyID] - if !ok { - c.Fatal("missing key") - } - c.Assert(k.ID(), Equals, keyID) - c.Assert(k.Value.Public, HasLen, ed25519.PublicKeySize) - //c.Assert(k.Value.Private, IsNil) - - // check root key + role are in db - db, err := r.db() - c.Assert(err, IsNil) - rootKey := db.GetKey(keyID) - c.Assert(rootKey, NotNil) - c.Assert(rootKey.ID, Equals, keyID) - role := db.GetRole("root") - c.Assert(role.KeyIDs, DeepEquals, []string{keyID}) - - // check the key was saved correctly - localKeys, err := local.GetKeys("root") - c.Assert(err, IsNil) - c.Assert(localKeys, HasLen, 1) - c.Assert(localKeys[0].ID(), Equals, keyID) - - // check RootKeys() is correct - rootKeys, err := r.RootKeys() - c.Assert(err, IsNil) - c.Assert(rootKeys, HasLen, 1) - c.Assert(rootKeys[0].ID(), Equals, rootKey.ID) - c.Assert(rootKeys[0].Value.Public, DeepEquals, rootKey.Key.Value.Public) - //c.Assert(rootKeys[0].Value.Private, IsNil) - - // generate two targets keys - genKey(c, r, "targets") - genKey(c, r, "targets") - - // check root metadata is correct - root, err = r.root() - c.Assert(err, IsNil) - c.Assert(root.Roles, HasLen, 2) - c.Assert(root.Keys, HasLen, 3) - targetsRole, ok := root.Roles["targets"] - if !ok { - c.Fatal("missing targets role") - } - c.Assert(targetsRole.KeyIDs, HasLen, 2) - targetKeyIDs := make([]string, 0, 2) - db, err = r.db() - c.Assert(err, IsNil) - for _, id := range targetsRole.KeyIDs { - targetKeyIDs = append(targetKeyIDs, id) - _, ok = root.Keys[id] - if !ok { - c.Fatal("missing key") - } - key := db.GetKey(id) - c.Assert(key, NotNil) - c.Assert(key.ID, Equals, id) - } - role 
= db.GetRole("targets") - c.Assert(role.KeyIDs, DeepEquals, targetKeyIDs) - - // check RootKeys() is unchanged - rootKeys, err = r.RootKeys() - c.Assert(err, IsNil) - c.Assert(rootKeys, HasLen, 1) - c.Assert(rootKeys[0].ID(), Equals, rootKey.ID) - - // check the keys were saved correctly - localKeys, err = local.GetKeys("targets") - c.Assert(err, IsNil) - c.Assert(localKeys, HasLen, 2) - for _, key := range localKeys { - found := false - for _, id := range targetsRole.KeyIDs { - if id == key.ID() { - found = true - } - } - if !found { - c.Fatal("missing key") - } - } - - // check root.json got staged - meta, err := local.GetMeta() - c.Assert(err, IsNil) - rootJSON, ok := meta["root.json"] - if !ok { - c.Fatal("missing root metadata") - } - s := &data.Signed{} - c.Assert(json.Unmarshal(rootJSON, s), IsNil) - stagedRoot := &data.Root{} - c.Assert(json.Unmarshal(s.Signed, stagedRoot), IsNil) - c.Assert(stagedRoot.Type, Equals, root.Type) - c.Assert(stagedRoot.Version, Equals, root.Version) - c.Assert(stagedRoot.Expires.UnixNano(), Equals, root.Expires.UnixNano()) - c.Assert(stagedRoot.Keys, DeepEquals, root.Keys) - c.Assert(stagedRoot.Roles, DeepEquals, root.Roles) -} - -func (RepoSuite) TestRevokeKey(c *C) { - trust := signed.NewEd25519() - - db := util.GetSqliteDB() - defer util.FlushDB(db) - local := store.DBStore(db, "") - r, err := NewRepo(trust, local, "sha256") - c.Assert(err, IsNil) - - // revoking a key for an unknown role returns ErrInvalidRole - c.Assert(r.RevokeKey("foo", ""), DeepEquals, tuferr.ErrInvalidRole{"foo"}) - - // revoking a key which doesn't exist returns ErrKeyNotFound - c.Assert(r.RevokeKey("root", "nonexistent"), DeepEquals, tuferr.ErrKeyNotFound{"root", "nonexistent"}) - - // generate keys - genKey(c, r, "root") - genKey(c, r, "targets") - genKey(c, r, "targets") - genKey(c, r, "snapshot") - genKey(c, r, "timestamp") - root, err := r.root() - c.Assert(err, IsNil) - c.Assert(root.Roles, NotNil) - c.Assert(root.Roles, HasLen, 4) - 
c.Assert(root.Keys, NotNil) - c.Assert(root.Keys, HasLen, 5) - - // revoke a key - targetsRole, ok := root.Roles["targets"] - if !ok { - c.Fatal("missing targets role") - } - c.Assert(targetsRole.KeyIDs, HasLen, 2) - id := targetsRole.KeyIDs[0] - c.Assert(r.RevokeKey("targets", id), IsNil) - - // check root was updated - root, err = r.root() - c.Assert(err, IsNil) - c.Assert(root.Roles, NotNil) - c.Assert(root.Roles, HasLen, 4) - c.Assert(root.Keys, NotNil) - c.Assert(root.Keys, HasLen, 4) - targetsRole, ok = root.Roles["targets"] - if !ok { - c.Fatal("missing targets role") - } - c.Assert(targetsRole.KeyIDs, HasLen, 1) - c.Assert(targetsRole.KeyIDs[0], Not(Equals), id) -} - -func (RepoSuite) TestSign(c *C) { - trust := signed.NewEd25519() - - baseMeta := map[string]json.RawMessage{"root.json": []byte(`{"signed":{},"signatures":[]}`)} - db := util.GetSqliteDB() - defer util.FlushDB(db) - local := store.DBStore(db, "") - local.SetMeta("root.json", baseMeta["root.json"]) - r, err := NewRepo(trust, local, "sha256") - c.Assert(err, IsNil) - - // signing with no keys returns ErrInsufficientKeys - c.Assert(r.Sign("root.json"), Equals, tuferr.ErrInsufficientKeys{"root.json"}) - - checkSigIDs := func(keyIDs ...string) { - meta, err := local.GetMeta() - if err != nil { - c.Fatal("failed to retrieve meta") - } - rootJSON, ok := meta["root.json"] - if !ok { - c.Fatal("missing root.json") - } - s := &data.Signed{} - c.Assert(json.Unmarshal(rootJSON, s), IsNil) - c.Assert(s.Signatures, HasLen, len(keyIDs)) - for i, id := range keyIDs { - c.Assert(s.Signatures[i].KeyID, Equals, id) - } - } - - // signing with an available key generates a signature - //key, err := signer.Create() - kID, err := r.GenKey("root") - c.Assert(err, IsNil) - //c.Assert(local.SaveKey("root", key.SerializePrivate()), IsNil) - c.Assert(r.Sign("root.json"), IsNil) - checkSigIDs(kID) - - // signing again does not generate a duplicate signature - c.Assert(r.Sign("root.json"), IsNil) - checkSigIDs(kID) - - // 
signing with a new available key generates another signature - //newKey, err := signer.Create() - newkID, err := r.GenKey("root") - c.Assert(err, IsNil) - //c.Assert(local.SaveKey("root", newKey.SerializePrivate()), IsNil) - c.Assert(r.Sign("root.json"), IsNil) - checkSigIDs(kID, newkID) -} - -func (RepoSuite) TestCommit(c *C) { - trust := signed.NewEd25519() - - //files := map[string][]byte{"/foo.txt": []byte("foo"), "/bar.txt": []byte("bar")} - db := util.GetSqliteDB() - defer util.FlushDB(db) - local := store.DBStore(db, "") - r, err := NewRepo(trust, local, "sha256") - c.Assert(err, IsNil) - - // commit without root.json - c.Assert(r.Commit(), DeepEquals, tuferr.ErrMissingMetadata{"root.json"}) - - // commit without targets.json - genKey(c, r, "root") - c.Assert(r.Commit(), DeepEquals, tuferr.ErrMissingMetadata{"targets.json"}) - - // commit without snapshot.json - genKey(c, r, "targets") - local.AddBlob("/foo.txt", util.SampleMeta()) - c.Assert(r.AddTarget("foo.txt", nil), IsNil) - c.Assert(r.Commit(), DeepEquals, tuferr.ErrMissingMetadata{"snapshot.json"}) - - // commit without timestamp.json - genKey(c, r, "snapshot") - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - c.Assert(r.Commit(), DeepEquals, tuferr.ErrMissingMetadata{"timestamp.json"}) - - // commit with timestamp.json but no timestamp key - c.Assert(r.Timestamp(), IsNil) - c.Assert(r.Commit(), DeepEquals, tuferr.ErrInsufficientSignatures{"timestamp.json", signed.ErrNoSignatures}) - - // commit success - genKey(c, r, "timestamp") - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - c.Assert(r.Timestamp(), IsNil) - c.Assert(r.Commit(), IsNil) - - // commit with an invalid root hash in snapshot.json due to new key creation - genKey(c, r, "targets") - c.Assert(r.Sign("targets.json"), IsNil) - c.Assert(r.Commit(), DeepEquals, errors.New("tuf: invalid root.json in snapshot.json: wrong length")) - - // commit with an invalid targets hash in snapshot.json - c.Assert(r.Snapshot(CompressionTypeNone), 
IsNil) - local.AddBlob("/bar.txt", util.SampleMeta()) - c.Assert(r.AddTarget("bar.txt", nil), IsNil) - c.Assert(r.Commit(), DeepEquals, errors.New("tuf: invalid targets.json in snapshot.json: wrong length")) - - // commit with an invalid timestamp - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - // TODO: Change this test once Snapshot() supports compression and we - // can guarantee the error will end in "wrong length" by - // compressing a file and thus changing the size of snapshot.json - err = r.Commit() - c.Assert(err, NotNil) - c.Assert(err.Error()[0:44], Equals, "tuf: invalid snapshot.json in timestamp.json") - - // commit with a role's threshold greater than number of keys - root, err := r.root() - c.Assert(err, IsNil) - role, ok := root.Roles["timestamp"] - if !ok { - c.Fatal("missing timestamp role") - } - c.Assert(role.KeyIDs, HasLen, 1) - c.Assert(role.Threshold, Equals, 1) - c.Assert(r.RevokeKey("timestamp", role.KeyIDs[0]), IsNil) - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - c.Assert(r.Timestamp(), IsNil) - c.Assert(r.Commit(), DeepEquals, tuferr.ErrNotEnoughKeys{"timestamp", 0, 1}) -} - -type tmpDir struct { - path string - c *C -} - -func newTmpDir(c *C) *tmpDir { - return &tmpDir{path: c.MkDir(), c: c} -} - -func (t *tmpDir) assertExists(path string) { - if _, err := os.Stat(filepath.Join(t.path, path)); os.IsNotExist(err) { - t.c.Fatalf("expected path to exist but it doesn't: %s", path) - } -} - -func (t *tmpDir) assertNotExist(path string) { - if _, err := os.Stat(filepath.Join(t.path, path)); !os.IsNotExist(err) { - t.c.Fatalf("expected path to not exist but it does: %s", path) - } -} - -func (t *tmpDir) assertHashedFilesExist(path string, hashes data.Hashes) { - t.c.Assert(len(hashes) > 0, Equals, true) - for _, path := range util.HashedPaths(path, hashes) { - t.assertExists(path) - } -} - -func (t *tmpDir) assertHashedFilesNotExist(path string, hashes data.Hashes) { - t.c.Assert(len(hashes) > 0, Equals, true) - for _, path := range 
util.HashedPaths(path, hashes) { - t.assertNotExist(path) - } -} - -func (t *tmpDir) assertEmpty(dir string) { - path := filepath.Join(t.path, dir) - f, err := os.Stat(path) - if os.IsNotExist(err) { - t.c.Fatalf("expected dir to exist but it doesn't: %s", dir) - } - t.c.Assert(err, IsNil) - t.c.Assert(f.IsDir(), Equals, true) - entries, err := ioutil.ReadDir(path) - t.c.Assert(err, IsNil) - // check that all (if any) entries are also empty - for _, e := range entries { - t.assertEmpty(filepath.Join(dir, e.Name())) - } -} - -func (t *tmpDir) assertFileContent(path, content string) { - actual := t.readFile(path) - t.c.Assert(string(actual), Equals, content) -} - -func (t *tmpDir) stagedTargetPath(path string) string { - return filepath.Join(t.path, "staged", "targets", path) -} - -func (t *tmpDir) writeStagedTarget(path, data string) { - path = t.stagedTargetPath(path) - t.c.Assert(os.MkdirAll(filepath.Dir(path), 0755), IsNil) - t.c.Assert(ioutil.WriteFile(path, []byte(data), 0644), IsNil) -} - -func (t *tmpDir) readFile(path string) []byte { - t.assertExists(path) - data, err := ioutil.ReadFile(filepath.Join(t.path, path)) - t.c.Assert(err, IsNil) - return data -} - -func (RepoSuite) TestCommitFileSystem(c *C) { - trust := signed.NewEd25519() - tmp := newTmpDir(c) - local := store.FileSystemStore(tmp.path, nil) - r, err := NewRepo(trust, local, "sha256") - c.Assert(err, IsNil) - - // don't use consistent snapshots to make the checks simpler - c.Assert(r.Init(false), IsNil) - - // generating keys should stage root.json and create repo dirs - genKey(c, r, "root") - genKey(c, r, "targets") - genKey(c, r, "snapshot") - genKey(c, r, "timestamp") - tmp.assertExists("staged/root.json") - tmp.assertEmpty("repository") - tmp.assertEmpty("staged/targets") - - // adding a non-existent file fails - c.Assert(r.AddTarget("foo.txt", nil), Equals, tuferr.ErrFileNotFound{tmp.stagedTargetPath("foo.txt")}) - tmp.assertEmpty("repository") - - // adding a file stages targets.json - 
tmp.writeStagedTarget("foo.txt", "foo") - c.Assert(r.AddTarget("foo.txt", nil), IsNil) - tmp.assertExists("staged/targets.json") - tmp.assertEmpty("repository") - t, err := r.targets() - c.Assert(err, IsNil) - c.Assert(t.Targets, HasLen, 1) - if _, ok := t.Targets["/foo.txt"]; !ok { - c.Fatal("missing target file: /foo.txt") - } - - // Snapshot() stages snapshot.json - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - tmp.assertExists("staged/snapshot.json") - tmp.assertEmpty("repository") - - // Timestamp() stages timestamp.json - c.Assert(r.Timestamp(), IsNil) - tmp.assertExists("staged/timestamp.json") - tmp.assertEmpty("repository") - - // committing moves files from staged -> repository - c.Assert(r.Commit(), IsNil) - tmp.assertExists("repository/root.json") - tmp.assertExists("repository/targets.json") - tmp.assertExists("repository/snapshot.json") - tmp.assertExists("repository/timestamp.json") - tmp.assertFileContent("repository/targets/foo.txt", "foo") - tmp.assertEmpty("staged/targets") - tmp.assertEmpty("staged") - - // adding and committing another file moves it into repository/targets - tmp.writeStagedTarget("path/to/bar.txt", "bar") - c.Assert(r.AddTarget("path/to/bar.txt", nil), IsNil) - tmp.assertExists("staged/targets.json") - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - c.Assert(r.Timestamp(), IsNil) - c.Assert(r.Commit(), IsNil) - tmp.assertFileContent("repository/targets/foo.txt", "foo") - tmp.assertFileContent("repository/targets/path/to/bar.txt", "bar") - tmp.assertEmpty("staged/targets") - tmp.assertEmpty("staged") - - // removing and committing a file removes it from repository/targets - c.Assert(r.RemoveTarget("foo.txt"), IsNil) - tmp.assertExists("staged/targets.json") - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - c.Assert(r.Timestamp(), IsNil) - c.Assert(r.Commit(), IsNil) - tmp.assertNotExist("repository/targets/foo.txt") - tmp.assertFileContent("repository/targets/path/to/bar.txt", "bar") - 
tmp.assertEmpty("staged/targets") - tmp.assertEmpty("staged") -} - -func (RepoSuite) TestConsistentSnapshot(c *C) { - trust := signed.NewEd25519() - tmp := newTmpDir(c) - local := store.FileSystemStore(tmp.path, nil) - r, err := NewRepo(trust, local, "sha512", "sha256") - c.Assert(err, IsNil) - - genKey(c, r, "root") - genKey(c, r, "targets") - genKey(c, r, "snapshot") - genKey(c, r, "timestamp") - tmp.writeStagedTarget("foo.txt", "foo") - c.Assert(r.AddTarget("foo.txt", nil), IsNil) - tmp.writeStagedTarget("dir/bar.txt", "bar") - c.Assert(r.AddTarget("dir/bar.txt", nil), IsNil) - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - c.Assert(r.Timestamp(), IsNil) - c.Assert(r.Commit(), IsNil) - - hashes, err := r.fileHashes() - c.Assert(err, IsNil) - - // root.json, targets.json and snapshot.json should exist at both hashed and unhashed paths - for _, path := range []string{"root.json", "targets.json", "snapshot.json"} { - repoPath := filepath.Join("repository", path) - tmp.assertHashedFilesExist(repoPath, hashes[path]) - tmp.assertExists(repoPath) - } - - // target files should exist at hashed but not unhashed paths - for _, path := range []string{"targets/foo.txt", "targets/dir/bar.txt"} { - repoPath := filepath.Join("repository", path) - tmp.assertHashedFilesExist(repoPath, hashes[path]) - tmp.assertNotExist(repoPath) - } - - // timestamp.json should exist at an unhashed path (it doesn't have a hash) - tmp.assertExists("repository/timestamp.json") - - // removing a file should remove the hashed files - c.Assert(r.RemoveTarget("foo.txt"), IsNil) - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - c.Assert(r.Timestamp(), IsNil) - c.Assert(r.Commit(), IsNil) - tmp.assertHashedFilesNotExist("repository/targets/foo.txt", hashes["targets/foo.txt"]) - tmp.assertNotExist("repository/targets/foo.txt") - - // targets should be returned by new repo - newRepo, err := NewRepo(trust, local, "sha512", "sha256") - c.Assert(err, IsNil) - t, err := newRepo.targets() - 
c.Assert(err, IsNil) - c.Assert(t.Targets, HasLen, 1) - if _, ok := t.Targets["/dir/bar.txt"]; !ok { - c.Fatal("missing targets file: dir/bar.txt") - } -} - -func (RepoSuite) TestExpiresAndVersion(c *C) { - trust := signed.NewEd25519() - - //files := map[string][]byte{"/foo.txt": []byte("foo")} - db := util.GetSqliteDB() - defer util.FlushDB(db) - local := store.DBStore(db, "") - r, err := NewRepo(trust, local, "sha256") - c.Assert(err, IsNil) - - past := time.Now().Add(-1 * time.Second) - _, genKeyErr := r.GenKeyWithExpires("root", past) - for _, err := range []error{ - genKeyErr, - r.AddTargetWithExpires("foo.txt", nil, past), - r.RemoveTargetWithExpires("foo.txt", past), - r.SnapshotWithExpires(CompressionTypeNone, past), - r.TimestampWithExpires(past), - } { - c.Assert(err, Equals, tuferr.ErrInvalidExpires{past}) - } - - genKey(c, r, "root") - root, err := r.root() - c.Assert(err, IsNil) - c.Assert(root.Version, Equals, 1) - - expires := time.Now().Add(24 * time.Hour) - _, err = r.GenKeyWithExpires("root", expires) - c.Assert(err, IsNil) - root, err = r.root() - c.Assert(err, IsNil) - c.Assert(root.Expires.Unix(), DeepEquals, expires.Round(time.Second).Unix()) - c.Assert(root.Version, Equals, 2) - - expires = time.Now().Add(12 * time.Hour) - role, ok := root.Roles["root"] - if !ok { - c.Fatal("missing root role") - } - c.Assert(role.KeyIDs, HasLen, 2) - c.Assert(r.RevokeKeyWithExpires("root", role.KeyIDs[0], expires), IsNil) - root, err = r.root() - c.Assert(err, IsNil) - c.Assert(root.Expires.Unix(), DeepEquals, expires.Round(time.Second).Unix()) - c.Assert(root.Version, Equals, 3) - - expires = time.Now().Add(6 * time.Hour) - genKey(c, r, "targets") - local.AddBlob("/foo.txt", util.SampleMeta()) - c.Assert(r.AddTargetWithExpires("foo.txt", nil, expires), IsNil) - targets, err := r.targets() - c.Assert(err, IsNil) - c.Assert(targets.Expires.Unix(), Equals, expires.Round(time.Second).Unix()) - c.Assert(targets.Version, Equals, 1) - - expires = time.Now().Add(2 
* time.Hour) - c.Assert(r.RemoveTargetWithExpires("foo.txt", expires), IsNil) - targets, err = r.targets() - c.Assert(err, IsNil) - c.Assert(targets.Expires.Unix(), Equals, expires.Round(time.Second).Unix()) - c.Assert(targets.Version, Equals, 2) - - expires = time.Now().Add(time.Hour) - genKey(c, r, "snapshot") - c.Assert(r.SnapshotWithExpires(CompressionTypeNone, expires), IsNil) - snapshot, err := r.snapshot() - c.Assert(err, IsNil) - c.Assert(snapshot.Expires.Unix(), Equals, expires.Round(time.Second).Unix()) - c.Assert(snapshot.Version, Equals, 1) - - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - snapshot, err = r.snapshot() - c.Assert(err, IsNil) - c.Assert(snapshot.Version, Equals, 2) - - expires = time.Now().Add(10 * time.Minute) - genKey(c, r, "timestamp") - c.Assert(r.TimestampWithExpires(expires), IsNil) - timestamp, err := r.timestamp() - c.Assert(err, IsNil) - c.Assert(timestamp.Expires.Unix(), Equals, expires.Round(time.Second).Unix()) - c.Assert(timestamp.Version, Equals, 1) - - c.Assert(r.Timestamp(), IsNil) - timestamp, err = r.timestamp() - c.Assert(err, IsNil) - c.Assert(timestamp.Version, Equals, 2) -} - -func (RepoSuite) TestHashAlgorithm(c *C) { - trust := signed.NewEd25519() - - //files := map[string][]byte{"/foo.txt": []byte("foo")} - db := util.GetSqliteDB() - defer util.FlushDB(db) - local := store.DBStore(db, "docker.io/testImage") - type hashTest struct { - args []string - expected []string - } - for _, test := range []hashTest{ - {args: []string{}, expected: []string{"sha512"}}, - {args: []string{"sha256"}}, - {args: []string{"sha512", "sha256"}}, - } { - // generate metadata with specific hash functions - r, err := NewRepo(trust, local, test.args...) 
- c.Assert(err, IsNil) - genKey(c, r, "root") - genKey(c, r, "targets") - genKey(c, r, "snapshot") - local.AddBlob("/foo.txt", util.SampleMeta()) - c.Assert(r.AddTarget("foo.txt", nil), IsNil) - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - c.Assert(r.Timestamp(), IsNil) - - // check metadata has correct hash functions - if test.expected == nil { - test.expected = test.args - } - targets, err := r.targets() - c.Assert(err, IsNil) - snapshot, err := r.snapshot() - c.Assert(err, IsNil) - timestamp, err := r.timestamp() - c.Assert(err, IsNil) - for name, file := range map[string]data.FileMeta{ - "foo.txt": targets.Targets["/foo.txt"], - "root.json": snapshot.Meta["root.json"], - "targets.json": snapshot.Meta["targets.json"], - "snapshot.json": timestamp.Meta["snapshot.json"], - } { - for _, hashAlgorithm := range test.expected { - if _, ok := file.Hashes[hashAlgorithm]; !ok { - c.Fatalf("expected %s hash to contain hash func %s, got %s", name, hashAlgorithm, file.HashAlgorithms()) - } - } - } - } -} - -func testPassphraseFunc(p []byte) util.PassphraseFunc { - return func(string, bool) ([]byte, error) { return p, nil } -} - -//func (RepoSuite) TestKeyPersistence(c *C) { -// tmp := newTmpDir(c) -// passphrase := []byte("s3cr3t") -// store := FileSystemStore(tmp.path, testPassphraseFunc(passphrase)) -// -// assertEqual := func(actual []*data.Key, expected []*keys.Key) { -// c.Assert(actual, HasLen, len(expected)) -// for i, key := range expected { -// c.Assert(actual[i].ID(), Equals, key.ID) -// c.Assert(actual[i].Value.Public, DeepEquals, data.HexBytes(key.Public[:])) -// c.Assert(actual[i].Value.Private, DeepEquals, data.HexBytes(key.Private[:])) -// } -// } -// -// assertKeys := func(role string, enc bool, expected []*keys.Key) { -// keysJSON := tmp.readFile("keys/" + role + ".json") -// pk := &persistedKeys{} -// c.Assert(json.Unmarshal(keysJSON, pk), IsNil) -// -// // check the persisted keys are correct -// var actual []*data.Key -// if enc { -// 
c.Assert(pk.Encrypted, Equals, true) -// decrypted, err := encrypted.Decrypt(pk.Data, passphrase) -// c.Assert(err, IsNil) -// c.Assert(json.Unmarshal(decrypted, &actual), IsNil) -// } else { -// c.Assert(pk.Encrypted, Equals, false) -// c.Assert(json.Unmarshal(pk.Data, &actual), IsNil) -// } -// assertEqual(actual, expected) -// -// // check GetKeys is correct -// actual, err := store.GetKeys(role) -// c.Assert(err, IsNil) -// assertEqual(actual, expected) -// } -// -// // save a key and check it gets encrypted -// key, err := keys.NewKey() -// c.Assert(err, IsNil) -// c.Assert(store.SaveKey("root", key.SerializePrivate()), IsNil) -// assertKeys("root", true, []*keys.Key{key}) -// -// // save another key and check it gets added to the existing keys -// newKey, err := keys.NewKey() -// c.Assert(err, IsNil) -// c.Assert(store.SaveKey("root", newKey.SerializePrivate()), IsNil) -// assertKeys("root", true, []*keys.Key{key, newKey}) -// -// // check saving a key to an encrypted file without a passphrase fails -// insecureStore := FileSystemStore(tmp.path, nil) -// key, err = keys.NewKey() -// c.Assert(err, IsNil) -// c.Assert(insecureStore.SaveKey("root", key.SerializePrivate()), Equals, ErrPassphraseRequired{"root"}) -// -// // save a key to an insecure store and check it is not encrypted -// key, err = keys.NewKey() -// c.Assert(err, IsNil) -// c.Assert(insecureStore.SaveKey("targets", key.SerializePrivate()), IsNil) -// assertKeys("targets", false, []*keys.Key{key}) -//} - -func (RepoSuite) TestManageMultipleTargets(c *C) { - trust := signed.NewEd25519() - tmp := newTmpDir(c) - local := store.FileSystemStore(tmp.path, nil) - r, err := NewRepo(trust, local) - c.Assert(err, IsNil) - // don't use consistent snapshots to make the checks simpler - c.Assert(r.Init(false), IsNil) - genKey(c, r, "root") - genKey(c, r, "targets") - genKey(c, r, "snapshot") - genKey(c, r, "timestamp") - - assertRepoTargets := func(paths ...string) { - t, err := r.targets() - c.Assert(err, 
IsNil) - for _, path := range paths { - if _, ok := t.Targets[path]; !ok { - c.Fatalf("missing target file: %s", path) - } - } - } - - // adding and committing multiple files moves correct targets from staged -> repository - tmp.writeStagedTarget("foo.txt", "foo") - tmp.writeStagedTarget("bar.txt", "bar") - c.Assert(r.AddTargets([]string{"foo.txt", "bar.txt"}, nil), IsNil) - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - c.Assert(r.Timestamp(), IsNil) - c.Assert(r.Commit(), IsNil) - assertRepoTargets("/foo.txt", "/bar.txt") - tmp.assertExists("repository/targets/foo.txt") - tmp.assertExists("repository/targets/bar.txt") - - // adding all targets moves them all from staged -> repository - count := 10 - files := make([]string, count) - for i := 0; i < count; i++ { - files[i] = fmt.Sprintf("/file%d.txt", i) - tmp.writeStagedTarget(files[i], "data") - } - c.Assert(r.AddTargets(nil, nil), IsNil) - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - c.Assert(r.Timestamp(), IsNil) - c.Assert(r.Commit(), IsNil) - tmp.assertExists("repository/targets/foo.txt") - tmp.assertExists("repository/targets/bar.txt") - assertRepoTargets(files...) 
- for _, file := range files { - tmp.assertExists("repository/targets/" + file) - } - tmp.assertEmpty("staged/targets") - tmp.assertEmpty("staged") - - // removing all targets removes them from the repository and targets.json - c.Assert(r.RemoveTargets(nil), IsNil) - c.Assert(r.Snapshot(CompressionTypeNone), IsNil) - c.Assert(r.Timestamp(), IsNil) - c.Assert(r.Commit(), IsNil) - tmp.assertEmpty("repository/targets") - t, err := r.targets() - c.Assert(err, IsNil) - c.Assert(t.Targets, HasLen, 0) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/interface.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/interface.go deleted file mode 100644 index 1b69e538a3..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/interface.go +++ /dev/null @@ -1,20 +0,0 @@ -package signed - -import ( - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/keys" -) - -type SigningService interface { - Sign(keyIDs []string, data []byte) ([]data.Signature, error) -} - -type KeyService interface { - Create() (*keys.PublicKey, error) - PublicKeys(keyIDs ...string) (map[string]*keys.PublicKey, error) -} - -type TrustService interface { - SigningService - KeyService -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/store/dbstore.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/store/dbstore.go deleted file mode 100644 index f0ebd262e9..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/store/dbstore.go +++ /dev/null @@ -1,265 +0,0 @@ -package store - -import ( - "database/sql" - "encoding/hex" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "os" - "path" - "strings" - - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/util" -) - -const ( - tufLoc string = "/tmp/tuf" -) - -// implements LocalStore -type dbStore struct { - db sql.DB - imageName string -} - -// DBStore takes a database connection and the QDN of the image -func DBStore(db *sql.DB, imageName string) *dbStore 
{ - store := dbStore{ - db: *db, - imageName: imageName, - } - - return &store -} - -// GetMeta loads existing TUF metadata files -func (dbs *dbStore) GetMeta() (map[string]json.RawMessage, error) { - metadataDir := path.Join(tufLoc, dbs.imageName) - var err error - meta := make(map[string]json.RawMessage) - files, err := ioutil.ReadDir(metadataDir) - if err != nil { - if _, ok := err.(*os.PathError); ok { - return meta, nil - } - return nil, err - } - for _, file := range files { - if strings.HasSuffix(file.Name(), ".json") { - data, err := dbs.readFile(file.Name()) - if err != nil { - continue - } - meta[file.Name()] = json.RawMessage(data) - } - } - return meta, err -} - -// SetMeta writes individual TUF metadata files -func (dbs *dbStore) SetMeta(name string, meta json.RawMessage) error { - return dbs.writeFile(name, meta) -} - -// WalkStagedTargets walks all targets in scope -func (dbs *dbStore) WalkStagedTargets(paths []string, targetsFn targetsWalkFunc) error { - if len(paths) == 0 { - files := dbs.loadTargets("") - for path, meta := range files { - if err := targetsFn(path, meta); err != nil { - return err - } - } - return nil - } - - for _, path := range paths { - files := dbs.loadTargets(path) - meta, ok := files[path] - if !ok { - return fmt.Errorf("File Not Found") - } - if err := targetsFn(path, meta); err != nil { - return err - } - } - return nil -} - -// Commit writes a set of consistent (possibly) TUF metadata files -func (dbs *dbStore) Commit(metafiles map[string]json.RawMessage, consistent bool, hashes map[string]data.Hashes) error { - // TODO (endophage): write meta files to cache - return nil - -} - -// GetKeys returns private keys -func (dbs *dbStore) GetKeys(role string) ([]*data.Key, error) { - keys := []*data.Key{} - var r *sql.Rows - var err error - sql := "SELECT `key` FROM `keys` WHERE `role` = ? 
AND `namespace` = ?;" - tx, err := dbs.db.Begin() - defer tx.Rollback() - r, err = tx.Query(sql, role, dbs.imageName) - if err != nil { - return nil, err - } - defer r.Close() - for r.Next() { - var jsonStr string - key := data.Key{} - r.Scan(&jsonStr) - err := json.Unmarshal([]byte(jsonStr), &key) - if err != nil { - return nil, err - } - keys = append(keys, &key) - } - return keys, nil -} - -// SaveKey saves a new private key -func (dbs *dbStore) SaveKey(role string, key *data.Key) error { - jsonBytes, err := json.Marshal(key) - if err != nil { - return fmt.Errorf("Could not JSON Marshal Key") - } - tx, err := dbs.db.Begin() - if err != nil { - fmt.Println(err) - return err - } - _, err = tx.Exec("INSERT INTO `keys` (`namespace`, `role`, `key`) VALUES (?,?,?);", dbs.imageName, role, string(jsonBytes)) - tx.Commit() - return err -} - -// Clean removes staged targets -func (dbs *dbStore) Clean() error { - // TODO (endophage): purge stale items from db? May just/also need a remove method - return nil -} - -// AddBlob adds an object to the store -func (dbs *dbStore) AddBlob(path string, meta data.FileMeta) { - path = util.NormalizeTarget(path) - jsonbytes := []byte{} - if meta.Custom != nil { - jsonbytes, _ = meta.Custom.MarshalJSON() - } - - tx, err := dbs.db.Begin() - if err != nil { - fmt.Println(err) - return - } - _, err = tx.Exec("INSERT OR REPLACE INTO `filemeta` VALUES (?,?,?,?);", dbs.imageName, path, meta.Length, jsonbytes) - if err != nil { - fmt.Println(err) - } - tx.Commit() - dbs.addBlobHashes(path, meta.Hashes) -} - -func (dbs *dbStore) addBlobHashes(path string, hashes data.Hashes) { - tx, err := dbs.db.Begin() - if err != nil { - fmt.Println(err) - } - for alg, hash := range hashes { - _, err := tx.Exec("INSERT OR REPLACE INTO `filehashes` VALUES (?,?,?,?);", dbs.imageName, path, alg, hex.EncodeToString(hash)) - if err != nil { - fmt.Println(err) - } - } - tx.Commit() -} - -// RemoveBlob removes an object from the store -func (dbs *dbStore) 
RemoveBlob(path string) error { - tx, err := dbs.db.Begin() - if err != nil { - fmt.Println(err) - return err - } - _, err = tx.Exec("DELETE FROM `filemeta` WHERE `path`=? AND `namespace`=?", path, dbs.imageName) - if err == nil { - tx.Commit() - } else { - tx.Rollback() - } - return err -} - -func (dbs *dbStore) loadTargets(path string) map[string]data.FileMeta { - var err error - var r *sql.Rows - tx, err := dbs.db.Begin() - defer tx.Rollback() - files := make(map[string]data.FileMeta) - sql := "SELECT `filemeta`.`path`, `size`, `alg`, `hash`, `custom` FROM `filemeta` JOIN `filehashes` ON `filemeta`.`path` = `filehashes`.`path` AND `filemeta`.`namespace` = `filehashes`.`namespace` WHERE `filemeta`.`namespace`=?" - if path != "" { - sql = fmt.Sprintf("%s %s", sql, "AND `filemeta`.`path`=?") - r, err = tx.Query(sql, dbs.imageName, path) - } else { - r, err = tx.Query(sql, dbs.imageName) - } - if err != nil { - return files - } - defer r.Close() - for r.Next() { - var absPath, alg, hash string - var size int64 - var custom json.RawMessage - r.Scan(&absPath, &size, &alg, &hash, &custom) - hashBytes, err := hex.DecodeString(hash) - if err != nil { - // We're going to skip items with unparseable hashes as they - // won't be valid in the targets.json - fmt.Println("Hash was not stored in hex as expected") - continue - } - if file, ok := files[absPath]; ok { - file.Hashes[alg] = hashBytes - } else { - file = data.FileMeta{ - Length: size, - Hashes: data.Hashes{ - alg: hashBytes, - }, - } - if custom != nil { - file.Custom = &custom - } - files[absPath] = file - } - } - return files -} - -func (dbs *dbStore) writeFile(name string, content []byte) error { - fullPath := path.Join(tufLoc, dbs.imageName, name) - dirPath := path.Dir(fullPath) - err := os.MkdirAll(dirPath, 0744) - if err != nil { - log.Printf("error creating directory path to TUF cache") - return err - } - - err = ioutil.WriteFile(fullPath, content, 0744) - if err != nil { - log.Printf("Error writing file") - } 
- return err -} - -func (dbs *dbStore) readFile(name string) ([]byte, error) { - fullPath := path.Join(tufLoc, dbs.imageName, name) - content, err := ioutil.ReadFile(fullPath) - return content, err -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/store/dbstore_test.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/store/dbstore_test.go deleted file mode 100644 index e098c81444..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/store/dbstore_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package store - -import ( - // "fmt" - "testing" - - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/util" -) - -// TestDBStore just ensures we can initialize an empty store. -// Nothing to test, just ensure no crashes :-) -func TestDBStore(t *testing.T) { - db := util.GetSqliteDB() - defer util.FlushDB(db) - _ = DBStore(db, "") -} - -func TestLoadFiles(t *testing.T) { - db := util.GetSqliteDB() - defer util.FlushDB(db) - store := DBStore(db, "docker.io/testImage") - testmeta := util.SampleMeta() - store.AddBlob("/foo.txt", testmeta) - - called := false - check := func(path string, meta data.FileMeta) error { - if called { - t.Fatal("Store only has one item but check called > once.") - } else { - called = true - } - - if path != "/foo.txt" { - t.Fatal("Path is incorrect", path) - } - - if meta.Length != testmeta.Length { - t.Fatal("Length is incorrect") - } - - if len(meta.Hashes) != len(testmeta.Hashes) { - t.Fatal("Hashes map has been modified") - } - - return nil - } - store.WalkStagedTargets([]string{}, check) - if !called { - t.Fatal("Walk func never called") - } -} - -func TestAddBlob(t *testing.T) { - db := util.GetSqliteDB() - defer util.FlushDB(db) - store := DBStore(db, "docker.io/testImage") - testmeta := util.SampleMeta() - store.AddBlob("/foo.txt", testmeta) - - called := false - check := func(path string, meta data.FileMeta) error { - if called { - t.Fatal("Store only has one item but check called > once.") - } else { 
- called = true - } - - if path != "/foo.txt" { - t.Fatal("Path is incorrect") - } - - if meta.Length != 1 { - t.Fatal("Length is incorrect") - } - - sha256, ok256 := meta.Hashes["sha256"] - sha512, ok512 := meta.Hashes["sha512"] - if len(meta.Hashes) != 2 || !ok256 || !ok512 { - t.Fatal("Hashes map has been modified") - } - - hash := data.HexBytes{0x01, 0x02} - if sha256[0] != hash[0] || sha256[1] != hash[1] { - t.Fatal("SHA256 has been modified") - } - hash = data.HexBytes{0x03, 0x04} - if sha512[0] != hash[0] || sha512[1] != hash[1] { - t.Fatal("SHA512 has been modified") - } - return nil - } - - store.WalkStagedTargets([]string{}, check) - - if !called { - t.Fatal("Walk func never called") - } -} - -func TestRemoveBlob(t *testing.T) { - testPath := "/foo.txt" - db := util.GetSqliteDB() - defer util.FlushDB(db) - store := DBStore(db, "docker.io/testImage") - meta := util.SampleMeta() - - store.AddBlob(testPath, meta) - - called := false - check := func(path string, meta data.FileMeta) error { - called = true - return nil - } - - store.RemoveBlob(testPath) - - store.WalkStagedTargets([]string{}, check) - - if called { - t.Fatal("Walk func called on empty db") - } - -} - -func TestLoadFilesWithPath(t *testing.T) { - db := util.GetSqliteDB() - defer util.FlushDB(db) - store := DBStore(db, "docker.io/testImage") - meta := util.SampleMeta() - - store.AddBlob("/foo.txt", meta) - store.AddBlob("/bar.txt", meta) - - called := false - check := func(path string, meta data.FileMeta) error { - if called { - t.Fatal("Store only has one item but check called > once.") - } else { - called = true - } - - if path != "/foo.txt" { - t.Fatal("Path is incorrect") - } - - return nil - } - - store.WalkStagedTargets([]string{"/foo.txt"}, check) - - if !called { - t.Fatal("Walk func never called") - } -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/store/filestore.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/store/filestore.go deleted file mode 100644 index 
961f1fc7b1..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/store/filestore.go +++ /dev/null @@ -1,355 +0,0 @@ -package store - -import ( - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/encrypted" - "github.com/endophage/go-tuf/errors" - "github.com/endophage/go-tuf/util" -) - -// topLevelManifests determines the order signatures are verified when committing. -var topLevelManifests = []string{ - "root.json", - "targets.json", - "snapshot.json", - "timestamp.json", -} - -type persistedKeys struct { - Encrypted bool `json:"encrypted"` - Data json.RawMessage `json:"data"` -} - -func FileSystemStore(dir string, p util.PassphraseFunc) LocalStore { - return &fileSystemStore{ - dir: dir, - passphraseFunc: p, - keys: make(map[string][]*data.Key), - } -} - -type fileSystemStore struct { - dir string - passphraseFunc util.PassphraseFunc - - // keys is a cache of persisted keys to avoid decrypting multiple times - keys map[string][]*data.Key -} - -func (f *fileSystemStore) repoDir() string { - return filepath.Join(f.dir, "repository") -} - -func (f *fileSystemStore) stagedDir() string { - return filepath.Join(f.dir, "staged") -} - -func (f *fileSystemStore) GetMeta() (map[string]json.RawMessage, error) { - meta := make(map[string]json.RawMessage) - var err error - notExists := func(path string) bool { - _, err := os.Stat(path) - return os.IsNotExist(err) - } - for _, name := range topLevelManifests { - path := filepath.Join(f.stagedDir(), name) - if notExists(path) { - path = filepath.Join(f.repoDir(), name) - if notExists(path) { - continue - } - } - meta[name], err = ioutil.ReadFile(path) - if err != nil { - return nil, err - } - } - return meta, nil -} - -func (f *fileSystemStore) SetMeta(name string, meta json.RawMessage) error { - if err := f.createDirs(); err != nil { - return err - } - if err := ioutil.WriteFile(filepath.Join(f.stagedDir(), name), meta, 
0644); err != nil { - return err - } - return nil -} - -func (f *fileSystemStore) createDirs() error { - for _, dir := range []string{"keys", "repository", "staged/targets"} { - if err := os.MkdirAll(filepath.Join(f.dir, dir), 0755); err != nil { - return err - } - } - return nil -} - -func (f *fileSystemStore) WalkStagedTargets(paths []string, targetsFn targetsWalkFunc) error { - if len(paths) == 0 { - walkFunc := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() || !info.Mode().IsRegular() { - return nil - } - rel, err := filepath.Rel(filepath.Join(f.stagedDir(), "targets"), path) - if err != nil { - return err - } - file, err := os.Open(path) - if err != nil { - return err - } - defer file.Close() - meta, err := util.GenerateFileMeta(file, "sha256") - if err != nil { - return err - } - return targetsFn(rel, meta) - } - return filepath.Walk(filepath.Join(f.stagedDir(), "targets"), walkFunc) - } - - // check all the files exist before processing any files - for _, path := range paths { - realPath := filepath.Join(f.stagedDir(), "targets", path) - if _, err := os.Stat(realPath); err != nil { - if os.IsNotExist(err) { - return errors.ErrFileNotFound{realPath} - } - return err - } - } - - for _, path := range paths { - realPath := filepath.Join(f.stagedDir(), "targets", path) - file, err := os.Open(realPath) - if err != nil { - if os.IsNotExist(err) { - return errors.ErrFileNotFound{realPath} - } - return err - } - meta, err := util.GenerateFileMeta(file, "sha256") - if err != nil { - return err - } - err = targetsFn(path, meta) - file.Close() - if err != nil { - return err - } - } - return nil -} - -func (f *fileSystemStore) createRepoFile(path string) (*os.File, error) { - dst := filepath.Join(f.repoDir(), path) - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - return nil, err - } - return os.Create(dst) -} - -func (f *fileSystemStore) Commit(meta map[string]json.RawMessage, consistentSnapshot 
bool, hashes map[string]data.Hashes) error { - isTarget := func(path string) bool { - return strings.HasPrefix(path, "targets/") - } - shouldCopyHashed := func(path string) bool { - return consistentSnapshot && path != "timestamp.json" - } - shouldCopyUnhashed := func(path string) bool { - return !consistentSnapshot || !isTarget(path) - } - copyToRepo := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() || !info.Mode().IsRegular() { - return nil - } - rel, err := filepath.Rel(f.stagedDir(), path) - if err != nil { - return err - } - var paths []string - if shouldCopyHashed(rel) { - paths = append(paths, util.HashedPaths(rel, hashes[rel])...) - } - if shouldCopyUnhashed(rel) { - paths = append(paths, rel) - } - var files []io.Writer - for _, path := range paths { - file, err := f.createRepoFile(path) - if err != nil { - return err - } - defer file.Close() - files = append(files, file) - } - staged, err := os.Open(path) - if err != nil { - return err - } - defer staged.Close() - if _, err = io.Copy(io.MultiWriter(files...), staged); err != nil { - return err - } - return nil - } - needsRemoval := func(path string) bool { - if consistentSnapshot { - // strip out the hash - name := strings.SplitN(filepath.Base(path), ".", 2) - if name[1] == "" { - return false - } - path = filepath.Join(filepath.Dir(path), name[1]) - } - _, ok := hashes[path] - return !ok - } - removeFile := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - rel, err := filepath.Rel(f.repoDir(), path) - if err != nil { - return err - } - if !info.IsDir() && isTarget(rel) && needsRemoval(rel) { - if err := os.Remove(path); err != nil { - // TODO: log / handle error - } - // TODO: remove empty directory - } - return nil - } - if err := filepath.Walk(f.stagedDir(), copyToRepo); err != nil { - return err - } - if err := filepath.Walk(f.repoDir(), removeFile); err != nil { - return err - } - return f.Clean() 
-} - -func (f *fileSystemStore) GetKeys(role string) ([]*data.Key, error) { - if keys, ok := f.keys[role]; ok { - return keys, nil - } - keys, _, err := f.loadKeys(role) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - return nil, err - } - return keys, nil -} - -func (f *fileSystemStore) SaveKey(role string, key *data.Key) error { - if err := f.createDirs(); err != nil { - return err - } - - // add the key to the existing keys (if any) - keys, pass, err := f.loadKeys(role) - if err != nil && !os.IsNotExist(err) { - return err - } - keys = append(keys, key) - - // if loadKeys didn't return a passphrase (because no keys yet exist) - // and passphraseFunc is set, get the passphrase so the keys file can - // be encrypted later (passphraseFunc being nil indicates the keys file - // should not be encrypted) - if pass == nil && f.passphraseFunc != nil { - pass, err = f.passphraseFunc(role, true) - if err != nil { - return err - } - } - - pk := &persistedKeys{} - if pass != nil { - pk.Data, err = encrypted.Marshal(keys, pass) - if err != nil { - return err - } - pk.Encrypted = true - } else { - pk.Data, err = json.MarshalIndent(keys, "", "\t") - if err != nil { - return err - } - } - data, err := json.MarshalIndent(pk, "", "\t") - if err != nil { - return err - } - if err := ioutil.WriteFile(f.keysPath(role), append(data, '\n'), 0600); err != nil { - return err - } - f.keys[role] = keys - return nil -} - -// loadKeys loads keys for the given role and returns them along with the -// passphrase (if read) so that callers don't need to re-read it. 
-func (f *fileSystemStore) loadKeys(role string) ([]*data.Key, []byte, error) { - file, err := os.Open(f.keysPath(role)) - if err != nil { - return nil, nil, err - } - defer file.Close() - - pk := &persistedKeys{} - if err := json.NewDecoder(file).Decode(pk); err != nil { - return nil, nil, err - } - - var keys []*data.Key - if !pk.Encrypted { - if err := json.Unmarshal(pk.Data, &keys); err != nil { - return nil, nil, err - } - return keys, nil, nil - } - - // the keys are encrypted so cannot be loaded if passphraseFunc is not set - if f.passphraseFunc == nil { - return nil, nil, errors.ErrPassphraseRequired{role} - } - - pass, err := f.passphraseFunc(role, false) - if err != nil { - return nil, nil, err - } - if err := encrypted.Unmarshal(pk.Data, &keys, pass); err != nil { - return nil, nil, err - } - return keys, pass, nil -} - -func (f *fileSystemStore) keysPath(role string) string { - return filepath.Join(f.dir, "keys", role+".json") -} - -func (f *fileSystemStore) Clean() error { - if err := os.RemoveAll(f.stagedDir()); err != nil { - return err - } - return os.MkdirAll(filepath.Join(f.stagedDir(), "targets"), 0755) -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/store/interfaces.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/store/interfaces.go deleted file mode 100644 index 9d7a20c47f..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/store/interfaces.go +++ /dev/null @@ -1,24 +0,0 @@ -package store - -import ( - "encoding/json" - - "github.com/endophage/go-tuf/data" -) - -type targetsWalkFunc func(path string, meta data.FileMeta) error - -type LocalStore interface { - GetMeta() (map[string]json.RawMessage, error) - SetMeta(string, json.RawMessage) error - - // WalkStagedTargets calls targetsFn for each staged target file in paths. - // - // If paths is empty, all staged target files will be walked. 
- WalkStagedTargets(paths []string, targetsFn targetsWalkFunc) error - - Commit(map[string]json.RawMessage, bool, map[string]data.Hashes) error - GetKeys(string) ([]*data.Key, error) - SaveKey(string, *data.Key) error - Clean() error -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/store/memorystore.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/store/memorystore.go deleted file mode 100644 index f17a29602a..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/store/memorystore.go +++ /dev/null @@ -1,90 +0,0 @@ -package store - -import ( - "bytes" - "encoding/json" - - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/errors" - "github.com/endophage/go-tuf/util" -) - -func MemoryStore(meta map[string]json.RawMessage, files map[string][]byte) LocalStore { - if meta == nil { - meta = make(map[string]json.RawMessage) - } - return &memoryStore{ - meta: meta, - files: files, - keys: make(map[string][]*data.Key), - } -} - -type memoryStore struct { - meta map[string]json.RawMessage - files map[string][]byte - keys map[string][]*data.Key -} - -func (m *memoryStore) GetMeta() (map[string]json.RawMessage, error) { - return m.meta, nil -} - -func (m *memoryStore) SetMeta(name string, meta json.RawMessage) error { - m.meta[name] = meta - return nil -} - -func (m *memoryStore) AddBlob(path string, meta data.FileMeta) { - -} - -func (m *memoryStore) WalkStagedTargets(paths []string, targetsFn targetsWalkFunc) error { - if len(paths) == 0 { - for path, data := range m.files { - meta, err := util.GenerateFileMeta(bytes.NewReader(data), "sha256") - if err != nil { - return err - } - if err = targetsFn(path, meta); err != nil { - return err - } - } - return nil - } - - for _, path := range paths { - data, ok := m.files[path] - if !ok { - return errors.ErrFileNotFound{path} - } - meta, err := util.GenerateFileMeta(bytes.NewReader(data), "sha256") - if err != nil { - return err - } - if err = targetsFn(path, meta); err != nil { - 
return err - } - } - return nil -} - -func (m *memoryStore) Commit(map[string]json.RawMessage, bool, map[string]data.Hashes) error { - return nil -} - -func (m *memoryStore) GetKeys(role string) ([]*data.Key, error) { - return m.keys[role], nil -} - -func (m *memoryStore) SaveKey(role string, key *data.Key) error { - if _, ok := m.keys[role]; !ok { - m.keys[role] = make([]*data.Key, 0) - } - m.keys[role] = append(m.keys[role], key) - return nil -} - -func (m *memoryStore) Clean() error { - return nil -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/util/testutils.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/util/testutils.go deleted file mode 100644 index cf7fae5e5c..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/util/testutils.go +++ /dev/null @@ -1,46 +0,0 @@ -package util - -import ( - "database/sql" - "fmt" - "os" - - _ "code.google.com/p/gosqlite/sqlite3" - "github.com/endophage/go-tuf/data" -) - -var counter int = 1 - -func SampleMeta() data.FileMeta { - meta := data.FileMeta{ - Length: 1, - Hashes: data.Hashes{ - "sha256": data.HexBytes{0x01, 0x02}, - "sha512": data.HexBytes{0x03, 0x04}, - }, - } - return meta -} - -func GetSqliteDB() *sql.DB { - conn, err := sql.Open("sqlite3", fmt.Sprintf("/tmp/sqlite/file%d.db", counter)) - if err != nil { - panic("can't connect to db") - } - counter++ - tx, _ := conn.Begin() - tx.Exec("CREATE TABLE keys (id int auto_increment, namespace varchar(255) not null, role varchar(255) not null, key text not null, primary key (id));") - tx.Exec("CREATE TABLE filehashes(namespace varchar(255) not null, path varchar(255) not null, alg varchar(10) not null, hash varchar(128) not null, primary key (namespace, path, alg));") - tx.Exec("CREATE TABLE filemeta(namespace varchar(255) not null, path varchar(255) not null, size int not null, custom text default null, primary key (namespace, path));") - tx.Commit() - return conn -} - -func FlushDB(db *sql.DB) { - tx, _ := db.Begin() - 
tx.Exec("DELETE FROM `filemeta`") - tx.Exec("DELETE FROM `filehashes`") - tx.Exec("DELETE FROM `keys`") - tx.Commit() - os.RemoveAll("/tmp/tuf") -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/util/util.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/util/util.go deleted file mode 100644 index 0b440ff206..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/util/util.go +++ /dev/null @@ -1,118 +0,0 @@ -package util - -import ( - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" - "encoding/hex" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" - gopath "path" - "path/filepath" - - "github.com/endophage/go-tuf/data" -) - -var ErrWrongLength = errors.New("wrong length") - -type ErrWrongHash struct { - Type string - Expected data.HexBytes - Actual data.HexBytes -} - -func (e ErrWrongHash) Error() string { - return fmt.Sprintf("wrong %s hash, expected %s got %s", e.Type, hex.EncodeToString(e.Expected), hex.EncodeToString(e.Actual)) -} - -type ErrNoCommonHash struct { - Expected data.Hashes - Actual data.Hashes -} - -func (e ErrNoCommonHash) Error() string { - types := func(a data.Hashes) []string { - t := make([]string, 0, len(a)) - for typ := range a { - t = append(t, typ) - } - return t - } - return fmt.Sprintf("no common hash function, expected one of %s, got %s", types(e.Expected), types(e.Actual)) -} - -type ErrUnknownHashAlgorithm struct { - Name string -} - -func (e ErrUnknownHashAlgorithm) Error() string { - return fmt.Sprintf("unknown hash algorithm: %s", e.Name) -} - -type PassphraseFunc func(role string, confirm bool) ([]byte, error) - -func FileMetaEqual(actual data.FileMeta, expected data.FileMeta) error { - if actual.Length != expected.Length { - return ErrWrongLength - } - hashChecked := false - for typ, hash := range expected.Hashes { - if h, ok := actual.Hashes[typ]; ok { - hashChecked = true - if !hmac.Equal(h, hash) { - return ErrWrongHash{typ, hash, h} - } - } - } - if !hashChecked { - return 
ErrNoCommonHash{expected.Hashes, actual.Hashes} - } - return nil -} - -const defaultHashAlgorithm = "sha512" - -func GenerateFileMeta(r io.Reader, hashAlgorithms ...string) (data.FileMeta, error) { - if len(hashAlgorithms) == 0 { - hashAlgorithms = []string{defaultHashAlgorithm} - } - hashes := make(map[string]hash.Hash, len(hashAlgorithms)) - for _, hashAlgorithm := range hashAlgorithms { - var h hash.Hash - switch hashAlgorithm { - case "sha256": - h = sha256.New() - case "sha512": - h = sha512.New() - default: - return data.FileMeta{}, ErrUnknownHashAlgorithm{hashAlgorithm} - } - hashes[hashAlgorithm] = h - r = io.TeeReader(r, h) - } - n, err := io.Copy(ioutil.Discard, r) - if err != nil { - return data.FileMeta{}, err - } - m := data.FileMeta{Length: n, Hashes: make(data.Hashes, len(hashes))} - for hashAlgorithm, h := range hashes { - m.Hashes[hashAlgorithm] = h.Sum(nil) - } - return m, nil -} - -func NormalizeTarget(path string) string { - return gopath.Join("/", path) -} - -func HashedPaths(path string, hashes data.Hashes) []string { - paths := make([]string, 0, len(hashes)) - for _, hash := range hashes { - hashedPath := filepath.Join(filepath.Dir(path), hash.String()+"."+filepath.Base(path)) - paths = append(paths, hashedPath) - } - return paths -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/util/util_test.go b/Godeps/_workspace/src/github.com/endophage/go-tuf/util/util_test.go deleted file mode 100644 index a3da6d3eea..0000000000 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/util/util_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package util - -import ( - "bytes" - "encoding/hex" - "testing" - - . "gopkg.in/check.v1" - "github.com/endophage/go-tuf/data" -) - -// Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { TestingT(t) } - -type UtilSuite struct{} - -var _ = Suite(&UtilSuite{}) - -func (UtilSuite) TestGenerateFileMetaDefault(c *C) { - // default is sha512 - r := bytes.NewReader([]byte("foo")) - meta, err := GenerateFileMeta(r) - c.Assert(err, IsNil) - c.Assert(meta.Length, Equals, int64(3)) - hashes := meta.Hashes - c.Assert(hashes, HasLen, 1) - hash, ok := hashes["sha512"] - if !ok { - c.Fatal("missing sha512 hash") - } - c.Assert(hash.String(), DeepEquals, "f7fbba6e0636f890e56fbbf3283e524c6fa3204ae298382d624741d0dc6638326e282c41be5e4254d8820772c5518a2c5a8c0c7f7eda19594a7eb539453e1ed7") -} - -func (UtilSuite) TestGenerateFileMetaExplicit(c *C) { - r := bytes.NewReader([]byte("foo")) - meta, err := GenerateFileMeta(r, "sha256", "sha512") - c.Assert(err, IsNil) - c.Assert(meta.Length, Equals, int64(3)) - hashes := meta.Hashes - c.Assert(hashes, HasLen, 2) - for name, val := range map[string]string{ - "sha256": "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", - "sha512": "f7fbba6e0636f890e56fbbf3283e524c6fa3204ae298382d624741d0dc6638326e282c41be5e4254d8820772c5518a2c5a8c0c7f7eda19594a7eb539453e1ed7", - } { - hash, ok := hashes[name] - if !ok { - c.Fatalf("missing %s hash", name) - } - c.Assert(hash.String(), DeepEquals, val) - } -} - -func (UtilSuite) TestFileMetaEqual(c *C) { - type test struct { - name string - b data.FileMeta - a data.FileMeta - err func(test) error - } - fileMeta := func(length int64, hashes map[string]string) data.FileMeta { - m := data.FileMeta{Length: length, Hashes: make(map[string]data.HexBytes, len(hashes))} - for typ, hash := range hashes { - v, err := hex.DecodeString(hash) - c.Assert(err, IsNil) - m.Hashes[typ] = v - } - return m - } - tests := []test{ - { - name: "wrong length", - a: data.FileMeta{Length: 1}, - b: data.FileMeta{Length: 2}, - err: func(test) error { return ErrWrongLength }, - }, - { - name: "wrong sha512 hash", - a: fileMeta(10, map[string]string{"sha512": "111111"}), - b: 
fileMeta(10, map[string]string{"sha512": "222222"}), - err: func(t test) error { return ErrWrongHash{"sha512", t.b.Hashes["sha512"], t.a.Hashes["sha512"]} }, - }, - { - name: "intersecting hashes", - a: fileMeta(10, map[string]string{"sha512": "111111", "md5": "222222"}), - b: fileMeta(10, map[string]string{"sha512": "111111", "sha256": "333333"}), - err: func(test) error { return nil }, - }, - { - name: "no common hashes", - a: fileMeta(10, map[string]string{"sha512": "111111"}), - b: fileMeta(10, map[string]string{"sha256": "222222", "md5": "333333"}), - err: func(t test) error { return ErrNoCommonHash{t.b.Hashes, t.a.Hashes} }, - }, - } - for _, t := range tests { - c.Assert(FileMetaEqual(t.a, t.b), DeepEquals, t.err(t), Commentf("name = %s", t.name)) - } -} - -func (UtilSuite) TestNormalizeTarget(c *C) { - for before, after := range map[string]string{ - "": "/", - "foo.txt": "/foo.txt", - "/bar.txt": "/bar.txt", - "foo//bar.txt": "/foo/bar.txt", - "/with/./a/dot": "/with/a/dot", - "/with/double/../dot": "/with/dot", - } { - c.Assert(NormalizeTarget(before), Equals, after) - } -} - -func (UtilSuite) TestHashedPaths(c *C) { - hexBytes := func(s string) data.HexBytes { - v, err := hex.DecodeString(s) - c.Assert(err, IsNil) - return v - } - hashes := data.Hashes{ - "sha512": hexBytes("abc123"), - "sha256": hexBytes("def456"), - } - paths := HashedPaths("foo/bar.txt", hashes) - // cannot use DeepEquals as the returned order is non-deterministic - c.Assert(paths, HasLen, 2) - expected := map[string]struct{}{"foo/abc123.bar.txt": {}, "foo/def456.bar.txt": {}} - for _, path := range paths { - if _, ok := expected[path]; !ok { - c.Fatalf("unexpected path: %s", path) - } - delete(expected, path) - } -} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/data/hex_bytes.go b/Godeps/_workspace/src/github.com/endophage/gotuf/data/hex_bytes.go similarity index 100% rename from Godeps/_workspace/src/github.com/endophage/go-tuf/data/hex_bytes.go rename to 
Godeps/_workspace/src/github.com/endophage/gotuf/data/hex_bytes.go diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/data/hex_bytes_test.go b/Godeps/_workspace/src/github.com/endophage/gotuf/data/hex_bytes_test.go similarity index 100% rename from Godeps/_workspace/src/github.com/endophage/go-tuf/data/hex_bytes_test.go rename to Godeps/_workspace/src/github.com/endophage/gotuf/data/hex_bytes_test.go diff --git a/Godeps/_workspace/src/github.com/endophage/gotuf/data/keys.go b/Godeps/_workspace/src/github.com/endophage/gotuf/data/keys.go new file mode 100644 index 0000000000..7e1b27c04e --- /dev/null +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/data/keys.go @@ -0,0 +1,105 @@ +package data + +import ( + "crypto/sha256" + "encoding/hex" + + "github.com/Sirupsen/logrus" + cjson "github.com/tent/canonical-json-go" +) + +type Key interface { + ID() string + Cipher() string + Public() string + Private() string +} + +type KeyPair struct { + Public string `json:"public"` + Private string `json:"private"` +} + +type TUFKey struct { + id string `json:"-"` + Type string `json:"keytype"` + Value KeyPair `json:"keyval"` +} + +func NewTUFKey(cipher, public, private string) *TUFKey { + return &TUFKey{ + Type: cipher, + Value: KeyPair{ + Public: public, + Private: private, + }, + } +} + +func (k TUFKey) Cipher() string { + return k.Type +} + +func (k *TUFKey) ID() string { + if k.id == "" { + logrus.Debug("Generating Key ID") + pubK := NewTUFKey(k.Cipher(), k.Public(), "") + data, err := cjson.Marshal(&pubK) + if err != nil { + logrus.Error("Error generating key ID:", err) + } + digest := sha256.Sum256(data) + k.id = hex.EncodeToString(digest[:]) + } + return k.id +} + +func (k TUFKey) Public() string { + return k.Value.Public +} + +type PublicKey struct { + TUFKey +} + +func (k PublicKey) Private() string { + return "" +} + +func NewPublicKey(cipher, public string) *PublicKey { + return &PublicKey{ + TUFKey{ + Type: cipher, + Value: KeyPair{ + Public: public, 
+ Private: "", + }, + }, + } +} + +func PublicKeyFromPrivate(pk PrivateKey) *PublicKey { + return &PublicKey{ + pk.TUFKey, + } +} + +type PrivateKey struct { + TUFKey +} + +func NewPrivateKey(cipher, public, private string) *PrivateKey { + return &PrivateKey{ + TUFKey{ + Type: cipher, + Value: KeyPair{ + Public: public, + Private: private, + }, + }, + } +} + +func (k PrivateKey) Private() string { + return k.Value.Private +} diff --git a/Godeps/_workspace/src/github.com/endophage/gotuf/data/roles.go b/Godeps/_workspace/src/github.com/endophage/gotuf/data/roles.go new file mode 100644 index 0000000000..d77529bb04 --- /dev/null +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/data/roles.go @@ -0,0 +1,117 @@ +package data + +import ( + "fmt" + "strings" + + "github.com/endophage/gotuf/errors" +) + +var ValidRoles = map[string]string{ + "root": "root", + "targets": "targets", + "snapshot": "snapshot", + "timestamp": "timestamp", +} + +func SetValidRoles(rs map[string]string) { + for k, v := range rs { + ValidRoles[strings.ToLower(k)] = strings.ToLower(v) + } +} + +func RoleName(role string) string { + if r, ok := ValidRoles[role]; ok { + return r + } + return role +} + +// ValidRole only determines the name is semantically +// correct. For target delegated roles, it does NOT check +// the the appropriate parent roles exist. 
+func ValidRole(name string) bool { + name = strings.ToLower(name) + if v, ok := ValidRoles[name]; ok { + return name == v + } + targetsBase := fmt.Sprintf("%s/", ValidRoles["targets"]) + if strings.HasPrefix(name, targetsBase) { + return true + } + for _, v := range ValidRoles { + if name == v { + return true + } + } + return false +} + +type RootRole struct { + KeyIDs []string `json:"keyids"` + Threshold int `json:"threshold"` +} +type Role struct { + RootRole + Name string `json:"name"` + Paths []string `json:"paths,omitempty"` + PathHashPrefixes []string `json:"path_hash_prefixes,omitempty"` +} + +func NewRole(name string, threshold int, keyIDs, paths, pathHashPrefixes []string) (*Role, error) { + if len(paths) > 0 && len(pathHashPrefixes) > 0 { + return nil, errors.ErrInvalidRole{} + } + if threshold < 1 { + return nil, errors.ErrInvalidRole{} + } + if !ValidRole(name) { + return nil, errors.ErrInvalidRole{} + } + return &Role{ + RootRole: RootRole{ + KeyIDs: keyIDs, + Threshold: threshold, + }, + Name: name, + Paths: paths, + PathHashPrefixes: pathHashPrefixes, + }, nil + +} + +func (r Role) IsValid() bool { + return !(len(r.Paths) > 0 && len(r.PathHashPrefixes) > 0) +} + +func (r Role) ValidKey(id string) bool { + for _, key := range r.KeyIDs { + if key == id { + return true + } + } + return false +} + +func (r Role) CheckPaths(path string) bool { + for _, p := range r.Paths { + if strings.HasPrefix(path, p) { + return true + } + } + return false +} + +func (r Role) CheckPrefixes(hash string) bool { + for _, p := range r.PathHashPrefixes { + if strings.HasPrefix(hash, p) { + return true + } + } + return false +} + +func (r Role) IsDelegation() bool { + targetsBase := fmt.Sprintf("%s/", ValidRoles["targets"]) + return strings.HasPrefix(r.Name, targetsBase) +} diff --git a/Godeps/_workspace/src/github.com/endophage/gotuf/data/root.go b/Godeps/_workspace/src/github.com/endophage/gotuf/data/root.go new file mode 100644 index 0000000000..323787802e --- /dev/null 
+++ b/Godeps/_workspace/src/github.com/endophage/gotuf/data/root.go @@ -0,0 +1,69 @@ +package data + +import ( + "encoding/json" + + cjson "github.com/tent/canonical-json-go" +) + +type SignedRoot struct { + Signatures []Signature + Signed Root + Dirty bool +} + +type Root struct { + Type string `json:"_type"` + Version int `json:"version"` + Expires string `json:"expires"` + Keys map[string]*TUFKey `json:"keys"` + Roles map[string]*RootRole `json:"roles"` + ConsistentSnapshot bool `json:"consistent_snapshot"` +} + +func NewRoot(keys map[string]*TUFKey, roles map[string]*RootRole, consistent bool) (*SignedRoot, error) { + return &SignedRoot{ + Signatures: make([]Signature, 0), + Signed: Root{ + Type: TUFTypes["root"], + Version: 0, + Expires: DefaultExpires("root").String(), + Keys: keys, + Roles: roles, + ConsistentSnapshot: consistent, + }, + Dirty: true, + }, nil +} + +func (r SignedRoot) ToSigned() (*Signed, error) { + s, err := cjson.Marshal(r.Signed) + if err != nil { + return nil, err + } + signed := json.RawMessage{} + err = signed.UnmarshalJSON(s) + if err != nil { + return nil, err + } + sigs := make([]Signature, len(r.Signatures)) + copy(sigs, r.Signatures) + return &Signed{ + Signatures: sigs, + Signed: signed, + }, nil +} + +func RootFromSigned(s *Signed) (*SignedRoot, error) { + r := Root{} + err := json.Unmarshal(s.Signed, &r) + if err != nil { + return nil, err + } + sigs := make([]Signature, len(s.Signatures)) + copy(sigs, s.Signatures) + return &SignedRoot{ + Signatures: sigs, + Signed: r, + }, nil +} diff --git a/Godeps/_workspace/src/github.com/endophage/gotuf/data/snapshot.go b/Godeps/_workspace/src/github.com/endophage/gotuf/data/snapshot.go new file mode 100644 index 0000000000..1519f38150 --- /dev/null +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/data/snapshot.go @@ -0,0 +1,93 @@ +package data + +import ( + "bytes" + "encoding/json" + + cjson "github.com/tent/canonical-json-go" +) + +type SignedSnapshot struct { + Signatures 
[]Signature + Signed Snapshot + Dirty bool +} + +type Snapshot struct { + Type string `json:"_type"` + Version int `json:"version"` + Expires string `json:"expires"` + Meta Files `json:"meta"` +} + +func NewSnapshot(root *Signed, targets *Signed) (*SignedSnapshot, error) { + rootJSON, err := json.Marshal(root) + if err != nil { + return nil, err + } + targetsJSON, err := json.Marshal(targets) + if err != nil { + return nil, err + } + rootMeta, err := NewFileMeta(bytes.NewReader(rootJSON), "sha256") + if err != nil { + return nil, err + } + targetsMeta, err := NewFileMeta(bytes.NewReader(targetsJSON), "sha256") + if err != nil { + return nil, err + } + return &SignedSnapshot{ + Signatures: make([]Signature, 0), + Signed: Snapshot{ + Type: TUFTypes["snapshot"], + Version: 0, + Expires: DefaultExpires("snapshot").String(), + Meta: Files{ + ValidRoles["root"]: rootMeta, + ValidRoles["targets"]: targetsMeta, + }, + }, + }, nil +} + +func (sp *SignedSnapshot) hashForRole(role string) HexBytes { + return sp.Signed.Meta[role].Hashes["sha256"] +} + +func (sp SignedSnapshot) ToSigned() (*Signed, error) { + s, err := cjson.Marshal(sp.Signed) + if err != nil { + return nil, err + } + signed := json.RawMessage{} + err = signed.UnmarshalJSON(s) + if err != nil { + return nil, err + } + sigs := make([]Signature, len(sp.Signatures)) + copy(sigs, sp.Signatures) + return &Signed{ + Signatures: sigs, + Signed: signed, + }, nil +} + +func (sp *SignedSnapshot) AddMeta(role string, meta FileMeta) { + sp.Signed.Meta[role] = meta + sp.Dirty = true +} + +func SnapshotFromSigned(s *Signed) (*SignedSnapshot, error) { + sp := Snapshot{} + err := json.Unmarshal(s.Signed, &sp) + if err != nil { + return nil, err + } + sigs := make([]Signature, len(s.Signatures)) + copy(sigs, s.Signatures) + return &SignedSnapshot{ + Signatures: sigs, + Signed: sp, + }, nil +} diff --git a/Godeps/_workspace/src/github.com/endophage/gotuf/data/targets.go 
b/Godeps/_workspace/src/github.com/endophage/gotuf/data/targets.go new file mode 100644 index 0000000000..bf0da26eff --- /dev/null +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/data/targets.go @@ -0,0 +1,117 @@ +package data + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + + cjson "github.com/tent/canonical-json-go" +) + +type SignedTargets struct { + Signatures []Signature + Signed Targets + Dirty bool +} + +type Targets struct { + Type string `json:"_type"` + Version int `json:"version"` + Expires string `json:"expires"` + Targets Files `json:"targets"` + Delegations Delegations `json:"delegations,omitempty"` +} + +func NewTargets() *SignedTargets { + return &SignedTargets{ + Signatures: make([]Signature, 0), + Signed: Targets{ + Type: TUFTypes["targets"], + Version: 0, + Expires: DefaultExpires("targets").String(), + Targets: make(Files), + Delegations: *NewDelegations(), + }, + Dirty: true, + } +} + +// GetMeta attempts to find the targets entry for the path. It +// will return nil in the case of the target not being found. +func (t SignedTargets) GetMeta(path string) *FileMeta { + for p, meta := range t.Signed.Targets { + if p == path { + return &meta + } + } + return nil +} + +// GetDelegations filters the roles and associated keys that may be +// the signers for the given target path. If no appropriate roles +// can be found, it will simply return nil for the return values. +// The returned slice of Role will have order maintained relative +// to the role slice on Delegations per TUF spec proposal on using +// order to determine priority. +func (t SignedTargets) GetDelegations(path string) []*Role { + roles := make([]*Role, 0) + pathHashBytes := sha256.Sum256([]byte(path)) + pathHash := hex.EncodeToString(pathHashBytes[:]) + for _, r := range t.Signed.Delegations.Roles { + if !r.IsValid() { + // Role has both Paths and PathHashPrefixes. 
+ continue + } + if r.CheckPaths(path) { + roles = append(roles, r) + continue + } + if r.CheckPrefixes(pathHash) { + roles = append(roles, r) + continue + } + //keysDB.AddRole(r) + } + return roles +} + +func (t *SignedTargets) AddTarget(path string, meta FileMeta) { + t.Signed.Targets[path] = meta + t.Dirty = true +} + +func (t *SignedTargets) AddDelegation(role *Role, keys []*TUFKey) error { + return nil +} + +func (t SignedTargets) ToSigned() (*Signed, error) { + s, err := cjson.Marshal(t.Signed) + if err != nil { + return nil, err + } + signed := json.RawMessage{} + err = signed.UnmarshalJSON(s) + if err != nil { + return nil, err + } + sigs := make([]Signature, len(t.Signatures)) + copy(sigs, t.Signatures) + return &Signed{ + Signatures: sigs, + Signed: signed, + }, nil +} + +func TargetsFromSigned(s *Signed) (*SignedTargets, error) { + t := Targets{} + err := json.Unmarshal(s.Signed, &t) + if err != nil { + return nil, err + } + sigs := make([]Signature, len(s.Signatures)) + copy(sigs, s.Signatures) + return &SignedTargets{ + Signatures: sigs, + Signed: t, + }, nil +} diff --git a/Godeps/_workspace/src/github.com/endophage/gotuf/data/timestamp.go b/Godeps/_workspace/src/github.com/endophage/gotuf/data/timestamp.go new file mode 100644 index 0000000000..84b976830a --- /dev/null +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/data/timestamp.go @@ -0,0 +1,75 @@ +package data + +import ( + "bytes" + "encoding/json" + + cjson "github.com/tent/canonical-json-go" +) + +type SignedTimestamp struct { + Signatures []Signature + Signed Timestamp + Dirty bool +} + +type Timestamp struct { + Type string `json:"_type"` + Version int `json:"version"` + Expires string `json:"expires"` + Meta Files `json:"meta"` +} + +func NewTimestamp(snapshot *Signed) (*SignedTimestamp, error) { + snapshotJSON, err := json.Marshal(snapshot) + if err != nil { + return nil, err + } + snapshotMeta, err := NewFileMeta(bytes.NewReader(snapshotJSON), "sha256") + if err != nil { + return 
nil, err + } + return &SignedTimestamp{ + Signatures: make([]Signature, 0), + Signed: Timestamp{ + Type: TUFTypes["timestamp"], + Version: 0, + Expires: DefaultExpires("timestamp").String(), + Meta: Files{ + ValidRoles["timestamp"]: snapshotMeta, + }, + }, + }, nil +} + +func (ts SignedTimestamp) ToSigned() (*Signed, error) { + s, err := cjson.Marshal(ts.Signed) + if err != nil { + return nil, err + } + signed := json.RawMessage{} + err = signed.UnmarshalJSON(s) + if err != nil { + return nil, err + } + sigs := make([]Signature, len(ts.Signatures)) + copy(sigs, ts.Signatures) + return &Signed{ + Signatures: sigs, + Signed: signed, + }, nil +} + +func TimestampFromSigned(s *Signed) (*SignedTimestamp, error) { + ts := Timestamp{} + err := json.Unmarshal(s.Signed, &ts) + if err != nil { + return nil, err + } + sigs := make([]Signature, len(s.Signatures)) + copy(sigs, s.Signatures) + return &SignedTimestamp{ + Signatures: sigs, + Signed: ts, + }, nil +} diff --git a/Godeps/_workspace/src/github.com/endophage/gotuf/data/types.go b/Godeps/_workspace/src/github.com/endophage/gotuf/data/types.go new file mode 100644 index 0000000000..21308b0673 --- /dev/null +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/data/types.go @@ -0,0 +1,136 @@ +package data + +import ( + "crypto/sha256" + "crypto/sha512" + "encoding/json" + "fmt" + "hash" + "io" + "io/ioutil" + "time" + + "github.com/Sirupsen/logrus" +) + +const defaultHashAlgorithm = "sha256" + +var TUFTypes = map[string]string{ + "targets": "Targets", + "root": "Root", + "snapshot": "Snapshot", + "timestamp": "Timestamp", +} + +// SetTUFTypes allows one to override some or all of the default +// type names in TUF. +func SetTUFTypes(ts map[string]string) { + for k, v := range ts { + TUFTypes[k] = v + } +} + +// Checks if type is correct. +func ValidTUFType(t string) bool { + // most people will just use the defaults so have this optimal check + // first. 
Do comparison just in case there is some unknown vulnerability + // if a key and value in the map differ. + if v, ok := TUFTypes[t]; ok { + return t == v + } + // For people that feel the need to change the default type names. + for _, v := range TUFTypes { + if t == v { + return true + } + } + return false +} + +type Signed struct { + Signed json.RawMessage `json:"signed"` + Signatures []Signature `json:"signatures"` +} + +type Signature struct { + KeyID string `json:"keyid"` + Method string `json:"method"` + Signature HexBytes `json:"sig"` +} + +type Files map[string]FileMeta + +type Hashes map[string]HexBytes + +type FileMeta struct { + Length int64 `json:"length"` + Hashes Hashes `json:"hashes"` + Custom *json.RawMessage `json:"custom,omitempty"` +} + +func NewFileMeta(r io.Reader, hashAlgorithms ...string) (FileMeta, error) { + if len(hashAlgorithms) == 0 { + hashAlgorithms = []string{defaultHashAlgorithm} + } + hashes := make(map[string]hash.Hash, len(hashAlgorithms)) + for _, hashAlgorithm := range hashAlgorithms { + var h hash.Hash + switch hashAlgorithm { + case "sha256": + h = sha256.New() + case "sha512": + h = sha512.New() + default: + return FileMeta{}, fmt.Errorf("Unknown Hash Algorithm: %s", hashAlgorithm) + } + hashes[hashAlgorithm] = h + r = io.TeeReader(r, h) + } + n, err := io.Copy(ioutil.Discard, r) + if err != nil { + return FileMeta{}, err + } + m := FileMeta{Length: n, Hashes: make(Hashes, len(hashes))} + for hashAlgorithm, h := range hashes { + m.Hashes[hashAlgorithm] = h.Sum(nil) + } + return m, nil +} + +type Delegations struct { + Keys map[string]*TUFKey `json:"keys"` + Roles []*Role `json:"roles"` +} + +func NewDelegations() *Delegations { + return &Delegations{ + Keys: make(map[string]*TUFKey), + Roles: make([]*Role, 0), + } +} + +var defaultExpiryTimes = map[string]time.Time{ + "root": time.Now().AddDate(1, 0, 0), + "targets": time.Now().AddDate(0, 3, 0), + "snapshot": time.Now().AddDate(0, 0, 7), + "timestamp": time.Now().AddDate(0, 
0, 1), +} + +// SetDefaultExpiryTimes allows one to change the default expiries. +func SetDefaultExpiryTimes(times map[string]time.Time) { + for key, value := range times { + if _, ok := defaultExpiryTimes[key]; !ok { + logrus.Errorf("Attempted to set default expiry for an unknown role: %s", key) + continue + } + defaultExpiryTimes[key] = value + } +} + +func DefaultExpires(role string) time.Time { + var t time.Time + if t, ok := defaultExpiryTimes[role]; ok { + return t + } + return t.UTC().Round(time.Second) +} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/errors/errors.go b/Godeps/_workspace/src/github.com/endophage/gotuf/errors/errors.go similarity index 100% rename from Godeps/_workspace/src/github.com/endophage/go-tuf/errors/errors.go rename to Godeps/_workspace/src/github.com/endophage/gotuf/errors/errors.go diff --git a/Godeps/_workspace/src/github.com/endophage/gotuf/keys/db.go b/Godeps/_workspace/src/github.com/endophage/gotuf/keys/db.go new file mode 100644 index 0000000000..7404f02229 --- /dev/null +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/keys/db.go @@ -0,0 +1,60 @@ +package keys + +import ( + "errors" + + "github.com/endophage/gotuf/data" +) + +var ( + ErrWrongType = errors.New("tuf: invalid key type") + ErrExists = errors.New("tuf: key already in db") + ErrWrongID = errors.New("tuf: key id mismatch") + ErrInvalidKey = errors.New("tuf: invalid key") + ErrInvalidRole = errors.New("tuf: invalid role") + ErrInvalidKeyID = errors.New("tuf: invalid key id") + ErrInvalidThreshold = errors.New("tuf: invalid role threshold") +) + +type KeyDB struct { + roles map[string]*data.Role + keys map[string]*data.PublicKey +} + +func NewDB() *KeyDB { + return &KeyDB{ + roles: make(map[string]*data.Role), + keys: make(map[string]*data.PublicKey), + } +} + +func (db *KeyDB) AddKey(k *data.PublicKey) { + db.keys[k.ID()] = k +} + +func (db *KeyDB) AddRole(r *data.Role) error { + if !data.ValidRole(r.Name) { + return ErrInvalidRole + } + if 
r.Threshold < 1 { + return ErrInvalidThreshold + } + + // validate all key ids are in the keys maps + for _, id := range r.KeyIDs { + if _, ok := db.keys[id]; !ok { + return ErrInvalidKeyID + } + } + + db.roles[r.Name] = r + return nil +} + +func (db *KeyDB) GetKey(id string) *data.PublicKey { + return db.keys[id] +} + +func (db *KeyDB) GetRole(name string) *data.Role { + return db.roles[name] +} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/trust.go b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/ed25519.go similarity index 50% rename from Godeps/_workspace/src/github.com/endophage/go-tuf/signed/trust.go rename to Godeps/_workspace/src/github.com/endophage/gotuf/signed/ed25519.go index 6dbaac0092..ff9390ce76 100644 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/trust.go +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/ed25519.go @@ -2,43 +2,26 @@ package signed import ( "crypto/rand" + "encoding/hex" "github.com/agl/ed25519" - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/keys" + "github.com/endophage/gotuf/data" ) // Ed25519 implements a simple in memory keystore and trust service type Ed25519 struct { - keys map[string]*keys.PrivateKey + keys map[string]*data.PrivateKey } -var _ TrustService = &Ed25519{} - func NewEd25519() *Ed25519 { return &Ed25519{ - make(map[string]*keys.PrivateKey), + make(map[string]*data.PrivateKey), } } // addKey allows you to add a private key to the trust service -func (trust *Ed25519) addKey(k *keys.PrivateKey) { - key := keys.PrivateKey{ - PublicKey: keys.PublicKey{ - Key: data.Key{ - Type: k.Type, - Value: data.KeyValue{ - Public: make([]byte, len(k.Value.Public)), - }, - }, - ID: k.ID, - }, - Private: make([]byte, len(k.Private)), - } - - copy(key.Value.Public, k.Value.Public) - copy(key.Private, k.Private) - trust.keys[k.ID] = &key +func (trust *Ed25519) addKey(k *data.PrivateKey) { + trust.keys[k.ID()] = k } func (trust *Ed25519) RemoveKey(keyID 
string) { @@ -50,8 +33,8 @@ func (trust *Ed25519) Sign(keyIDs []string, toSign []byte) ([]data.Signature, er for _, kID := range keyIDs { priv := [ed25519.PrivateKeySize]byte{} pub := [ed25519.PublicKeySize]byte{} - copy(priv[:], trust.keys[kID].Private) - copy(pub[:], trust.keys[kID].Value.Public) + copy(priv[:], trust.keys[kID].Private()) + copy(pub[:], trust.keys[kID].Public()) sig := ed25519.Sign(&priv, toSign) signatures = append(signatures, data.Signature{ KeyID: kID, @@ -63,27 +46,30 @@ func (trust *Ed25519) Sign(keyIDs []string, toSign []byte) ([]data.Signature, er } -func (trust *Ed25519) Create() (*keys.PublicKey, error) { +func (trust *Ed25519) Create() (*data.PublicKey, error) { pub, priv, err := ed25519.GenerateKey(rand.Reader) if err != nil { return nil, err } - pubBytes := make([]byte, ed25519.PublicKeySize) - copy(pubBytes, pub[:]) - privBytes := make([]byte, ed25519.PrivateKeySize) - copy(privBytes, priv[:]) - public := keys.NewPublicKey("ed25519", pubBytes) - private := keys.PrivateKey{*public, privBytes} - trust.addKey(&private) + pubStr := hex.EncodeToString(pub[:]) + privStr := hex.EncodeToString(priv[:]) + public := data.NewPublicKey("ed25519", pubStr) + private := data.NewPrivateKey("ed25519", pubStr, privStr) + trust.addKey(private) return public, nil } -func (trust *Ed25519) PublicKeys(keyIDs ...string) (map[string]*keys.PublicKey, error) { - k := make(map[string]*keys.PublicKey) +func (trust *Ed25519) PublicKeys(keyIDs ...string) (map[string]*data.PublicKey, error) { + k := make(map[string]*data.PublicKey) for _, kID := range keyIDs { if key, ok := trust.keys[kID]; ok { - k[kID] = &key.PublicKey + k[kID] = data.PublicKeyFromPrivate(*key) } } return k, nil } + +func (trust *Ed25519) CanSign(keyID string) bool { + _, ok := trust.keys[keyID] + return ok +} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/errors.go b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/errors.go similarity index 92% rename from 
Godeps/_workspace/src/github.com/endophage/go-tuf/signed/errors.go rename to Godeps/_workspace/src/github.com/endophage/gotuf/signed/errors.go index 10bdfd6f57..e7384a0519 100644 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/errors.go +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/errors.go @@ -2,11 +2,10 @@ package signed import ( "fmt" - "time" ) type ErrExpired struct { - Expired time.Time + Expired string } func (e ErrExpired) Error() string { diff --git a/Godeps/_workspace/src/github.com/endophage/gotuf/signed/interface.go b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/interface.go new file mode 100644 index 0000000000..e7f8edf49d --- /dev/null +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/interface.go @@ -0,0 +1,45 @@ +package signed + +import ( + "github.com/endophage/gotuf/data" +) + +// SigningService defines the necessary functions to determine +// if a user is able to sign with a key, and to perform signing. +type SigningService interface { + // Sign takes a slice of keyIDs and a piece of data to sign + // and returns a slice of signatures and an error + Sign(keyIDs []string, data []byte) ([]data.Signature, error) + + // CanSign takes a single keyID and returns a boolean indicating + // whether the caller is able to sign with the keyID (i.e. does + // this signing service hold the private key associated with + // they keyID) + CanSign(keyID string) bool +} + +// KeyService provides management of keys locally. It will never +// accept or provide private keys. Communication between the KeyService +// and a SigningService happen behind the Create function. +type KeyService interface { + // Create issues a new key pair and is responsible for loading + // the private key into the appropriate signing service. 
+ Create() (*data.PublicKey, error) + + // PublicKeys return the PublicKey instances for the given KeyIDs + PublicKeys(keyIDs ...string) (map[string]*data.PublicKey, error) +} + +// CryptoService defines a unified Signing and Key Service as this +// will be most useful for most applications. +type CryptoService interface { + SigningService + KeyService +} + +// Verifier defines an interface for verfying signatures. An implementer +// of this interface should verify signatures for one and only one +// signing scheme. +type Verifier interface { + Verify(key data.Key, sig []byte, msg []byte) error +} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/sign.go b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/sign.go similarity index 57% rename from Godeps/_workspace/src/github.com/endophage/go-tuf/signed/sign.go rename to Godeps/_workspace/src/github.com/endophage/gotuf/signed/sign.go index cf379f4ad3..7b421b6df7 100644 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/sign.go +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/sign.go @@ -1,29 +1,28 @@ package signed import ( - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/keys" + "github.com/endophage/gotuf/data" ) // Signer encapsulates a signing service with some convenience methods to // interface between TUF keys and the generic service interface type Signer struct { - service TrustService + service CryptoService } -func NewSigner(service TrustService) *Signer { +func NewSigner(service CryptoService) *Signer { return &Signer{service} } // Sign takes a data.Signed and a key, calculated and adds the signature // to the data.Signed -func (signer *Signer) Sign(s *data.Signed, keys ...*keys.PublicKey) error { +func (signer *Signer) Sign(s *data.Signed, keys ...*data.PublicKey) error { signatures := make([]data.Signature, 0, len(s.Signatures)+1) keyIDMemb := make(map[string]struct{}) keyIDs := make([]string, 0, len(keys)) for _, key := range keys { - 
keyIDMemb[key.ID] = struct{}{} - keyIDs = append(keyIDs, key.ID) + keyIDMemb[key.ID()] = struct{}{} + keyIDs = append(keyIDs, key.ID()) } for _, sig := range s.Signatures { if _, ok := keyIDMemb[sig.KeyID]; ok { @@ -40,20 +39,10 @@ func (signer *Signer) Sign(s *data.Signed, keys ...*keys.PublicKey) error { return nil } -//func (signer *Signer) Marshal(v interface{}, keys ...*keys.PublicKey) (*data.Signed, error) { -// b, err := cjson.Marshal(v) -// if err != nil { -// return nil, err -// } -// s := &data.Signed{Signed: b} -// err = signer.Sign(s, keys...) -// return s, err // err may be nil but there's no point in checking, just return it -//} - -func (signer *Signer) Create() (*keys.PublicKey, error) { +func (signer *Signer) Create() (*data.PublicKey, error) { return signer.service.Create() } -func (signer *Signer) PublicKeys(keyIDs ...string) (map[string]*keys.PublicKey, error) { +func (signer *Signer) PublicKeys(keyIDs ...string) (map[string]*data.PublicKey, error) { return signer.service.PublicKeys(keyIDs...) 
} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/sign_test.go b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/sign_test.go similarity index 77% rename from Godeps/_workspace/src/github.com/endophage/go-tuf/signed/sign_test.go rename to Godeps/_workspace/src/github.com/endophage/gotuf/signed/sign_test.go index 2262d7b380..6e330e8aa4 100644 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/sign_test.go +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/sign_test.go @@ -3,15 +3,15 @@ package signed import ( "testing" - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/keys" + "github.com/endophage/gotuf/data" + "github.com/endophage/gotuf/keys" ) -type MockTrustService struct { +type MockCryptoService struct { testKey keys.PublicKey } -func (mts *MockTrustService) Sign(keyIDs []string, _ []byte) ([]data.Signature, error) { +func (mts *MockCryptoService) Sign(keyIDs []string, _ []byte) ([]data.Signature, error) { sigs := make([]data.Signature, 0, len(keyIDs)) for _, keyID := range keyIDs { sigs = append(sigs, data.Signature{KeyID: keyID}) @@ -19,20 +19,20 @@ func (mts *MockTrustService) Sign(keyIDs []string, _ []byte) ([]data.Signature, return sigs, nil } -func (mts *MockTrustService) Create() (*keys.PublicKey, error) { +func (mts *MockCryptoService) Create() (*keys.PublicKey, error) { return &mts.testKey, nil } -func (mts *MockTrustService) PublicKeys(keyIDs ...string) (map[string]*keys.PublicKey, error) { +func (mts *MockCryptoService) PublicKeys(keyIDs ...string) (map[string]*keys.PublicKey, error) { keys := map[string]*keys.PublicKey{"testID": &mts.testKey} return keys, nil } -var _ TrustService = &MockTrustService{} +var _ CryptoService = &MockCryptoService{} // Test signing and ensure the expected signature is added func TestBasicSign(t *testing.T) { - signer := Signer{&MockTrustService{ + signer := Signer{&MockCryptoService{ testKey: keys.PublicKey{ID: "testID"}, }} key, err := 
signer.Create() @@ -54,10 +54,10 @@ func TestBasicSign(t *testing.T) { } // Test signing with the same key multiple times only registers a single signature -// for the key (N.B. MockTrustService.Sign will still be called again, but Signer.Sign +// for the key (N.B. MockCryptoService.Sign will still be called again, but Signer.Sign // should be cleaning previous signatures by the KeyID when asked to sign again) func TestReSign(t *testing.T) { - signer := Signer{&MockTrustService{ + signer := Signer{&MockCryptoService{ testKey: keys.PublicKey{}, }} key := keys.PublicKey{ID: "testID"} @@ -77,7 +77,7 @@ func TestReSign(t *testing.T) { } func TestMultiSign(t *testing.T) { - signer := Signer{&MockTrustService{}} + signer := Signer{&MockCryptoService{}} key := keys.PublicKey{ID: "testID1"} testData := data.Signed{} @@ -100,7 +100,7 @@ func TestMultiSign(t *testing.T) { } func TestCreate(t *testing.T) { - signer := Signer{&MockTrustService{ + signer := Signer{&MockCryptoService{ testKey: keys.PublicKey{ID: "testID"}, }} diff --git a/Godeps/_workspace/src/github.com/endophage/gotuf/signed/verifiers.go b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/verifiers.go new file mode 100644 index 0000000000..82db1d67ca --- /dev/null +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/verifiers.go @@ -0,0 +1,119 @@ +package signed + +import ( + "crypto" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "io/ioutil" + "reflect" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/agl/ed25519" + "github.com/endophage/gotuf/data" +) + +// Verifiers serves as a map of all verifiers available on the system and +// can be injected into a verificationService. For testing and configuration +// purposes, it will not be used by default. 
+var Verifiers = map[string]Verifier{ + "ed25519": Ed25519Verifier{}, + "rsa": RSAVerifier{}, + "pycrypto-pkcs#1 pss": RSAPSSVerifier{}, +} + +// RegisterVerifier provides a convenience function for init() functions +// to register additional verifiers or replace existing ones. +func RegisterVerifier(name string, v Verifier) { + curr, ok := Verifiers[name] + if ok { + typOld := reflect.TypeOf(curr) + typNew := reflect.TypeOf(v) + logrus.Debugf( + "Replacing already loaded verifier %s:%s with %s:%s", + typOld.PkgPath(), typOld.Name(), + typNew.PkgPath(), typNew.Name(), + ) + } else { + logrus.Debug("Adding verifier for: ", name) + } + Verifiers[name] = v +} + +type Ed25519Verifier struct{} + +func (v Ed25519Verifier) Verify(key data.Key, sig []byte, msg []byte) error { + var sigBytes [ed25519.SignatureSize]byte + if len(sig) != len(sigBytes) { + logrus.Infof("Signature length is incorrect, must be %d, was %d.", ed25519.SignatureSize, len(sig)) + return ErrInvalid + } + copy(sigBytes[:], sig) + + var keyBytes [ed25519.PublicKeySize]byte + copy(keyBytes[:], key.Public()) + + if !ed25519.Verify(&keyBytes, msg, &sigBytes) { + logrus.Infof("Failed ed25519 verification") + return ErrInvalid + } + return nil +} + +type RSAVerifier struct{} + +func (v RSAVerifier) Verify(key data.Key, sig []byte, msg []byte) error { + digest := sha256.Sum256(msg) + keyReader := base64.NewDecoder(base64.StdEncoding, strings.NewReader(key.Public())) + keyBytes, _ := ioutil.ReadAll(keyReader) + pub, err := x509.ParsePKIXPublicKey(keyBytes) + if err != nil { + logrus.Infof("Failed to parse public key: %s\n", err) + return ErrInvalid + } + + rsaPub, ok := pub.(*rsa.PublicKey) + if !ok { + logrus.Infof("Value returned from ParsePKIXPublicKey was not an RSA public key") + return ErrInvalid + } + + if err = rsa.VerifyPKCS1v15(rsaPub, crypto.SHA256, digest[:], sig); err != nil { + logrus.Infof("Failed verification: %s", err) + return ErrInvalid + } + return nil +} + +// RSAPSSVerifier checks 
RSASSA-PSS signatures +type RSAPSSVerifier struct{} + +// Verify does the actual check. +// N.B. We have not been able to make this work in a way that is compatible +// with PyCrypto. +func (v RSAPSSVerifier) Verify(key data.Key, sig []byte, msg []byte) error { + digest := sha256.Sum256(msg) + + k, _ := pem.Decode([]byte(key.Public())) + pub, err := x509.ParsePKIXPublicKey(k.Bytes) + if err != nil { + logrus.Infof("Failed to parse public key: %s\n", err) + return ErrInvalid + } + + rsaPub, ok := pub.(*rsa.PublicKey) + if !ok { + logrus.Infof("Value returned from ParsePKIXPublicKey was not an RSA public key") + return ErrInvalid + } + + opts := rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256} + if err = rsa.VerifyPSS(rsaPub, crypto.SHA256, digest[:], sig, &opts); err != nil { + logrus.Infof("Failed verification: %s", err) + return ErrInvalid + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/endophage/gotuf/signed/verifiers_test.go b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/verifiers_test.go new file mode 100644 index 0000000000..b51ae0c15d --- /dev/null +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/verifiers_test.go @@ -0,0 +1,13 @@ +package signed + +import ( + "crypto" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "testing" +) + +func TestRSAVerify(t *testing.T) { + +} diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/verify.go b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/verify.go similarity index 52% rename from Godeps/_workspace/src/github.com/endophage/go-tuf/signed/verify.go rename to Godeps/_workspace/src/github.com/endophage/gotuf/signed/verify.go index 754bcfa445..d60b732abd 100644 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/verify.go +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/verify.go @@ -3,18 +3,12 @@ package signed import ( "encoding/json" "errors" - "fmt" - "log" "strings" "time" - "crypto" - "crypto/rsa" - 
"crypto/sha256" - "crypto/x509" - //"github.com/agl/ed25519" - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/keys" + "github.com/Sirupsen/logrus" + "github.com/endophage/gotuf/data" + "github.com/endophage/gotuf/keys" "github.com/tent/canonical-json-go" ) @@ -29,12 +23,12 @@ var ( ) type signedMeta struct { - Type string `json:"_type"` - Expires time.Time `json:"expires"` - Version int `json:"version"` + Type string `json:"_type"` + Expires string `json:"expires"` + Version int `json:"version"` } -func Verify(s *data.Signed, role string, minVersion int, db *keys.DB) error { +func Verify(s *data.Signed, role string, minVersion int, db *keys.KeyDB) error { if err := VerifySignatures(s, role, db); err != nil { return err } @@ -43,11 +37,17 @@ func Verify(s *data.Signed, role string, minVersion int, db *keys.DB) error { if err := json.Unmarshal(s.Signed, sm); err != nil { return err } - if strings.ToLower(sm.Type) != strings.ToLower(role) { + // This is not the valid way to check types as all targets files will + // have the "Targets" type. 
+ //if strings.ToLower(sm.Type) != strings.ToLower(role) { + // return ErrWrongType + //} + if !data.ValidTUFType(sm.Type) { return ErrWrongType } if IsExpired(sm.Expires) { - return ErrExpired{sm.Expires} + //logrus.Errorf("Metadata for %s expired", role) + //return ErrExpired{sm.Expires} } if sm.Version < minVersion { return ErrLowVersion{sm.Version, minVersion} @@ -56,16 +56,22 @@ func Verify(s *data.Signed, role string, minVersion int, db *keys.DB) error { return nil } -var IsExpired = func(t time.Time) bool { - return t.Sub(time.Now()) <= 0 +var IsExpired = func(t string) bool { + ts, err := time.Parse(time.RFC3339, t) + if err != nil { + ts, err = time.Parse("2006-01-02 15:04:05 MST", t) + if err != nil { + return false + } + } + return ts.Sub(time.Now()) <= 0 } -func VerifySignatures(s *data.Signed, role string, db *keys.DB) error { +func VerifySignatures(s *data.Signed, role string, db *keys.KeyDB) error { if len(s.Signatures) == 0 { return ErrNoSignatures } - fmt.Println("Role:", role) roleData := db.GetRole(role) if roleData == nil { return ErrUnknownRole @@ -82,55 +88,28 @@ func VerifySignatures(s *data.Signed, role string, db *keys.DB) error { valid := make(map[string]struct{}) for _, sig := range s.Signatures { - //var sigBytes [ed25519.SignatureSize]byte - //if sig.Method != "ed25519" { - // return ErrWrongMethod - //} - //if len(sig.Signature) != len(sigBytes) { - // return ErrInvalid - //} - - //if !roleData.ValidKey(sig.KeyID) { - //log.Printf("continuing b/c keyid was invalid: %s for roledata %s\n", sig.KeyID, roleData) - //continue - //} + if !roleData.ValidKey(sig.KeyID) { + logrus.Infof("continuing b/c keyid was invalid: %s for roledata %s\n", sig.KeyID, roleData) + continue + } key := db.GetKey(sig.KeyID) if key == nil { - log.Printf("continuing b/c keyid lookup was nil: %s\n", sig.KeyID) + logrus.Infof("continuing b/c keyid lookup was nil: %s\n", sig.KeyID) + continue + } + // make method lookup consistent with case uniformity. 
+ method := strings.ToLower(sig.Method) + verifier, ok := Verifiers[method] + if !ok { + logrus.Infof("continuing b/c signing method is not supported: %s\n", sig.Method) continue } - //copy(sigBytes[:], sig.Signature) - //var keyBytes [ed25519.PublicKeySize]byte - //copy(keyBytes[:], key.Value.Public) - - //if !ed25519.Verify(&keyBytes, msg, &sigBytes) { - // return ErrInvalid - //} - //valid[sig.KeyID] = struct{}{} - - //TODO(mccauley): move this to rsa.verify routine - digest := sha256.Sum256(msg) - pub, err := x509.ParsePKIXPublicKey(key.Value.Public) - if err != nil { - log.Printf("Failed to parse public key: %s\n", err) - return err - } - - rsaPub, ok := pub.(*rsa.PublicKey) - if !ok { - log.Printf("Value returned from ParsePKIXPublicKey was not an RSA public key") - return err - } - - err = rsa.VerifyPKCS1v15(rsaPub, crypto.SHA256, digest[:], sig.Signature) - if err != nil { - log.Printf("Failed verification: %s", err) - return err - } else { - log.Printf("---------------Verification succeeded!!!---------------") - valid[sig.KeyID] = struct{}{} + if err := verifier.Verify(key, sig.Signature, msg); err != nil { + logrus.Infof("continuing b/c signature was invalid\n") + continue } + valid[sig.KeyID] = struct{}{} } if len(valid) < roleData.Threshold { @@ -140,7 +119,7 @@ func VerifySignatures(s *data.Signed, role string, db *keys.DB) error { return nil } -func Unmarshal(b []byte, v interface{}, role string, minVersion int, db *keys.DB) error { +func Unmarshal(b []byte, v interface{}, role string, minVersion int, db *keys.KeyDB) error { s := &data.Signed{} if err := json.Unmarshal(b, s); err != nil { return err @@ -151,7 +130,7 @@ func Unmarshal(b []byte, v interface{}, role string, minVersion int, db *keys.DB return json.Unmarshal(s.Signed, v) } -func UnmarshalTrusted(b []byte, v interface{}, role string, db *keys.DB) error { +func UnmarshalTrusted(b []byte, v interface{}, role string, db *keys.KeyDB) error { s := &data.Signed{} if err := json.Unmarshal(b, s); 
err != nil { return err diff --git a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/verify_test.go b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/verify_test.go similarity index 87% rename from Godeps/_workspace/src/github.com/endophage/go-tuf/signed/verify_test.go rename to Godeps/_workspace/src/github.com/endophage/gotuf/signed/verify_test.go index d9a9033470..8c36bba4f6 100644 --- a/Godeps/_workspace/src/github.com/endophage/go-tuf/signed/verify_test.go +++ b/Godeps/_workspace/src/github.com/endophage/gotuf/signed/verify_test.go @@ -4,12 +4,11 @@ import ( "testing" "time" - "github.com/agl/ed25519" cjson "github.com/tent/canonical-json-go" . "gopkg.in/check.v1" - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/keys" + "github.com/endophage/gotuf/data" + "github.com/endophage/gotuf/keys" ) // Hook up gocheck into the "go test" runner. @@ -48,26 +47,26 @@ func (VerifySuite) Test(c *C) { role: "foo", err: ErrUnknownRole, }, - { - name: "wrong signature method", - mut: func(t *test) { t.s.Signatures[0].Method = "foo" }, - err: ErrWrongMethod, - }, - { - name: "signature wrong length", - mut: func(t *test) { t.s.Signatures[0].Signature = []byte{0} }, - err: ErrInvalid, - }, + //{ + // name: "wrong signature method", + // mut: func(t *test) { t.s.Signatures[0].Method = "foo" }, + // err: ErrWrongMethod, + //}, + // { + // name: "signature wrong length", + // mut: func(t *test) { t.s.Signatures[0].Signature = []byte{0} }, + // err: ErrInvalid, + // }, { name: "key missing from role", mut: func(t *test) { t.roles["root"].KeyIDs = nil }, err: ErrRoleThreshold, }, - { - name: "invalid signature", - mut: func(t *test) { t.s.Signatures[0].Signature = make([]byte, ed25519.SignatureSize) }, - err: ErrInvalid, - }, + // { + // name: "invalid signature", + // mut: func(t *test) { t.s.Signatures[0].Signature = make([]byte, ed25519.SignatureSize) }, + // err: ErrInvalid, + // }, { name: "not enough signatures", mut: func(t *test) { 
t.roles["root"].Threshold = 2 }, diff --git a/Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox.go b/Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox.go deleted file mode 100644 index ed46ba2f2e..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package secretbox encrypts and authenticates small messages. - -Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with -secret-key cryptography. The length of messages is not hidden. - -It is the caller's responsibility to ensure the uniqueness of nonces—for -example, by using nonce 1 for the first message, nonce 2 for the second -message, etc. Nonces are long enough that randomly generated nonces have -negligible risk of collision. - -This package is interoperable with NaCl: http://nacl.cr.yp.to/secretbox.html. -*/ -package secretbox - -import ( - "golang.org/x/crypto/poly1305" - "golang.org/x/crypto/salsa20/salsa" -) - -// Overhead is the number of bytes of overhead when boxing a message. -const Overhead = poly1305.TagSize - -// setup produces a sub-key and Salsa20 counter given a nonce and key. -func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) { - // We use XSalsa20 for encryption so first we need to generate a - // key and nonce with HSalsa20. - var hNonce [16]byte - copy(hNonce[:], nonce[:]) - salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma) - - // The final 8 bytes of the original nonce form the new nonce. - copy(counter[:], nonce[16:]) -} - -// sliceForAppend takes a slice and a requested number of bytes. It returns a -// slice with the contents of the given slice followed by that many bytes and a -// second slice that aliases into it and contains only the extra bytes. 
If the -// original slice has sufficient capacity then no allocation is performed. -func sliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return -} - -// Seal appends an encrypted and authenticated copy of message to out, which -// must not overlap message. The key and nonce pair must be unique for each -// distinct message and the output will be Overhead bytes longer than message. -func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte { - var subKey [32]byte - var counter [16]byte - setup(&subKey, &counter, nonce, key) - - // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since - // Salsa20 works with 64-byte blocks, we also generate 32 bytes of - // keystream as a side effect. - var firstBlock [64]byte - salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) - - var poly1305Key [32]byte - copy(poly1305Key[:], firstBlock[:]) - - ret, out := sliceForAppend(out, len(message)+poly1305.TagSize) - - // We XOR up to 32 bytes of message with the keystream generated from - // the first block. - firstMessageBlock := message - if len(firstMessageBlock) > 32 { - firstMessageBlock = firstMessageBlock[:32] - } - - tagOut := out - out = out[poly1305.TagSize:] - for i, x := range firstMessageBlock { - out[i] = firstBlock[32+i] ^ x - } - message = message[len(firstMessageBlock):] - ciphertext := out - out = out[len(firstMessageBlock):] - - // Now encrypt the rest. - counter[8] = 1 - salsa.XORKeyStream(out, message, &counter, &subKey) - - var tag [poly1305.TagSize]byte - poly1305.Sum(&tag, ciphertext, &poly1305Key) - copy(tagOut, tag[:]) - - return ret -} - -// Open authenticates and decrypts a box produced by Seal and appends the -// message to out, which must not overlap box. The output will be Overhead -// bytes smaller than box. 
-func Open(out []byte, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) { - if len(box) < Overhead { - return nil, false - } - - var subKey [32]byte - var counter [16]byte - setup(&subKey, &counter, nonce, key) - - // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since - // Salsa20 works with 64-byte blocks, we also generate 32 bytes of - // keystream as a side effect. - var firstBlock [64]byte - salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) - - var poly1305Key [32]byte - copy(poly1305Key[:], firstBlock[:]) - var tag [poly1305.TagSize]byte - copy(tag[:], box) - - if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) { - return nil, false - } - - ret, out := sliceForAppend(out, len(box)-Overhead) - - // We XOR up to 32 bytes of box with the keystream generated from - // the first block. - box = box[Overhead:] - firstMessageBlock := box - if len(firstMessageBlock) > 32 { - firstMessageBlock = firstMessageBlock[:32] - } - for i, x := range firstMessageBlock { - out[i] = firstBlock[32+i] ^ x - } - - box = box[len(firstMessageBlock):] - out = out[len(firstMessageBlock):] - - // Now decrypt the rest. - counter[8] = 1 - salsa.XORKeyStream(out, box, &counter, &subKey) - - return ret, true -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox_test.go b/Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox_test.go deleted file mode 100644 index 664dc1521d..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package secretbox - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "testing" -) - -func TestSealOpen(t *testing.T) { - var key [32]byte - var nonce [24]byte - - rand.Reader.Read(key[:]) - rand.Reader.Read(nonce[:]) - - var box, opened []byte - - for msgLen := 0; msgLen < 128; msgLen += 17 { - message := make([]byte, msgLen) - rand.Reader.Read(message) - - box = Seal(box[:0], message, &nonce, &key) - var ok bool - opened, ok = Open(opened[:0], box, &nonce, &key) - if !ok { - t.Errorf("%d: failed to open box", msgLen) - continue - } - - if !bytes.Equal(opened, message) { - t.Errorf("%d: got %x, expected %x", msgLen, opened, message) - continue - } - } - - for i := range box { - box[i] ^= 0x20 - _, ok := Open(opened[:0], box, &nonce, &key) - if ok { - t.Errorf("box was opened after corrupting byte %d", i) - } - box[i] ^= 0x20 - } -} - -func TestSecretBox(t *testing.T) { - var key [32]byte - var nonce [24]byte - var message [64]byte - - for i := range key[:] { - key[i] = 1 - } - for i := range nonce[:] { - nonce[i] = 2 - } - for i := range message[:] { - message[i] = 3 - } - - box := Seal(nil, message[:], &nonce, &key) - // expected was generated using the C implementation of NaCl. 
- expected, _ := hex.DecodeString("8442bc313f4626f1359e3b50122b6ce6fe66ddfe7d39d14e637eb4fd5b45beadab55198df6ab5368439792a23c87db70acb6156dc5ef957ac04f6276cf6093b84be77ff0849cc33e34b7254d5a8f65ad") - - if !bytes.Equal(box, expected) { - t.Fatalf("box didn't match, got\n%x\n, expected\n%x", box, expected) - } -} - -func TestAppend(t *testing.T) { - var key [32]byte - var nonce [24]byte - var message [8]byte - - out := make([]byte, 4) - box := Seal(out, message[:], &nonce, &key) - if !bytes.Equal(box[:4], out[:4]) { - t.Fatalf("Seal didn't correctly append") - } - - out = make([]byte, 4, 100) - box = Seal(out, message[:], &nonce, &key) - if !bytes.Equal(box[:4], out[:4]) { - t.Fatalf("Seal didn't correctly append with sufficient capacity.") - } -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/pbkdf2/pbkdf2.go b/Godeps/_workspace/src/golang.org/x/crypto/pbkdf2/pbkdf2.go deleted file mode 100644 index c02b4d5a70..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC -2898 / PKCS #5 v2.0. - -A key derivation function is useful when encrypting data based on a password -or any other not-fully-random data. It uses a pseudorandom function to derive -a secure encryption key based on the password. - -While v2.0 of the standard defines only one pseudorandom function to use, -HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved -Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To -choose, you can pass the `New` functions from the different SHA packages to -pbkdf2.Key. 
-*/ -package pbkdf2 - -import ( - "crypto/hmac" - "hash" -) - -// Key derives a key from the password, salt and iteration count, returning a -// []byte of length keylen that can be used as cryptographic key. The key is -// derived based on the method described as PBKDF2 with the HMAC variant using -// the supplied hash function. -// -// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you -// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by -// doing: -// -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) -// -// Remember to get a good random salt. At least 8 bytes is recommended by the -// RFC. -// -// Using a higher iteration count will increase the cost of an exhaustive -// search but will also make derivation proportionally slower. -func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { - prf := hmac.New(h, password) - hashLen := prf.Size() - numBlocks := (keyLen + hashLen - 1) / hashLen - - var buf [4]byte - dk := make([]byte, 0, numBlocks*hashLen) - U := make([]byte, hashLen) - for block := 1; block <= numBlocks; block++ { - // N.B.: || means concatenation, ^ means XOR - // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter - // U_1 = PRF(password, salt || uint(i)) - prf.Reset() - prf.Write(salt) - buf[0] = byte(block >> 24) - buf[1] = byte(block >> 16) - buf[2] = byte(block >> 8) - buf[3] = byte(block) - prf.Write(buf[:4]) - dk = prf.Sum(dk) - T := dk[len(dk)-hashLen:] - copy(U, T) - - // U_n = PRF(password, U_(n-1)) - for n := 2; n <= iter; n++ { - prf.Reset() - prf.Write(U) - U = U[:0] - U = prf.Sum(U) - for x := range U { - T[x] ^= U[x] - } - } - } - return dk[:keyLen] -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/pbkdf2/pbkdf2_test.go b/Godeps/_workspace/src/golang.org/x/crypto/pbkdf2/pbkdf2_test.go deleted file mode 100644 index 1379240610..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/pbkdf2/pbkdf2_test.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pbkdf2 - -import ( - "bytes" - "crypto/sha1" - "crypto/sha256" - "hash" - "testing" -) - -type testVector struct { - password string - salt string - iter int - output []byte -} - -// Test vectors from RFC 6070, http://tools.ietf.org/html/rfc6070 -var sha1TestVectors = []testVector{ - { - "password", - "salt", - 1, - []byte{ - 0x0c, 0x60, 0xc8, 0x0f, 0x96, 0x1f, 0x0e, 0x71, - 0xf3, 0xa9, 0xb5, 0x24, 0xaf, 0x60, 0x12, 0x06, - 0x2f, 0xe0, 0x37, 0xa6, - }, - }, - { - "password", - "salt", - 2, - []byte{ - 0xea, 0x6c, 0x01, 0x4d, 0xc7, 0x2d, 0x6f, 0x8c, - 0xcd, 0x1e, 0xd9, 0x2a, 0xce, 0x1d, 0x41, 0xf0, - 0xd8, 0xde, 0x89, 0x57, - }, - }, - { - "password", - "salt", - 4096, - []byte{ - 0x4b, 0x00, 0x79, 0x01, 0xb7, 0x65, 0x48, 0x9a, - 0xbe, 0xad, 0x49, 0xd9, 0x26, 0xf7, 0x21, 0xd0, - 0x65, 0xa4, 0x29, 0xc1, - }, - }, - // // This one takes too long - // { - // "password", - // "salt", - // 16777216, - // []byte{ - // 0xee, 0xfe, 0x3d, 0x61, 0xcd, 0x4d, 0xa4, 0xe4, - // 0xe9, 0x94, 0x5b, 0x3d, 0x6b, 0xa2, 0x15, 0x8c, - // 0x26, 0x34, 0xe9, 0x84, - 
// }, - // }, - { - "passwordPASSWORDpassword", - "saltSALTsaltSALTsaltSALTsaltSALTsalt", - 4096, - []byte{ - 0x3d, 0x2e, 0xec, 0x4f, 0xe4, 0x1c, 0x84, 0x9b, - 0x80, 0xc8, 0xd8, 0x36, 0x62, 0xc0, 0xe4, 0x4a, - 0x8b, 0x29, 0x1a, 0x96, 0x4c, 0xf2, 0xf0, 0x70, - 0x38, - }, - }, - { - "pass\000word", - "sa\000lt", - 4096, - []byte{ - 0x56, 0xfa, 0x6a, 0xa7, 0x55, 0x48, 0x09, 0x9d, - 0xcc, 0x37, 0xd7, 0xf0, 0x34, 0x25, 0xe0, 0xc3, - }, - }, -} - -// Test vectors from -// http://stackoverflow.com/questions/5130513/pbkdf2-hmac-sha2-test-vectors -var sha256TestVectors = []testVector{ - { - "password", - "salt", - 1, - []byte{ - 0x12, 0x0f, 0xb6, 0xcf, 0xfc, 0xf8, 0xb3, 0x2c, - 0x43, 0xe7, 0x22, 0x52, 0x56, 0xc4, 0xf8, 0x37, - 0xa8, 0x65, 0x48, 0xc9, - }, - }, - { - "password", - "salt", - 2, - []byte{ - 0xae, 0x4d, 0x0c, 0x95, 0xaf, 0x6b, 0x46, 0xd3, - 0x2d, 0x0a, 0xdf, 0xf9, 0x28, 0xf0, 0x6d, 0xd0, - 0x2a, 0x30, 0x3f, 0x8e, - }, - }, - { - "password", - "salt", - 4096, - []byte{ - 0xc5, 0xe4, 0x78, 0xd5, 0x92, 0x88, 0xc8, 0x41, - 0xaa, 0x53, 0x0d, 0xb6, 0x84, 0x5c, 0x4c, 0x8d, - 0x96, 0x28, 0x93, 0xa0, - }, - }, - { - "passwordPASSWORDpassword", - "saltSALTsaltSALTsaltSALTsaltSALTsalt", - 4096, - []byte{ - 0x34, 0x8c, 0x89, 0xdb, 0xcb, 0xd3, 0x2b, 0x2f, - 0x32, 0xd8, 0x14, 0xb8, 0x11, 0x6e, 0x84, 0xcf, - 0x2b, 0x17, 0x34, 0x7e, 0xbc, 0x18, 0x00, 0x18, - 0x1c, - }, - }, - { - "pass\000word", - "sa\000lt", - 4096, - []byte{ - 0x89, 0xb6, 0x9d, 0x05, 0x16, 0xf8, 0x29, 0x89, - 0x3c, 0x69, 0x62, 0x26, 0x65, 0x0a, 0x86, 0x87, - }, - }, -} - -func testHash(t *testing.T, h func() hash.Hash, hashName string, vectors []testVector) { - for i, v := range vectors { - o := Key([]byte(v.password), []byte(v.salt), v.iter, len(v.output), h) - if !bytes.Equal(o, v.output) { - t.Errorf("%s %d: expected %x, got %x", hashName, i, v.output, o) - } - } -} - -func TestWithHMACSHA1(t *testing.T) { - testHash(t, sha1.New, "SHA1", sha1TestVectors) -} - -func TestWithHMACSHA256(t *testing.T) { - 
testHash(t, sha256.New, "SHA256", sha256TestVectors) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/const_amd64.s b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/const_amd64.s deleted file mode 100644 index 33fcd6ee1a..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/const_amd64.s +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo - -DATA ·SCALE(SB)/8, $0x37F4000000000000 -GLOBL ·SCALE(SB), 8, $8 -DATA ·TWO32(SB)/8, $0x41F0000000000000 -GLOBL ·TWO32(SB), 8, $8 -DATA ·TWO64(SB)/8, $0x43F0000000000000 -GLOBL ·TWO64(SB), 8, $8 -DATA ·TWO96(SB)/8, $0x45F0000000000000 -GLOBL ·TWO96(SB), 8, $8 -DATA ·ALPHA32(SB)/8, $0x45E8000000000000 -GLOBL ·ALPHA32(SB), 8, $8 -DATA ·ALPHA64(SB)/8, $0x47E8000000000000 -GLOBL ·ALPHA64(SB), 8, $8 -DATA ·ALPHA96(SB)/8, $0x49E8000000000000 -GLOBL ·ALPHA96(SB), 8, $8 -DATA ·ALPHA130(SB)/8, $0x4C08000000000000 -GLOBL ·ALPHA130(SB), 8, $8 -DATA ·DOFFSET0(SB)/8, $0x4330000000000000 -GLOBL ·DOFFSET0(SB), 8, $8 -DATA ·DOFFSET1(SB)/8, $0x4530000000000000 -GLOBL ·DOFFSET1(SB), 8, $8 -DATA ·DOFFSET2(SB)/8, $0x4730000000000000 -GLOBL ·DOFFSET2(SB), 8, $8 -DATA ·DOFFSET3(SB)/8, $0x4930000000000000 -GLOBL ·DOFFSET3(SB), 8, $8 -DATA ·DOFFSET3MINUSTWO128(SB)/8, $0x492FFFFE00000000 -GLOBL ·DOFFSET3MINUSTWO128(SB), 8, $8 -DATA ·HOFFSET0(SB)/8, $0x43300001FFFFFFFB -GLOBL ·HOFFSET0(SB), 8, $8 -DATA ·HOFFSET1(SB)/8, $0x45300001FFFFFFFE -GLOBL ·HOFFSET1(SB), 8, $8 -DATA ·HOFFSET2(SB)/8, $0x47300001FFFFFFFE -GLOBL ·HOFFSET2(SB), 8, $8 -DATA ·HOFFSET3(SB)/8, $0x49300003FFFFFFFE -GLOBL ·HOFFSET3(SB), 8, $8 -DATA ·ROUNDING(SB)/2, $0x137f -GLOBL ·ROUNDING(SB), 8, $2 diff --git 
a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305.go b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305.go deleted file mode 100644 index 2270d2b38a..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package poly1305 implements Poly1305 one-time message authentication code as specified in http://cr.yp.to/mac/poly1305-20050329.pdf. - -Poly1305 is a fast, one-time authentication function. It is infeasible for an -attacker to generate an authenticator for a message without the key. However, a -key must only be used for a single message. Authenticating two different -messages with the same key allows an attacker to forge authenticators for other -messages with the same key. - -Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was -used with a fixed key in order to generate one-time keys from an nonce. -However, in this package AES isn't used and the one-time key is specified -directly. -*/ -package poly1305 - -import "crypto/subtle" - -// TagSize is the size, in bytes, of a poly1305 authenticator. -const TagSize = 16 - -// Verify returns true if mac is a valid authenticator for m with the given -// key. -func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { - var tmp [16]byte - Sum(&tmp, m, key) - return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_amd64.s b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_amd64.s deleted file mode 100644 index b9ad0ba436..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_amd64.s +++ /dev/null @@ -1,497 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo - -// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]key) -TEXT ·poly1305(SB),0,$224-32 - MOVQ out+0(FP),DI - MOVQ m+8(FP),SI - MOVQ mlen+16(FP),DX - MOVQ key+24(FP),CX - - MOVQ SP,R11 - MOVQ $31,R9 - NOTQ R9 - ANDQ R9,SP - ADDQ $32,SP - - MOVQ R11,32(SP) - MOVQ R12,40(SP) - MOVQ R13,48(SP) - MOVQ R14,56(SP) - MOVQ R15,64(SP) - MOVQ BX,72(SP) - MOVQ BP,80(SP) - FLDCW ·ROUNDING(SB) - MOVL 0(CX),R8 - MOVL 4(CX),R9 - MOVL 8(CX),AX - MOVL 12(CX),R10 - MOVQ DI,88(SP) - MOVQ CX,96(SP) - MOVL $0X43300000,108(SP) - MOVL $0X45300000,116(SP) - MOVL $0X47300000,124(SP) - MOVL $0X49300000,132(SP) - ANDL $0X0FFFFFFF,R8 - ANDL $0X0FFFFFFC,R9 - ANDL $0X0FFFFFFC,AX - ANDL $0X0FFFFFFC,R10 - MOVL R8,104(SP) - MOVL R9,112(SP) - MOVL AX,120(SP) - MOVL R10,128(SP) - FMOVD 104(SP), F0 - FSUBD ·DOFFSET0(SB), F0 - FMOVD 112(SP), F0 - FSUBD ·DOFFSET1(SB), F0 - FMOVD 120(SP), F0 - FSUBD ·DOFFSET2(SB), F0 - FMOVD 128(SP), F0 - FSUBD ·DOFFSET3(SB), F0 - FXCHD F0, F3 - FMOVDP F0, 136(SP) - FXCHD F0, F1 - FMOVD F0, 144(SP) - FMULD ·SCALE(SB), F0 - FMOVDP F0, 152(SP) - FMOVD F0, 160(SP) - FMULD ·SCALE(SB), F0 - FMOVDP F0, 168(SP) - FMOVD F0, 176(SP) - FMULD ·SCALE(SB), F0 - FMOVDP F0, 184(SP) - FLDZ - FLDZ - FLDZ - FLDZ - CMPQ DX,$16 - JB ADDATMOST15BYTES - INITIALATLEAST16BYTES: - MOVL 12(SI),DI - MOVL 8(SI),CX - MOVL 4(SI),R8 - MOVL 0(SI),R9 - MOVL DI,128(SP) - MOVL CX,120(SP) - MOVL R8,112(SP) - MOVL R9,104(SP) - ADDQ $16,SI - SUBQ $16,DX - FXCHD F0, F3 - FADDD 128(SP), F0 - FSUBD ·DOFFSET3MINUSTWO128(SB), F0 - FXCHD F0, F1 - FADDD 112(SP), F0 - FSUBD ·DOFFSET1(SB), F0 - FXCHD F0, F2 - FADDD 120(SP), F0 - FSUBD ·DOFFSET2(SB), F0 - FXCHD F0, F3 - FADDD 104(SP), F0 - FSUBD ·DOFFSET0(SB), F0 - CMPQ DX,$16 - JB 
MULTIPLYADDATMOST15BYTES - MULTIPLYADDATLEAST16BYTES: - MOVL 12(SI),DI - MOVL 8(SI),CX - MOVL 4(SI),R8 - MOVL 0(SI),R9 - MOVL DI,128(SP) - MOVL CX,120(SP) - MOVL R8,112(SP) - MOVL R9,104(SP) - ADDQ $16,SI - SUBQ $16,DX - FMOVD ·ALPHA130(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA130(SB), F0 - FSUBD F0,F2 - FMULD ·SCALE(SB), F0 - FMOVD ·ALPHA32(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA32(SB), F0 - FSUBD F0,F2 - FXCHD F0, F2 - FADDDP F0,F1 - FMOVD ·ALPHA64(SB), F0 - FADDD F4,F0 - FSUBD ·ALPHA64(SB), F0 - FSUBD F0,F4 - FMOVD ·ALPHA96(SB), F0 - FADDD F6,F0 - FSUBD ·ALPHA96(SB), F0 - FSUBD F0,F6 - FXCHD F0, F6 - FADDDP F0,F1 - FXCHD F0, F3 - FADDDP F0,F5 - FXCHD F0, F3 - FADDDP F0,F1 - FMOVD 176(SP), F0 - FMULD F3,F0 - FMOVD 160(SP), F0 - FMULD F4,F0 - FMOVD 144(SP), F0 - FMULD F5,F0 - FMOVD 136(SP), F0 - FMULDP F0,F6 - FMOVD 160(SP), F0 - FMULD F4,F0 - FADDDP F0,F3 - FMOVD 144(SP), F0 - FMULD F4,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F4,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULDP F0,F4 - FXCHD F0, F3 - FADDDP F0,F5 - FMOVD 144(SP), F0 - FMULD F4,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F4,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F4,F0 - FADDDP F0,F3 - FMOVD 168(SP), F0 - FMULDP F0,F4 - FXCHD F0, F3 - FADDDP F0,F4 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FXCHD F0, F3 - FMOVD 184(SP), F0 - FMULD F5,F0 - FADDDP F0,F3 - FXCHD F0, F1 - FMOVD 168(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 152(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F1 - CMPQ DX,$16 - FXCHD F0, F2 - FMOVD 128(SP), F0 - FSUBD ·DOFFSET3MINUSTWO128(SB), F0 - FADDDP F0,F1 - FXCHD F0, F1 - FMOVD 120(SP), F0 - FSUBD ·DOFFSET2(SB), F0 - FADDDP F0,F1 - FXCHD F0, F3 - FMOVD 112(SP), F0 - FSUBD ·DOFFSET1(SB), F0 - FADDDP F0,F1 - FXCHD F0, F2 - FMOVD 104(SP), F0 - FSUBD ·DOFFSET0(SB), F0 - FADDDP F0,F1 - JAE MULTIPLYADDATLEAST16BYTES - MULTIPLYADDATMOST15BYTES: - FMOVD ·ALPHA130(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA130(SB), F0 - FSUBD F0,F2 - FMULD ·SCALE(SB), F0 - FMOVD ·ALPHA32(SB), F0 - 
FADDD F2,F0 - FSUBD ·ALPHA32(SB), F0 - FSUBD F0,F2 - FMOVD ·ALPHA64(SB), F0 - FADDD F5,F0 - FSUBD ·ALPHA64(SB), F0 - FSUBD F0,F5 - FMOVD ·ALPHA96(SB), F0 - FADDD F7,F0 - FSUBD ·ALPHA96(SB), F0 - FSUBD F0,F7 - FXCHD F0, F7 - FADDDP F0,F1 - FXCHD F0, F5 - FADDDP F0,F1 - FXCHD F0, F3 - FADDDP F0,F5 - FADDDP F0,F1 - FMOVD 176(SP), F0 - FMULD F1,F0 - FMOVD 160(SP), F0 - FMULD F2,F0 - FMOVD 144(SP), F0 - FMULD F3,F0 - FMOVD 136(SP), F0 - FMULDP F0,F4 - FMOVD 160(SP), F0 - FMULD F5,F0 - FADDDP F0,F3 - FMOVD 144(SP), F0 - FMULD F5,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F3 - FMOVD 144(SP), F0 - FMULD F5,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F5,F0 - FADDDP F0,F4 - FMOVD 168(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F5,F0 - FADDDP F0,F4 - FMOVD 168(SP), F0 - FMULD F5,F0 - FADDDP F0,F3 - FMOVD 152(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F1 - ADDATMOST15BYTES: - CMPQ DX,$0 - JE NOMOREBYTES - MOVL $0,0(SP) - MOVL $0, 4 (SP) - MOVL $0, 8 (SP) - MOVL $0, 12 (SP) - LEAQ 0(SP),DI - MOVQ DX,CX - REP; MOVSB - MOVB $1,0(DI) - MOVL 12 (SP),DI - MOVL 8 (SP),SI - MOVL 4 (SP),DX - MOVL 0(SP),CX - MOVL DI,128(SP) - MOVL SI,120(SP) - MOVL DX,112(SP) - MOVL CX,104(SP) - FXCHD F0, F3 - FADDD 128(SP), F0 - FSUBD ·DOFFSET3(SB), F0 - FXCHD F0, F2 - FADDD 120(SP), F0 - FSUBD ·DOFFSET2(SB), F0 - FXCHD F0, F1 - FADDD 112(SP), F0 - FSUBD ·DOFFSET1(SB), F0 - FXCHD F0, F3 - FADDD 104(SP), F0 - FSUBD ·DOFFSET0(SB), F0 - FMOVD ·ALPHA130(SB), F0 - FADDD F3,F0 - FSUBD ·ALPHA130(SB), F0 - FSUBD F0,F3 - FMULD ·SCALE(SB), F0 - FMOVD ·ALPHA32(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA32(SB), F0 - FSUBD F0,F2 - FMOVD ·ALPHA64(SB), F0 - FADDD F6,F0 - FSUBD ·ALPHA64(SB), F0 - FSUBD F0,F6 - FMOVD ·ALPHA96(SB), F0 - FADDD F5,F0 - FSUBD ·ALPHA96(SB), F0 - FSUBD F0,F5 - FXCHD F0, F4 
- FADDDP F0,F3 - FXCHD F0, F6 - FADDDP F0,F1 - FXCHD F0, F3 - FADDDP F0,F5 - FXCHD F0, F3 - FADDDP F0,F1 - FMOVD 176(SP), F0 - FMULD F3,F0 - FMOVD 160(SP), F0 - FMULD F4,F0 - FMOVD 144(SP), F0 - FMULD F5,F0 - FMOVD 136(SP), F0 - FMULDP F0,F6 - FMOVD 160(SP), F0 - FMULD F5,F0 - FADDDP F0,F3 - FMOVD 144(SP), F0 - FMULD F5,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F5 - FMOVD 144(SP), F0 - FMULD F6,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F6,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F6,F0 - FADDDP F0,F4 - FMOVD 168(SP), F0 - FMULDP F0,F6 - FXCHD F0, F5 - FADDDP F0,F4 - FMOVD 136(SP), F0 - FMULD F2,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F2,F0 - FADDDP F0,F5 - FMOVD 168(SP), F0 - FMULD F2,F0 - FADDDP F0,F3 - FMOVD 152(SP), F0 - FMULDP F0,F2 - FXCHD F0, F1 - FADDDP F0,F3 - FXCHD F0, F3 - FXCHD F0, F2 - NOMOREBYTES: - MOVL $0,R10 - FMOVD ·ALPHA130(SB), F0 - FADDD F4,F0 - FSUBD ·ALPHA130(SB), F0 - FSUBD F0,F4 - FMULD ·SCALE(SB), F0 - FMOVD ·ALPHA32(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA32(SB), F0 - FSUBD F0,F2 - FMOVD ·ALPHA64(SB), F0 - FADDD F4,F0 - FSUBD ·ALPHA64(SB), F0 - FSUBD F0,F4 - FMOVD ·ALPHA96(SB), F0 - FADDD F6,F0 - FSUBD ·ALPHA96(SB), F0 - FXCHD F0, F6 - FSUBD F6,F0 - FXCHD F0, F4 - FADDDP F0,F3 - FXCHD F0, F4 - FADDDP F0,F1 - FXCHD F0, F2 - FADDDP F0,F3 - FXCHD F0, F4 - FADDDP F0,F3 - FXCHD F0, F3 - FADDD ·HOFFSET0(SB), F0 - FXCHD F0, F3 - FADDD ·HOFFSET1(SB), F0 - FXCHD F0, F1 - FADDD ·HOFFSET2(SB), F0 - FXCHD F0, F2 - FADDD ·HOFFSET3(SB), F0 - FXCHD F0, F3 - FMOVDP F0, 104(SP) - FMOVDP F0, 112(SP) - FMOVDP F0, 120(SP) - FMOVDP F0, 128(SP) - MOVL 108(SP),DI - ANDL $63,DI - MOVL 116(SP),SI - ANDL $63,SI - MOVL 124(SP),DX - ANDL $63,DX - MOVL 132(SP),CX - ANDL $63,CX - MOVL 112(SP),R8 - ADDL DI,R8 - MOVQ R8,112(SP) - MOVL 120(SP),DI - ADCL SI,DI - MOVQ DI,120(SP) - MOVL 128(SP),DI - ADCL DX,DI - MOVQ DI,128(SP) - MOVL R10,DI - ADCL CX,DI - MOVQ DI,136(SP) - MOVQ 
$5,DI - MOVL 104(SP),SI - ADDL SI,DI - MOVQ DI,104(SP) - MOVL R10,DI - MOVQ 112(SP),DX - ADCL DX,DI - MOVQ DI,112(SP) - MOVL R10,DI - MOVQ 120(SP),CX - ADCL CX,DI - MOVQ DI,120(SP) - MOVL R10,DI - MOVQ 128(SP),R8 - ADCL R8,DI - MOVQ DI,128(SP) - MOVQ $0XFFFFFFFC,DI - MOVQ 136(SP),R9 - ADCL R9,DI - SARL $16,DI - MOVQ DI,R9 - XORL $0XFFFFFFFF,R9 - ANDQ DI,SI - MOVQ 104(SP),AX - ANDQ R9,AX - ORQ AX,SI - ANDQ DI,DX - MOVQ 112(SP),AX - ANDQ R9,AX - ORQ AX,DX - ANDQ DI,CX - MOVQ 120(SP),AX - ANDQ R9,AX - ORQ AX,CX - ANDQ DI,R8 - MOVQ 128(SP),DI - ANDQ R9,DI - ORQ DI,R8 - MOVQ 88(SP),DI - MOVQ 96(SP),R9 - ADDL 16(R9),SI - ADCL 20(R9),DX - ADCL 24(R9),CX - ADCL 28(R9),R8 - MOVL SI,0(DI) - MOVL DX,4(DI) - MOVL CX,8(DI) - MOVL R8,12(DI) - MOVQ 32(SP),R11 - MOVQ 40(SP),R12 - MOVQ 48(SP),R13 - MOVQ 56(SP),R14 - MOVQ 64(SP),R15 - MOVQ 72(SP),BX - MOVQ 80(SP),BP - MOVQ R11,SP - RET diff --git a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_test.go b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_test.go deleted file mode 100644 index 2c6d1bc986..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_test.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package poly1305 - -import ( - "bytes" - "testing" -) - -var testData = []struct { - in, k, correct []byte -}{ - { - []byte("Hello world!"), - []byte("this is 32-byte key for Poly1305"), - []byte{0xa6, 0xf7, 0x45, 0x00, 0x8f, 0x81, 0xc9, 0x16, 0xa2, 0x0d, 0xcc, 0x74, 0xee, 0xf2, 0xb2, 0xf0}, - }, - { - make([]byte, 32), - []byte("this is 32-byte key for Poly1305"), - []byte{0x49, 0xec, 0x78, 0x09, 0x0e, 0x48, 0x1e, 0xc6, 0xc2, 0x6b, 0x33, 0xb9, 0x1c, 0xcc, 0x03, 0x07}, - }, - { - make([]byte, 2007), - []byte("this is 32-byte key for Poly1305"), - []byte{0xda, 0x84, 0xbc, 0xab, 0x02, 0x67, 0x6c, 0x38, 0xcd, 0xb0, 0x15, 0x60, 0x42, 0x74, 0xc2, 0xaa}, - }, - { - make([]byte, 2007), - make([]byte, 32), - make([]byte, 16), - }, -} - -func TestSum(t *testing.T) { - var out [16]byte - var key [32]byte - - for i, v := range testData { - copy(key[:], v.k) - Sum(&out, v.in, &key) - if !bytes.Equal(out[:], v.correct) { - t.Errorf("%d: expected %x, got %x", i, v.correct, out[:]) - } - } -} - -func Benchmark1K(b *testing.B) { - b.StopTimer() - var out [16]byte - var key [32]byte - in := make([]byte, 1024) - b.SetBytes(int64(len(in))) - b.StartTimer() - - for i := 0; i < b.N; i++ { - Sum(&out, in, &key) - } -} - -func Benchmark64(b *testing.B) { - b.StopTimer() - var out [16]byte - var key [32]byte - in := make([]byte, 64) - b.SetBytes(int64(len(in))) - b.StartTimer() - - for i := 0; i < b.N; i++ { - Sum(&out, in, &key) - } -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_amd64.go b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_amd64.go deleted file mode 100644 index eb22ca1548..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_amd64.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build amd64,!gccgo - -package poly1305 - -// This function is implemented in poly1305_amd64.s - -//go:noescape - -func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte) - -// Sum generates an authenticator for m using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. -func Sum(out *[16]byte, m []byte, key *[32]byte) { - var mPtr *byte - if len(m) > 0 { - mPtr = &m[0] - } - poly1305(out, mPtr, uint64(len(m)), key) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_ref.go b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_ref.go deleted file mode 100644 index 12568a2f64..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_ref.go +++ /dev/null @@ -1,1531 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 gccgo - -package poly1305 - -// Based on original, public domain implementation from NaCl by D. J. -// Bernstein. 
- -import "math" - -const ( - alpham80 = 0.00000000558793544769287109375 - alpham48 = 24.0 - alpham16 = 103079215104.0 - alpha0 = 6755399441055744.0 - alpha18 = 1770887431076116955136.0 - alpha32 = 29014219670751100192948224.0 - alpha50 = 7605903601369376408980219232256.0 - alpha64 = 124615124604835863084731911901282304.0 - alpha82 = 32667107224410092492483962313449748299776.0 - alpha96 = 535217884764734955396857238543560676143529984.0 - alpha112 = 35076039295941670036888435985190792471742381031424.0 - alpha130 = 9194973245195333150150082162901855101712434733101613056.0 - scale = 0.0000000000000000000000000000000000000036734198463196484624023016788195177431833298649127735047148490821200539357960224151611328125 - offset0 = 6755408030990331.0 - offset1 = 29014256564239239022116864.0 - offset2 = 124615283061160854719918951570079744.0 - offset3 = 535219245894202480694386063513315216128475136.0 -) - -// Sum generates an authenticator for m using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. 
-func Sum(out *[16]byte, m []byte, key *[32]byte) { - r := key - s := key[16:] - var ( - y7 float64 - y6 float64 - y1 float64 - y0 float64 - y5 float64 - y4 float64 - x7 float64 - x6 float64 - x1 float64 - x0 float64 - y3 float64 - y2 float64 - x5 float64 - r3lowx0 float64 - x4 float64 - r0lowx6 float64 - x3 float64 - r3highx0 float64 - x2 float64 - r0highx6 float64 - r0lowx0 float64 - sr1lowx6 float64 - r0highx0 float64 - sr1highx6 float64 - sr3low float64 - r1lowx0 float64 - sr2lowx6 float64 - r1highx0 float64 - sr2highx6 float64 - r2lowx0 float64 - sr3lowx6 float64 - r2highx0 float64 - sr3highx6 float64 - r1highx4 float64 - r1lowx4 float64 - r0highx4 float64 - r0lowx4 float64 - sr3highx4 float64 - sr3lowx4 float64 - sr2highx4 float64 - sr2lowx4 float64 - r0lowx2 float64 - r0highx2 float64 - r1lowx2 float64 - r1highx2 float64 - r2lowx2 float64 - r2highx2 float64 - sr3lowx2 float64 - sr3highx2 float64 - z0 float64 - z1 float64 - z2 float64 - z3 float64 - m0 int64 - m1 int64 - m2 int64 - m3 int64 - m00 uint32 - m01 uint32 - m02 uint32 - m03 uint32 - m10 uint32 - m11 uint32 - m12 uint32 - m13 uint32 - m20 uint32 - m21 uint32 - m22 uint32 - m23 uint32 - m30 uint32 - m31 uint32 - m32 uint32 - m33 uint64 - lbelow2 int32 - lbelow3 int32 - lbelow4 int32 - lbelow5 int32 - lbelow6 int32 - lbelow7 int32 - lbelow8 int32 - lbelow9 int32 - lbelow10 int32 - lbelow11 int32 - lbelow12 int32 - lbelow13 int32 - lbelow14 int32 - lbelow15 int32 - s00 uint32 - s01 uint32 - s02 uint32 - s03 uint32 - s10 uint32 - s11 uint32 - s12 uint32 - s13 uint32 - s20 uint32 - s21 uint32 - s22 uint32 - s23 uint32 - s30 uint32 - s31 uint32 - s32 uint32 - s33 uint32 - bits32 uint64 - f uint64 - f0 uint64 - f1 uint64 - f2 uint64 - f3 uint64 - f4 uint64 - g uint64 - g0 uint64 - g1 uint64 - g2 uint64 - g3 uint64 - g4 uint64 - ) - - var p int32 - - l := int32(len(m)) - - r00 := uint32(r[0]) - - r01 := uint32(r[1]) - - r02 := uint32(r[2]) - r0 := int64(2151) - - r03 := uint32(r[3]) - r03 &= 15 - r0 <<= 51 
- - r10 := uint32(r[4]) - r10 &= 252 - r01 <<= 8 - r0 += int64(r00) - - r11 := uint32(r[5]) - r02 <<= 16 - r0 += int64(r01) - - r12 := uint32(r[6]) - r03 <<= 24 - r0 += int64(r02) - - r13 := uint32(r[7]) - r13 &= 15 - r1 := int64(2215) - r0 += int64(r03) - - d0 := r0 - r1 <<= 51 - r2 := int64(2279) - - r20 := uint32(r[8]) - r20 &= 252 - r11 <<= 8 - r1 += int64(r10) - - r21 := uint32(r[9]) - r12 <<= 16 - r1 += int64(r11) - - r22 := uint32(r[10]) - r13 <<= 24 - r1 += int64(r12) - - r23 := uint32(r[11]) - r23 &= 15 - r2 <<= 51 - r1 += int64(r13) - - d1 := r1 - r21 <<= 8 - r2 += int64(r20) - - r30 := uint32(r[12]) - r30 &= 252 - r22 <<= 16 - r2 += int64(r21) - - r31 := uint32(r[13]) - r23 <<= 24 - r2 += int64(r22) - - r32 := uint32(r[14]) - r2 += int64(r23) - r3 := int64(2343) - - d2 := r2 - r3 <<= 51 - - r33 := uint32(r[15]) - r33 &= 15 - r31 <<= 8 - r3 += int64(r30) - - r32 <<= 16 - r3 += int64(r31) - - r33 <<= 24 - r3 += int64(r32) - - r3 += int64(r33) - h0 := alpha32 - alpha32 - - d3 := r3 - h1 := alpha32 - alpha32 - - h2 := alpha32 - alpha32 - - h3 := alpha32 - alpha32 - - h4 := alpha32 - alpha32 - - r0low := math.Float64frombits(uint64(d0)) - h5 := alpha32 - alpha32 - - r1low := math.Float64frombits(uint64(d1)) - h6 := alpha32 - alpha32 - - r2low := math.Float64frombits(uint64(d2)) - h7 := alpha32 - alpha32 - - r0low -= alpha0 - - r1low -= alpha32 - - r2low -= alpha64 - - r0high := r0low + alpha18 - - r3low := math.Float64frombits(uint64(d3)) - - r1high := r1low + alpha50 - sr1low := scale * r1low - - r2high := r2low + alpha82 - sr2low := scale * r2low - - r0high -= alpha18 - r0high_stack := r0high - - r3low -= alpha96 - - r1high -= alpha50 - r1high_stack := r1high - - sr1high := sr1low + alpham80 - - r0low -= r0high - - r2high -= alpha82 - sr3low = scale * r3low - - sr2high := sr2low + alpham48 - - r1low -= r1high - r1low_stack := r1low - - sr1high -= alpham80 - sr1high_stack := sr1high - - r2low -= r2high - r2low_stack := r2low - - sr2high -= alpham48 - 
sr2high_stack := sr2high - - r3high := r3low + alpha112 - r0low_stack := r0low - - sr1low -= sr1high - sr1low_stack := sr1low - - sr3high := sr3low + alpham16 - r2high_stack := r2high - - sr2low -= sr2high - sr2low_stack := sr2low - - r3high -= alpha112 - r3high_stack := r3high - - sr3high -= alpham16 - sr3high_stack := sr3high - - r3low -= r3high - r3low_stack := r3low - - sr3low -= sr3high - sr3low_stack := sr3low - - if l < 16 { - goto addatmost15bytes - } - - m00 = uint32(m[p+0]) - m0 = 2151 - - m0 <<= 51 - m1 = 2215 - m01 = uint32(m[p+1]) - - m1 <<= 51 - m2 = 2279 - m02 = uint32(m[p+2]) - - m2 <<= 51 - m3 = 2343 - m03 = uint32(m[p+3]) - - m10 = uint32(m[p+4]) - m01 <<= 8 - m0 += int64(m00) - - m11 = uint32(m[p+5]) - m02 <<= 16 - m0 += int64(m01) - - m12 = uint32(m[p+6]) - m03 <<= 24 - m0 += int64(m02) - - m13 = uint32(m[p+7]) - m3 <<= 51 - m0 += int64(m03) - - m20 = uint32(m[p+8]) - m11 <<= 8 - m1 += int64(m10) - - m21 = uint32(m[p+9]) - m12 <<= 16 - m1 += int64(m11) - - m22 = uint32(m[p+10]) - m13 <<= 24 - m1 += int64(m12) - - m23 = uint32(m[p+11]) - m1 += int64(m13) - - m30 = uint32(m[p+12]) - m21 <<= 8 - m2 += int64(m20) - - m31 = uint32(m[p+13]) - m22 <<= 16 - m2 += int64(m21) - - m32 = uint32(m[p+14]) - m23 <<= 24 - m2 += int64(m22) - - m33 = uint64(m[p+15]) - m2 += int64(m23) - - d0 = m0 - m31 <<= 8 - m3 += int64(m30) - - d1 = m1 - m32 <<= 16 - m3 += int64(m31) - - d2 = m2 - m33 += 256 - - m33 <<= 24 - m3 += int64(m32) - - m3 += int64(m33) - d3 = m3 - - p += 16 - l -= 16 - - z0 = math.Float64frombits(uint64(d0)) - - z1 = math.Float64frombits(uint64(d1)) - - z2 = math.Float64frombits(uint64(d2)) - - z3 = math.Float64frombits(uint64(d3)) - - z0 -= alpha0 - - z1 -= alpha32 - - z2 -= alpha64 - - z3 -= alpha96 - - h0 += z0 - - h1 += z1 - - h3 += z2 - - h5 += z3 - - if l < 16 { - goto multiplyaddatmost15bytes - } - -multiplyaddatleast16bytes: - - m2 = 2279 - m20 = uint32(m[p+8]) - y7 = h7 + alpha130 - - m2 <<= 51 - m3 = 2343 - m21 = uint32(m[p+9]) - y6 = h6 + 
alpha130 - - m3 <<= 51 - m0 = 2151 - m22 = uint32(m[p+10]) - y1 = h1 + alpha32 - - m0 <<= 51 - m1 = 2215 - m23 = uint32(m[p+11]) - y0 = h0 + alpha32 - - m1 <<= 51 - m30 = uint32(m[p+12]) - y7 -= alpha130 - - m21 <<= 8 - m2 += int64(m20) - m31 = uint32(m[p+13]) - y6 -= alpha130 - - m22 <<= 16 - m2 += int64(m21) - m32 = uint32(m[p+14]) - y1 -= alpha32 - - m23 <<= 24 - m2 += int64(m22) - m33 = uint64(m[p+15]) - y0 -= alpha32 - - m2 += int64(m23) - m00 = uint32(m[p+0]) - y5 = h5 + alpha96 - - m31 <<= 8 - m3 += int64(m30) - m01 = uint32(m[p+1]) - y4 = h4 + alpha96 - - m32 <<= 16 - m02 = uint32(m[p+2]) - x7 = h7 - y7 - y7 *= scale - - m33 += 256 - m03 = uint32(m[p+3]) - x6 = h6 - y6 - y6 *= scale - - m33 <<= 24 - m3 += int64(m31) - m10 = uint32(m[p+4]) - x1 = h1 - y1 - - m01 <<= 8 - m3 += int64(m32) - m11 = uint32(m[p+5]) - x0 = h0 - y0 - - m3 += int64(m33) - m0 += int64(m00) - m12 = uint32(m[p+6]) - y5 -= alpha96 - - m02 <<= 16 - m0 += int64(m01) - m13 = uint32(m[p+7]) - y4 -= alpha96 - - m03 <<= 24 - m0 += int64(m02) - d2 = m2 - x1 += y7 - - m0 += int64(m03) - d3 = m3 - x0 += y6 - - m11 <<= 8 - m1 += int64(m10) - d0 = m0 - x7 += y5 - - m12 <<= 16 - m1 += int64(m11) - x6 += y4 - - m13 <<= 24 - m1 += int64(m12) - y3 = h3 + alpha64 - - m1 += int64(m13) - d1 = m1 - y2 = h2 + alpha64 - - x0 += x1 - - x6 += x7 - - y3 -= alpha64 - r3low = r3low_stack - - y2 -= alpha64 - r0low = r0low_stack - - x5 = h5 - y5 - r3lowx0 = r3low * x0 - r3high = r3high_stack - - x4 = h4 - y4 - r0lowx6 = r0low * x6 - r0high = r0high_stack - - x3 = h3 - y3 - r3highx0 = r3high * x0 - sr1low = sr1low_stack - - x2 = h2 - y2 - r0highx6 = r0high * x6 - sr1high = sr1high_stack - - x5 += y3 - r0lowx0 = r0low * x0 - r1low = r1low_stack - - h6 = r3lowx0 + r0lowx6 - sr1lowx6 = sr1low * x6 - r1high = r1high_stack - - x4 += y2 - r0highx0 = r0high * x0 - sr2low = sr2low_stack - - h7 = r3highx0 + r0highx6 - sr1highx6 = sr1high * x6 - sr2high = sr2high_stack - - x3 += y1 - r1lowx0 = r1low * x0 - r2low = r2low_stack 
- - h0 = r0lowx0 + sr1lowx6 - sr2lowx6 = sr2low * x6 - r2high = r2high_stack - - x2 += y0 - r1highx0 = r1high * x0 - sr3low = sr3low_stack - - h1 = r0highx0 + sr1highx6 - sr2highx6 = sr2high * x6 - sr3high = sr3high_stack - - x4 += x5 - r2lowx0 = r2low * x0 - z2 = math.Float64frombits(uint64(d2)) - - h2 = r1lowx0 + sr2lowx6 - sr3lowx6 = sr3low * x6 - - x2 += x3 - r2highx0 = r2high * x0 - z3 = math.Float64frombits(uint64(d3)) - - h3 = r1highx0 + sr2highx6 - sr3highx6 = sr3high * x6 - - r1highx4 = r1high * x4 - z2 -= alpha64 - - h4 = r2lowx0 + sr3lowx6 - r1lowx4 = r1low * x4 - - r0highx4 = r0high * x4 - z3 -= alpha96 - - h5 = r2highx0 + sr3highx6 - r0lowx4 = r0low * x4 - - h7 += r1highx4 - sr3highx4 = sr3high * x4 - - h6 += r1lowx4 - sr3lowx4 = sr3low * x4 - - h5 += r0highx4 - sr2highx4 = sr2high * x4 - - h4 += r0lowx4 - sr2lowx4 = sr2low * x4 - - h3 += sr3highx4 - r0lowx2 = r0low * x2 - - h2 += sr3lowx4 - r0highx2 = r0high * x2 - - h1 += sr2highx4 - r1lowx2 = r1low * x2 - - h0 += sr2lowx4 - r1highx2 = r1high * x2 - - h2 += r0lowx2 - r2lowx2 = r2low * x2 - - h3 += r0highx2 - r2highx2 = r2high * x2 - - h4 += r1lowx2 - sr3lowx2 = sr3low * x2 - - h5 += r1highx2 - sr3highx2 = sr3high * x2 - - p += 16 - l -= 16 - h6 += r2lowx2 - - h7 += r2highx2 - - z1 = math.Float64frombits(uint64(d1)) - h0 += sr3lowx2 - - z0 = math.Float64frombits(uint64(d0)) - h1 += sr3highx2 - - z1 -= alpha32 - - z0 -= alpha0 - - h5 += z3 - - h3 += z2 - - h1 += z1 - - h0 += z0 - - if l >= 16 { - goto multiplyaddatleast16bytes - } - -multiplyaddatmost15bytes: - - y7 = h7 + alpha130 - - y6 = h6 + alpha130 - - y1 = h1 + alpha32 - - y0 = h0 + alpha32 - - y7 -= alpha130 - - y6 -= alpha130 - - y1 -= alpha32 - - y0 -= alpha32 - - y5 = h5 + alpha96 - - y4 = h4 + alpha96 - - x7 = h7 - y7 - y7 *= scale - - x6 = h6 - y6 - y6 *= scale - - x1 = h1 - y1 - - x0 = h0 - y0 - - y5 -= alpha96 - - y4 -= alpha96 - - x1 += y7 - - x0 += y6 - - x7 += y5 - - x6 += y4 - - y3 = h3 + alpha64 - - y2 = h2 + alpha64 - - x0 += x1 - 
- x6 += x7 - - y3 -= alpha64 - r3low = r3low_stack - - y2 -= alpha64 - r0low = r0low_stack - - x5 = h5 - y5 - r3lowx0 = r3low * x0 - r3high = r3high_stack - - x4 = h4 - y4 - r0lowx6 = r0low * x6 - r0high = r0high_stack - - x3 = h3 - y3 - r3highx0 = r3high * x0 - sr1low = sr1low_stack - - x2 = h2 - y2 - r0highx6 = r0high * x6 - sr1high = sr1high_stack - - x5 += y3 - r0lowx0 = r0low * x0 - r1low = r1low_stack - - h6 = r3lowx0 + r0lowx6 - sr1lowx6 = sr1low * x6 - r1high = r1high_stack - - x4 += y2 - r0highx0 = r0high * x0 - sr2low = sr2low_stack - - h7 = r3highx0 + r0highx6 - sr1highx6 = sr1high * x6 - sr2high = sr2high_stack - - x3 += y1 - r1lowx0 = r1low * x0 - r2low = r2low_stack - - h0 = r0lowx0 + sr1lowx6 - sr2lowx6 = sr2low * x6 - r2high = r2high_stack - - x2 += y0 - r1highx0 = r1high * x0 - sr3low = sr3low_stack - - h1 = r0highx0 + sr1highx6 - sr2highx6 = sr2high * x6 - sr3high = sr3high_stack - - x4 += x5 - r2lowx0 = r2low * x0 - - h2 = r1lowx0 + sr2lowx6 - sr3lowx6 = sr3low * x6 - - x2 += x3 - r2highx0 = r2high * x0 - - h3 = r1highx0 + sr2highx6 - sr3highx6 = sr3high * x6 - - r1highx4 = r1high * x4 - - h4 = r2lowx0 + sr3lowx6 - r1lowx4 = r1low * x4 - - r0highx4 = r0high * x4 - - h5 = r2highx0 + sr3highx6 - r0lowx4 = r0low * x4 - - h7 += r1highx4 - sr3highx4 = sr3high * x4 - - h6 += r1lowx4 - sr3lowx4 = sr3low * x4 - - h5 += r0highx4 - sr2highx4 = sr2high * x4 - - h4 += r0lowx4 - sr2lowx4 = sr2low * x4 - - h3 += sr3highx4 - r0lowx2 = r0low * x2 - - h2 += sr3lowx4 - r0highx2 = r0high * x2 - - h1 += sr2highx4 - r1lowx2 = r1low * x2 - - h0 += sr2lowx4 - r1highx2 = r1high * x2 - - h2 += r0lowx2 - r2lowx2 = r2low * x2 - - h3 += r0highx2 - r2highx2 = r2high * x2 - - h4 += r1lowx2 - sr3lowx2 = sr3low * x2 - - h5 += r1highx2 - sr3highx2 = sr3high * x2 - - h6 += r2lowx2 - - h7 += r2highx2 - - h0 += sr3lowx2 - - h1 += sr3highx2 - -addatmost15bytes: - - if l == 0 { - goto nomorebytes - } - - lbelow2 = l - 2 - - lbelow3 = l - 3 - - lbelow2 >>= 31 - lbelow4 = l - 4 - - m00 
= uint32(m[p+0]) - lbelow3 >>= 31 - p += lbelow2 - - m01 = uint32(m[p+1]) - lbelow4 >>= 31 - p += lbelow3 - - m02 = uint32(m[p+2]) - p += lbelow4 - m0 = 2151 - - m03 = uint32(m[p+3]) - m0 <<= 51 - m1 = 2215 - - m0 += int64(m00) - m01 &^= uint32(lbelow2) - - m02 &^= uint32(lbelow3) - m01 -= uint32(lbelow2) - - m01 <<= 8 - m03 &^= uint32(lbelow4) - - m0 += int64(m01) - lbelow2 -= lbelow3 - - m02 += uint32(lbelow2) - lbelow3 -= lbelow4 - - m02 <<= 16 - m03 += uint32(lbelow3) - - m03 <<= 24 - m0 += int64(m02) - - m0 += int64(m03) - lbelow5 = l - 5 - - lbelow6 = l - 6 - lbelow7 = l - 7 - - lbelow5 >>= 31 - lbelow8 = l - 8 - - lbelow6 >>= 31 - p += lbelow5 - - m10 = uint32(m[p+4]) - lbelow7 >>= 31 - p += lbelow6 - - m11 = uint32(m[p+5]) - lbelow8 >>= 31 - p += lbelow7 - - m12 = uint32(m[p+6]) - m1 <<= 51 - p += lbelow8 - - m13 = uint32(m[p+7]) - m10 &^= uint32(lbelow5) - lbelow4 -= lbelow5 - - m10 += uint32(lbelow4) - lbelow5 -= lbelow6 - - m11 &^= uint32(lbelow6) - m11 += uint32(lbelow5) - - m11 <<= 8 - m1 += int64(m10) - - m1 += int64(m11) - m12 &^= uint32(lbelow7) - - lbelow6 -= lbelow7 - m13 &^= uint32(lbelow8) - - m12 += uint32(lbelow6) - lbelow7 -= lbelow8 - - m12 <<= 16 - m13 += uint32(lbelow7) - - m13 <<= 24 - m1 += int64(m12) - - m1 += int64(m13) - m2 = 2279 - - lbelow9 = l - 9 - m3 = 2343 - - lbelow10 = l - 10 - lbelow11 = l - 11 - - lbelow9 >>= 31 - lbelow12 = l - 12 - - lbelow10 >>= 31 - p += lbelow9 - - m20 = uint32(m[p+8]) - lbelow11 >>= 31 - p += lbelow10 - - m21 = uint32(m[p+9]) - lbelow12 >>= 31 - p += lbelow11 - - m22 = uint32(m[p+10]) - m2 <<= 51 - p += lbelow12 - - m23 = uint32(m[p+11]) - m20 &^= uint32(lbelow9) - lbelow8 -= lbelow9 - - m20 += uint32(lbelow8) - lbelow9 -= lbelow10 - - m21 &^= uint32(lbelow10) - m21 += uint32(lbelow9) - - m21 <<= 8 - m2 += int64(m20) - - m2 += int64(m21) - m22 &^= uint32(lbelow11) - - lbelow10 -= lbelow11 - m23 &^= uint32(lbelow12) - - m22 += uint32(lbelow10) - lbelow11 -= lbelow12 - - m22 <<= 16 - m23 += 
uint32(lbelow11) - - m23 <<= 24 - m2 += int64(m22) - - m3 <<= 51 - lbelow13 = l - 13 - - lbelow13 >>= 31 - lbelow14 = l - 14 - - lbelow14 >>= 31 - p += lbelow13 - lbelow15 = l - 15 - - m30 = uint32(m[p+12]) - lbelow15 >>= 31 - p += lbelow14 - - m31 = uint32(m[p+13]) - p += lbelow15 - m2 += int64(m23) - - m32 = uint32(m[p+14]) - m30 &^= uint32(lbelow13) - lbelow12 -= lbelow13 - - m30 += uint32(lbelow12) - lbelow13 -= lbelow14 - - m3 += int64(m30) - m31 &^= uint32(lbelow14) - - m31 += uint32(lbelow13) - m32 &^= uint32(lbelow15) - - m31 <<= 8 - lbelow14 -= lbelow15 - - m3 += int64(m31) - m32 += uint32(lbelow14) - d0 = m0 - - m32 <<= 16 - m33 = uint64(lbelow15 + 1) - d1 = m1 - - m33 <<= 24 - m3 += int64(m32) - d2 = m2 - - m3 += int64(m33) - d3 = m3 - - z3 = math.Float64frombits(uint64(d3)) - - z2 = math.Float64frombits(uint64(d2)) - - z1 = math.Float64frombits(uint64(d1)) - - z0 = math.Float64frombits(uint64(d0)) - - z3 -= alpha96 - - z2 -= alpha64 - - z1 -= alpha32 - - z0 -= alpha0 - - h5 += z3 - - h3 += z2 - - h1 += z1 - - h0 += z0 - - y7 = h7 + alpha130 - - y6 = h6 + alpha130 - - y1 = h1 + alpha32 - - y0 = h0 + alpha32 - - y7 -= alpha130 - - y6 -= alpha130 - - y1 -= alpha32 - - y0 -= alpha32 - - y5 = h5 + alpha96 - - y4 = h4 + alpha96 - - x7 = h7 - y7 - y7 *= scale - - x6 = h6 - y6 - y6 *= scale - - x1 = h1 - y1 - - x0 = h0 - y0 - - y5 -= alpha96 - - y4 -= alpha96 - - x1 += y7 - - x0 += y6 - - x7 += y5 - - x6 += y4 - - y3 = h3 + alpha64 - - y2 = h2 + alpha64 - - x0 += x1 - - x6 += x7 - - y3 -= alpha64 - r3low = r3low_stack - - y2 -= alpha64 - r0low = r0low_stack - - x5 = h5 - y5 - r3lowx0 = r3low * x0 - r3high = r3high_stack - - x4 = h4 - y4 - r0lowx6 = r0low * x6 - r0high = r0high_stack - - x3 = h3 - y3 - r3highx0 = r3high * x0 - sr1low = sr1low_stack - - x2 = h2 - y2 - r0highx6 = r0high * x6 - sr1high = sr1high_stack - - x5 += y3 - r0lowx0 = r0low * x0 - r1low = r1low_stack - - h6 = r3lowx0 + r0lowx6 - sr1lowx6 = sr1low * x6 - r1high = r1high_stack - - x4 += y2 - 
r0highx0 = r0high * x0 - sr2low = sr2low_stack - - h7 = r3highx0 + r0highx6 - sr1highx6 = sr1high * x6 - sr2high = sr2high_stack - - x3 += y1 - r1lowx0 = r1low * x0 - r2low = r2low_stack - - h0 = r0lowx0 + sr1lowx6 - sr2lowx6 = sr2low * x6 - r2high = r2high_stack - - x2 += y0 - r1highx0 = r1high * x0 - sr3low = sr3low_stack - - h1 = r0highx0 + sr1highx6 - sr2highx6 = sr2high * x6 - sr3high = sr3high_stack - - x4 += x5 - r2lowx0 = r2low * x0 - - h2 = r1lowx0 + sr2lowx6 - sr3lowx6 = sr3low * x6 - - x2 += x3 - r2highx0 = r2high * x0 - - h3 = r1highx0 + sr2highx6 - sr3highx6 = sr3high * x6 - - r1highx4 = r1high * x4 - - h4 = r2lowx0 + sr3lowx6 - r1lowx4 = r1low * x4 - - r0highx4 = r0high * x4 - - h5 = r2highx0 + sr3highx6 - r0lowx4 = r0low * x4 - - h7 += r1highx4 - sr3highx4 = sr3high * x4 - - h6 += r1lowx4 - sr3lowx4 = sr3low * x4 - - h5 += r0highx4 - sr2highx4 = sr2high * x4 - - h4 += r0lowx4 - sr2lowx4 = sr2low * x4 - - h3 += sr3highx4 - r0lowx2 = r0low * x2 - - h2 += sr3lowx4 - r0highx2 = r0high * x2 - - h1 += sr2highx4 - r1lowx2 = r1low * x2 - - h0 += sr2lowx4 - r1highx2 = r1high * x2 - - h2 += r0lowx2 - r2lowx2 = r2low * x2 - - h3 += r0highx2 - r2highx2 = r2high * x2 - - h4 += r1lowx2 - sr3lowx2 = sr3low * x2 - - h5 += r1highx2 - sr3highx2 = sr3high * x2 - - h6 += r2lowx2 - - h7 += r2highx2 - - h0 += sr3lowx2 - - h1 += sr3highx2 - -nomorebytes: - - y7 = h7 + alpha130 - - y0 = h0 + alpha32 - - y1 = h1 + alpha32 - - y2 = h2 + alpha64 - - y7 -= alpha130 - - y3 = h3 + alpha64 - - y4 = h4 + alpha96 - - y5 = h5 + alpha96 - - x7 = h7 - y7 - y7 *= scale - - y0 -= alpha32 - - y1 -= alpha32 - - y2 -= alpha64 - - h6 += x7 - - y3 -= alpha64 - - y4 -= alpha96 - - y5 -= alpha96 - - y6 = h6 + alpha130 - - x0 = h0 - y0 - - x1 = h1 - y1 - - x2 = h2 - y2 - - y6 -= alpha130 - - x0 += y7 - - x3 = h3 - y3 - - x4 = h4 - y4 - - x5 = h5 - y5 - - x6 = h6 - y6 - - y6 *= scale - - x2 += y0 - - x3 += y1 - - x4 += y2 - - x0 += y6 - - x5 += y3 - - x6 += y4 - - x2 += x3 - - x0 += x1 - - x4 += 
x5 - - x6 += y5 - - x2 += offset1 - d1 = int64(math.Float64bits(x2)) - - x0 += offset0 - d0 = int64(math.Float64bits(x0)) - - x4 += offset2 - d2 = int64(math.Float64bits(x4)) - - x6 += offset3 - d3 = int64(math.Float64bits(x6)) - - f0 = uint64(d0) - - f1 = uint64(d1) - bits32 = math.MaxUint64 - - f2 = uint64(d2) - bits32 >>= 32 - - f3 = uint64(d3) - f = f0 >> 32 - - f0 &= bits32 - f &= 255 - - f1 += f - g0 = f0 + 5 - - g = g0 >> 32 - g0 &= bits32 - - f = f1 >> 32 - f1 &= bits32 - - f &= 255 - g1 = f1 + g - - g = g1 >> 32 - f2 += f - - f = f2 >> 32 - g1 &= bits32 - - f2 &= bits32 - f &= 255 - - f3 += f - g2 = f2 + g - - g = g2 >> 32 - g2 &= bits32 - - f4 = f3 >> 32 - f3 &= bits32 - - f4 &= 255 - g3 = f3 + g - - g = g3 >> 32 - g3 &= bits32 - - g4 = f4 + g - - g4 = g4 - 4 - s00 = uint32(s[0]) - - f = uint64(int64(g4) >> 63) - s01 = uint32(s[1]) - - f0 &= f - g0 &^= f - s02 = uint32(s[2]) - - f1 &= f - f0 |= g0 - s03 = uint32(s[3]) - - g1 &^= f - f2 &= f - s10 = uint32(s[4]) - - f3 &= f - g2 &^= f - s11 = uint32(s[5]) - - g3 &^= f - f1 |= g1 - s12 = uint32(s[6]) - - f2 |= g2 - f3 |= g3 - s13 = uint32(s[7]) - - s01 <<= 8 - f0 += uint64(s00) - s20 = uint32(s[8]) - - s02 <<= 16 - f0 += uint64(s01) - s21 = uint32(s[9]) - - s03 <<= 24 - f0 += uint64(s02) - s22 = uint32(s[10]) - - s11 <<= 8 - f1 += uint64(s10) - s23 = uint32(s[11]) - - s12 <<= 16 - f1 += uint64(s11) - s30 = uint32(s[12]) - - s13 <<= 24 - f1 += uint64(s12) - s31 = uint32(s[13]) - - f0 += uint64(s03) - f1 += uint64(s13) - s32 = uint32(s[14]) - - s21 <<= 8 - f2 += uint64(s20) - s33 = uint32(s[15]) - - s22 <<= 16 - f2 += uint64(s21) - - s23 <<= 24 - f2 += uint64(s22) - - s31 <<= 8 - f3 += uint64(s30) - - s32 <<= 16 - f3 += uint64(s31) - - s33 <<= 24 - f3 += uint64(s32) - - f2 += uint64(s23) - f3 += uint64(s33) - - out[0] = byte(f0) - f0 >>= 8 - out[1] = byte(f0) - f0 >>= 8 - out[2] = byte(f0) - f0 >>= 8 - out[3] = byte(f0) - f0 >>= 8 - f1 += f0 - - out[4] = byte(f1) - f1 >>= 8 - out[5] = byte(f1) - f1 >>= 8 - 
out[6] = byte(f1) - f1 >>= 8 - out[7] = byte(f1) - f1 >>= 8 - f2 += f1 - - out[8] = byte(f2) - f2 >>= 8 - out[9] = byte(f2) - f2 >>= 8 - out[10] = byte(f2) - f2 >>= 8 - out[11] = byte(f2) - f2 >>= 8 - f3 += f2 - - out[12] = byte(f3) - f3 >>= 8 - out[13] = byte(f3) - f3 >>= 8 - out[14] = byte(f3) - f3 >>= 8 - out[15] = byte(f3) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/hsalsa20.go deleted file mode 100644 index 4ba47d5913..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/hsalsa20.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package salsa provides low-level access to functions in the Salsa family. -package salsa - -// Sigma is the Salsa20 constant for 256-bit keys. -var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} - -// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte -// key k, and 16-byte constant c, and puts the result into the 32-byte array -// out. 
-func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { - x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 - x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 - x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 - x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 - x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 - x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 - x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 - x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 - x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 - x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 - x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 - x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 - x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 - x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 - x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 - - for i := 0; i < 20; i += 2 { - u := x0 + x12 - x4 ^= u<<7 | u>>(32-7) - u = x4 + x0 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x4 - x12 ^= u<<13 | u>>(32-13) - u = x12 + x8 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x1 - x9 ^= u<<7 | u>>(32-7) - u = x9 + x5 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x9 - x1 ^= u<<13 | u>>(32-13) - u = x1 + x13 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x6 - x14 ^= u<<7 | u>>(32-7) - u = x14 + x10 - x2 ^= u<<9 | u>>(32-9) - u = x2 + x14 - x6 ^= u<<13 | u>>(32-13) - u = x6 + x2 - x10 ^= u<<18 | u>>(32-18) - - u = x15 + x11 - x3 ^= u<<7 | u>>(32-7) - u = x3 + x15 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x3 - x11 ^= 
u<<13 | u>>(32-13) - u = x11 + x7 - x15 ^= u<<18 | u>>(32-18) - - u = x0 + x3 - x1 ^= u<<7 | u>>(32-7) - u = x1 + x0 - x2 ^= u<<9 | u>>(32-9) - u = x2 + x1 - x3 ^= u<<13 | u>>(32-13) - u = x3 + x2 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x4 - x6 ^= u<<7 | u>>(32-7) - u = x6 + x5 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x6 - x4 ^= u<<13 | u>>(32-13) - u = x4 + x7 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x9 - x11 ^= u<<7 | u>>(32-7) - u = x11 + x10 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x11 - x9 ^= u<<13 | u>>(32-13) - u = x9 + x8 - x10 ^= u<<18 | u>>(32-18) - - u = x15 + x14 - x12 ^= u<<7 | u>>(32-7) - u = x12 + x15 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x12 - x14 ^= u<<13 | u>>(32-13) - u = x14 + x13 - x15 ^= u<<18 | u>>(32-18) - } - out[0] = byte(x0) - out[1] = byte(x0 >> 8) - out[2] = byte(x0 >> 16) - out[3] = byte(x0 >> 24) - - out[4] = byte(x5) - out[5] = byte(x5 >> 8) - out[6] = byte(x5 >> 16) - out[7] = byte(x5 >> 24) - - out[8] = byte(x10) - out[9] = byte(x10 >> 8) - out[10] = byte(x10 >> 16) - out[11] = byte(x10 >> 24) - - out[12] = byte(x15) - out[13] = byte(x15 >> 8) - out[14] = byte(x15 >> 16) - out[15] = byte(x15 >> 24) - - out[16] = byte(x6) - out[17] = byte(x6 >> 8) - out[18] = byte(x6 >> 16) - out[19] = byte(x6 >> 24) - - out[20] = byte(x7) - out[21] = byte(x7 >> 8) - out[22] = byte(x7 >> 16) - out[23] = byte(x7 >> 24) - - out[24] = byte(x8) - out[25] = byte(x8 >> 8) - out[26] = byte(x8 >> 16) - out[27] = byte(x8 >> 24) - - out[28] = byte(x9) - out[29] = byte(x9 >> 8) - out[30] = byte(x9 >> 16) - out[31] = byte(x9 >> 24) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s b/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s deleted file mode 100644 index 6e1df96391..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s +++ /dev/null @@ -1,902 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,!appengine,!gccgo - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) -TEXT ·salsa2020XORKeyStream(SB),0,$512-40 - MOVQ out+0(FP),DI - MOVQ in+8(FP),SI - MOVQ n+16(FP),DX - MOVQ nonce+24(FP),CX - MOVQ key+32(FP),R8 - - MOVQ SP,R11 - MOVQ $31,R9 - NOTQ R9 - ANDQ R9,SP - ADDQ $32,SP - - MOVQ R11,352(SP) - MOVQ R12,360(SP) - MOVQ R13,368(SP) - MOVQ R14,376(SP) - MOVQ R15,384(SP) - MOVQ BX,392(SP) - MOVQ BP,400(SP) - MOVQ DX,R9 - MOVQ CX,DX - MOVQ R8,R10 - CMPQ R9,$0 - JBE DONE - START: - MOVL 20(R10),CX - MOVL 0(R10),R8 - MOVL 0(DX),AX - MOVL 16(R10),R11 - MOVL CX,0(SP) - MOVL R8, 4 (SP) - MOVL AX, 8 (SP) - MOVL R11, 12 (SP) - MOVL 8(DX),CX - MOVL 24(R10),R8 - MOVL 4(R10),AX - MOVL 4(DX),R11 - MOVL CX,16(SP) - MOVL R8, 20 (SP) - MOVL AX, 24 (SP) - MOVL R11, 28 (SP) - MOVL 12(DX),CX - MOVL 12(R10),DX - MOVL 28(R10),R8 - MOVL 8(R10),AX - MOVL DX,32(SP) - MOVL CX, 36 (SP) - MOVL R8, 40 (SP) - MOVL AX, 44 (SP) - MOVQ $1634760805,DX - MOVQ $857760878,CX - MOVQ $2036477234,R8 - MOVQ $1797285236,AX - MOVL DX,48(SP) - MOVL CX, 52 (SP) - MOVL R8, 56 (SP) - MOVL AX, 60 (SP) - CMPQ R9,$256 - JB BYTESBETWEEN1AND255 - MOVOA 48(SP),X0 - PSHUFL $0X55,X0,X1 - PSHUFL $0XAA,X0,X2 - PSHUFL $0XFF,X0,X3 - PSHUFL $0X00,X0,X0 - MOVOA X1,64(SP) - MOVOA X2,80(SP) - MOVOA X3,96(SP) - MOVOA X0,112(SP) - MOVOA 0(SP),X0 - PSHUFL $0XAA,X0,X1 - PSHUFL $0XFF,X0,X2 - PSHUFL $0X00,X0,X3 - PSHUFL $0X55,X0,X0 - MOVOA X1,128(SP) - MOVOA X2,144(SP) - MOVOA X3,160(SP) - MOVOA X0,176(SP) - MOVOA 16(SP),X0 - PSHUFL $0XFF,X0,X1 - PSHUFL $0X55,X0,X2 - PSHUFL $0XAA,X0,X0 - MOVOA X1,192(SP) - MOVOA X2,208(SP) - MOVOA X0,224(SP) - MOVOA 32(SP),X0 - PSHUFL $0X00,X0,X1 - PSHUFL $0XAA,X0,X2 - PSHUFL $0XFF,X0,X0 - MOVOA 
X1,240(SP) - MOVOA X2,256(SP) - MOVOA X0,272(SP) - BYTESATLEAST256: - MOVL 16(SP),DX - MOVL 36 (SP),CX - MOVL DX,288(SP) - MOVL CX,304(SP) - ADDQ $1,DX - SHLQ $32,CX - ADDQ CX,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 292 (SP) - MOVL CX, 308 (SP) - ADDQ $1,DX - SHLQ $32,CX - ADDQ CX,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 296 (SP) - MOVL CX, 312 (SP) - ADDQ $1,DX - SHLQ $32,CX - ADDQ CX,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 300 (SP) - MOVL CX, 316 (SP) - ADDQ $1,DX - SHLQ $32,CX - ADDQ CX,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX,16(SP) - MOVL CX, 36 (SP) - MOVQ R9,408(SP) - MOVQ $20,DX - MOVOA 64(SP),X0 - MOVOA 80(SP),X1 - MOVOA 96(SP),X2 - MOVOA 256(SP),X3 - MOVOA 272(SP),X4 - MOVOA 128(SP),X5 - MOVOA 144(SP),X6 - MOVOA 176(SP),X7 - MOVOA 192(SP),X8 - MOVOA 208(SP),X9 - MOVOA 224(SP),X10 - MOVOA 304(SP),X11 - MOVOA 112(SP),X12 - MOVOA 160(SP),X13 - MOVOA 240(SP),X14 - MOVOA 288(SP),X15 - MAINLOOP1: - MOVOA X1,320(SP) - MOVOA X2,336(SP) - MOVOA X13,X1 - PADDL X12,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X14 - PSRLL $25,X2 - PXOR X2,X14 - MOVOA X7,X1 - PADDL X0,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X11 - PSRLL $25,X2 - PXOR X2,X11 - MOVOA X12,X1 - PADDL X14,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X15 - PSRLL $23,X2 - PXOR X2,X15 - MOVOA X0,X1 - PADDL X11,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X9 - PSRLL $23,X2 - PXOR X2,X9 - MOVOA X14,X1 - PADDL X15,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X13 - PSRLL $19,X2 - PXOR X2,X13 - MOVOA X11,X1 - PADDL X9,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X7 - PSRLL $19,X2 - PXOR X2,X7 - MOVOA X15,X1 - PADDL X13,X1 - MOVOA X1,X2 - PSLLL $18,X1 - PXOR X1,X12 - PSRLL $14,X2 - PXOR X2,X12 - MOVOA 320(SP),X1 - MOVOA X12,320(SP) - MOVOA X9,X2 - PADDL X7,X2 - MOVOA X2,X12 - PSLLL $18,X2 - PXOR X2,X0 - PSRLL $14,X12 - PXOR X12,X0 - MOVOA X5,X2 - PADDL X1,X2 - MOVOA X2,X12 - PSLLL $7,X2 - PXOR X2,X3 - PSRLL $25,X12 - PXOR X12,X3 - MOVOA 336(SP),X2 - MOVOA X0,336(SP) - MOVOA X6,X0 - PADDL X2,X0 - MOVOA X0,X12 - PSLLL $7,X0 - PXOR 
X0,X4 - PSRLL $25,X12 - PXOR X12,X4 - MOVOA X1,X0 - PADDL X3,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X10 - PSRLL $23,X12 - PXOR X12,X10 - MOVOA X2,X0 - PADDL X4,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X8 - PSRLL $23,X12 - PXOR X12,X8 - MOVOA X3,X0 - PADDL X10,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X5 - PSRLL $19,X12 - PXOR X12,X5 - MOVOA X4,X0 - PADDL X8,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X6 - PSRLL $19,X12 - PXOR X12,X6 - MOVOA X10,X0 - PADDL X5,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X1 - PSRLL $14,X12 - PXOR X12,X1 - MOVOA 320(SP),X0 - MOVOA X1,320(SP) - MOVOA X4,X1 - PADDL X0,X1 - MOVOA X1,X12 - PSLLL $7,X1 - PXOR X1,X7 - PSRLL $25,X12 - PXOR X12,X7 - MOVOA X8,X1 - PADDL X6,X1 - MOVOA X1,X12 - PSLLL $18,X1 - PXOR X1,X2 - PSRLL $14,X12 - PXOR X12,X2 - MOVOA 336(SP),X12 - MOVOA X2,336(SP) - MOVOA X14,X1 - PADDL X12,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X5 - PSRLL $25,X2 - PXOR X2,X5 - MOVOA X0,X1 - PADDL X7,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X10 - PSRLL $23,X2 - PXOR X2,X10 - MOVOA X12,X1 - PADDL X5,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X8 - PSRLL $23,X2 - PXOR X2,X8 - MOVOA X7,X1 - PADDL X10,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X4 - PSRLL $19,X2 - PXOR X2,X4 - MOVOA X5,X1 - PADDL X8,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X14 - PSRLL $19,X2 - PXOR X2,X14 - MOVOA X10,X1 - PADDL X4,X1 - MOVOA X1,X2 - PSLLL $18,X1 - PXOR X1,X0 - PSRLL $14,X2 - PXOR X2,X0 - MOVOA 320(SP),X1 - MOVOA X0,320(SP) - MOVOA X8,X0 - PADDL X14,X0 - MOVOA X0,X2 - PSLLL $18,X0 - PXOR X0,X12 - PSRLL $14,X2 - PXOR X2,X12 - MOVOA X11,X0 - PADDL X1,X0 - MOVOA X0,X2 - PSLLL $7,X0 - PXOR X0,X6 - PSRLL $25,X2 - PXOR X2,X6 - MOVOA 336(SP),X2 - MOVOA X12,336(SP) - MOVOA X3,X0 - PADDL X2,X0 - MOVOA X0,X12 - PSLLL $7,X0 - PXOR X0,X13 - PSRLL $25,X12 - PXOR X12,X13 - MOVOA X1,X0 - PADDL X6,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X15 - PSRLL $23,X12 - PXOR X12,X15 - MOVOA X2,X0 - PADDL X13,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X9 - PSRLL $23,X12 - PXOR X12,X9 - 
MOVOA X6,X0 - PADDL X15,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X11 - PSRLL $19,X12 - PXOR X12,X11 - MOVOA X13,X0 - PADDL X9,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X3 - PSRLL $19,X12 - PXOR X12,X3 - MOVOA X15,X0 - PADDL X11,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X1 - PSRLL $14,X12 - PXOR X12,X1 - MOVOA X9,X0 - PADDL X3,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X2 - PSRLL $14,X12 - PXOR X12,X2 - MOVOA 320(SP),X12 - MOVOA 336(SP),X0 - SUBQ $2,DX - JA MAINLOOP1 - PADDL 112(SP),X12 - PADDL 176(SP),X7 - PADDL 224(SP),X10 - PADDL 272(SP),X4 - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 0(SI),DX - XORL 4(SI),CX - XORL 8(SI),R8 - XORL 12(SI),R9 - MOVL DX,0(DI) - MOVL CX,4(DI) - MOVL R8,8(DI) - MOVL R9,12(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 64(SI),DX - XORL 68(SI),CX - XORL 72(SI),R8 - XORL 76(SI),R9 - MOVL DX,64(DI) - MOVL CX,68(DI) - MOVL R8,72(DI) - MOVL R9,76(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 128(SI),DX - XORL 132(SI),CX - XORL 136(SI),R8 - XORL 140(SI),R9 - MOVL DX,128(DI) - MOVL CX,132(DI) - MOVL R8,136(DI) - MOVL R9,140(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - XORL 192(SI),DX - XORL 196(SI),CX - XORL 200(SI),R8 - XORL 204(SI),R9 - MOVL DX,192(DI) - MOVL CX,196(DI) - MOVL R8,200(DI) - MOVL R9,204(DI) - PADDL 240(SP),X14 - PADDL 64(SP),X0 - PADDL 128(SP),X5 - PADDL 192(SP),X8 - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 16(SI),DX - XORL 20(SI),CX - XORL 24(SI),R8 - XORL 28(SI),R9 - MOVL DX,16(DI) - MOVL CX,20(DI) - MOVL R8,24(DI) - MOVL R9,28(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL 
$0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 80(SI),DX - XORL 84(SI),CX - XORL 88(SI),R8 - XORL 92(SI),R9 - MOVL DX,80(DI) - MOVL CX,84(DI) - MOVL R8,88(DI) - MOVL R9,92(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 144(SI),DX - XORL 148(SI),CX - XORL 152(SI),R8 - XORL 156(SI),R9 - MOVL DX,144(DI) - MOVL CX,148(DI) - MOVL R8,152(DI) - MOVL R9,156(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - XORL 208(SI),DX - XORL 212(SI),CX - XORL 216(SI),R8 - XORL 220(SI),R9 - MOVL DX,208(DI) - MOVL CX,212(DI) - MOVL R8,216(DI) - MOVL R9,220(DI) - PADDL 288(SP),X15 - PADDL 304(SP),X11 - PADDL 80(SP),X1 - PADDL 144(SP),X6 - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 32(SI),DX - XORL 36(SI),CX - XORL 40(SI),R8 - XORL 44(SI),R9 - MOVL DX,32(DI) - MOVL CX,36(DI) - MOVL R8,40(DI) - MOVL R9,44(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 96(SI),DX - XORL 100(SI),CX - XORL 104(SI),R8 - XORL 108(SI),R9 - MOVL DX,96(DI) - MOVL CX,100(DI) - MOVL R8,104(DI) - MOVL R9,108(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 160(SI),DX - XORL 164(SI),CX - XORL 168(SI),R8 - XORL 172(SI),R9 - MOVL DX,160(DI) - MOVL CX,164(DI) - MOVL R8,168(DI) - MOVL R9,172(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - XORL 224(SI),DX - XORL 228(SI),CX - XORL 232(SI),R8 - XORL 236(SI),R9 - MOVL DX,224(DI) - MOVL CX,228(DI) - MOVL R8,232(DI) - MOVL R9,236(DI) - PADDL 160(SP),X13 - PADDL 208(SP),X9 - PADDL 256(SP),X3 - PADDL 96(SP),X2 - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL 
$0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 48(SI),DX - XORL 52(SI),CX - XORL 56(SI),R8 - XORL 60(SI),R9 - MOVL DX,48(DI) - MOVL CX,52(DI) - MOVL R8,56(DI) - MOVL R9,60(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 112(SI),DX - XORL 116(SI),CX - XORL 120(SI),R8 - XORL 124(SI),R9 - MOVL DX,112(DI) - MOVL CX,116(DI) - MOVL R8,120(DI) - MOVL R9,124(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 176(SI),DX - XORL 180(SI),CX - XORL 184(SI),R8 - XORL 188(SI),R9 - MOVL DX,176(DI) - MOVL CX,180(DI) - MOVL R8,184(DI) - MOVL R9,188(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - XORL 240(SI),DX - XORL 244(SI),CX - XORL 248(SI),R8 - XORL 252(SI),R9 - MOVL DX,240(DI) - MOVL CX,244(DI) - MOVL R8,248(DI) - MOVL R9,252(DI) - MOVQ 408(SP),R9 - SUBQ $256,R9 - ADDQ $256,SI - ADDQ $256,DI - CMPQ R9,$256 - JAE BYTESATLEAST256 - CMPQ R9,$0 - JBE DONE - BYTESBETWEEN1AND255: - CMPQ R9,$64 - JAE NOCOPY - MOVQ DI,DX - LEAQ 416(SP),DI - MOVQ R9,CX - REP; MOVSB - LEAQ 416(SP),DI - LEAQ 416(SP),SI - NOCOPY: - MOVQ R9,408(SP) - MOVOA 48(SP),X0 - MOVOA 0(SP),X1 - MOVOA 16(SP),X2 - MOVOA 32(SP),X3 - MOVOA X1,X4 - MOVQ $20,CX - MAINLOOP2: - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X3 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X3,X3 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X1 - PSHUFL $0X4E,X2,X2 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X1,X1 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X1 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X1,X1 - 
PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X3 - PSHUFL $0X4E,X2,X2 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X3,X3 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X3 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X3,X3 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X1 - PSHUFL $0X4E,X2,X2 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X1,X1 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X1 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X1,X1 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X3 - PSHUFL $0X4E,X2,X2 - PXOR X6,X3 - SUBQ $4,CX - PADDL X3,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PXOR X7,X7 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X3,X3 - PXOR X6,X0 - JA MAINLOOP2 - PADDL 48(SP),X0 - PADDL 0(SP),X1 - PADDL 16(SP),X2 - PADDL 32(SP),X3 - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 0(SI),CX - XORL 48(SI),R8 - XORL 32(SI),R9 - XORL 16(SI),AX - MOVL CX,0(DI) - MOVL R8,48(DI) - MOVL R9,32(DI) - MOVL AX,16(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 20(SI),CX - XORL 4(SI),R8 - XORL 52(SI),R9 - XORL 36(SI),AX - MOVL CX,20(DI) - MOVL R8,4(DI) - MOVL R9,52(DI) - MOVL AX,36(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 40(SI),CX - XORL 24(SI),R8 - XORL 8(SI),R9 - XORL 
56(SI),AX - MOVL CX,40(DI) - MOVL R8,24(DI) - MOVL R9,8(DI) - MOVL AX,56(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - XORL 60(SI),CX - XORL 44(SI),R8 - XORL 28(SI),R9 - XORL 12(SI),AX - MOVL CX,60(DI) - MOVL R8,44(DI) - MOVL R9,28(DI) - MOVL AX,12(DI) - MOVQ 408(SP),R9 - MOVL 16(SP),CX - MOVL 36 (SP),R8 - ADDQ $1,CX - SHLQ $32,R8 - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $32,R8 - MOVL CX,16(SP) - MOVL R8, 36 (SP) - CMPQ R9,$64 - JA BYTESATLEAST65 - JAE BYTESATLEAST64 - MOVQ DI,SI - MOVQ DX,DI - MOVQ R9,CX - REP; MOVSB - BYTESATLEAST64: - DONE: - MOVQ 352(SP),R11 - MOVQ 360(SP),R12 - MOVQ 368(SP),R13 - MOVQ 376(SP),R14 - MOVQ 384(SP),R15 - MOVQ 392(SP),BX - MOVQ 400(SP),BP - MOVQ R11,SP - RET - BYTESATLEAST65: - SUBQ $64,R9 - ADDQ $64,DI - ADDQ $64,SI - JMP BYTESBETWEEN1AND255 diff --git a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa208.go b/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa208.go deleted file mode 100644 index 9bfc0927ce..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa208.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package salsa - -// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts -// the result into the 64-byte array out. The input and output may be the same array. 
-func Core208(out *[64]byte, in *[64]byte) { - j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 - j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 - j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 - j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24 - j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24 - j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24 - j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24 - j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24 - j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24 - j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24 - j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24 - j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24 - j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24 - j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24 - j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24 - - x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 - x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 - - for i := 0; i < 8; i += 2 { - u := x0 + x12 - x4 ^= u<<7 | u>>(32-7) - u = x4 + x0 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x4 - x12 ^= u<<13 | u>>(32-13) - u = x12 + x8 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x1 - x9 ^= u<<7 | u>>(32-7) - u = x9 + x5 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x9 - x1 ^= u<<13 | u>>(32-13) - u = x1 + x13 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x6 - x14 ^= u<<7 | u>>(32-7) - u = x14 + x10 - x2 ^= u<<9 | u>>(32-9) - u = 
x2 + x14 - x6 ^= u<<13 | u>>(32-13) - u = x6 + x2 - x10 ^= u<<18 | u>>(32-18) - - u = x15 + x11 - x3 ^= u<<7 | u>>(32-7) - u = x3 + x15 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x3 - x11 ^= u<<13 | u>>(32-13) - u = x11 + x7 - x15 ^= u<<18 | u>>(32-18) - - u = x0 + x3 - x1 ^= u<<7 | u>>(32-7) - u = x1 + x0 - x2 ^= u<<9 | u>>(32-9) - u = x2 + x1 - x3 ^= u<<13 | u>>(32-13) - u = x3 + x2 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x4 - x6 ^= u<<7 | u>>(32-7) - u = x6 + x5 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x6 - x4 ^= u<<13 | u>>(32-13) - u = x4 + x7 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x9 - x11 ^= u<<7 | u>>(32-7) - u = x11 + x10 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x11 - x9 ^= u<<13 | u>>(32-13) - u = x9 + x8 - x10 ^= u<<18 | u>>(32-18) - - u = x15 + x14 - x12 ^= u<<7 | u>>(32-7) - u = x12 + x15 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x12 - x14 ^= u<<13 | u>>(32-13) - u = x14 + x13 - x15 ^= u<<18 | u>>(32-18) - } - x0 += j0 - x1 += j1 - x2 += j2 - x3 += j3 - x4 += j4 - x5 += j5 - x6 += j6 - x7 += j7 - x8 += j8 - x9 += j9 - x10 += j10 - x11 += j11 - x12 += j12 - x13 += j13 - x14 += j14 - x15 += j15 - - out[0] = byte(x0) - out[1] = byte(x0 >> 8) - out[2] = byte(x0 >> 16) - out[3] = byte(x0 >> 24) - - out[4] = byte(x1) - out[5] = byte(x1 >> 8) - out[6] = byte(x1 >> 16) - out[7] = byte(x1 >> 24) - - out[8] = byte(x2) - out[9] = byte(x2 >> 8) - out[10] = byte(x2 >> 16) - out[11] = byte(x2 >> 24) - - out[12] = byte(x3) - out[13] = byte(x3 >> 8) - out[14] = byte(x3 >> 16) - out[15] = byte(x3 >> 24) - - out[16] = byte(x4) - out[17] = byte(x4 >> 8) - out[18] = byte(x4 >> 16) - out[19] = byte(x4 >> 24) - - out[20] = byte(x5) - out[21] = byte(x5 >> 8) - out[22] = byte(x5 >> 16) - out[23] = byte(x5 >> 24) - - out[24] = byte(x6) - out[25] = byte(x6 >> 8) - out[26] = byte(x6 >> 16) - out[27] = byte(x6 >> 24) - - out[28] = byte(x7) - out[29] = byte(x7 >> 8) - out[30] = byte(x7 >> 16) - out[31] = byte(x7 >> 24) - - out[32] = byte(x8) - out[33] = byte(x8 >> 8) - out[34] = byte(x8 >> 16) - 
out[35] = byte(x8 >> 24) - - out[36] = byte(x9) - out[37] = byte(x9 >> 8) - out[38] = byte(x9 >> 16) - out[39] = byte(x9 >> 24) - - out[40] = byte(x10) - out[41] = byte(x10 >> 8) - out[42] = byte(x10 >> 16) - out[43] = byte(x10 >> 24) - - out[44] = byte(x11) - out[45] = byte(x11 >> 8) - out[46] = byte(x11 >> 16) - out[47] = byte(x11 >> 24) - - out[48] = byte(x12) - out[49] = byte(x12 >> 8) - out[50] = byte(x12 >> 16) - out[51] = byte(x12 >> 24) - - out[52] = byte(x13) - out[53] = byte(x13 >> 8) - out[54] = byte(x13 >> 16) - out[55] = byte(x13 >> 24) - - out[56] = byte(x14) - out[57] = byte(x14 >> 8) - out[58] = byte(x14 >> 16) - out[59] = byte(x14 >> 24) - - out[60] = byte(x15) - out[61] = byte(x15 >> 8) - out[62] = byte(x15 >> 16) - out[63] = byte(x15 >> 24) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go deleted file mode 100644 index 903c7858e4..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,!appengine,!gccgo - -package salsa - -// This function is implemented in salsa2020_amd64.s. - -//go:noescape - -func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) - -// XORKeyStream crypts bytes from in to out using the given key and counters. -// In and out may be the same slice but otherwise should not overlap. Counter -// contains the raw salsa20 counter bytes (both nonce and block counter). 
-func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { - if len(in) == 0 { - return - } - salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0]) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go deleted file mode 100644 index 95f8ca5bb9..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine gccgo - -package salsa - -const rounds = 20 - -// core applies the Salsa20 core function to 16-byte input in, 32-byte key k, -// and 16-byte constant c, and puts the result into 64-byte array out. -func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { - j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 - j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 - j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 - j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 - j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 - j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 - j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 - j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 - j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 - j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 - j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 - j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | 
uint32(k[23])<<24 - j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 - j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 - j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 - - x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 - x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 - - for i := 0; i < rounds; i += 2 { - u := x0 + x12 - x4 ^= u<<7 | u>>(32-7) - u = x4 + x0 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x4 - x12 ^= u<<13 | u>>(32-13) - u = x12 + x8 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x1 - x9 ^= u<<7 | u>>(32-7) - u = x9 + x5 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x9 - x1 ^= u<<13 | u>>(32-13) - u = x1 + x13 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x6 - x14 ^= u<<7 | u>>(32-7) - u = x14 + x10 - x2 ^= u<<9 | u>>(32-9) - u = x2 + x14 - x6 ^= u<<13 | u>>(32-13) - u = x6 + x2 - x10 ^= u<<18 | u>>(32-18) - - u = x15 + x11 - x3 ^= u<<7 | u>>(32-7) - u = x3 + x15 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x3 - x11 ^= u<<13 | u>>(32-13) - u = x11 + x7 - x15 ^= u<<18 | u>>(32-18) - - u = x0 + x3 - x1 ^= u<<7 | u>>(32-7) - u = x1 + x0 - x2 ^= u<<9 | u>>(32-9) - u = x2 + x1 - x3 ^= u<<13 | u>>(32-13) - u = x3 + x2 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x4 - x6 ^= u<<7 | u>>(32-7) - u = x6 + x5 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x6 - x4 ^= u<<13 | u>>(32-13) - u = x4 + x7 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x9 - x11 ^= u<<7 | u>>(32-7) - u = x11 + x10 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x11 - x9 ^= u<<13 | u>>(32-13) - u = x9 + x8 - x10 ^= u<<18 | u>>(32-18) - - u = x15 + x14 - x12 ^= u<<7 | u>>(32-7) - u = x12 + x15 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x12 - x14 ^= u<<13 | u>>(32-13) - u = x14 + x13 - x15 ^= u<<18 | u>>(32-18) - } - x0 += j0 - x1 += j1 - x2 += j2 - x3 += j3 - x4 += j4 - x5 += j5 - x6 += j6 - x7 += j7 - x8 += j8 - x9 += j9 - x10 += j10 - x11 += j11 - x12 += j12 - x13 += j13 - x14 += j14 - x15 += j15 - - out[0] = byte(x0) - 
out[1] = byte(x0 >> 8) - out[2] = byte(x0 >> 16) - out[3] = byte(x0 >> 24) - - out[4] = byte(x1) - out[5] = byte(x1 >> 8) - out[6] = byte(x1 >> 16) - out[7] = byte(x1 >> 24) - - out[8] = byte(x2) - out[9] = byte(x2 >> 8) - out[10] = byte(x2 >> 16) - out[11] = byte(x2 >> 24) - - out[12] = byte(x3) - out[13] = byte(x3 >> 8) - out[14] = byte(x3 >> 16) - out[15] = byte(x3 >> 24) - - out[16] = byte(x4) - out[17] = byte(x4 >> 8) - out[18] = byte(x4 >> 16) - out[19] = byte(x4 >> 24) - - out[20] = byte(x5) - out[21] = byte(x5 >> 8) - out[22] = byte(x5 >> 16) - out[23] = byte(x5 >> 24) - - out[24] = byte(x6) - out[25] = byte(x6 >> 8) - out[26] = byte(x6 >> 16) - out[27] = byte(x6 >> 24) - - out[28] = byte(x7) - out[29] = byte(x7 >> 8) - out[30] = byte(x7 >> 16) - out[31] = byte(x7 >> 24) - - out[32] = byte(x8) - out[33] = byte(x8 >> 8) - out[34] = byte(x8 >> 16) - out[35] = byte(x8 >> 24) - - out[36] = byte(x9) - out[37] = byte(x9 >> 8) - out[38] = byte(x9 >> 16) - out[39] = byte(x9 >> 24) - - out[40] = byte(x10) - out[41] = byte(x10 >> 8) - out[42] = byte(x10 >> 16) - out[43] = byte(x10 >> 24) - - out[44] = byte(x11) - out[45] = byte(x11 >> 8) - out[46] = byte(x11 >> 16) - out[47] = byte(x11 >> 24) - - out[48] = byte(x12) - out[49] = byte(x12 >> 8) - out[50] = byte(x12 >> 16) - out[51] = byte(x12 >> 24) - - out[52] = byte(x13) - out[53] = byte(x13 >> 8) - out[54] = byte(x13 >> 16) - out[55] = byte(x13 >> 24) - - out[56] = byte(x14) - out[57] = byte(x14 >> 8) - out[58] = byte(x14 >> 16) - out[59] = byte(x14 >> 24) - - out[60] = byte(x15) - out[61] = byte(x15 >> 8) - out[62] = byte(x15 >> 16) - out[63] = byte(x15 >> 24) -} - -// XORKeyStream crypts bytes from in to out using the given key and counters. -// In and out may be the same slice but otherwise should not overlap. Counter -// contains the raw salsa20 counter bytes (both nonce and block counter). 
-func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { - var block [64]byte - var counterCopy [16]byte - copy(counterCopy[:], counter[:]) - - for len(in) >= 64 { - core(&block, &counterCopy, key, &Sigma) - for i, x := range block { - out[i] = in[i] ^ x - } - u := uint32(1) - for i := 8; i < 16; i++ { - u += uint32(counterCopy[i]) - counterCopy[i] = byte(u) - u >>= 8 - } - in = in[64:] - out = out[64:] - } - - if len(in) > 0 { - core(&block, &counterCopy, key, &Sigma) - for i, v := range in { - out[i] = v ^ block[i] - } - } -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa_test.go b/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa_test.go deleted file mode 100644 index f8cecd9e6e..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package salsa - -import "testing" - -func TestCore208(t *testing.T) { - in := [64]byte{ - 0x7e, 0x87, 0x9a, 0x21, 0x4f, 0x3e, 0xc9, 0x86, - 0x7c, 0xa9, 0x40, 0xe6, 0x41, 0x71, 0x8f, 0x26, - 0xba, 0xee, 0x55, 0x5b, 0x8c, 0x61, 0xc1, 0xb5, - 0x0d, 0xf8, 0x46, 0x11, 0x6d, 0xcd, 0x3b, 0x1d, - 0xee, 0x24, 0xf3, 0x19, 0xdf, 0x9b, 0x3d, 0x85, - 0x14, 0x12, 0x1e, 0x4b, 0x5a, 0xc5, 0xaa, 0x32, - 0x76, 0x02, 0x1d, 0x29, 0x09, 0xc7, 0x48, 0x29, - 0xed, 0xeb, 0xc6, 0x8d, 0xb8, 0xb8, 0xc2, 0x5e} - - out := [64]byte{ - 0xa4, 0x1f, 0x85, 0x9c, 0x66, 0x08, 0xcc, 0x99, - 0x3b, 0x81, 0xca, 0xcb, 0x02, 0x0c, 0xef, 0x05, - 0x04, 0x4b, 0x21, 0x81, 0xa2, 0xfd, 0x33, 0x7d, - 0xfd, 0x7b, 0x1c, 0x63, 0x96, 0x68, 0x2f, 0x29, - 0xb4, 0x39, 0x31, 0x68, 0xe3, 0xc9, 0xe6, 0xbc, - 0xfe, 0x6b, 0xc5, 0xb7, 0xa0, 0x6d, 0x96, 0xba, - 0xe4, 0x24, 0xcc, 0x10, 0x2c, 0x91, 0x74, 0x5c, - 0x24, 0xad, 0x67, 0x3d, 0xc7, 0x61, 0x8f, 0x81, - } - - Core208(&in, &in) - if in != out { - t.Errorf("expected %x, got %x", out, in) - } -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/scrypt/scrypt.go b/Godeps/_workspace/src/golang.org/x/crypto/scrypt/scrypt.go deleted file mode 100644 index 30737b0a62..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/scrypt/scrypt.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package scrypt implements the scrypt key derivation function as defined in -// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard -// Functions" (http://www.tarsnap.com/scrypt/scrypt.pdf). -package scrypt - -import ( - "crypto/sha256" - "errors" - - "golang.org/x/crypto/pbkdf2" -) - -const maxInt = int(^uint(0) >> 1) - -// blockCopy copies n numbers from src into dst. -func blockCopy(dst, src []uint32, n int) { - copy(dst, src[:n]) -} - -// blockXOR XORs numbers from dst with n numbers from src. 
-func blockXOR(dst, src []uint32, n int) { - for i, v := range src[:n] { - dst[i] ^= v - } -} - -// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, -// and puts the result into both both tmp and out. -func salsaXOR(tmp *[16]uint32, in, out []uint32) { - w0 := tmp[0] ^ in[0] - w1 := tmp[1] ^ in[1] - w2 := tmp[2] ^ in[2] - w3 := tmp[3] ^ in[3] - w4 := tmp[4] ^ in[4] - w5 := tmp[5] ^ in[5] - w6 := tmp[6] ^ in[6] - w7 := tmp[7] ^ in[7] - w8 := tmp[8] ^ in[8] - w9 := tmp[9] ^ in[9] - w10 := tmp[10] ^ in[10] - w11 := tmp[11] ^ in[11] - w12 := tmp[12] ^ in[12] - w13 := tmp[13] ^ in[13] - w14 := tmp[14] ^ in[14] - w15 := tmp[15] ^ in[15] - - x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 - x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 - - for i := 0; i < 8; i += 2 { - u := x0 + x12 - x4 ^= u<<7 | u>>(32-7) - u = x4 + x0 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x4 - x12 ^= u<<13 | u>>(32-13) - u = x12 + x8 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x1 - x9 ^= u<<7 | u>>(32-7) - u = x9 + x5 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x9 - x1 ^= u<<13 | u>>(32-13) - u = x1 + x13 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x6 - x14 ^= u<<7 | u>>(32-7) - u = x14 + x10 - x2 ^= u<<9 | u>>(32-9) - u = x2 + x14 - x6 ^= u<<13 | u>>(32-13) - u = x6 + x2 - x10 ^= u<<18 | u>>(32-18) - - u = x15 + x11 - x3 ^= u<<7 | u>>(32-7) - u = x3 + x15 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x3 - x11 ^= u<<13 | u>>(32-13) - u = x11 + x7 - x15 ^= u<<18 | u>>(32-18) - - u = x0 + x3 - x1 ^= u<<7 | u>>(32-7) - u = x1 + x0 - x2 ^= u<<9 | u>>(32-9) - u = x2 + x1 - x3 ^= u<<13 | u>>(32-13) - u = x3 + x2 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x4 - x6 ^= u<<7 | u>>(32-7) - u = x6 + x5 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x6 - x4 ^= u<<13 | u>>(32-13) - u = x4 + x7 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x9 - x11 ^= u<<7 | u>>(32-7) - u = x11 + x10 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x11 - x9 ^= u<<13 | u>>(32-13) - u = x9 + x8 - x10 ^= u<<18 | u>>(32-18) - - 
u = x15 + x14 - x12 ^= u<<7 | u>>(32-7) - u = x12 + x15 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x12 - x14 ^= u<<13 | u>>(32-13) - u = x14 + x13 - x15 ^= u<<18 | u>>(32-18) - } - x0 += w0 - x1 += w1 - x2 += w2 - x3 += w3 - x4 += w4 - x5 += w5 - x6 += w6 - x7 += w7 - x8 += w8 - x9 += w9 - x10 += w10 - x11 += w11 - x12 += w12 - x13 += w13 - x14 += w14 - x15 += w15 - - out[0], tmp[0] = x0, x0 - out[1], tmp[1] = x1, x1 - out[2], tmp[2] = x2, x2 - out[3], tmp[3] = x3, x3 - out[4], tmp[4] = x4, x4 - out[5], tmp[5] = x5, x5 - out[6], tmp[6] = x6, x6 - out[7], tmp[7] = x7, x7 - out[8], tmp[8] = x8, x8 - out[9], tmp[9] = x9, x9 - out[10], tmp[10] = x10, x10 - out[11], tmp[11] = x11, x11 - out[12], tmp[12] = x12, x12 - out[13], tmp[13] = x13, x13 - out[14], tmp[14] = x14, x14 - out[15], tmp[15] = x15, x15 -} - -func blockMix(tmp *[16]uint32, in, out []uint32, r int) { - blockCopy(tmp[:], in[(2*r-1)*16:], 16) - for i := 0; i < 2*r; i += 2 { - salsaXOR(tmp, in[i*16:], out[i*8:]) - salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) - } -} - -func integer(b []uint32, r int) uint64 { - j := (2*r - 1) * 16 - return uint64(b[j]) | uint64(b[j+1])<<32 -} - -func smix(b []byte, r, N int, v, xy []uint32) { - var tmp [16]uint32 - x := xy - y := xy[32*r:] - - j := 0 - for i := 0; i < 32*r; i++ { - x[i] = uint32(b[j]) | uint32(b[j+1])<<8 | uint32(b[j+2])<<16 | uint32(b[j+3])<<24 - j += 4 - } - for i := 0; i < N; i += 2 { - blockCopy(v[i*(32*r):], x, 32*r) - blockMix(&tmp, x, y, r) - - blockCopy(v[(i+1)*(32*r):], y, 32*r) - blockMix(&tmp, y, x, r) - } - for i := 0; i < N; i += 2 { - j := int(integer(x, r) & uint64(N-1)) - blockXOR(x, v[j*(32*r):], 32*r) - blockMix(&tmp, x, y, r) - - j = int(integer(y, r) & uint64(N-1)) - blockXOR(y, v[j*(32*r):], 32*r) - blockMix(&tmp, y, x, r) - } - j = 0 - for _, v := range x[:32*r] { - b[j+0] = byte(v >> 0) - b[j+1] = byte(v >> 8) - b[j+2] = byte(v >> 16) - b[j+3] = byte(v >> 24) - j += 4 - } -} - -// Key derives a key from the password, salt, and cost 
parameters, returning -// a byte slice of length keyLen that can be used as cryptographic key. -// -// N is a CPU/memory cost parameter, which must be a power of two greater than 1. -// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the -// limits, the function returns a nil byte slice and an error. -// -// For example, you can get a derived key for e.g. AES-256 (which needs a -// 32-byte key) by doing: -// -// dk := scrypt.Key([]byte("some password"), salt, 16384, 8, 1, 32) -// -// The recommended parameters for interactive logins as of 2009 are N=16384, -// r=8, p=1. They should be increased as memory latency and CPU parallelism -// increases. Remember to get a good random salt. -func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { - if N <= 1 || N&(N-1) != 0 { - return nil, errors.New("scrypt: N must be > 1 and a power of 2") - } - if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { - return nil, errors.New("scrypt: parameters are too large") - } - - xy := make([]uint32, 64*r) - v := make([]uint32, 32*N*r) - b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) - - for i := 0; i < p; i++ { - smix(b[i*128*r:], r, N, v, xy) - } - - return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/scrypt/scrypt_test.go b/Godeps/_workspace/src/golang.org/x/crypto/scrypt/scrypt_test.go deleted file mode 100644 index e096c3a31a..0000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/scrypt/scrypt_test.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package scrypt - -import ( - "bytes" - "testing" -) - -type testVector struct { - password string - salt string - N, r, p int - output []byte -} - -var good = []testVector{ - { - "password", - "salt", - 2, 10, 10, - []byte{ - 0x48, 0x2c, 0x85, 0x8e, 0x22, 0x90, 0x55, 0xe6, 0x2f, - 0x41, 0xe0, 0xec, 0x81, 0x9a, 0x5e, 0xe1, 0x8b, 0xdb, - 0x87, 0x25, 0x1a, 0x53, 0x4f, 0x75, 0xac, 0xd9, 0x5a, - 0xc5, 0xe5, 0xa, 0xa1, 0x5f, - }, - }, - { - "password", - "salt", - 16, 100, 100, - []byte{ - 0x88, 0xbd, 0x5e, 0xdb, 0x52, 0xd1, 0xdd, 0x0, 0x18, - 0x87, 0x72, 0xad, 0x36, 0x17, 0x12, 0x90, 0x22, 0x4e, - 0x74, 0x82, 0x95, 0x25, 0xb1, 0x8d, 0x73, 0x23, 0xa5, - 0x7f, 0x91, 0x96, 0x3c, 0x37, - }, - }, - { - "this is a long \000 password", - "and this is a long \000 salt", - 16384, 8, 1, - []byte{ - 0xc3, 0xf1, 0x82, 0xee, 0x2d, 0xec, 0x84, 0x6e, 0x70, - 0xa6, 0x94, 0x2f, 0xb5, 0x29, 0x98, 0x5a, 0x3a, 0x09, - 0x76, 0x5e, 0xf0, 0x4c, 0x61, 0x29, 0x23, 0xb1, 0x7f, - 0x18, 0x55, 0x5a, 0x37, 0x07, 0x6d, 0xeb, 0x2b, 0x98, - 0x30, 0xd6, 0x9d, 0xe5, 0x49, 0x26, 0x51, 0xe4, 0x50, - 0x6a, 0xe5, 0x77, 0x6d, 0x96, 0xd4, 0x0f, 0x67, 0xaa, - 0xee, 0x37, 0xe1, 0x77, 0x7b, 0x8a, 0xd5, 0xc3, 0x11, - 0x14, 0x32, 0xbb, 0x3b, 0x6f, 0x7e, 0x12, 0x64, 0x40, - 0x18, 0x79, 0xe6, 0x41, 0xae, - }, - }, - { - "p", - "s", - 2, 1, 1, - []byte{ - 0x48, 0xb0, 0xd2, 0xa8, 0xa3, 0x27, 0x26, 0x11, 0x98, - 0x4c, 0x50, 0xeb, 0xd6, 0x30, 0xaf, 0x52, - }, - }, - - { - "", - "", - 16, 1, 1, - []byte{ - 0x77, 0xd6, 0x57, 0x62, 0x38, 0x65, 0x7b, 0x20, 0x3b, - 0x19, 0xca, 0x42, 0xc1, 0x8a, 0x04, 0x97, 0xf1, 0x6b, - 0x48, 0x44, 0xe3, 0x07, 0x4a, 0xe8, 0xdf, 0xdf, 0xfa, - 0x3f, 0xed, 0xe2, 0x14, 0x42, 0xfc, 0xd0, 0x06, 0x9d, - 0xed, 0x09, 0x48, 0xf8, 0x32, 0x6a, 0x75, 0x3a, 0x0f, - 0xc8, 0x1f, 0x17, 0xe8, 0xd3, 0xe0, 0xfb, 0x2e, 0x0d, - 0x36, 0x28, 0xcf, 0x35, 0xe2, 0x0c, 0x38, 0xd1, 0x89, - 0x06, - }, - }, - { - "password", - "NaCl", - 1024, 8, 16, - []byte{ - 0xfd, 0xba, 0xbe, 0x1c, 0x9d, 0x34, 0x72, 0x00, 0x78, - 
0x56, 0xe7, 0x19, 0x0d, 0x01, 0xe9, 0xfe, 0x7c, 0x6a, - 0xd7, 0xcb, 0xc8, 0x23, 0x78, 0x30, 0xe7, 0x73, 0x76, - 0x63, 0x4b, 0x37, 0x31, 0x62, 0x2e, 0xaf, 0x30, 0xd9, - 0x2e, 0x22, 0xa3, 0x88, 0x6f, 0xf1, 0x09, 0x27, 0x9d, - 0x98, 0x30, 0xda, 0xc7, 0x27, 0xaf, 0xb9, 0x4a, 0x83, - 0xee, 0x6d, 0x83, 0x60, 0xcb, 0xdf, 0xa2, 0xcc, 0x06, - 0x40, - }, - }, - { - "pleaseletmein", "SodiumChloride", - 16384, 8, 1, - []byte{ - 0x70, 0x23, 0xbd, 0xcb, 0x3a, 0xfd, 0x73, 0x48, 0x46, - 0x1c, 0x06, 0xcd, 0x81, 0xfd, 0x38, 0xeb, 0xfd, 0xa8, - 0xfb, 0xba, 0x90, 0x4f, 0x8e, 0x3e, 0xa9, 0xb5, 0x43, - 0xf6, 0x54, 0x5d, 0xa1, 0xf2, 0xd5, 0x43, 0x29, 0x55, - 0x61, 0x3f, 0x0f, 0xcf, 0x62, 0xd4, 0x97, 0x05, 0x24, - 0x2a, 0x9a, 0xf9, 0xe6, 0x1e, 0x85, 0xdc, 0x0d, 0x65, - 0x1e, 0x40, 0xdf, 0xcf, 0x01, 0x7b, 0x45, 0x57, 0x58, - 0x87, - }, - }, - /* - // Disabled: needs 1 GiB RAM and takes too long for a simple test. - { - "pleaseletmein", "SodiumChloride", - 1048576, 8, 1, - []byte{ - 0x21, 0x01, 0xcb, 0x9b, 0x6a, 0x51, 0x1a, 0xae, 0xad, - 0xdb, 0xbe, 0x09, 0xcf, 0x70, 0xf8, 0x81, 0xec, 0x56, - 0x8d, 0x57, 0x4a, 0x2f, 0xfd, 0x4d, 0xab, 0xe5, 0xee, - 0x98, 0x20, 0xad, 0xaa, 0x47, 0x8e, 0x56, 0xfd, 0x8f, - 0x4b, 0xa5, 0xd0, 0x9f, 0xfa, 0x1c, 0x6d, 0x92, 0x7c, - 0x40, 0xf4, 0xc3, 0x37, 0x30, 0x40, 0x49, 0xe8, 0xa9, - 0x52, 0xfb, 0xcb, 0xf4, 0x5c, 0x6f, 0xa7, 0x7a, 0x41, - 0xa4, - }, - }, - */ -} - -var bad = []testVector{ - {"p", "s", 0, 1, 1, nil}, // N == 0 - {"p", "s", 1, 1, 1, nil}, // N == 1 - {"p", "s", 7, 8, 1, nil}, // N is not power of 2 - {"p", "s", 16, maxInt / 2, maxInt / 2, nil}, // p * r too large -} - -func TestKey(t *testing.T) { - for i, v := range good { - k, err := Key([]byte(v.password), []byte(v.salt), v.N, v.r, v.p, len(v.output)) - if err != nil { - t.Errorf("%d: got unexpected error: %s", i, err) - } - if !bytes.Equal(k, v.output) { - t.Errorf("%d: expected %x, got %x", i, v.output, k) - } - } - for i, v := range bad { - _, err := Key([]byte(v.password), []byte(v.salt), 
v.N, v.r, v.p, 32) - if err == nil { - t.Errorf("%d: expected error, got nil", i) - } - } -} - -func BenchmarkKey(b *testing.B) { - for i := 0; i < b.N; i++ { - Key([]byte("password"), []byte("salt"), 16384, 8, 1, 64) - } -} diff --git a/README.md b/README.md index 63ca498d55..2b877ad812 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,29 @@ -# vetinari -Trust Server +# Vetinari + +## Overview + +Vetinari manages trust metadata as a complementary service to the registry. +It implements all endpoints under the `_trust` segment of the registry URLs. +Vetinari expects to manage TUF metadata and will do validation of one parent +level of content for any data uploaded to ensure repositories do not become +corrupted. This means either the keys in the root.json file will be used to +validate the uploaded role, or the keys in the immediate delegate parent will +be used. + +Uploading a new root.json will be validated using the same token mechanism +present in the registry. A user having write permissions on a repository +will be sufficient to permit the uploading of a new root.json. + +## Timestamping + +TUF requires a timestamp file be regularly generated. To achieve any ease +of use, it is necessary that Vetinari is responsible for generating the +timestamp.json based on the snapshot.json created and uploaded by the +repository owner. + +It is bad policy to place any signing keys in frontline servers. While +Vetinari is capable of supporting this behaviour we recommend using a +separate service and server with highly restricted permissions. Rufus +is provided as a reference implementation of a remote signer. An +implementation that satisfies the gRPC interface defined in Rufus will +satisfy Vetinari's requirements. 
diff --git a/auth/auth.go b/auth/auth.go deleted file mode 100644 index 486b7ab507..0000000000 --- a/auth/auth.go +++ /dev/null @@ -1,115 +0,0 @@ -package auth - -import ( - "errors" - "net/http" -) - -// User represents an entity -type User struct { - Name string -} - -// Resource represents something that can be accessed and manipulated -// by a User -type Resource struct { - Type string `json:"type"` - Name string `json:"name"` -} - -// Scope is an identifier scope -type Scope interface { - ID() string - Compare(Scope) bool -} - -// Authorizer is an interfaces to authorize a scope -type Authorizer interface { - // Authorize checks whether the credentials provided in the request - // are sufficient to satisfy the required scopes. - Authorize(*http.Request, ...Scope) (*User, error) -} - -// Authorization is an interface to determine whether -// an object has a scope -type Authorization interface { - HasScope(Scope) bool -} - -// ### START INSECURE AUTHORIZATION TOOLS ### -// THESE ARE FOR DEV PURPOSES ONLY, DO NOT USE IN -// PRODUCTION - -// InsecureAuthorizer is an insecure implementation of IAuthorizer. -// WARNING: DON'T USE THIS FOR ANYTHING, IT'S VERY INSECURE -type InsecureAuthorizer struct{} - -// Authorize authorizes any scope -// WARNING: LIKE I SAID, VERY INSECURE -func (auth *InsecureAuthorizer) Authorize(r *http.Request, scopes ...Scope) (*User, error) { - return &User{}, nil -} - -// InsecureAuthorization is an implementation of IAuthorization -// which will consider any scope authorized. 
-// WARNING: ALSO DON'T USE THIS, IT'S ALSO VERY INSECURE -type InsecureAuthorization struct { -} - -// HasScope always returns true for any scope -// WARNING: THIS IS JUST INCREDIBLY INSECURE -func (authzn *InsecureAuthorization) HasScope(scope Scope) bool { - return true -} - -// ### END INSECURE AUTHORIZATION TOOLS ### - -// NoAuthorizer is a factory for NoAuthorization object -type NoAuthorizer struct{} - -// Authorize implements the IAuthorizer interface -func (auth *NoAuthorizer) Authorize(r *http.Request, scopes ...Scope) (*User, error) { - return nil, errors.New("User not authorized") -} - -// NoAuthorization is an implementation of IAuthorization -// which never allows a scope to be valid. -type NoAuthorization struct{} - -// HasScope returns false for any scope -func (authzn *NoAuthorization) HasScope(scope Scope) bool { - return false -} - -// SimpleScope is a simple scope represented by a string. -type SimpleScope string - -// ID returns the string representing the scope. -func (ss SimpleScope) ID() string { - return string(ss) -} - -// Compare compares to the given scope for equality. 
-// TODO: possibly rename to Equals -func (ss SimpleScope) Compare(toCompare Scope) bool { - return ss.ID() == toCompare.ID() -} - -var _ Scope = SimpleScope("Test") - -const ( - // SSNoAuth is the simple scope "NoAuth" - SSNoAuth SimpleScope = SimpleScope("NoAuth") - - // SSCreate is the simple scope "Create" - SSCreate = SimpleScope("Create") - - // SSRead is the simple scope "Read" - SSRead = SimpleScope("Read") - - // SSUpdate is the simple scope "Update" - SSUpdate = SimpleScope("Update") - - // SSDelete is the simple scope "Delete" - SSDelete = SimpleScope("Delete") -) diff --git a/auth/auth_test.go b/auth/auth_test.go deleted file mode 100644 index 184097b05e..0000000000 --- a/auth/auth_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package auth - -import ( - "net/http" - "testing" -) - -func TestInsecureAuthorization(t *testing.T) { - auther := InsecureAuthorizer{} - _, err := auther.Authorize(&http.Request{}, SSNoAuth) - if err != nil { - t.Fatalf("Failed to authorize with InsecureAuthorizer") - } - -} - -func TestNoAuthorization(t *testing.T) { - auth := NoAuthorization{} - if auth.HasScope(SSCreate) { - t.Fatalf("NoAuthorization should not have any scopes") - } -} - -func TestSimpleScope(t *testing.T) { - scope1 := SimpleScope("Test") - scope2 := SimpleScope("Test") - if !scope1.Compare(scope2) { - t.Fatalf("Expected scope1 and scope2 to match") - } - - scope3 := SimpleScope("Test") - scope4 := SimpleScope("Don't Match") - if scope3.Compare(scope4) { - t.Fatalf("Expected scope3 and scope4 not to match") - } -} diff --git a/auth/token/authorizer.go b/auth/token/authorizer.go deleted file mode 100644 index 52c3a5e646..0000000000 --- a/auth/token/authorizer.go +++ /dev/null @@ -1,225 +0,0 @@ -package token - -import ( - "crypto" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" - - "github.com/docker/libtrust" - - "github.com/docker/vetinari/auth" - "github.com/docker/vetinari/utils" -) - -// 
ConfigSection is the name used to identify the tokenAuthorizer config -// in the config json -const ConfigSection string = "token_auth" - -type tokenConfig struct { - Realm string `json:"realm"` - Issuer string `json:"issuer"` - Service string `json:"service"` - RootCertBundle string `json:"root_cert_bundle"` -} - -// authChallenge implements the auth.Challenge interface. -type authChallenge struct { - err error - realm string - service string - scopes []auth.Scope -} - -// Error returns the internal error string for this authChallenge. -func (ac authChallenge) Error() string { - return ac.err.Error() -} - -// Status returns the HTTP Response Status Code for this authChallenge. -func (ac *authChallenge) Status() int { - return http.StatusUnauthorized -} - -// challengeParams constructs the value to be used in -// the WWW-Authenticate response challenge header. -// See https://tools.ietf.org/html/rfc6750#section-3 -func (ac *authChallenge) challengeParams() string { - str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) - - scope := make([]string, 0, len(ac.scopes)) - for _, s := range ac.scopes { - scope = append(scope, s.ID()) - } - if len(scope) > 0 { - scopeStr := strings.Join(scope, " ") - str = fmt.Sprintf("%s,scope=%q", str, scopeStr) - } - - if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { - str = fmt.Sprintf("%s,error=%q", str, "invalid_token") - } else if ac.err == ErrInsufficientScope { - str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") - } - - return str -} - -// SetHeader sets the WWW-Authenticate value for the given header. -func (ac *authChallenge) SetHeader(header http.Header) { - header.Add("WWW-Authenticate", ac.challengeParams()) -} - -// ServeHttp handles writing the challenge response -// by setting the challenge header and status code. 
-func (ac *authChallenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ac.SetHeader(w.Header()) - w.WriteHeader(ac.Status()) -} - -// accessController implements the auth.AccessController interface. -type tokenAuthorizer struct { - realm string - issuer string - service string - rootCerts *x509.CertPool - trustedKeys map[string]libtrust.PublicKey -} - -// NewTokenAuthorizer creates an Authorizer that operates with JWTs. -func NewTokenAuthorizer(conf []byte) (auth.Authorizer, error) { - tokenConf := new(tokenConfig) - err := json.Unmarshal(conf, tokenConf) - if err != nil { - return nil, fmt.Errorf("unable to parse TokenAuthorizer configuration: %s", err) - } - - fp, err := os.Open(tokenConf.RootCertBundle) - if err != nil { - return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", tokenConf.RootCertBundle, err) - } - defer fp.Close() - - rawCertBundle, err := ioutil.ReadAll(fp) - if err != nil { - return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", tokenConf.RootCertBundle, err) - } - - var rootCerts []*x509.Certificate - pemBlock, rawCertBundle := pem.Decode(rawCertBundle) - for pemBlock != nil { - cert, err := x509.ParseCertificate(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) - } - - rootCerts = append(rootCerts, cert) - - pemBlock, rawCertBundle = pem.Decode(rawCertBundle) - } - - if len(rootCerts) == 0 { - return nil, errors.New("token auth requires at least one token signing root certificate") - } - - rootPool := x509.NewCertPool() - trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) - for _, rootCert := range rootCerts { - rootPool.AddCert(rootCert) - pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) - if err != nil { - return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) - } - trustedKeys[pubKey.KeyID()] = pubKey - } - - return 
&tokenAuthorizer{ - realm: tokenConf.Realm, - issuer: tokenConf.Issuer, - service: tokenConf.Service, - rootCerts: rootPool, - trustedKeys: trustedKeys, - }, nil -} - -// Authorize handles checking whether the given request is authorized -// for actions on resources described by the given Scopes. -func (ac *tokenAuthorizer) Authorize(r *http.Request, scopes ...auth.Scope) (*auth.User, error) { - challenge := &authChallenge{ - realm: ac.realm, - service: ac.service, - scopes: scopes, - } - - token, err := parseToken(r) - if err != nil { - challenge.err = err - return nil, challenge - } - - resource := auth.Resource{Type: "repo", Name: utils.ResourceName(r)} - - return ac.authorize(token, resource, scopes...) -} - -// authorize separates out the code that needs to know how to handle a request from the rest of the -// authorization code making it easier to test this part by injecting test values -func (ac *tokenAuthorizer) authorize(token *Token, resource auth.Resource, scopes ...auth.Scope) (*auth.User, error) { - challenge := &authChallenge{ - realm: ac.realm, - service: ac.service, - scopes: scopes, - } - - verifyOpts := VerifyOptions{ - TrustedIssuers: []string{ac.issuer}, - AcceptedAudiences: []string{ac.service}, - Roots: ac.rootCerts, - TrustedKeys: ac.trustedKeys, - } - - if err := token.Verify(verifyOpts); err != nil { - challenge.err = err - return nil, challenge - } - - tokenScopes := token.scopes(resource) - for _, scope := range scopes { - match := false - for _, tokenScope := range tokenScopes { - if scope.Compare(tokenScope) { - match = true - break - } - } - if !match { - challenge.err = ErrInsufficientScope - return nil, challenge - } - } - - return &auth.User{Name: token.Claims.Subject}, nil - -} - -func parseToken(r *http.Request) (*Token, error) { - parts := strings.Split(r.Header.Get("Authorization"), " ") - - if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { - return nil, ErrTokenRequired - } - - rawToken := parts[1] - - token, err := 
NewToken(rawToken) - if err != nil { - return nil, err - } - return token, nil -} diff --git a/auth/token/errors.go b/auth/token/errors.go deleted file mode 100644 index 93b0dfa271..0000000000 --- a/auth/token/errors.go +++ /dev/null @@ -1,13 +0,0 @@ -package token - -import ( - "errors" -) - -// Errors used and exported by this package. -var ( - ErrInsufficientScope = errors.New("insufficient scope") - ErrTokenRequired = errors.New("authorization token required") - ErrMalformedToken = errors.New("malformed token") - ErrInvalidToken = errors.New("invalid token") -) diff --git a/cmd/vetinari-server/config.json b/cmd/vetinari-server/config.json index 44f7f5da42..2c6264624f 100644 --- a/cmd/vetinari-server/config.json +++ b/cmd/vetinari-server/config.json @@ -8,5 +8,8 @@ "type": "local", "hostname": "", "port": "" + }, + "logging": { + "level": 5 } } diff --git a/cmd/vetinari-server/main.go b/cmd/vetinari-server/main.go index a029254715..659a1b1213 100644 --- a/cmd/vetinari-server/main.go +++ b/cmd/vetinari-server/main.go @@ -11,10 +11,10 @@ import ( "syscall" "github.com/Sirupsen/logrus" - "github.com/endophage/go-tuf/signed" + _ "github.com/docker/distribution/registry/auth/token" + "github.com/endophage/gotuf/signed" "golang.org/x/net/context" - _ "github.com/docker/vetinari/auth/token" "github.com/docker/vetinari/config" "github.com/docker/vetinari/server" "github.com/docker/vetinari/signer" @@ -27,8 +27,12 @@ var debug bool var configFile string func init() { + // Set default logging level to Error + logrus.SetLevel(logrus.ErrorLevel) + + // Setup flags flag.StringVar(&configFile, "config", "", "Path to configuration file") - flag.BoolVar(&debug, "debug", false, "show the version and exit") + flag.BoolVar(&debug, "debug", false, "Enable the debugging server on localhost:8080") } func main() { @@ -46,6 +50,9 @@ func main() { logrus.Fatal("Error parsing config: ", err.Error()) return // not strictly needed but let's be explicit } + if conf.Logging { + 
logrus.SetLevel(conf.Logging.Level) + } sigHup := make(chan os.Signal) sigTerm := make(chan os.Signal) @@ -55,10 +62,10 @@ func main() { var trust signed.TrustService if conf.TrustService.Type == "remote" { - logrus.Info("[Vetinari Server] : Using remote signing service") + logrus.Info("[Vetinari] : Using remote signing service") trust = signer.NewRufusSigner(conf.TrustService.Hostname, conf.TrustService.Port, conf.TrustService.TLSCAFile) } else { - logrus.Info("[Vetinari Server] : Using local signing service") + logrus.Info("[Vetinari] : Using local signing service") trust = signed.NewEd25519() } diff --git a/config/config.go b/config/config.go index 17a644d897..6c4e212b84 100644 --- a/config/config.go +++ b/config/config.go @@ -12,6 +12,7 @@ import ( type Configuration struct { Server ServerConf `json:"server,omitempty"` TrustService TrustServiceConf `json:"trust_service,omitempty"` + Logging LoggingConf `json:"logging,omitempty"` } // ServerConf specifically addresses configuration related to @@ -32,6 +33,10 @@ type TrustServiceConf struct { TLSCAFile string `json:"tls_ca_file,omitempty"` } +type LoggingConf struct { + Level uint8 `json:"level,omitempty"` +} + // Load takes a filename (relative path from pwd) and attempts // to parse the file as a JSON obejct into the Configuration // struct diff --git a/server/handlers/default.go b/server/handlers/default.go index a587b30e0f..8653df6c19 100644 --- a/server/handlers/default.go +++ b/server/handlers/default.go @@ -2,24 +2,15 @@ package handlers import ( "encoding/json" - "log" "net/http" + "golang.org/x/net/context" + "github.com/docker/vetinari/errors" - "github.com/docker/vetinari/utils" - repo "github.com/endophage/go-tuf" - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/store" - "github.com/endophage/go-tuf/util" - "github.com/gorilla/mux" ) -// TODO: This is just for PoC. The real DB should be injected as part of -// the context for a final version. 
-var db = util.GetSqliteDB() - // MainHandler is the default handler for the server -func MainHandler(ctx utils.Context, w http.ResponseWriter, r *http.Request) *errors.HTTPError { +func MainHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) *errors.HTTPError { if r.Method == "GET" { err := json.NewEncoder(w).Encode("{}") if err != nil { @@ -37,175 +28,11 @@ func MainHandler(ctx utils.Context, w http.ResponseWriter, r *http.Request) *err } // AddHandler accepts urls in the form // -func AddHandler(ctx utils.Context, w http.ResponseWriter, r *http.Request) *errors.HTTPError { - vars := mux.Vars(r) - log.Printf("AddHandler request for images name: %s and tag: %s", vars["imageName"], vars["tag"]) - - local := store.DBStore(db, vars["imageName"]) - // parse body for correctness - meta := data.FileMeta{} - decoder := json.NewDecoder(r.Body) - err := decoder.Decode(&meta) - defer r.Body.Close() - if err != nil { - return &errors.HTTPError{ - HTTPStatus: http.StatusInternalServerError, - Code: 9999, - Err: err, - } - } - // add to targets - local.AddBlob(vars["tag"], meta) - tufRepo, err := repo.NewRepo(ctx.Trust(), local, "sha256", "sha512") - if err != nil { - return &errors.HTTPError{ - HTTPStatus: http.StatusInternalServerError, - Code: 9999, - Err: err, - } - } - // err = tufRepo.Init(false) - //if err != nil { - // return &errors.HTTPError{ - // HTTPStatus: http.StatusInternalServerError, - // Code: 9996, - // Err: err, - // } - //} - err = tufRepo.AddTarget(vars["tag"], json.RawMessage{}) - if err != nil { - return &errors.HTTPError{ - HTTPStatus: http.StatusInternalServerError, - Code: 9997, - Err: err, - } - } - err = tufRepo.Sign("targets.json") - if err != nil { - return &errors.HTTPError{ - HTTPStatus: http.StatusInternalServerError, - Code: 9998, - Err: err, - } - } - tufRepo.Snapshot(repo.CompressionTypeNone) - err = tufRepo.Sign("snapshot.json") - if err != nil { - return &errors.HTTPError{ - HTTPStatus: http.StatusInternalServerError, - 
Code: 9999, - Err: err, - } - } - tufRepo.Timestamp() - err = tufRepo.Sign("timestamp.json") - if err != nil { - return &errors.HTTPError{ - HTTPStatus: http.StatusInternalServerError, - Code: 9999, - Err: err, - } - } - return nil -} - -// RemoveHandler accepts urls in the form // -func RemoveHandler(ctx utils.Context, w http.ResponseWriter, r *http.Request) *errors.HTTPError { - // remove tag from tagets list - vars := mux.Vars(r) - log.Printf("RemoveHandler request for images name: %s and tag: %s", vars["imageName"], vars["tag"]) - - local := store.DBStore(db, vars["imageName"]) - local.RemoveBlob(vars["tag"]) - tufRepo, err := repo.NewRepo(ctx.Trust(), local, "sha256", "sha512") - if err != nil { - return &errors.HTTPError{ - HTTPStatus: http.StatusInternalServerError, - Code: 9999, - Err: err, - } - } - _ = tufRepo.Init(false) - tufRepo.RemoveTarget(vars["tag"]) - err = tufRepo.Sign("targets.json") - if err != nil { - return &errors.HTTPError{ - HTTPStatus: http.StatusInternalServerError, - Code: 9999, - Err: err, - } - } - tufRepo.Snapshot(repo.CompressionTypeNone) - err = tufRepo.Sign("snapshot.json") - if err != nil { - return &errors.HTTPError{ - HTTPStatus: http.StatusInternalServerError, - Code: 9999, - Err: err, - } - } - tufRepo.Timestamp() - err = tufRepo.Sign("timestamp.json") - if err != nil { - return &errors.HTTPError{ - HTTPStatus: http.StatusInternalServerError, - Code: 9999, - Err: err, - } - } +func UpdateHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) *errors.HTTPError { return nil } // GetHandler accepts urls in the form //.json -func GetHandler(ctx utils.Context, w http.ResponseWriter, r *http.Request) *errors.HTTPError { - // generate requested file and serve - vars := mux.Vars(r) - log.Printf("GetHandler request for image name: %s and tuf-file: %s", vars["imageName"], vars["tufFile"]) - - local := store.DBStore(db, vars["imageName"]) - - meta, err := local.GetMeta() - if err != nil { - return &errors.HTTPError{ - 
HTTPStatus: http.StatusInternalServerError, - Code: 9999, - Err: err, - } - } - w.Write(meta[vars["tufFile"]]) - return nil -} - -// GenKeysHandler is the handler for generate keys endpoint -func GenKeysHandler(ctx utils.Context, w http.ResponseWriter, r *http.Request) *errors.HTTPError { - // remove tag from tagets list - vars := mux.Vars(r) - log.Printf("GenKeysHandler request for: %s", vars["imageName"]) - - local := store.DBStore(db, vars["imageName"]) - tufRepo, err := repo.NewRepo(ctx.Trust(), local, "sha256", "sha512") - if err != nil { - return &errors.HTTPError{ - HTTPStatus: http.StatusInternalServerError, - Code: 9999, - Err: err, - } - } - // init repo - err = tufRepo.Init(false) - if err != nil { - return &errors.HTTPError{ - HTTPStatus: http.StatusInternalServerError, - Code: 9999, - Err: err, - } - } - // gen keys - - // generate empty targets file - - // snapshot - - // timestamp +func GetHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) *errors.HTTPError { return nil } diff --git a/server/handlers/default_test.go b/server/handlers/default_test.go index b1d030d845..a753dcbedf 100644 --- a/server/handlers/default_test.go +++ b/server/handlers/default_test.go @@ -5,7 +5,7 @@ import ( "net/http/httptest" "testing" - "github.com/endophage/go-tuf/signed" + "github.com/endophage/gotuf/signed" "github.com/docker/vetinari/utils" ) diff --git a/server/server.go b/server/server.go index db02d7ba2c..3ac7d8e68a 100644 --- a/server/server.go +++ b/server/server.go @@ -3,11 +3,14 @@ package server import ( "crypto/rand" "crypto/tls" - "log" "net" "net/http" + "time" - "github.com/endophage/go-tuf/signed" + "code.google.com/p/go-uuid/uuid" + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/auth" + "github.com/endophage/gotuf/signed" "github.com/gorilla/mux" "golang.org/x/net/context" @@ -16,21 +19,54 @@ import ( "github.com/docker/vetinari/utils" ) +type HTTPServer struct { + http.Server + conns map[net.Conn]struct{} + id 
string +} + +func NewHTTPServer(s http.Server) *HTTPServer { + return &HTTPServer{ + Server: s, + conns: make(map[net.Conn]struct{}), + id: uuid.New(), + } +} + +// Track connections for cleanup on shutdown. +func (svr *HTTPServer) ConnState(conn net.Conn, state http.ConnState) { + switch state { + case http.StateNew: + svr.conns[conn] = struct{}{} + case http.StateClosed, http.StateHijacked: + delete(svr.conns, conn) + } +} + +// This should only be called after closing the server's listeners. +func (svr *HTTPServer) TimeoutConnections() { + time.Sleep(time.Second * 30) + for conn, _ := range svr.conns { + conn.Close() + } + logrus.Infof("[Vetinari] All connections closed for server %s", svr.id) +} + // Run sets up and starts a TLS server that can be cancelled using the // given configuration. The context it is passed is the context it should // use directly for the TLS server, and generate children off for requests -func Run(ctx context.Context, conf config.ServerConf, trust signed.TrustService) error { +func Run(ctx context.Context, conf config.ServerConf, trust signed.CryptoService) error { // TODO: check validity of config return run(ctx, conf.Addr, conf.TLSCertFile, conf.TLSKeyFile, trust) } -func run(ctx context.Context, addr, tlsCertFile, tlsKeyFile string, trust signed.TrustService) error { +func run(ctx context.Context, addr, tlsCertFile, tlsKeyFile string, trust signed.CryptoService) error { keypair, err := tls.LoadX509KeyPair(tlsCertFile, tlsKeyFile) if err != nil { - log.Printf("error loading keys %s", err) + logrus.Errorf("[Vetinari] Error loading keys %s", err) return err } @@ -61,34 +97,39 @@ func run(ctx context.Context, addr, tlsCertFile, tlsKeyFile string, trust signed } tlsLsnr := tls.NewListener(lsnr, tlsConfig) - // This is a basic way to shutdown the running listeners. - // A more complete implementation would ensure each individual connection - // gets cleaned up. 
- go func() { - doneChan := ctx.Done() - <-doneChan - // TODO: log that we received close signal - lsnr.Close() - tlsLsnr.Close() - }() - - hand := utils.RootHandlerFactory(&utils.InsecureAuthorizer{}, utils.NewContext, trust) + ac, err := auth.GetAccessController("token", map[string]interface{}{}) + if err != nil { + return err + } + hand := utils.RootHandlerFactory(ac, context.Background(), trust) r := mux.NewRouter() // TODO (endophage): use correct regexes for image and tag names - r.Methods("PUT").Path("/{imageName:.*}/init").Handler(hand(handlers.GenKeysHandler, utils.SSCreate)) - r.Methods("GET").Path("/{imageName:.*}/{tufFile:(root.json|targets.json|timestamp.json|snapshot.json)}").Handler(hand(handlers.GetHandler, utils.SSNoAuth)) - r.Methods("DELETE").Path("/{imageName:.*}/{tag:[a-zA-Z0-9]+}").Handler(hand(handlers.RemoveHandler, utils.SSDelete)) - r.Methods("POST").Path("/{imageName:.*}/{tag:[a-zA-Z0-9]+}").Handler(hand(handlers.AddHandler, utils.SSUpdate)) + r.Methods("GET").Path("/v2/{imageName:.*}/_trust/tuf/{tufFile:(root.json|targets.json|timestamp.json|snapshot.json)}").Handler(hand(handlers.GetHandler, "pull")) + r.Methods("POST").Path("/v2/{imageName:.*}/_trust/tuf/{tufFile:(root.json|targets.json|timestamp.json|snapshot.json)}").Handler(hand(handlers.UpdateHandler, "push", "pull")) - server := http.Server{ - Addr: addr, - Handler: r, - } + svr := NewHTTPServer( + http.Server{ + Addr: addr, + Handler: r, + }, + ) - log.Println("[Vetinari Server] : Listening on", addr) + logrus.Info("[Vetinari] : Listening on", addr) - err = server.Serve(tlsLsnr) + go stopWatcher(ctx, svr, lsnr, tlsLsnr) + + err = svr.Serve(tlsLsnr) return err } + +func stopWatcher(ctx context.Context, svr *HTTPServer, ls ...net.Listener) { + doneChan := ctx.Done() + <-doneChan + logrus.Debug("[Vetinari] Received close signal") + for _, l := range ls { + l.Close() + } + svr.TimeoutConnections() +} diff --git a/server/server_test.go b/server/server_test.go index 
12db026600..df23dafe01 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/endophage/go-tuf/signed" + "github.com/endophage/gotuf/signed" "golang.org/x/net/context" "github.com/docker/vetinari/config" diff --git a/signer/rufus_trust.go b/signer/rufus_trust.go index b64095f3e5..f2d226e049 100644 --- a/signer/rufus_trust.go +++ b/signer/rufus_trust.go @@ -6,8 +6,8 @@ import ( "github.com/Sirupsen/logrus" pb "github.com/docker/rufus/proto" - "github.com/endophage/go-tuf/data" - "github.com/endophage/go-tuf/keys" + "github.com/endophage/gotuf/data" + "github.com/endophage/gotuf/keys" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" diff --git a/utils/auth.go b/utils/auth.go deleted file mode 100644 index 82cd8c0887..0000000000 --- a/utils/auth.go +++ /dev/null @@ -1,103 +0,0 @@ -package utils - -import ( - "errors" -) - -// Scope is an identifier scope -type Scope interface { - ID() string - Compare(Scope) bool -} - -// Authorizer is an interfaces to authorize a scope -type Authorizer interface { - // Authorize is expected to set the Authorization on the Context. If - // Authorization fails, an error should be returned, but additionally, - // the Authorization on the Context should be set to an instance of - // NoAuthorization - Authorize(Context, ...Scope) error -} - -// Authorization is an interface to determine whether -// an object has a scope -type Authorization interface { - HasScope(Scope) bool -} - -// ### START INSECURE AUTHORIZATION TOOLS ### -// THESE ARE FOR DEV PURPOSES ONLY, DO NOT USE IN -// PRODUCTION - -// InsecureAuthorizer is an insecure implementation of IAuthorizer. 
-// WARNING: DON'T USE THIS FOR ANYTHING, IT'S VERY INSECURE -type InsecureAuthorizer struct{} - -// Authorize authorizes any scope -// WARNING: LIKE I SAID, VERY INSECURE -func (auth *InsecureAuthorizer) Authorize(ctx Context, scopes ...Scope) error { - ctx.SetAuthorization(&InsecureAuthorization{}) - return nil -} - -// InsecureAuthorization is an implementation of IAuthorization -// which will consider any scope authorized. -// WARNING: ALSO DON'T USE THIS, IT'S ALSO VERY INSECURE -type InsecureAuthorization struct { -} - -// HasScope always returns true for any scope -// WARNING: THIS IS JUST INCREDIBLY INSECURE -func (authzn *InsecureAuthorization) HasScope(scope Scope) bool { - return true -} - -// ### END INSECURE AUTHORIZATION TOOLS ### - -// NoAuthorizer is a factory for NoAuthorization object -type NoAuthorizer struct{} - -// Authorize implements the IAuthorizer interface -func (auth *NoAuthorizer) Authorize(ctx Context, scopes ...Scope) error { - ctx.SetAuthorization(&NoAuthorization{}) - return errors.New("User not authorized") -} - -// NoAuthorization is an implementation of IAuthorization -// which never allows a scope to be valid. -type NoAuthorization struct{} - -// HasScope returns false for any scope -func (authzn *NoAuthorization) HasScope(scope Scope) bool { - return false -} - -// SimpleScope is a simple scope represented by a string. -type SimpleScope string - -// ID returns the string representing the scope. -func (ss SimpleScope) ID() string { - return string(ss) -} - -// Compare compares to the given scope for equality. 
-func (ss SimpleScope) Compare(toCompare Scope) bool { - return ss.ID() == toCompare.ID() -} - -const ( - // SSNoAuth is the simple scope "NoAuth" - SSNoAuth SimpleScope = SimpleScope("NoAuth") - - // SSCreate is the simple scope "Create" - SSCreate = SimpleScope("Create") - - // SSRead is the simple scope "Read" - SSRead = SimpleScope("Read") - - // SSUpdate is the simple scope "Update" - SSUpdate = SimpleScope("Update") - - // SSDelete is the simple scope "Delete" - SSDelete = SimpleScope("Delete") -) diff --git a/utils/auth_test.go b/utils/auth_test.go deleted file mode 100644 index 07c237af26..0000000000 --- a/utils/auth_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package utils - -import ( - "testing" -) - -func TestInsecureAuthorization(t *testing.T) { - auther := InsecureAuthorizer{} - ctx := context{} - err := auther.Authorize(&ctx, SSNoAuth) - if err != nil { - t.Fatalf("Failed to authorize with InsecureAuthorizer") - } - if !ctx.Authorization().HasScope(SSCreate) { - t.Fatalf("InsecureAuthorization failed to approve a scope") - } - -} - -func TestNoAuthorization(t *testing.T) { - auth := NoAuthorization{} - if auth.HasScope(SSCreate) { - t.Fatalf("NoAuthorization should not have any scopes") - } -} - -func TestSimpleScope(t *testing.T) { - scope1 := SimpleScope("Test") - scope2 := SimpleScope("Test") - if !scope1.Compare(scope2) { - t.Fatalf("Expected scope1 and scope2 to match") - } - - scope3 := SimpleScope("Test") - scope4 := SimpleScope("Don't Match") - if scope3.Compare(scope4) { - t.Fatalf("Expected scope3 and scope4 not to match") - } -} diff --git a/utils/context.go b/utils/context.go deleted file mode 100644 index 1edb239686..0000000000 --- a/utils/context.go +++ /dev/null @@ -1,71 +0,0 @@ -package utils - -import ( - "github.com/endophage/go-tuf/signed" - "net/http" -) - -// Context defines an interface for managing authorizations. -type Context interface { - // TODO: define a set of standard getters. 
Using getters - // will allow us to easily and transparently cache - // fields or load them on demand. Using this interface - // will allow people to define their own context struct - // that may handle things like caching and lazy loading - // differently. - - // Resource return the QDN of the resource being accessed - Resource() string - - // Authorized returns a boolean indicating whether the user - // has been successfully authorized for this request. - Authorization() Authorization - - // SetAuthStatus should be called to change the authorization - // status of the context (and therefore the request) - SetAuthorization(Authorization) - - // Trust returns the trust service to be used - Trust() signed.TrustService -} - -// ContextFactory creates a IContext from an http request. -type ContextFactory func(*http.Request, signed.TrustService) Context - -// Context represents an authorization context for a resource. -type context struct { - resource string - authorization Authorization - trust signed.TrustService -} - -// NewContext creates a new authorization context with the -// given HTTP request path as the resource. -func NewContext(r *http.Request, trust signed.TrustService) Context { - return &context{ - resource: r.URL.Path, - trust: trust, - } -} - -// Resource returns the resource value for the context. -func (ctx *context) Resource() string { - return ctx.resource -} - -// Authorization returns an IAuthorization implementation for -// the context. -func (ctx *context) Authorization() Authorization { - return ctx.authorization -} - -// SetAuthorization allows setting an IAuthorization for -// the context. 
-func (ctx *context) SetAuthorization(authzn Authorization) { - ctx.authorization = authzn -} - -// Trust returns the instantiated TrustService for the context -func (ctx *context) Trust() signed.TrustService { - return ctx.trust -} diff --git a/utils/context_test.go b/utils/context_test.go deleted file mode 100644 index da4ed637b5..0000000000 --- a/utils/context_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package utils - -import ( - "net/http" - "testing" - - "github.com/endophage/go-tuf/signed" -) - -func TestNewContex(t *testing.T) { - r, err := http.NewRequest("GET", "http://localhost/test/url", nil) - if err != nil { - t.Fatalf("Error creating request: %s", err.Error()) - } - ctx := NewContext(r, &signed.Ed25519{}) - - if ctx.Resource() != "/test/url" { - t.Fatalf("Context has incorrect resource") - } -} - -func TestContextTrust(t *testing.T) { - ctx := context{} - - if ctx.Trust() != nil { - t.Fatalf("Update this test now that Trust has been implemented") - } -} diff --git a/utils/http.go b/utils/http.go index ad286dc556..69e2f59ff9 100644 --- a/utils/http.go +++ b/utils/http.go @@ -3,58 +3,75 @@ package utils import ( "net/http" - "github.com/endophage/go-tuf/signed" + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/auth" + "github.com/endophage/gotuf/signed" "github.com/gorilla/mux" + "golang.org/x/net/context" "github.com/docker/vetinari/errors" ) // contextHandler defines an alterate HTTP handler interface which takes in // a context for authorization and returns an HTTP application error. -type contextHandler func(ctx Context, w http.ResponseWriter, r *http.Request) *errors.HTTPError +type contextHandler func(ctx context.Context, w http.ResponseWriter, r *http.Request) *errors.HTTPError // rootHandler is an implementation of an HTTP request handler which handles // authorization and calling out to the defined alternate http handler. 
type rootHandler struct { handler contextHandler - auth Authorizer - scopes []Scope - context ContextFactory - trust signed.TrustService + auth auth.AccessController + actions []string + context context.Context + trust signed.CryptoService } // RootHandlerFactory creates a new rootHandler factory using the given // Context creator and authorizer. The returned factory allows creating // new rootHandlers from the alternate http handler contextHandler and // a scope. -func RootHandlerFactory(auth Authorizer, ctxFac ContextFactory, trust signed.TrustService) func(contextHandler, ...Scope) *rootHandler { - return func(handler contextHandler, scopes ...Scope) *rootHandler { - return &rootHandler{handler, auth, scopes, ctxFac, trust} +func RootHandlerFactory(auth auth.AccessController, ctx context.Context, trust signed.CryptoService) func(contextHandler, ...string) *rootHandler { + return func(handler contextHandler, actions ...string) *rootHandler { + return &rootHandler{ + handler: handler, + auth: auth, + actions: actions, + context: ctx, + trust: trust, + } } } // ServeHTTP serves an HTTP request and implements the http.Handler interface. func (root *rootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ctx := root.context(r, root.trust) - if err := root.auth.Authorize(ctx, root.scopes...); err != nil { + vars := mux.Vars(r) + ctx := context.WithValue(root.context, "repo", vars["imageName"]) + ctx = context.WithValue(ctx, "trust", root.trust) + + access := buildAccessRecords(vars["imageName"], root.actions...) + var err error + if ctx, err = root.auth.Authorized(ctx, access...); err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return } if err := root.handler(ctx, w, r); err != nil { - // TODO: Log error + logrus.Error("[Vetinari] ", err.Error()) http.Error(w, err.Error(), err.HTTPStatus) return } return } -// ResourceName parses the name of the resource being accessed from the request. 
-// It relies on gorilla mux being used and will need to be updated if that -// changes -func ResourceName(r *http.Request) string { - params := mux.Vars(r) - if resource, ok := params["imageName"]; ok { - return resource +func buildAccessRecords(repo string, actions ...string) []auth.Access { + requiredAccess := make([]auth.Access, 0, len(actions)) + for _, action := range actions { + requiredAccess = append(requiredAccess, auth.Access{ + Resource: auth.Resource{ + Type: "repo", + Name: repo, + }, + Action: action, + }) } - return "" + return requiredAccess } diff --git a/utils/http_test.go b/utils/http_test.go index d3b7370435..6977594109 100644 --- a/utils/http_test.go +++ b/utils/http_test.go @@ -8,7 +8,7 @@ import ( "strings" "testing" - "github.com/endophage/go-tuf/signed" + "github.com/endophage/gotuf/signed" "github.com/docker/vetinari/errors" )