mirror of https://github.com/docker/docs.git

Updating Godeps (distribution/reference)

Signed-off-by: Nishant Totla <nishanttotla@gmail.com>

This commit is contained in:
parent 25241bdd08
commit 2363ea8077
@@ -38,6 +38,16 @@
 			"ImportPath": "github.com/davecgh/go-spew/spew",
 			"Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
 		},
+		{
+			"ImportPath": "github.com/docker/distribution/digest",
+			"Comment": "v2.3.0-rc.2-191-gee8450f",
+			"Rev": "ee8450ff13538d829a9a7909da9b979764d56865"
+		},
+		{
+			"ImportPath": "github.com/docker/distribution/reference",
+			"Comment": "v2.3.0-rc.2-191-gee8450f",
+			"Rev": "ee8450ff13538d829a9a7909da9b979764d56865"
+		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/discovery",
 			"Comment": "v1.4.1-10581-gb65fd8e",
@@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
139  Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go  generated vendored Normal file
@@ -0,0 +1,139 @@
package digest

import (
	"fmt"
	"hash"
	"io"
	"regexp"
	"strings"
)

const (
	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
	DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)

// Digest allows simple protection of hex formatted digest strings, prefixed
// by their algorithm. Strings of type Digest have some guarantee of being in
// the correct format and it provides quick access to the components of a
// digest string.
//
// The following is an example of the contents of Digest types:
//
// 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// This allows to abstract the digest behind this type and work only in those
// terms.
type Digest string

// NewDigest returns a Digest from alg and a hash.Hash object.
func NewDigest(alg Algorithm, h hash.Hash) Digest {
	return NewDigestFromBytes(alg, h.Sum(nil))
}

// NewDigestFromBytes returns a new digest from the byte contents of p.
// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
// functions. This is also useful for rebuilding digests from binary
// serializations.
func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
	return Digest(fmt.Sprintf("%s:%x", alg, p))
}

// NewDigestFromHex returns a Digest from alg and a the hex encoded digest.
func NewDigestFromHex(alg, hex string) Digest {
	return Digest(fmt.Sprintf("%s:%s", alg, hex))
}

// DigestRegexp matches valid digest types.
var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)

// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)

var (
	// ErrDigestInvalidFormat returned when digest format invalid.
	ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")

	// ErrDigestInvalidLength returned when digest has invalid length.
	ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")

	// ErrDigestUnsupported returned when the digest algorithm is unsupported.
	ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
)

// ParseDigest parses s and returns the validated digest object. An error will
// be returned if the format is invalid.
func ParseDigest(s string) (Digest, error) {
	d := Digest(s)

	return d, d.Validate()
}

// FromReader returns the most valid digest for the underlying content using
// the canonical digest algorithm.
func FromReader(rd io.Reader) (Digest, error) {
	return Canonical.FromReader(rd)
}

// FromBytes digests the input and returns a Digest.
func FromBytes(p []byte) Digest {
	return Canonical.FromBytes(p)
}

// Validate checks that the contents of d is a valid digest, returning an
// error if not.
func (d Digest) Validate() error {
	s := string(d)

	if !DigestRegexpAnchored.MatchString(s) {
		return ErrDigestInvalidFormat
	}

	i := strings.Index(s, ":")
	if i < 0 {
		return ErrDigestInvalidFormat
	}

	// case: "sha256:" with no hex.
	if i+1 == len(s) {
		return ErrDigestInvalidFormat
	}

	switch algorithm := Algorithm(s[:i]); algorithm {
	case SHA256, SHA384, SHA512:
		if algorithm.Size()*2 != len(s[i+1:]) {
			return ErrDigestInvalidLength
		}
		break
	default:
		return ErrDigestUnsupported
	}

	return nil
}

// Algorithm returns the algorithm portion of the digest. This will panic if
// the underlying digest is not in a valid format.
func (d Digest) Algorithm() Algorithm {
	return Algorithm(d[:d.sepIndex()])
}

// Hex returns the hex digest portion of the digest. This will panic if the
// underlying digest is not in a valid format.
func (d Digest) Hex() string {
	return string(d[d.sepIndex()+1:])
}

func (d Digest) String() string {
	return string(d)
}

func (d Digest) sepIndex() int {
	i := strings.Index(string(d), ":")

	if i < 0 {
		panic("could not find ':' in digest: " + d)
	}

	return i
}
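Editorial aside, not part of the commit above: a minimal sketch of how the Digest API vendored in digest.go could be exercised. It assumes the package is importable as github.com/docker/distribution/digest (the ImportPath recorded in the Godeps hunk) and that the stdlib sha256 implementation is linked into the binary, which the Algorithm.Hash note below says is the caller's responsibility; the input strings are made up for illustration.

package main

import (
	_ "crypto/sha256" // registers sha256 so the canonical algorithm is available

	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	// Compute a digest of in-memory bytes with the canonical algorithm (sha256).
	d := digest.FromBytes([]byte("hello world"))
	fmt.Println(d.Algorithm(), d.Hex())

	// ParseDigest enforces the "<algorithm>:<hex>" format and rejects bad input.
	if _, err := digest.ParseDigest("sha256:not-hex"); err != nil {
		fmt.Println("rejected:", err) // invalid checksum digest format
	}
}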
155  Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go  generated vendored Normal file
@@ -0,0 +1,155 @@
package digest

import (
	"crypto"
	"fmt"
	"hash"
	"io"
)

// Algorithm identifies and implementation of a digester by an identifier.
// Note the that this defines both the hash algorithm used and the string
// encoding.
type Algorithm string

// supported digest types
const (
	SHA256 Algorithm = "sha256" // sha256 with hex encoding
	SHA384 Algorithm = "sha384" // sha384 with hex encoding
	SHA512 Algorithm = "sha512" // sha512 with hex encoding

	// Canonical is the primary digest algorithm used with the distribution
	// project. Other digests may be used but this one is the primary storage
	// digest.
	Canonical = SHA256
)

var (
	// TODO(stevvooe): Follow the pattern of the standard crypto package for
	// registration of digests. Effectively, we are a registerable set and
	// common symbol access.

	// algorithms maps values to hash.Hash implementations. Other algorithms
	// may be available but they cannot be calculated by the digest package.
	algorithms = map[Algorithm]crypto.Hash{
		SHA256: crypto.SHA256,
		SHA384: crypto.SHA384,
		SHA512: crypto.SHA512,
	}
)

// Available returns true if the digest type is available for use. If this
// returns false, New and Hash will return nil.
func (a Algorithm) Available() bool {
	h, ok := algorithms[a]
	if !ok {
		return false
	}

	// check availability of the hash, as well
	return h.Available()
}

func (a Algorithm) String() string {
	return string(a)
}

// Size returns number of bytes returned by the hash.
func (a Algorithm) Size() int {
	h, ok := algorithms[a]
	if !ok {
		return 0
	}
	return h.Size()
}

// Set implemented to allow use of Algorithm as a command line flag.
func (a *Algorithm) Set(value string) error {
	if value == "" {
		*a = Canonical
	} else {
		// just do a type conversion, support is queried with Available.
		*a = Algorithm(value)
	}

	return nil
}

// New returns a new digester for the specified algorithm. If the algorithm
// does not have a digester implementation, nil will be returned. This can be
// checked by calling Available before calling New.
func (a Algorithm) New() Digester {
	return &digester{
		alg:  a,
		hash: a.Hash(),
	}
}

// Hash returns a new hash as used by the algorithm. If not available, the
// method will panic. Check Algorithm.Available() before calling.
func (a Algorithm) Hash() hash.Hash {
	if !a.Available() {
		// NOTE(stevvooe): A missing hash is usually a programming error that
		// must be resolved at compile time. We don't import in the digest
		// package to allow users to choose their hash implementation (such as
		// when using stevvooe/resumable or a hardware accelerated package).
		//
		// Applications that may want to resolve the hash at runtime should
		// call Algorithm.Available before call Algorithm.Hash().
		panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
	}

	return algorithms[a].New()
}

// FromReader returns the digest of the reader using the algorithm.
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
	digester := a.New()

	if _, err := io.Copy(digester.Hash(), rd); err != nil {
		return "", err
	}

	return digester.Digest(), nil
}

// FromBytes digests the input and returns a Digest.
func (a Algorithm) FromBytes(p []byte) Digest {
	digester := a.New()

	if _, err := digester.Hash().Write(p); err != nil {
		// Writes to a Hash should never fail. None of the existing
		// hash implementations in the stdlib or hashes vendored
		// here can return errors from Write. Having a panic in this
		// condition instead of having FromBytes return an error value
		// avoids unnecessary error handling paths in all callers.
		panic("write to hash function returned error: " + err.Error())
	}

	return digester.Digest()
}

// TODO(stevvooe): Allow resolution of verifiers using the digest type and
// this registration system.

// Digester calculates the digest of written data. Writes should go directly
// to the return value of Hash, while calling Digest will return the current
// value of the digest.
type Digester interface {
	Hash() hash.Hash // provides direct access to underlying hash instance.
	Digest() Digest
}

// digester provides a simple digester definition that embeds a hasher.
type digester struct {
	alg  Algorithm
	hash hash.Hash
}

func (d *digester) Hash() hash.Hash {
	return d.hash
}

func (d *digester) Digest() Digest {
	return NewDigest(d.alg, d.hash)
}
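Editorial aside, not part of the commit: a hedged sketch of the Algorithm/Digester workflow defined above. It assumes the vendored import path from the Godeps hunk and blank-imports crypto/sha256 because, per the NOTE in Algorithm.Hash, the package itself does not link in a hash implementation; the sample content is invented.

package main

import (
	_ "crypto/sha256" // links in sha256 so Available() reports true

	"fmt"
	"io"
	"strings"

	"github.com/docker/distribution/digest"
)

func main() {
	alg := digest.Canonical // sha256, per the constants above
	if !alg.Available() {
		panic("sha256 implementation not linked into the binary")
	}

	// Stream content into a Digester through its hash.Hash write sink,
	// then read off the accumulated digest.
	dgstr := alg.New()
	if _, err := io.Copy(dgstr.Hash(), strings.NewReader("some layer bytes")); err != nil {
		panic(err)
	}
	fmt.Println(dgstr.Digest())

	// Algorithm.FromReader wraps the same steps in a single call.
	d, err := alg.FromReader(strings.NewReader("some layer bytes"))
	if err != nil {
		panic(err)
	}
	fmt.Println(d == dgstr.Digest()) // true
}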
42  Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go  generated vendored Normal file
@@ -0,0 +1,42 @@
// Package digest provides a generalized type to opaquely represent message
// digests and their operations within the registry. The Digest type is
// designed to serve as a flexible identifier in a content-addressable system.
// More importantly, it provides tools and wrappers to work with
// hash.Hash-based digests with little effort.
//
// Basics
//
// The format of a digest is simply a string with two parts, dubbed the
// "algorithm" and the "digest", separated by a colon:
//
// 	<algorithm>:<digest>
//
// An example of a sha256 digest representation follows:
//
// 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// In this case, the string "sha256" is the algorithm and the hex bytes are
// the "digest".
//
// Because the Digest type is simply a string, once a valid Digest is
// obtained, comparisons are cheap, quick and simple to express with the
// standard equality operator.
//
// Verification
//
// The main benefit of using the Digest type is simple verification against a
// given digest. The Verifier interface, modeled after the stdlib hash.Hash
// interface, provides a common write sink for digest verification. After
// writing is complete, calling the Verifier.Verified method will indicate
// whether or not the stream of bytes matches the target digest.
//
// Missing Features
//
// In addition to the above, we intend to add the following features to this
// package:
//
// 1. A Digester type that supports write sink digest calculation.
//
// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
//
package digest
245  Godeps/_workspace/src/github.com/docker/distribution/digest/set.go  generated vendored Normal file
@@ -0,0 +1,245 @@
package digest

import (
	"errors"
	"sort"
	"strings"
	"sync"
)

var (
	// ErrDigestNotFound is used when a matching digest
	// could not be found in a set.
	ErrDigestNotFound = errors.New("digest not found")

	// ErrDigestAmbiguous is used when multiple digests
	// are found in a set. None of the matching digests
	// should be considered valid matches.
	ErrDigestAmbiguous = errors.New("ambiguous digest string")
)

// Set is used to hold a unique set of digests which
// may be easily referenced by easily referenced by a string
// representation of the digest as well as short representation.
// The uniqueness of the short representation is based on other
// digests in the set. If digests are omitted from this set,
// collisions in a larger set may not be detected, therefore it
// is important to always do short representation lookups on
// the complete set of digests. To mitigate collisions, an
// appropriately long short code should be used.
type Set struct {
	mutex   sync.RWMutex
	entries digestEntries
}

// NewSet creates an empty set of digests
// which may have digests added.
func NewSet() *Set {
	return &Set{
		entries: digestEntries{},
	}
}

// checkShortMatch checks whether two digests match as either whole
// values or short values. This function does not test equality,
// rather whether the second value could match against the first
// value.
func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
	if len(hex) == len(shortHex) {
		if hex != shortHex {
			return false
		}
		if len(shortAlg) > 0 && string(alg) != shortAlg {
			return false
		}
	} else if !strings.HasPrefix(hex, shortHex) {
		return false
	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
		return false
	}
	return true
}

// Lookup looks for a digest matching the given string representation.
// If no digests could be found ErrDigestNotFound will be returned
// with an empty digest value. If multiple matches are found
// ErrDigestAmbiguous will be returned with an empty digest value.
func (dst *Set) Lookup(d string) (Digest, error) {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	if len(dst.entries) == 0 {
		return "", ErrDigestNotFound
	}
	var (
		searchFunc func(int) bool
		alg        Algorithm
		hex        string
	)
	dgst, err := ParseDigest(d)
	if err == ErrDigestInvalidFormat {
		hex = d
		searchFunc = func(i int) bool {
			return dst.entries[i].val >= d
		}
	} else {
		hex = dgst.Hex()
		alg = dgst.Algorithm()
		searchFunc = func(i int) bool {
			if dst.entries[i].val == hex {
				return dst.entries[i].alg >= alg
			}
			return dst.entries[i].val >= hex
		}
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
		return "", ErrDigestNotFound
	}
	if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
		return dst.entries[idx].digest, nil
	}
	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
		return "", ErrDigestAmbiguous
	}

	return dst.entries[idx].digest, nil
}

// Add adds the given digest to the set. An error will be returned
// if the given digest is invalid. If the digest already exists in the
// set, this operation will be a no-op.
func (dst *Set) Add(d Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) {
		dst.entries = append(dst.entries, entry)
		return nil
	} else if dst.entries[idx].digest == d {
		return nil
	}

	entries := append(dst.entries, nil)
	copy(entries[idx+1:], entries[idx:len(entries)-1])
	entries[idx] = entry
	dst.entries = entries
	return nil
}

// Remove removes the given digest from the set. An err will be
// returned if the given digest is invalid. If the digest does
// not exist in the set, this operation will be a no-op.
func (dst *Set) Remove(d Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	// Not found if idx is after or value at idx is not digest
	if idx == len(dst.entries) || dst.entries[idx].digest != d {
		return nil
	}

	entries := dst.entries
	copy(entries[idx:], entries[idx+1:])
	entries = entries[:len(entries)-1]
	dst.entries = entries

	return nil
}

// All returns all the digests in the set
func (dst *Set) All() []Digest {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	retValues := make([]Digest, len(dst.entries))
	for i := range dst.entries {
		retValues[i] = dst.entries[i].digest
	}

	return retValues
}

// ShortCodeTable returns a map of Digest to unique short codes. The
// length represents the minimum value, the maximum length may be the
// entire value of digest if uniqueness cannot be achieved without the
// full value. This function will attempt to make short codes as short
// as possible to be unique.
func ShortCodeTable(dst *Set, length int) map[Digest]string {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	m := make(map[Digest]string, len(dst.entries))
	l := length
	resetIdx := 0
	for i := 0; i < len(dst.entries); i++ {
		var short string
		extended := true
		for extended {
			extended = false
			if len(dst.entries[i].val) <= l {
				short = dst.entries[i].digest.String()
			} else {
				short = dst.entries[i].val[:l]
				for j := i + 1; j < len(dst.entries); j++ {
					if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
						if j > resetIdx {
							resetIdx = j
						}
						extended = true
					} else {
						break
					}
				}
				if extended {
					l++
				}
			}
		}
		m[dst.entries[i].digest] = short
		if i >= resetIdx {
			l = length
		}
	}
	return m
}

type digestEntry struct {
	alg    Algorithm
	val    string
	digest Digest
}

type digestEntries []*digestEntry

func (d digestEntries) Len() int {
	return len(d)
}

func (d digestEntries) Less(i, j int) bool {
	if d[i].val != d[j].val {
		return d[i].val < d[j].val
	}
	return d[i].alg < d[j].alg
}

func (d digestEntries) Swap(i, j int) {
	d[i], d[j] = d[j], d[i]
}
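Editorial aside, not part of the commit: a hedged sketch of how the Set above resolves short digest prefixes. It assumes the vendored import path and the sha256 blank import discussed earlier; the layer contents and the prefix length of 8 are arbitrary choices for illustration.

package main

import (
	_ "crypto/sha256"
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	set := digest.NewSet()
	a := digest.FromBytes([]byte("layer-a"))
	b := digest.FromBytes([]byte("layer-b"))
	for _, d := range []digest.Digest{a, b} {
		if err := set.Add(d); err != nil {
			panic(err)
		}
	}

	// Lookup resolves full digests as well as unambiguous hex prefixes.
	got, err := set.Lookup(a.Hex()[:12])
	if err != nil {
		panic(err)
	}
	fmt.Println(got == a) // true, unless the two 12-character prefixes collide

	// ShortCodeTable picks the shortest unique prefix (at least 8 chars here)
	// for every digest currently in the set.
	for d, short := range digest.ShortCodeTable(set, 8) {
		fmt.Println(short, "->", d)
	}
}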
44  Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go  generated vendored Normal file
@@ -0,0 +1,44 @@
package digest

import (
	"hash"
	"io"
)

// Verifier presents a general verification interface to be used with message
// digests and other byte stream verifications. Users instantiate a Verifier
// from one of the various methods, write the data under test to it then check
// the result with the Verified method.
type Verifier interface {
	io.Writer

	// Verified will return true if the content written to Verifier matches
	// the digest.
	Verified() bool
}

// NewDigestVerifier returns a verifier that compares the written bytes
// against a passed in digest.
func NewDigestVerifier(d Digest) (Verifier, error) {
	if err := d.Validate(); err != nil {
		return nil, err
	}

	return hashVerifier{
		hash:   d.Algorithm().Hash(),
		digest: d,
	}, nil
}

type hashVerifier struct {
	digest Digest
	hash   hash.Hash
}

func (hv hashVerifier) Write(p []byte) (n int, err error) {
	return hv.hash.Write(p)
}

func (hv hashVerifier) Verified() bool {
	return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
}
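Editorial aside, not part of the commit: a hedged sketch of the verification flow described in doc.go, using the Verifier defined above. It assumes the vendored import path and a sha256 blank import; the payload is invented.

package main

import (
	"bytes"
	_ "crypto/sha256"
	"fmt"
	"io"

	"github.com/docker/distribution/digest"
)

func main() {
	payload := []byte("manifest or layer bytes")
	expected := digest.FromBytes(payload)

	// Build a verifier for the expected digest and stream the content into it.
	verifier, err := digest.NewDigestVerifier(expected)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(verifier, bytes.NewReader(payload)); err != nil {
		panic(err)
	}
	fmt.Println(verifier.Verified()) // true only if the written bytes match the digest
}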
334  Godeps/_workspace/src/github.com/docker/distribution/reference/reference.go  generated vendored Normal file
@@ -0,0 +1,334 @@
// Package reference provides a general type to represent any way of referencing images within the registry.
// Its main purpose is to abstract tags and digests (content-addressable hash).
//
// Grammar
//
// 	reference                  := name [ ":" tag ] [ "@" digest ]
// 	name                       := [hostname '/'] component ['/' component]*
// 	hostname                   := hostcomponent ['.' hostcomponent]* [':' port-number]
// 	hostcomponent              := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// 	port-number                := /[0-9]+/
// 	component                  := alpha-numeric [separator alpha-numeric]*
// 	alpha-numeric              := /[a-z0-9]+/
// 	separator                  := /[_.]|__|[-]*/
//
// 	tag                        := /[\w][\w.-]{0,127}/
//
// 	digest                     := digest-algorithm ":" digest-hex
// 	digest-algorithm           := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
// 	digest-algorithm-separator := /[+.-_]/
// 	digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
// 	digest-hex                 := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
package reference

import (
	"errors"
	"fmt"

	"github.com/docker/distribution/digest"
)

const (
	// NameTotalLengthMax is the maximum total number of characters in a repository name.
	NameTotalLengthMax = 255
)

var (
	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
	ErrReferenceInvalidFormat = errors.New("invalid reference format")

	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
	ErrTagInvalidFormat = errors.New("invalid tag format")

	// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
	ErrDigestInvalidFormat = errors.New("invalid digest format")

	// ErrNameEmpty is returned for empty, invalid repository names.
	ErrNameEmpty = errors.New("repository name must have at least one component")

	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
)

// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
type Reference interface {
	// String returns the full reference
	String() string
}

// Field provides a wrapper type for resolving correct reference types when
// working with encoding.
type Field struct {
	reference Reference
}

// AsField wraps a reference in a Field for encoding.
func AsField(reference Reference) Field {
	return Field{reference}
}

// Reference unwraps the reference type from the field to
// return the Reference object. This object should be
// of the appropriate type to further check for different
// reference types.
func (f Field) Reference() Reference {
	return f.reference
}

// MarshalText serializes the field to byte text which
// is the string of the reference.
func (f Field) MarshalText() (p []byte, err error) {
	return []byte(f.reference.String()), nil
}

// UnmarshalText parses text bytes by invoking the
// reference parser to ensure the appropriately
// typed reference object is wrapped by field.
func (f *Field) UnmarshalText(p []byte) error {
	r, err := Parse(string(p))
	if err != nil {
		return err
	}

	f.reference = r
	return nil
}

// Named is an object with a full name
type Named interface {
	Reference
	Name() string
}

// Tagged is an object which has a tag
type Tagged interface {
	Reference
	Tag() string
}

// NamedTagged is an object including a name and tag.
type NamedTagged interface {
	Named
	Tag() string
}

// Digested is an object which has a digest
// in which it can be referenced by
type Digested interface {
	Reference
	Digest() digest.Digest
}

// Canonical reference is an object with a fully unique
// name including a name with hostname and digest
type Canonical interface {
	Named
	Digest() digest.Digest
}

// SplitHostname splits a named reference into a
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
func SplitHostname(named Named) (string, string) {
	name := named.Name()
	match := anchoredNameRegexp.FindStringSubmatch(name)
	if match == nil || len(match) != 3 {
		return "", name
	}
	return match[1], match[2]
}

// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
	matches := ReferenceRegexp.FindStringSubmatch(s)
	if matches == nil {
		if s == "" {
			return nil, ErrNameEmpty
		}
		// TODO(dmcgowan): Provide more specific and helpful error
		return nil, ErrReferenceInvalidFormat
	}

	if len(matches[1]) > NameTotalLengthMax {
		return nil, ErrNameTooLong
	}

	ref := reference{
		name: matches[1],
		tag:  matches[2],
	}
	if matches[3] != "" {
		var err error
		ref.digest, err = digest.ParseDigest(matches[3])
		if err != nil {
			return nil, err
		}
	}

	r := getBestReferenceType(ref)
	if r == nil {
		return nil, ErrNameEmpty
	}

	return r, nil
}

// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name, otherwise an error is
// returned.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) {
	ref, err := Parse(s)
	if err != nil {
		return nil, err
	}
	named, isNamed := ref.(Named)
	if !isNamed {
		return nil, fmt.Errorf("reference %s has no name", ref.String())
	}
	return named, nil
}

// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
	if len(name) > NameTotalLengthMax {
		return nil, ErrNameTooLong
	}
	if !anchoredNameRegexp.MatchString(name) {
		return nil, ErrReferenceInvalidFormat
	}
	return repository(name), nil
}

// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
func WithTag(name Named, tag string) (NamedTagged, error) {
	if !anchoredTagRegexp.MatchString(tag) {
		return nil, ErrTagInvalidFormat
	}
	return taggedReference{
		name: name.Name(),
		tag:  tag,
	}, nil
}

// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
	if !anchoredDigestRegexp.MatchString(digest.String()) {
		return nil, ErrDigestInvalidFormat
	}
	return canonicalReference{
		name:   name.Name(),
		digest: digest,
	}, nil
}

func getBestReferenceType(ref reference) Reference {
	if ref.name == "" {
		// Allow digest only references
		if ref.digest != "" {
			return digestReference(ref.digest)
		}
		return nil
	}
	if ref.tag == "" {
		if ref.digest != "" {
			return canonicalReference{
				name:   ref.name,
				digest: ref.digest,
			}
		}
		return repository(ref.name)
	}
	if ref.digest == "" {
		return taggedReference{
			name: ref.name,
			tag:  ref.tag,
		}
	}

	return ref
}

type reference struct {
	name   string
	tag    string
	digest digest.Digest
}

func (r reference) String() string {
	return r.name + ":" + r.tag + "@" + r.digest.String()
}

func (r reference) Name() string {
	return r.name
}

func (r reference) Tag() string {
	return r.tag
}

func (r reference) Digest() digest.Digest {
	return r.digest
}

type repository string

func (r repository) String() string {
	return string(r)
}

func (r repository) Name() string {
	return string(r)
}

type digestReference digest.Digest

func (d digestReference) String() string {
	return d.String()
}

func (d digestReference) Digest() digest.Digest {
	return digest.Digest(d)
}

type taggedReference struct {
	name string
	tag  string
}

func (t taggedReference) String() string {
	return t.name + ":" + t.tag
}

func (t taggedReference) Name() string {
	return t.name
}

func (t taggedReference) Tag() string {
	return t.tag
}

type canonicalReference struct {
	name   string
	digest digest.Digest
}

func (c canonicalReference) String() string {
	return c.name + "@" + c.digest.String()
}

func (c canonicalReference) Name() string {
	return c.name
}

func (c canonicalReference) Digest() digest.Digest {
	return c.digest
}
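Editorial aside, not part of the commit: a hedged sketch of the reference grammar above in use, assuming the package is importable as github.com/docker/distribution/reference (the ImportPath recorded in the Godeps hunk). The registry hostname and repository names are hypothetical examples.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Parse a tagged reference that carries an explicit registry hostname.
	ref, err := reference.ParseNamed("myregistry.example.com:5000/team/app:1.0")
	if err != nil {
		panic(err)
	}
	hostname, remainder := reference.SplitHostname(ref)
	fmt.Println(hostname, remainder) // myregistry.example.com:5000 team/app

	if tagged, ok := ref.(reference.NamedTagged); ok {
		fmt.Println(tagged.Tag()) // 1.0
	}

	// Build a tagged reference programmatically instead of parsing one.
	named, err := reference.WithName("library/ubuntu")
	if err != nil {
		panic(err)
	}
	taggedRef, err := reference.WithTag(named, "14.04")
	if err != nil {
		panic(err)
	}
	fmt.Println(taggedRef.String()) // library/ubuntu:14.04
}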
124  Godeps/_workspace/src/github.com/docker/distribution/reference/regexp.go  generated vendored Normal file
@@ -0,0 +1,124 @@
package reference

import "regexp"

var (
	// alphaNumericRegexp defines the alpha numeric atom, typically a
	// component of names. This only allows lower case characters and digits.
	alphaNumericRegexp = match(`[a-z0-9]+`)

	// separatorRegexp defines the separators allowed to be embedded in name
	// components. This allow one period, one or two underscore and multiple
	// dashes.
	separatorRegexp = match(`(?:[._]|__|[-]*)`)

	// nameComponentRegexp restricts registry path component names to start
	// with at least one letter or number, with following parts able to be
	// separated by one period, one or two underscore and multiple dashes.
	nameComponentRegexp = expression(
		alphaNumericRegexp,
		optional(repeated(separatorRegexp, alphaNumericRegexp)))

	// hostnameComponentRegexp restricts the registry hostname component of a
	// repository name to start with a component as defined by hostnameRegexp
	// and followed by an optional port.
	hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

	// hostnameRegexp defines the structure of potential hostname components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names.
	hostnameRegexp = expression(
		hostnameComponentRegexp,
		optional(repeated(literal(`.`), hostnameComponentRegexp)),
		optional(literal(`:`), match(`[0-9]+`)))

	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
	TagRegexp = match(`[\w][\w.-]{0,127}`)

	// anchoredTagRegexp matches valid tag names, anchored at the start and
	// end of the matched string.
	anchoredTagRegexp = anchored(TagRegexp)

	// DigestRegexp matches valid digests.
	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

	// anchoredDigestRegexp matches valid digests, anchored at the start and
	// end of the matched string.
	anchoredDigestRegexp = anchored(DigestRegexp)

	// NameRegexp is the format for the name component of references. The
	// regexp has capturing groups for the hostname and name part omitting
	// the separating forward slash from either.
	NameRegexp = expression(
		optional(hostnameRegexp, literal(`/`)),
		nameComponentRegexp,
		optional(repeated(literal(`/`), nameComponentRegexp)))

	// anchoredNameRegexp is used to parse a name value, capturing the
	// hostname and trailing components.
	anchoredNameRegexp = anchored(
		optional(capture(hostnameRegexp), literal(`/`)),
		capture(nameComponentRegexp,
			optional(repeated(literal(`/`), nameComponentRegexp))))

	// ReferenceRegexp is the full supported format of a reference. The regexp
	// is anchored and has capturing groups for name, tag, and digest
	// components.
	ReferenceRegexp = anchored(capture(NameRegexp),
		optional(literal(":"), capture(TagRegexp)),
		optional(literal("@"), capture(DigestRegexp)))
)

// match compiles the string to a regular expression.
var match = regexp.MustCompile

// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
	re := match(regexp.QuoteMeta(s))

	if _, complete := re.LiteralPrefix(); !complete {
		panic("must be a literal")
	}

	return re
}

// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
	var s string
	for _, re := range res {
		s += re.String()
	}

	return match(s)
}

// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `?`)
}

// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `+`)
}

// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(?:` + expression(res...).String() + `)`)
}

// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(` + expression(res...).String() + `)`)
}

// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`^` + expression(res...).String() + `$`)
}
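Editorial aside, not part of the commit: a hedged sketch of what the composed ReferenceRegexp above ultimately matches. The reference string is invented; the digest hex value is the example used in the package's own doc comments.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// ReferenceRegexp is anchored and captures name, tag, and digest in that
	// order, which is how Parse in reference.go consumes the submatches.
	groups := reference.ReferenceRegexp.FindStringSubmatch(
		"registry.example.com/app:2.1@sha256:" +
			"7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
	if groups == nil {
		panic("reference did not match")
	}
	fmt.Println(groups[1]) // registry.example.com/app
	fmt.Println(groups[2]) // 2.1
	fmt.Println(groups[3]) // sha256:7173b809... (the full digest)
}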
@@ -346,7 +346,7 @@ func TestCreateContainer(t *testing.T) {
 	id = "id3"
 	apiClient.On("ImageList", mock.Anything, mock.AnythingOfType("ImageListOptions")).Return([]types.Image{}, nil).Once()
 	mockConfig.CPUShares = int64(math.Ceil(float64(config.CPUShares*1024) / float64(mockInfo.NCPU)))
-	apiClient.On("ImagePull", mock.Anything, types.ImagePullOptions{ImageID: config.Image + ":latest"}, mock.Anything).Return(readCloser, nil).Once()
+	apiClient.On("ImagePull", mock.Anything, types.ImagePullOptions{ImageID: config.Image}, mock.Anything).Return(readCloser, nil).Once()
 	// FIXMEENGINEAPI : below should return an engine-api error, or something custom
 	apiClient.On("ContainerCreate", mock.Anything, &mockConfig.Config, &mockConfig.HostConfig, &mockConfig.NetworkingConfig, name).Return(types.ContainerCreateResponse{}, dockerclient.ErrImageNotFound).Once()
 	// FIXMEENGINEAPI : below should return an engine-api error, or something custom