commit 33bcba75bb (parent 2081d3dcd5)
Author: Daniel J Walsh <dwalsh@redhat.com>
Date:   2020-09-11 10:24:36 -04:00
GPG Key ID: A2DF901DABE2C028

    Fix problems found by codespell

    Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>

35 changed files with 56 additions and 56 deletions


@@ -194,7 +194,7 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
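
The TryReusingBlob contract above (repeated across the transports touched by this commit) is precise enough to sketch a caller. A minimal illustration, not code from this repository — reuseOrUpload and its logging are hypothetical:

```go
package example

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/types"
)

// reuseOrUpload shows the documented contract: (true, info, nil) on reuse,
// (false, types.BlobInfo{}, nil) when the transport cannot reuse the blob,
// and a non-nil error only on an unexpected failure.
func reuseOrUpload(ctx context.Context, dest types.ImageDestination, info types.BlobInfo, cache types.BlobInfoCache) error {
	reused, reusedInfo, err := dest.TryReusingBlob(ctx, info, cache, true)
	if err != nil {
		return err // unexpected failure, not merely "blob absent"
	}
	if reused {
		// With canSubstitute=true the returned info may describe an
		// equivalent blob rather than the requested one, so report the
		// digest and size actually reused.
		fmt.Printf("reused %s (%d bytes)\n", reusedInfo.Digest, reusedInfo.Size)
		return nil
	}
	// Not reusable: fall back to dest.PutBlob with the full stream.
	return nil
}
```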


@@ -50,7 +50,7 @@ type archiveReference struct {
 	// Must not be set if ref is set.
 	sourceIndex int
 	// If not nil, must have been created from path (but archiveReader.path may point at a temporary
-	// file, not necesarily path precisely).
+	// file, not necessarily path precisely).
 	archiveReader *tarfile.Reader
 	// If not nil, must have been created for path
 	archiveWriter *tarfile.Writer


@@ -32,7 +32,7 @@ func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
 		"registry.example.com/ns/stream",
 		"registry.example.com/ns",
 		"registry.example.com",
-		sha256digestHex, // Accept also unqualified hexdigest valies, they are in principle possible host names.
+		sha256digestHex, // Accept also unqualified hexdigest values, they are in principle possible host names.
 	} {
 		err := Transport.ValidatePolicyConfigurationScope(scope)
 		assert.NoError(t, err, scope)


@@ -284,7 +284,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
@@ -335,7 +335,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
 		// On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
 		// Even worse, docker/distribution does not actually reasonably implement canceling uploads
 		// (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
-		// so, be a nice client and don't create unnecesary upload sessions on the server.
+		// so, be a nice client and don't create unnecessary upload sessions on the server.
 		exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
 		if err != nil {
 			logrus.Debugf("... Failed: %v", err)
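
The "don't create unnecessary upload sessions" reasoning above depends on a cheap existence probe; d.blobExists is internal to this repository. A rough standalone sketch of the same idea against the standard docker/distribution HTTP API (a HEAD request to the blobs endpoint; authentication omitted):

```go
package main

import (
	"fmt"
	"net/http"
)

// blobExists probes a registry for a blob without opening an upload session.
// HEAD /v2/<name>/blobs/<digest> is part of the distribution API.
func blobExists(client *http.Client, registry, repo, digest string) (bool, int64, error) {
	resp, err := client.Head(fmt.Sprintf("https://%s/v2/%s/blobs/%s", registry, repo, digest))
	if err != nil {
		return false, -1, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		return true, resp.ContentLength, nil
	}
	return false, -1, nil
}

func main() {
	exists, size, err := blobExists(http.DefaultClient, "registry.example.com", "ns/app",
		"sha256:0000000000000000000000000000000000000000000000000000000000000000")
	fmt.Println(exists, size, err)
}
```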


@@ -53,7 +53,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
 	// contain the image, it will be used for all future pull actions. Always try the
 	// non-mirror original location last; this both transparently handles the case
 	// of no mirrors configured, and ensures we return the error encountered when
-	// acessing the upstream location if all endpoints fail.
+	// accessing the upstream location if all endpoints fail.
 	pullSources, err := registry.PullSourcesFromReference(ref.ref)
 	if err != nil {
 		return nil, err
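
A hedged sketch of the fallback order the comment above describes, with plain endpoint strings standing in for the sysregistriesv2 pull sources:

```go
package main

import (
	"errors"
	"fmt"
)

// pullFromSources tries every source in order, mirrors first and the upstream
// (non-mirror) location last, so the error returned when everything fails is
// the one encountered at the upstream endpoint.
func pullFromSources(sources []string, pull func(string) error) error {
	err := errors.New("no pull sources configured")
	for _, src := range sources {
		if err = pull(src); err == nil {
			return nil // this endpoint works; use it for all future pulls
		}
	}
	return err // the last attempt was the upstream location
}

func main() {
	sources := []string{"mirror-1.example.com", "mirror-2.example.com", "registry.example.com"}
	err := pullFromSources(sources, func(s string) error {
		return fmt.Errorf("%s: unreachable", s)
	})
	fmt.Println(err) // reports registry.example.com, the upstream
}
```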


@@ -94,7 +94,7 @@ func (d *Destination) HasThreadSafePutBlob() bool {
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
 	// Ouch, we need to stream the blob into a temporary file just to determine the size.
-	// When the layer is decompressed, we also have to generate the digest on uncompressed datas.
+	// When the layer is decompressed, we also have to generate the digest on uncompressed data.
 	if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
 		logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
 		streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
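
The pattern above — buffer a stream of unknown length while hashing it — can be shown in isolation. A self-contained sketch using the same go-digest library (bufferBlob and its file handling are illustrative, not this repository's code):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"

	"github.com/opencontainers/go-digest"
)

// bufferBlob copies a stream to a temporary file while hashing it, so both
// the size and the digest are known before the blob is written out for real.
func bufferBlob(stream io.Reader) (path string, size int64, dgst digest.Digest, err error) {
	tmp, err := ioutil.TempFile("", "blob")
	if err != nil {
		return "", -1, "", err
	}
	digester := digest.Canonical.Digester() // sha256 by default
	size, err = io.Copy(io.MultiWriter(tmp, digester.Hash()), stream)
	tmp.Close()
	if err != nil {
		os.Remove(tmp.Name())
		return "", -1, "", err
	}
	return tmp.Name(), size, digester.Digest(), nil
}

func main() {
	path, size, dgst, _ := bufferBlob(strings.NewReader("layer data"))
	defer os.Remove(path)
	fmt.Println(size, dgst)
}
```
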
@@ -159,7 +159,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {


@@ -287,7 +287,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
 	// In particular, because the v2s2 manifest being generated uses
 	// DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
 	// layers not their _actual_ digest. The result is that copy/... will
-	// be verifing a "digest" which is not the actual layer's digest (but
+	// be verifying a "digest" which is not the actual layer's digest (but
 	// is instead the DiffID).
 	uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
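
The DiffID distinction above is worth making concrete: a DiffID is the digest of the uncompressed layer stream. A sketch under that definition, reusing the compression.AutoDecompress helper this code calls (the file name is an example):

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/containers/image/v5/pkg/compression"
	"github.com/opencontainers/go-digest"
)

// diffID hashes the *uncompressed* form of a possibly-compressed layer
// stream, which is what a manifest built from DiffIDs is checked against.
func diffID(layer io.Reader) (digest.Digest, error) {
	uncompressed, _, err := compression.AutoDecompress(layer)
	if err != nil {
		return "", err
	}
	defer uncompressed.Close()
	return digest.Canonical.FromReader(uncompressed)
}

func main() {
	f, err := os.Open("layer.tar.gz") // hypothetical compressed layer
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	fmt.Println(diffID(f))
}
```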


@@ -48,7 +48,7 @@ func NewWriter(dest io.Writer) *Writer {
 }
 
 // lock does some sanity checks and locks the Writer.
-// If this function suceeds, the caller must call w.unlock.
+// If this function succeeds, the caller must call w.unlock.
 // Do not use Writer.mutex directly.
 func (w *Writer) lock() error {
 	w.mutex.Lock()
@@ -67,7 +67,7 @@ func (w *Writer) unlock() {
 
 // tryReusingBlobLocked checks whether the transport already contains, a blob, and if so, returns its metadata.
 // info.Digest must not be empty.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // The caller must have locked the Writer.
 func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
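
Since lock's contract obliges the caller to call w.unlock only after a successful lock, callers presumably pair the two with defer. A self-contained imitation (the lowercase writer type, its closed flag, and someOperation are illustrative, not this repository's code):

```go
package main

import (
	"fmt"
	"sync"
)

// writer mimics the locking discipline documented above: lock may fail, and
// unlock must be called only when lock succeeded.
type writer struct {
	mutex  sync.Mutex
	closed bool
}

func (w *writer) lock() error {
	w.mutex.Lock()
	if w.closed {
		w.mutex.Unlock()
		return fmt.Errorf("internal error: using a closed writer")
	}
	return nil
}

func (w *writer) unlock() { w.mutex.Unlock() }

func (w *writer) someOperation() error {
	if err := w.lock(); err != nil {
		return err // lock failed; unlock must NOT be called
	}
	defer w.unlock() // lock succeeded: the caller is obliged to unlock
	// ... work while holding the lock ...
	return nil
}

func main() {
	w := &writer{}
	fmt.Println(w.someOperation()) // <nil>
}
```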


@@ -196,7 +196,7 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
 	return ""
 }
 
-// signatureStorageURL returns an URL usable for acessing signature index in base with known manifestDigest, or nil if not applicable.
+// signatureStorageURL returns an URL usable for accessing signature index in base with known manifestDigest, or nil if not applicable.
 // Returns nil iff base == nil.
 // NOTE: Keep this in sync with docs/signature-protocols.md!
 func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {


@@ -639,7 +639,7 @@ func TestParseNamed(t *testing.T) {
 			failf("error parsing name: %s", err)
 			continue
 		} else if err == nil && testcase.err != nil {
-			failf("parsing succeded: expected error %v", testcase.err)
+			failf("parsing succeeded: expected error %v", testcase.err)
 			continue
 		} else if err != testcase.err {
 			failf("unexpected error %v, expected %v", err, testcase.err)


@@ -86,7 +86,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {


@@ -177,7 +177,7 @@ One of the following alternatives are supported:
 ```json
 {"type":"matchRepoDigestOrExact"}
 ```
 
-- The identity in the signature must be in the same repository as the image identity. This is useful e.g. to pull an image using the `:latest` tag when the image is signed with a tag specifing an exact image version.
+- The identity in the signature must be in the same repository as the image identity. This is useful e.g. to pull an image using the `:latest` tag when the image is signed with a tag specifying an exact image version.
 ```json
 {"type":"matchRepository"}


@@ -224,7 +224,7 @@ The contents of this string is not defined in detail; however each implementatio
 Consumers of container signatures MAY recognize specific values or sets of values of `optional.creator`
 (perhaps augmented with `optional.timestamp`),
 and MAY change their processing of the signature based on these values
-(usually to acommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively),
+(usually to accommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively),
 as long as the semantics of the invalid document, as created by such an implementation, is clear.
 
 If consumers of signatures do change their behavior based on the `optional.creator` value,


@@ -48,7 +48,7 @@ where _digest-algo_`:`_digest-value_ is a manifest digest usable for referencing
 the signature storage is always disambiguated using digest references).
 Note that in the URLs used for signatures,
 _digest-algo_ and _digest-value_ are separated using the `=` character,
-not `:` like when acessing the manifest using the docker/distribution API.
+not `:` like when accessing the manifest using the docker/distribution API.
 
 Within the URL, _index_ is a decimal integer (in the canonical form), starting with 1.
 Signatures are stored at URLs with successive _index_ values; to read all of them, start with _index_=1,
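
A small sketch of just the two conventions stated above — `=` between _digest-algo_ and _digest-value_, and a decimal index starting at 1; the full URL layout around these pieces is defined by this document, not reconstructed here:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

// signaturePathSuffix renders digest-algo=digest-value followed by the
// 1-based signature index; the digest below is a dummy all-zero sha256.
func signaturePathSuffix(manifestDigest digest.Digest, index int) string {
	return fmt.Sprintf("%s=%s/signature-%d", manifestDigest.Algorithm(), manifestDigest.Hex(), index)
}

func main() {
	d := digest.Digest("sha256:" + strings.Repeat("0", 64))
	for i := 1; i <= 2; i++ {
		fmt.Println(signaturePathSuffix(d, i)) // .../signature-1, then .../signature-2
	}
}
```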


@@ -131,7 +131,7 @@ var baseVariants = map[string]string{
 	"arm64": "v8",
 }
 
-// WantedPlatforms returns all compatible platforms with the platform specifics possibly overriden by user,
+// WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user,
 // the most compatible platform is first.
 // If some option (arch, os, variant) is not present, a value from current platform is detected.
 func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {


@@ -103,7 +103,7 @@ func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Read
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {


@@ -186,7 +186,7 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {


@@ -251,7 +251,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, conf
 // getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
 // clientauth.Info object contain both user identification and server identification. We want different precedence orders for
 // both, so we have to split the objects and merge them separately
-// we want this order of precedence for user identifcation
+// we want this order of precedence for user identification
 // 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
 // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
 // 3. if there is not enough information to idenfity the user, load try the ~/.kubernetes_auth file


@@ -410,7 +410,7 @@ func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reade
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {


@@ -339,7 +339,7 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {


@@ -117,7 +117,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
 		}
 	}
 
-	// TODO(keyring): if we ever reenable the keyring support, we had to
+	// TODO(keyring): if we ever re-enable the keyring support, we had to
 	// query all credentials from the keyring here.
 	return authConfigs, nil
@@ -245,7 +245,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
 	})
 }
 
-// getPathToAuth gets the path of the auth.json file used for reading and writting credentials
+// getPathToAuth gets the path of the auth.json file used for reading and writing credentials
 // returns the path, and a bool specifies whether the file is in legacy format
 func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
 	if sys != nil {


@@ -63,7 +63,7 @@ func removeAllAuthFromKernelKeyring() error {
 		// split string "type;uid;gid;perm;description"
 		keyAttrs := strings.SplitN(keyAttr, ";", 5)
 		if len(keyAttrs) < 5 {
-			return errors.Errorf("Key attributes of %d are not avaliable", k.ID())
+			return errors.Errorf("Key attributes of %d are not available", k.ID())
 		}
 		keyDescribe := keyAttrs[4]
 		if strings.HasPrefix(keyDescribe, keyDescribePrefix) {
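
The SplitN limit of 5 matters here because the description is the fifth field and may itself contain `;`. A standalone illustration with made-up key attributes:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Made-up attributes in the "type;uid;gid;perm;description" shape used
	// above; SplitN stops after 5 fields, so the embedded ";" survives.
	keyAttr := "user;1000;1000;3f010000;registry.example.com;extra"
	keyAttrs := strings.SplitN(keyAttr, ";", 5)
	fmt.Println(len(keyAttrs)) // 5
	fmt.Println(keyAttrs[4])   // registry.example.com;extra
}
```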


@@ -363,7 +363,7 @@ type configWrapper struct {
 	configPath string
 	// path to system-wide registries.conf.d directory, or "" if not used
 	configDirPath string
-	// path to user specificed registries.conf.d directory, or "" if not used
+	// path to user specified registries.conf.d directory, or "" if not used
 	userConfigDirPath string
 }


@@ -93,7 +93,7 @@ func TestSetupCertificates(t *testing.T) {
 	assert.NoError(t, err)
 	assert.Nil(t, tlsc.RootCAs)
 
-	// Misssing key file
+	// Missing key file
 	tlsc = tls.Config{}
 	err = SetupCertificates("testdata/missing-key", &tlsc)
 	assert.Error(t, err)


@@ -28,8 +28,8 @@ type SigningMechanism interface {
 	Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
 	// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
 	// along with a short identifier of the key used for signing.
-	// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
-	// is NOT the same as a "key identity" used in other calls ot this interface, and
+	// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+	// is NOT the same as a "key identity" used in other calls to this interface, and
 	// the values may have no recognizable relationship if the public key is not available.
 	UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
 }
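
The warning being corrected above has a concrete basis: for OpenPGP v4 keys the 64-bit "Key ID" is just the low 8 bytes of the 160-bit fingerprint (the "key identity"), so the two values only coincide in their tail. A small illustration with a made-up fingerprint:

```go
package main

import "fmt"

func main() {
	// Hypothetical 40-hex-digit OpenPGP v4 fingerprint (a "key identity").
	fingerprint := "0123456789ABCDEF01234567A2DF901DABE2C028"
	// The short "Key ID" is the last 16 hex digits (the low 64 bits).
	keyID := fingerprint[len(fingerprint)-16:]
	fmt.Println(keyID) // A2DF901DABE2C028
}
```
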
@@ -58,8 +58,8 @@ func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, e
 
 // gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
 // along with a short identifier of the key used for signing.
-// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
-// is NOT the same as a "key identity" used in other calls ot this interface, and
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
 // the values may have no recognizable relationship if the public key is not available.
 func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
 	// This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography.
@@ -75,7 +75,7 @@ func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents
 		// Coverage: An error during reading the body can happen only if
 		// 1) the message is encrypted, which is not our case (and we dont give ReadMessage the key
 		// to decrypt the contents anyway), or
-		// 2) the message is signed AND we give ReadMessage a correspnding public key, which we dont.
+		// 2) the message is signed AND we give ReadMessage a corresponding public key, which we dont.
 		return nil, "", err
 	}


@@ -167,8 +167,8 @@ func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []b
 
 // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
 // along with a short identifier of the key used for signing.
-// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
-// is NOT the same as a "key identity" used in other calls ot this interface, and
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
 // the values may have no recognizable relationship if the public key is not available.
 func (m *gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
 	return gpgUntrustedSignatureContents(untrustedSignature)


@@ -151,8 +151,8 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
 
 // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
 // along with a short identifier of the key used for signing.
-// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
-// is NOT the same as a "key identity" used in other calls ot this interface, and
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
 // the values may have no recognizable relationship if the public key is not available.
 func (m *openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
 	return gpgUntrustedSignatureContents(untrustedSignature)


@@ -1,4 +1,4 @@
-// policy_config.go hanles creation of policy objects, either by parsing JSON
+// policy_config.go handles creation of policy objects, either by parsing JSON
 // or by programs building them programmatically.
 // The New* constructors are intended to be a stable API. FIXME: after an independent review.
 
@@ -516,7 +516,7 @@ func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error)
 	return res, nil
 }
 
-// newPRMMatchExact is NewPRMMatchExact, except it resturns the private type.
+// newPRMMatchExact is NewPRMMatchExact, except it returns the private type.
 func newPRMMatchExact() *prmMatchExact {
 	return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
 }
@@ -546,7 +546,7 @@ func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
-// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it resturns the private type.
+// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
 func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact {
 	return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}}
 }
@@ -576,7 +576,7 @@ func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
-// newPRMMatchRepository is NewPRMMatchRepository, except it resturns the private type.
+// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
 func newPRMMatchRepository() *prmMatchRepository {
 	return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
 }
@@ -606,7 +606,7 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
-// newPRMExactReference is NewPRMExactReference, except it resturns the private type.
+// newPRMExactReference is NewPRMExactReference, except it returns the private type.
 func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
 	ref, err := reference.ParseNormalizedNamed(dockerReference)
 	if err != nil {
@@ -652,7 +652,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
-// newPRMExactRepository is NewPRMExactRepository, except it resturns the private type.
+// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
 func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
 	if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
 		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
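
The constructors above mirror the JSON reference-match types of containers-policy.json(5); an end-to-end sketch parsing a policy that uses one of them (the registry scope and key path are examples, not recommendations):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/signature"
)

func main() {
	// signedIdentity selects the matchRepository type whose constructor
	// appears above.
	data := []byte(`{
	    "default": [{"type": "reject"}],
	    "transports": {
	        "docker": {
	            "registry.example.com/ns": [
	                {
	                    "type": "signedBy",
	                    "keyType": "GPGKeys",
	                    "keyPath": "/etc/pki/example.gpg",
	                    "signedIdentity": {"type": "matchRepository"}
	                }
	            ]
	        }
	    }
	}`)
	policy, err := signature.NewPolicyFromBytes(data)
	fmt.Println(policy != nil, err) // true <nil>
}
```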


@@ -1,5 +1,5 @@
 // This defines the top-level policy evaluation API.
-// To the extent possible, the interface of the fuctions provided
+// To the extent possible, the interface of the functions provided
 // here is intended to be completely unambiguous, and stable for users
 // to rely on.
 
@@ -47,7 +47,7 @@ type PolicyRequirement interface {
 	// - sarUnknown if if this PolicyRequirement does not deal with signatures.
 	// NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
 	// Returning sarUnknown and a non-nil error value is invalid.
-	// WARNING: This makes the signature contents acceptable for futher processing,
+	// WARNING: This makes the signature contents acceptable for further processing,
 	// but it does not necessarily mean that the contents of the signature are
 	// consistent with local policy.
 	// For example:
@@ -166,7 +166,7 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
 // verified).
 // NOTE: This may legitimately return an empty list and no error, if the image
 // has no signatures or only invalid signatures.
-// WARNING: This makes the signature contents acceptable for futher processing,
+// WARNING: This makes the signature contents acceptable for further processing,
 // but it does not necessarily mean that the contents of the signature are
 // consistent with local policy.
 // For example:


@@ -496,7 +496,7 @@ func assertSARRejected(t *testing.T, sar signatureAcceptanceResult, parsedSig *S
 	assert.Error(t, err)
 }
 
-// assertSARRejectedPolicyRequiremnt verifies that isSignatureAuthorAccepted returns a consistent sarRejected resul,
+// assertSARRejectedPolicyRequiremnt verifies that isSignatureAuthorAccepted returns a consistent sarRejected result,
 // and that the returned error is a PolicyRequirementError..
 func assertSARRejectedPolicyRequirement(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
 	assertSARRejected(t, sar, parsedSig, err)


@@ -51,7 +51,7 @@ func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.Unparse
 		return signature.String() == intended.String()
 	case reference.Canonical:
 		// We dont actually compare the manifest digest against the signature here; that happens prSignedBy.in UnparsedImage.Manifest.
-		// Becase UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
+		// Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
 		// we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms)
 		return signature.Name() == intended.Name()
 	default: // !reference.IsNameOnly(intended)
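
A standalone sketch of the repository-level comparison above, using the reference package vendored in this repository (the image names and dummy digest are examples):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// A digested (Canonical) intended reference and a tagged signature reference.
	intended, err := reference.ParseNormalizedNamed("registry.example.com/ns/app@sha256:" + strings.Repeat("0", 64))
	if err != nil {
		panic(err)
	}
	sig, err := reference.ParseNormalizedNamed("registry.example.com/ns/app:1.2.3")
	if err != nil {
		panic(err)
	}
	// Name() drops the tag/digest, so this compares at the repository level,
	// exactly as the Canonical case above does; digest agreement is verified
	// elsewhere, as the comment explains.
	fmt.Println(sig.Name() == intended.Name()) // true
}
```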


@@ -237,7 +237,7 @@ func TestPMMMatchRepoDigestOrExactMatchesDockerReference(t *testing.T) {
 		}
 	}
 
-	// The other cases, possibly assymetrical:
+	// The other cases, possibly asymmetrical:
 	for _, test := range []struct {
 		imageRef, sigRef string
 		result bool


@@ -210,7 +210,7 @@ type signatureAcceptanceRules struct {
 	validateSignedDockerManifestDigest func(digest.Digest) error
 }
 
-// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principial components
+// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
 // match expected values, both as specified by rules, and returns it
 func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
 	signed, keyIdentity, err := mech.Verify(unverifiedSignature)
@@ -248,7 +248,7 @@ func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte
 // There is NO REASON to expect the values to be correct, or not intentionally misleading
 // (including things like “✅ Verified by $authority”)
 func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
-	// NOTE: This should eventualy do format autodetection.
+	// NOTE: This should eventually do format autodetection.
 	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
 	if err != nil {
 		return nil, err


@@ -463,7 +463,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
@@ -657,7 +657,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 		// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
 		// or to even check if we had it.
 		// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
-		// that relies on using a blob digest that has never been seeen by the store had better call
+		// that relies on using a blob digest that has never been seen by the store had better call
 		// TryReusingBlob; not calling PutBlob already violates the documented API, so theres only
 		// so far we are going to accommodate that (if we should be doing that at all).
 		logrus.Debugf("looking for diffID for blob %+v", blob.Digest)


@@ -170,7 +170,7 @@ type BICReplacementCandidate struct {
 	Location BICLocationReference
 }
 
-// BlobInfoCache records data useful for reusing blobs, or substituing equivalent ones, to avoid unnecessary blob copies.
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
 //
 // It records two kinds of data:
 // - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
@@ -193,7 +193,7 @@ type BICReplacementCandidate struct {
 // can be directly reused within a registry, or mounted across registries within a registry server.)
 //
 // None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
-// users of the cahce should just fall back to copying the blobs the usual way.
+// users of the cache should just fall back to copying the blobs the usual way.
 type BlobInfoCache interface {
 	// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
 	// May return anyDigest if it is known to be uncompressed.
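
A minimal sketch of consulting the cache described here; substituteIfKnown is hypothetical, and memory.New (this repository's in-memory BlobInfoCache) is assumed as the implementation:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/containers/image/v5/pkg/blobinfocache/memory"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

// substituteIfKnown asks the cache for the uncompressed ("DiffID") digest of
// a layer; "" means nothing is recorded, and the caller should fall back to
// copying the blob the usual way.
func substituteIfKnown(cache types.BlobInfoCache, layer digest.Digest) {
	if diffID := cache.UncompressedDigest(layer); diffID != "" {
		fmt.Printf("%s is known to decompress to %s\n", layer, diffID)
	} else {
		fmt.Printf("nothing recorded for %s\n", layer)
	}
}

func main() {
	cache := memory.New()
	layer := digest.Digest("sha256:" + strings.Repeat("0", 64))
	substituteIfKnown(cache, layer)
	cache.RecordDigestUncompressedPair(layer, layer) // pretend it is uncompressed
	substituteIfKnown(cache, layer)
}
```
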
@@ -306,7 +306,7 @@ type ImageDestination interface {
 	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 	// info.Digest must not be empty.
 	// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-	// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
+	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
 	// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 	// May use and/or update cache.
 	TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)