mirror of https://github.com/knative/func.git
feat: s2i builder with preliminary node support (#923)
* fix: stuck build ticker
* feat: s2i builder prototype
* default builders
* use s2i fork with updated docker
* in-code builder defaults
* s2i builder verbosity constructor arg
* typed errors
* typed error tests
* remove unneeded env code
* s2i build e2e test
* e2e tests
* update licenses
* cleanup
* codegen debug
* update licenses
* Revert "Update actions (#921)"
This reverts commit 8312b5c560
.
* update licenses
* e2e test updates
* use GetDefaultDockerConfig for s2i config
* docker.NewClient docs
This commit is contained in:
parent
e9251f518c
commit
a91bcc5fcf
|
@ -3,7 +3,6 @@ package buildpacks
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
@ -21,6 +20,12 @@ import (
|
|||
"github.com/buildpacks/pack/pkg/logging"
|
||||
)
|
||||
|
||||
// DefaultBuilderImages for Pack builders indexed by Runtime Language
|
||||
var DefaultBuilderImages = map[string]string{
|
||||
"node": "gcr.io/paketo-buildpacks/builder:base",
|
||||
"go": "gcr.io/paketo-buildpacks/builder:base",
|
||||
}
|
||||
|
||||
//Builder holds the configuration that will be passed to
|
||||
//Buildpack builder
|
||||
type Builder struct {
|
||||
|
@ -46,7 +51,10 @@ func (builder *Builder) Build(ctx context.Context, f fn.Function) (err error) {
|
|||
packBuilder = pb
|
||||
}
|
||||
} else {
|
||||
return errors.New("no buildpack configured for function")
|
||||
packBuilder, err = defaultBuilderImage(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Build options for the pack client.
|
||||
|
@ -139,6 +147,16 @@ func (builder *Builder) Build(ctx context.Context, f fn.Function) (err error) {
|
|||
return
|
||||
}
|
||||
|
||||
// defaultBuilderImage for the given function based on its runtime, or an
|
||||
// error if no default is defined for the given runtime.
|
||||
func defaultBuilderImage(f fn.Function) (string, error) {
|
||||
v, ok := DefaultBuilderImages[f.Runtime]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("Pack builder has no default builder image specified for the '%v' language runtime. Please provide one.", f.Runtime)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// hack this makes stdout non-closeable
|
||||
type stdoutWrapper struct {
|
||||
impl io.Writer
|
||||
|
|
|
@ -644,7 +644,6 @@ func (c *Client) printBuildActivity(ctx context.Context) {
|
|||
}
|
||||
i := 0
|
||||
ticker := time.NewTicker(10 * time.Second)
|
||||
defer ticker.Stop()
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
|
@ -654,6 +653,7 @@ func (c *Client) printBuildActivity(ctx context.Context) {
|
|||
i = i % len(m)
|
||||
case <-ctx.Done():
|
||||
c.progressListener.Stopping()
|
||||
ticker.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
|
|
@ -900,8 +900,7 @@ func TestClient_Deploy_UnbuiltErrors(t *testing.T) {
|
|||
}
|
||||
|
||||
// TestClient_New_BuildersPersisted Asserts that the client preserves user-
|
||||
// provided Builders on the Function configuration with the internal default
|
||||
// if not provided.
|
||||
// provided Builders
|
||||
func TestClient_New_BuildersPersisted(t *testing.T) {
|
||||
root := "testdata/example.com/testConfiguredBuilders" // Root from which to run the test
|
||||
defer Using(t, root)()
|
||||
|
@ -931,10 +930,11 @@ func TestClient_New_BuildersPersisted(t *testing.T) {
|
|||
t.Fatalf("Expected %v but got %v", f0.Builders, f1.Builders)
|
||||
}
|
||||
|
||||
// But that the default exists
|
||||
if f1.Builder == "" {
|
||||
t.Fatal("Expected default builder to be set")
|
||||
}
|
||||
// A Default Builder(image) is not asserted here, because that is
|
||||
// the responsibility of the Builder(type) being used to build the Function.
|
||||
// The builder (Buildpack,s2i, etc) will have a default builder image for
|
||||
// the given Function or will error that the Function is not supported.
|
||||
// A builder image may also be manually specified of course.
|
||||
}
|
||||
|
||||
// TestClient_New_BuilderDefault ensures that if a custom builder is
|
||||
|
|
52
cmd/build.go
52
cmd/build.go
|
@ -9,6 +9,9 @@ import (
|
|||
"github.com/ory/viper"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"knative.dev/kn-plugin-func/buildpacks"
|
||||
"knative.dev/kn-plugin-func/s2i"
|
||||
|
||||
fn "knative.dev/kn-plugin-func"
|
||||
)
|
||||
|
||||
|
@ -40,17 +43,18 @@ and the image name is stored in the configuration file.
|
|||
{{.Name}} build --builder cnbs/sample-builder:bionic
|
||||
`,
|
||||
SuggestFor: []string{"biuld", "buidl", "built"},
|
||||
PreRunE: bindEnv("image", "path", "builder", "registry", "confirm", "push"),
|
||||
PreRunE: bindEnv("image", "path", "builder", "registry", "confirm", "push", "builder-image"),
|
||||
}
|
||||
|
||||
cmd.Flags().StringP("builder", "b", "", "Buildpack builder, either an as a an image name or a mapping name.\nSpecified value is stored in func.yaml for subsequent builds.")
|
||||
cmd.Flags().StringP("builder", "b", "pack", "builder to use when creating the underlying image. Currently supported builders are 'pack' and 's2i'.")
|
||||
cmd.Flags().StringP("builder-image", "", "", "builder image, either an as a an image name or a mapping name.\nSpecified value is stored in func.yaml for subsequent builds. ($FUNC_BUILDER_IMAGE)")
|
||||
cmd.Flags().BoolP("confirm", "c", false, "Prompt to confirm all configuration options (Env: $FUNC_CONFIRM)")
|
||||
cmd.Flags().StringP("image", "i", "", "Full image name in the form [registry]/[namespace]/[name]:[tag] (optional). This option takes precedence over --registry (Env: $FUNC_IMAGE)")
|
||||
cmd.Flags().StringP("registry", "r", GetDefaultRegistry(), "Registry + namespace part of the image to build, ex 'quay.io/myuser'. The full image name is automatically determined based on the local directory name. If not provided the registry will be taken from func.yaml (Env: $FUNC_REGISTRY)")
|
||||
cmd.Flags().BoolP("push", "u", false, "Attempt to push the function image after being successfully built")
|
||||
setPathFlag(cmd)
|
||||
|
||||
if err := cmd.RegisterFlagCompletionFunc("builder", CompleteBuilderList); err != nil {
|
||||
if err := cmd.RegisterFlagCompletionFunc("builder", CompleteBuilderImageList); err != nil {
|
||||
fmt.Println("internal: error while calling RegisterFlagCompletionFunc: ", err)
|
||||
}
|
||||
|
||||
|
@ -89,7 +93,7 @@ func runBuild(cmd *cobra.Command, _ []string, newClient ClientFactory) (err erro
|
|||
return
|
||||
}
|
||||
|
||||
function, err := functionWithOverrides(config.Path, functionOverrides{Builder: config.Builder, Image: config.Image})
|
||||
function, err := functionWithOverrides(config.Path, functionOverrides{BuilderImage: config.BuilderImage, Image: config.Image})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
@ -144,15 +148,22 @@ func runBuild(cmd *cobra.Command, _ []string, newClient ClientFactory) (err erro
|
|||
config.Registry = ""
|
||||
}
|
||||
|
||||
// TODO(lkingland): The below deferred options gathering is what will
|
||||
// re-enable the addition of alternative implementations of the Builder,
|
||||
// unblocking PR https://github.com/knative-sandbox/kn-plugin-func/pull/842
|
||||
// the implementation of which will be inserted here.
|
||||
// Choose a builder based on the value of the --builder flag
|
||||
var builder fn.Builder
|
||||
if config.Builder == "pack" {
|
||||
builder = buildpacks.NewBuilder(config.Verbose)
|
||||
} else if config.Builder == "s2i" {
|
||||
builder = s2i.NewBuilder(config.Verbose)
|
||||
} else {
|
||||
err = errors.New("unrecognized builder")
|
||||
return
|
||||
}
|
||||
|
||||
// Create a client using the registry defined in config plus any additional
|
||||
// options provided (such as mocks for testing)
|
||||
client, done := newClient(ClientConfig{Verbose: config.Verbose},
|
||||
fn.WithRegistry(config.Registry))
|
||||
fn.WithRegistry(config.Registry),
|
||||
fn.WithBuilder(builder))
|
||||
defer done()
|
||||
|
||||
err = client.Build(cmd.Context(), config.Path)
|
||||
|
@ -184,18 +195,27 @@ type buildConfig struct {
|
|||
// Confirm: confirm values arrived upon from environment plus flags plus defaults,
|
||||
// with interactive prompting (only applicable when attached to a TTY).
|
||||
Confirm bool
|
||||
|
||||
// Builder is the name of the subsystem that will complete the underlying
|
||||
// build (Pack, s2i, remote pipeline, etc). Currently ad-hoc rather than
|
||||
// an enumerated field. See the Client constructory for logic.
|
||||
Builder string
|
||||
|
||||
// BuilderImage is the image (name or mapping) to use for building. Usually
|
||||
// set automatically.
|
||||
BuilderImage string
|
||||
}
|
||||
|
||||
func newBuildConfig() buildConfig {
|
||||
return buildConfig{
|
||||
Image: viper.GetString("image"),
|
||||
Path: viper.GetString("path"),
|
||||
Registry: viper.GetString("registry"),
|
||||
Verbose: viper.GetBool("verbose"), // defined on root
|
||||
Confirm: viper.GetBool("confirm"),
|
||||
Builder: viper.GetString("builder"),
|
||||
Push: viper.GetBool("push"),
|
||||
Image: viper.GetString("image"),
|
||||
Path: viper.GetString("path"),
|
||||
Registry: viper.GetString("registry"),
|
||||
Verbose: viper.GetBool("verbose"), // defined on root
|
||||
Confirm: viper.GetBool("confirm"),
|
||||
Builder: viper.GetString("builder"),
|
||||
BuilderImage: viper.GetString("builder-image"),
|
||||
Push: viper.GetBool("push"),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -95,7 +95,7 @@ func CompleteRegistryList(cmd *cobra.Command, args []string, toComplete string)
|
|||
return
|
||||
}
|
||||
|
||||
func CompleteBuilderList(cmd *cobra.Command, args []string, complete string) (strings []string, directive cobra.ShellCompDirective) {
|
||||
func CompleteBuilderImageList(cmd *cobra.Command, args []string, complete string) (strings []string, directive cobra.ShellCompDirective) {
|
||||
directive = cobra.ShellCompDirectiveError
|
||||
|
||||
var (
|
||||
|
|
|
@ -168,9 +168,9 @@ func bindEnv(flags ...string) bindFunc {
|
|||
}
|
||||
|
||||
type functionOverrides struct {
|
||||
Image string
|
||||
Namespace string
|
||||
Builder string
|
||||
Image string
|
||||
Namespace string
|
||||
BuilderImage string
|
||||
}
|
||||
|
||||
// functionWithOverrides sets the namespace and image strings for the
|
||||
|
@ -187,7 +187,7 @@ func functionWithOverrides(root string, overrides functionOverrides) (f fn.Funct
|
|||
src string
|
||||
dest *string
|
||||
}{
|
||||
{overrides.Builder, &f.Builder},
|
||||
{overrides.BuilderImage, &f.Builder},
|
||||
{overrides.Image, &f.Image},
|
||||
{overrides.Namespace, &f.Namespace},
|
||||
}
|
||||
|
|
|
@ -19,6 +19,14 @@ import (
|
|||
"github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
// NewClient creates a new docker client.
|
||||
// reads the DOCKER_HOST envvar but it may or may not return it as dockerHost.
|
||||
// - For local connection (unix socket and windows named pipe) it returns the
|
||||
// DOCKER_HOST directly.
|
||||
// - For ssh connections it reads the DOCKER_HOST from the ssh remote.
|
||||
// - For TCP connections it returns "" so it defaults in the remote (note that
|
||||
// one should not be use client.DefaultDockerHost in this situation). This is
|
||||
// needed beaus of TCP+tls connections.
|
||||
func NewClient(defaultHost string) (dockerClient client.CommonAPIClient, dockerHost string, err error) {
|
||||
var _url *url.URL
|
||||
|
||||
|
|
6
go.mod
6
go.mod
|
@ -9,7 +9,7 @@ require (
|
|||
github.com/alecthomas/jsonschema v0.0.0-20210526225647-edb03dcab7bc
|
||||
github.com/buildpacks/pack v0.24.0
|
||||
github.com/cloudevents/sdk-go/v2 v2.5.0
|
||||
github.com/containers/image/v5 v5.10.6
|
||||
github.com/containers/image/v5 v5.19.1
|
||||
github.com/coreos/go-semver v0.3.0
|
||||
github.com/docker/cli v20.10.12+incompatible
|
||||
github.com/docker/docker v20.10.12+incompatible
|
||||
|
@ -23,6 +23,7 @@ require (
|
|||
github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
|
||||
github.com/openshift/source-to-image v1.3.1
|
||||
github.com/ory/viper v1.7.5
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/spf13/cobra v1.3.0
|
||||
|
@ -45,6 +46,9 @@ require (
|
|||
)
|
||||
|
||||
replace (
|
||||
// update docker to be compatible with version used by pack and removes invalid pseudo-version
|
||||
github.com/openshift/source-to-image => github.com/boson-project/source-to-image v1.3.2
|
||||
|
||||
// Pin k8s.io dependencies to align with Knative and Tekton needs
|
||||
k8s.io/api => k8s.io/api v0.22.5
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.22.5
|
||||
|
|
|
@ -0,0 +1,108 @@
|
|||
package s2i
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
|
||||
"github.com/openshift/source-to-image/pkg/api"
|
||||
"github.com/openshift/source-to-image/pkg/api/validation"
|
||||
"github.com/openshift/source-to-image/pkg/build"
|
||||
"github.com/openshift/source-to-image/pkg/build/strategies"
|
||||
s2idocker "github.com/openshift/source-to-image/pkg/docker"
|
||||
"github.com/openshift/source-to-image/pkg/scm/git"
|
||||
|
||||
fn "knative.dev/kn-plugin-func"
|
||||
docker "knative.dev/kn-plugin-func/docker"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrRuntimeRequired indicates the required value of Function Runtime was not provided
|
||||
ErrRuntimeRequired = errors.New("runtime is required to build")
|
||||
|
||||
// ErrRuntimeNotSupported indicates the given runtime is not (yet) supported
|
||||
// by this builder.
|
||||
ErrRuntimeNotSupported = errors.New("runtime not supported")
|
||||
)
|
||||
|
||||
// DefaultBuilderImages for s2i builders indexed by Runtime Language
|
||||
var DefaultBuilderImages = map[string]string{
|
||||
"node": "registry.access.redhat.com/ubi8/nodejs-16", // TODO: finalize choice and include version
|
||||
}
|
||||
|
||||
// Builder of Functions using the s2i subsystem.
|
||||
type Builder struct {
|
||||
verbose bool
|
||||
}
|
||||
|
||||
// NewBuilder creates a new instance of a Builder with static defaults.
|
||||
func NewBuilder(verbose bool) *Builder {
|
||||
return &Builder{verbose: verbose}
|
||||
}
|
||||
|
||||
func (b *Builder) Build(ctx context.Context, f fn.Function) (err error) {
|
||||
// Ensure the Function has a builder specified
|
||||
if f.Builder == "" {
|
||||
f.Builder, err = defaultBuilderImage(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
client, _, err := docker.NewClient(dockerClient.DefaultDockerHost)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
// Build Config
|
||||
cfg := &api.Config{}
|
||||
cfg.Quiet = !b.verbose
|
||||
cfg.Tag = f.Image
|
||||
cfg.Source = &git.URL{URL: url.URL{Path: f.Root}, Type: git.URLTypeLocal}
|
||||
cfg.BuilderImage = f.Builder
|
||||
cfg.BuilderPullPolicy = api.DefaultBuilderPullPolicy
|
||||
cfg.PreviousImagePullPolicy = api.DefaultPreviousImagePullPolicy
|
||||
cfg.RuntimeImagePullPolicy = api.DefaultRuntimeImagePullPolicy
|
||||
cfg.DockerConfig = s2idocker.GetDefaultDockerConfig()
|
||||
if errs := validation.ValidateConfig(cfg); len(errs) > 0 {
|
||||
for _, e := range errs {
|
||||
fmt.Fprintf(os.Stderr, "ERROR: %s\n", e)
|
||||
}
|
||||
return errors.New("Unable to build via the s2i builder.")
|
||||
}
|
||||
|
||||
builder, _, err := strategies.Strategy(client, cfg, build.Overrides{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
result, err := builder.Build(cfg)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if b.verbose {
|
||||
for _, message := range result.Messages {
|
||||
fmt.Println(message)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// defaultBuilderImage for the given function based on its runtime, or an
|
||||
// error if no default is defined for the given runtime.
|
||||
func defaultBuilderImage(f fn.Function) (string, error) {
|
||||
if f.Runtime == "" {
|
||||
return "", ErrRuntimeRequired
|
||||
}
|
||||
v, ok := DefaultBuilderImages[f.Runtime]
|
||||
if !ok {
|
||||
return "", ErrRuntimeNotSupported
|
||||
}
|
||||
return v, nil
|
||||
}
|
|
@ -0,0 +1,32 @@
|
|||
package s2i_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
fn "knative.dev/kn-plugin-func"
|
||||
"knative.dev/kn-plugin-func/s2i"
|
||||
)
|
||||
|
||||
// Test_ErrRuntimeRequired ensures that a request to build without a runtime
|
||||
// defined for the Function yields an ErrRuntimeRequired
|
||||
func Test_ErrRuntimeRequired(t *testing.T) {
|
||||
b := s2i.NewBuilder(true)
|
||||
err := b.Build(context.Background(), fn.Function{})
|
||||
|
||||
if !errors.Is(err, s2i.ErrRuntimeRequired) {
|
||||
t.Fatal("expected ErrRuntimeRequired not received")
|
||||
}
|
||||
}
|
||||
|
||||
// Test_ErrRuntimeNotSupported ensures that a request to build a function whose
|
||||
// runtime is not yet supported yields an ErrRuntimeNotSupported
|
||||
func Test_ErrRuntimeNotSupported(t *testing.T) {
|
||||
b := s2i.NewBuilder(true)
|
||||
err := b.Build(context.Background(), fn.Function{Runtime: "unsupported"})
|
||||
|
||||
if !errors.Is(err, s2i.ErrRuntimeNotSupported) {
|
||||
t.Fatal("expected ErrRuntimeNotSupported not received")
|
||||
}
|
||||
}
|
|
@ -1,14 +1,5 @@
|
|||
# Required. One or more Buildpack builder image names capable of transforming
|
||||
# this language pack's function source code into a container image. These values
|
||||
# are copied directly to a function project's func.yaml file, allowing the
|
||||
# function developer to choose between them in local build configurations
|
||||
builders:
|
||||
default: gcr.io/paketo-buildpacks/builder:base
|
||||
base: gcr.io/paketo-buildpacks/builder:base
|
||||
full: gcr.io/paketo-buildpacks/builder:full
|
||||
|
||||
# Optional. A list of additional Buildpacks to be applied to the language pack's
|
||||
# builder image when the function is built
|
||||
# Optional list of additional Buildpacks to be applied to the language pack's
|
||||
# builder image when the Function is built using a Buildpack builder.
|
||||
buildpacks:
|
||||
- paketo-buildpacks/go-dist
|
||||
- ghcr.io/boson-project/go-function-buildpack:tip
|
||||
|
|
|
@ -1,4 +0,0 @@
|
|||
builders:
|
||||
default: gcr.io/paketo-buildpacks/builder:base
|
||||
base: gcr.io/paketo-buildpacks/builder:base
|
||||
full: gcr.io/paketo-buildpacks/builder:full
|
|
@ -1,8 +1,15 @@
|
|||
//go:build e2e
|
||||
// +build e2e
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
. "knative.dev/kn-plugin-func/testing"
|
||||
)
|
||||
|
||||
// Build runs `func build' command for a given test project.
|
||||
|
@ -24,3 +31,20 @@ func Build(t *testing.T, knFunc *TestShellCmdRunner, project *FunctionTestProjec
|
|||
project.IsBuilt = true
|
||||
|
||||
}
|
||||
|
||||
// TestBuild_S2I runs `func build` using the S2I builder.
|
||||
func TestBuild_S2I(t *testing.T) {
|
||||
var (
|
||||
root = "testdata/e2e/testbuild"
|
||||
bin, prefix = bin()
|
||||
cleanup = Within(t, root) // TODO: replace with Fromtemp?
|
||||
cwd, _ = os.Getwd()
|
||||
)
|
||||
defer cleanup()
|
||||
|
||||
run(t, bin, prefix, "create", "-v", "--language=node", cwd)
|
||||
output := run(t, bin, prefix, "build", "-v", "--builder=s2i", "--registry", GetRegistry())
|
||||
if !strings.Contains(output, "Function image built:") {
|
||||
t.Fatal("image not built")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,27 +0,0 @@
|
|||
Copyright (c) 2014 Will Fitzgerald. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
202
third_party/VENDOR-LICENSE/github.com/openshift/source-to-image/pkg/LICENSE
vendored
Normal file
202
third_party/VENDOR-LICENSE/github.com/openshift/source-to-image/pkg/LICENSE
vendored
Normal file
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -1,3 +1,38 @@
|
|||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
.idea
|
||||
.vscode
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Ignore vscode setting files
|
||||
.vscode/
|
||||
|
||||
# Test binary, build with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
|
||||
.glide/
|
||||
|
||||
# Ignore gcs bin directory
|
||||
service/bin/
|
||||
service/pkg/
|
||||
|
||||
*.img
|
||||
*.vhd
|
||||
*.tar.gz
|
||||
|
||||
# Make stuff
|
||||
.rootfs-done
|
||||
bin/*
|
||||
rootfs/*
|
||||
*.o
|
||||
/build/
|
||||
|
||||
deps/*
|
||||
out/*
|
||||
|
||||
.idea/
|
||||
.vscode/
|
|
@ -0,0 +1,99 @@
|
|||
run:
|
||||
timeout: 8m
|
||||
|
||||
linters:
|
||||
enable:
|
||||
- stylecheck
|
||||
|
||||
linters-settings:
|
||||
stylecheck:
|
||||
# https://staticcheck.io/docs/checks
|
||||
checks: ["all"]
|
||||
|
||||
|
||||
issues:
|
||||
# This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like
|
||||
# (screaming case Windows api constants for example). There's also some structs that we *could* change the initialisms to be Go
|
||||
# friendly (Id -> ID) but they're exported and it would be a breaking change. This makes it so that most new code, code that isn't
|
||||
# supposed to be a pretty faithful mapping to an OS call/constants, or non-generated code still checks if we're following idioms,
|
||||
# while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change.
|
||||
exclude-rules:
|
||||
- path: layer.go
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: hcsshim.go
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: internal\\hcs\\schema2\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: internal\\wclayer\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: hcn\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: internal\\hcs\\schema1\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: internal\\hns\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: ext4\\internal\\compactext4\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: ext4\\internal\\format\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: internal\\guestrequest\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: internal\\guest\\prot\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: internal\\windevice\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: internal\\winapi\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: internal\\vmcompute\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: internal\\regstate\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
- path: internal\\hcserror\\
|
||||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
|
@ -0,0 +1,87 @@
|
|||
BASE:=base.tar.gz
|
||||
|
||||
GO:=go
|
||||
GO_FLAGS:=-ldflags "-s -w" # strip Go binaries
|
||||
CGO_ENABLED:=0
|
||||
GOMODVENDOR:=
|
||||
|
||||
CFLAGS:=-O2 -Wall
|
||||
LDFLAGS:=-static -s # strip C binaries
|
||||
|
||||
GO_FLAGS_EXTRA:=
|
||||
ifeq "$(GOMODVENDOR)" "1"
|
||||
GO_FLAGS_EXTRA += -mod=vendor
|
||||
endif
|
||||
GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA)
|
||||
|
||||
SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST))))
|
||||
|
||||
# The link aliases for gcstools
|
||||
GCS_TOOLS=\
|
||||
generichook
|
||||
|
||||
.PHONY: all always rootfs test
|
||||
|
||||
all: out/initrd.img out/rootfs.tar.gz
|
||||
|
||||
clean:
|
||||
find -name '*.o' -print0 | xargs -0 -r rm
|
||||
rm -rf bin deps rootfs out
|
||||
|
||||
test:
|
||||
cd $(SRCROOT) && go test -v ./internal/guest/...
|
||||
|
||||
out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools Makefile
|
||||
@mkdir -p out
|
||||
rm -rf rootfs
|
||||
mkdir -p rootfs/bin/
|
||||
cp bin/init rootfs/
|
||||
cp bin/vsockexec rootfs/bin/
|
||||
cp bin/cmd/gcs rootfs/bin/
|
||||
cp bin/cmd/gcstools rootfs/bin/
|
||||
for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done
|
||||
git -C $(SRCROOT) rev-parse HEAD > rootfs/gcs.commit && \
|
||||
git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/gcs.branch
|
||||
tar -zcf $@ -C rootfs .
|
||||
rm -rf rootfs
|
||||
|
||||
out/rootfs.tar.gz: out/initrd.img
|
||||
rm -rf rootfs-conv
|
||||
mkdir rootfs-conv
|
||||
gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd)
|
||||
tar -zcf $@ -C rootfs-conv .
|
||||
rm -rf rootfs-conv
|
||||
|
||||
out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh
|
||||
$(SRCROOT)/hack/catcpio.sh "$(BASE)" out/delta.tar.gz > out/initrd.img.uncompressed
|
||||
gzip -c out/initrd.img.uncompressed > $@
|
||||
rm out/initrd.img.uncompressed
|
||||
|
||||
-include deps/cmd/gcs.gomake
|
||||
-include deps/cmd/gcstools.gomake
|
||||
|
||||
# Implicit rule for includes that define Go targets.
|
||||
%.gomake: $(SRCROOT)/Makefile
|
||||
@mkdir -p $(dir $@)
|
||||
@/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new
|
||||
@/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new
|
||||
@/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new
|
||||
@/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new
|
||||
@/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new
|
||||
@/bin/echo -e '\tmv $$@.new $$@' >> $@.new
|
||||
@/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new
|
||||
mv $@.new $@
|
||||
|
||||
VPATH=$(SRCROOT)
|
||||
|
||||
bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
|
||||
@mkdir -p bin
|
||||
$(CC) $(LDFLAGS) -o $@ $^
|
||||
|
||||
bin/init: init/init.o vsockexec/vsock.o
|
||||
@mkdir -p bin
|
||||
$(CC) $(LDFLAGS) -o $@ $^
|
||||
|
||||
%.o: %.c
|
||||
@mkdir -p $(dir $@)
|
||||
$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
|
|
@ -2,13 +2,67 @@
|
|||
|
||||
[](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster)
|
||||
|
||||
This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS).
|
||||
This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS), as well as code for the [guest agent](./internal/guest/README.md) (commonly referred to as the GCS or Guest Compute Service in the codebase) used to support running Linux Hyper-V containers.
|
||||
|
||||
It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well.
|
||||
It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd](https://github.com/containerd/containerd) projects, but it can be freely used by other projects as well.
|
||||
|
||||
## Building
|
||||
|
||||
While this repository can be used as a library of sorts to call the HCS apis, there are a couple binaries built out of the repository as well. The main ones being the Linux guest agent, and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).
|
||||
### Linux Hyper-V Container Guest Agent
|
||||
|
||||
To build the Linux guest agent itself all that's needed is to set your GOOS to "Linux" and build out of ./cmd/gcs.
|
||||
```powershell
|
||||
C:\> $env:GOOS="linux"
|
||||
C:\> go build .\cmd\gcs\
|
||||
```
|
||||
|
||||
or on a Linux machine
|
||||
```sh
|
||||
> go build ./cmd/gcs
|
||||
```
|
||||
|
||||
If you want it to be packaged inside of a rootfs to boot with alongside all of the other tools then you'll need to provide a rootfs that it can be packaged inside of. An easy way is to export the rootfs of a container.
|
||||
|
||||
```sh
|
||||
docker pull busybox
|
||||
docker run --name base_image_container busybox
|
||||
docker export base_image_container | gzip > base.tar.gz
|
||||
BASE=./base.tar.gz
|
||||
make all
|
||||
```
|
||||
|
||||
If the build is successful, in the `./out` folder you should see:
|
||||
```sh
|
||||
> ls ./out/
|
||||
delta.tar.gz initrd.img rootfs.tar.gz
|
||||
```
|
||||
|
||||
### Containerd Shim
|
||||
For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md.
|
||||
|
||||
Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers.
|
||||
|
||||
```powershell
|
||||
C:\> $env:GOOS="windows"
|
||||
C:\> go build .\cmd\containerd-shim-runhcs-v1
|
||||
```
|
||||
|
||||
Then place the binary in the same directory that Containerd is located at in your environment. A default Containerd configuration file can be generated by running:
|
||||
```powershell
|
||||
.\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii
|
||||
```
|
||||
|
||||
This config file will already have the shim set as the default runtime for cri interactions.
|
||||
|
||||
To trial using the shim out with ctr.exe:
|
||||
```powershell
|
||||
C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!"
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
This project welcomes contributions and suggestions. Most contributions require you to agree to a
|
||||
This project welcomes contributions and suggestions. Most contributions require you to agree to a
|
||||
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
|
||||
the rights to use your contribution. For details, visit https://cla.microsoft.com.
|
||||
|
||||
|
@ -16,7 +70,27 @@ When you submit a pull request, a CLA-bot will automatically determine whether y
|
|||
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
|
||||
provided by the bot. You will only need to do this once across all repos using our CLA.
|
||||
|
||||
We also ask that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to certify they either authored the work themselves or otherwise have permission to use it in this project.
|
||||
We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to
|
||||
certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for
|
||||
more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure
|
||||
that all commits in a given PR are signed-off.
|
||||
|
||||
### Test Directory (Important to note)
|
||||
|
||||
This project has tried to trim some dependencies from the root Go modules file that would be cumbersome to get transitively included if this
|
||||
project is being vendored/used as a library. Some of these dependencies were only being used for tests, so the /test directory in this project also has
|
||||
its own go.mod file where these are now included to get around this issue. Our tests rely on the code in this project to run, so the test Go modules file
|
||||
has a relative path replace directive to pull in the latest hcsshim code that the tests actually touch from this project
|
||||
(which is the repo itself on your disk).
|
||||
|
||||
```
|
||||
replace (
|
||||
github.com/Microsoft/hcsshim => ../
|
||||
)
|
||||
```
|
||||
|
||||
Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the
|
||||
CI in this project will check if the files are out of date and will fail if this is true.
|
||||
|
||||
|
||||
## Code of Conduct
|
||||
|
|
|
@ -3,26 +3,34 @@ module github.com/Microsoft/hcsshim
|
|||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v0.3.1
|
||||
github.com/Microsoft/go-winio v0.4.17
|
||||
github.com/cenkalti/backoff/v4 v4.1.1
|
||||
github.com/containerd/cgroups v1.0.1
|
||||
github.com/containerd/console v1.0.2
|
||||
github.com/containerd/containerd v1.4.9
|
||||
github.com/containerd/continuity v0.1.0 // indirect
|
||||
github.com/containerd/fifo v1.0.0 // indirect
|
||||
github.com/containerd/containerd v1.5.7
|
||||
github.com/containerd/go-runc v1.0.0
|
||||
github.com/containerd/ttrpc v1.1.0
|
||||
github.com/containerd/typeurl v1.0.2
|
||||
github.com/gogo/protobuf v1.3.2
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d
|
||||
github.com/golang/mock v1.6.0
|
||||
github.com/google/go-cmp v0.5.6
|
||||
github.com/google/go-containerregistry v0.5.1
|
||||
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3
|
||||
github.com/mattn/go-shellwords v1.0.6
|
||||
github.com/opencontainers/runc v1.0.2
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/urfave/cli v1.22.2
|
||||
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
|
||||
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae
|
||||
go.etcd.io/bbolt v1.3.6
|
||||
go.opencensus.io v0.22.3
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492
|
||||
google.golang.org/grpc v1.33.2
|
||||
gotest.tools/v3 v3.0.3 // indirect
|
||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
|
||||
google.golang.org/grpc v1.40.0
|
||||
)
|
||||
|
||||
replace (
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -78,6 +78,13 @@ var (
|
|||
|
||||
// ErrNotSupported is an error encountered when hcs doesn't support the request
|
||||
ErrPlatformNotSupported = errors.New("unsupported platform request")
|
||||
|
||||
// ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped.
|
||||
ErrProcessAlreadyStopped = syscall.Errno(0x8037011f)
|
||||
|
||||
// ErrInvalidHandle is an error that can be encountrered when querying the properties of a compute system when the handle to that
|
||||
// compute system has already been closed.
|
||||
ErrInvalidHandle = syscall.Errno(0x6)
|
||||
)
|
||||
|
||||
type ErrorEvent struct {
|
||||
|
@ -249,6 +256,14 @@ func IsNotExist(err error) bool {
|
|||
err == ErrElementNotFound
|
||||
}
|
||||
|
||||
// IsErrorInvalidHandle checks whether the error is the result of an operation carried
|
||||
// out on a handle that is invalid/closed. This error popped up while trying to query
|
||||
// stats on a container in the process of being stopped.
|
||||
func IsErrorInvalidHandle(err error) bool {
|
||||
err = getInnerError(err)
|
||||
return err == ErrInvalidHandle
|
||||
}
|
||||
|
||||
// IsAlreadyClosed checks if an error is caused by the Container or Process having been
|
||||
// already closed by a call to the Close() method.
|
||||
func IsAlreadyClosed(err error) bool {
|
||||
|
@ -281,6 +296,7 @@ func IsTimeout(err error) bool {
|
|||
func IsAlreadyStopped(err error) bool {
|
||||
err = getInnerError(err)
|
||||
return err == ErrVmcomputeAlreadyStopped ||
|
||||
err == ErrProcessAlreadyStopped ||
|
||||
err == ErrElementNotFound
|
||||
}
|
||||
|
||||
|
|
|
@ -3,7 +3,9 @@ package hcs
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
@ -16,16 +18,17 @@ import (
|
|||
|
||||
// ContainerError is an error encountered in HCS
|
||||
type Process struct {
|
||||
handleLock sync.RWMutex
|
||||
handle vmcompute.HcsProcess
|
||||
processID int
|
||||
system *System
|
||||
hasCachedStdio bool
|
||||
stdioLock sync.Mutex
|
||||
stdin io.WriteCloser
|
||||
stdout io.ReadCloser
|
||||
stderr io.ReadCloser
|
||||
callbackNumber uintptr
|
||||
handleLock sync.RWMutex
|
||||
handle vmcompute.HcsProcess
|
||||
processID int
|
||||
system *System
|
||||
hasCachedStdio bool
|
||||
stdioLock sync.Mutex
|
||||
stdin io.WriteCloser
|
||||
stdout io.ReadCloser
|
||||
stderr io.ReadCloser
|
||||
callbackNumber uintptr
|
||||
killSignalDelivered bool
|
||||
|
||||
closedWaitOnce sync.Once
|
||||
waitBlock chan struct{}
|
||||
|
@ -149,12 +152,45 @@ func (process *Process) Kill(ctx context.Context) (bool, error) {
|
|||
return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
|
||||
}
|
||||
|
||||
if process.killSignalDelivered {
|
||||
// A kill signal has already been sent to this process. Sending a second
|
||||
// one offers no real benefit, as processes cannot stop themselves from
|
||||
// being terminated, once a TerminateProcess has been issued. Sending a
|
||||
// second kill may result in a number of errors (two of which detailed bellow)
|
||||
// and which we can avoid handling.
|
||||
return true, nil
|
||||
}
|
||||
|
||||
resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle)
|
||||
if err != nil {
|
||||
// We still need to check these two cases, as processes may still be killed by an
|
||||
// external actor (human operator, OOM, random script etc).
|
||||
if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) {
|
||||
// There are two cases where it should be safe to ignore an error returned
|
||||
// by HcsTerminateProcess. The first one is cause by the fact that
|
||||
// HcsTerminateProcess ends up calling TerminateProcess in the context
|
||||
// of a container. According to the TerminateProcess documentation:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks
|
||||
// After a process has terminated, call to TerminateProcess with open
|
||||
// handles to the process fails with ERROR_ACCESS_DENIED (5) error code.
|
||||
// It's safe to ignore this error here. HCS should always have permissions
|
||||
// to kill processes inside any container. So an ERROR_ACCESS_DENIED
|
||||
// is unlikely to be anything else than what the ending remarks in the
|
||||
// documentation states.
|
||||
//
|
||||
// The second case is generated by hcs itself, if for any reason HcsTerminateProcess
|
||||
// is called twice in a very short amount of time. In such cases, hcs may return
|
||||
// HCS_E_PROCESS_ALREADY_STOPPED.
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
events := processHcsResult(ctx, resultJSON)
|
||||
delivered, err := process.processSignalResult(ctx, err)
|
||||
if err != nil {
|
||||
err = makeProcessError(process, operation, err, events)
|
||||
}
|
||||
|
||||
process.killSignalDelivered = delivered
|
||||
return delivered, err
|
||||
}
|
||||
|
||||
|
|
|
@ -27,4 +27,10 @@ type Attachment struct {
|
|||
CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"`
|
||||
|
||||
ReadOnly bool `json:"ReadOnly,omitempty"`
|
||||
|
||||
SupportCompressedVolumes bool `json:"SupportCompressedVolumes,omitempty"`
|
||||
|
||||
AlwaysAllowSparseFiles bool `json:"AlwaysAllowSparseFiles,omitempty"`
|
||||
|
||||
ExtensibleVirtualDiskType string `json:"ExtensibleVirtualDiskType,omitempty"`
|
||||
}
|
||||
|
|
|
@ -31,4 +31,6 @@ type Container struct {
|
|||
RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"`
|
||||
|
||||
AssignedDevices []Device `json:"AssignedDevices,omitempty"`
|
||||
|
||||
AdditionalDeviceNamespace *ContainerDefinitionDevice `json:"AdditionalDeviceNamespace,omitempty"`
|
||||
}
|
||||
|
|
|
@ -14,5 +14,5 @@ type CpuGroupConfig struct {
|
|||
Affinity *CpuGroupAffinity `json:"Affinity,omitempty"`
|
||||
GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"`
|
||||
// Hypervisor CPU group IDs exposed to clients
|
||||
HypervisorGroupId int32 `json:"HypervisorGroupId,omitempty"`
|
||||
HypervisorGroupId uint64 `json:"HypervisorGroupId,omitempty"`
|
||||
}
|
||||
|
|
|
@ -12,9 +12,9 @@ package hcsschema
|
|||
type DeviceType string
|
||||
|
||||
const (
|
||||
ClassGUID DeviceType = "ClassGuid"
|
||||
DeviceInstance DeviceType = "DeviceInstance"
|
||||
GPUMirror DeviceType = "GpuMirror"
|
||||
ClassGUID DeviceType = "ClassGuid"
|
||||
DeviceInstanceID DeviceType = "DeviceInstance"
|
||||
GPUMirror DeviceType = "GpuMirror"
|
||||
)
|
||||
|
||||
type Device struct {
|
||||
|
@ -22,6 +22,6 @@ type Device struct {
|
|||
Type DeviceType `json:"Type,omitempty"`
|
||||
// The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid.
|
||||
InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"`
|
||||
// The location path of the device to assign to the container. Only used when Type is DeviceInstance.
|
||||
// The location path of the device to assign to the container. Only used when Type is DeviceInstanceID.
|
||||
LocationPath string `json:"LocationPath,omitempty"`
|
||||
}
|
||||
|
|
14
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go
generated
vendored
Normal file
14
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type ContainerDefinitionDevice struct {
|
||||
DeviceExtension []DeviceExtension `json:"device_extension,omitempty"`
|
||||
}
|
15
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go
generated
vendored
Normal file
15
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type DeviceCategory struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
InterfaceClass []InterfaceClass `json:"interface_class,omitempty"`
|
||||
}
|
15
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go
generated
vendored
Normal file
15
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type DeviceExtension struct {
|
||||
DeviceCategory *DeviceCategory `json:"device_category,omitempty"`
|
||||
Namespace *DeviceExtensionNamespace `json:"namespace,omitempty"`
|
||||
}
|
17
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go
generated
vendored
Normal file
17
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
|||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type DeviceInstance struct {
|
||||
Id string `json:"id,omitempty"`
|
||||
LocationPath string `json:"location_path,omitempty"`
|
||||
PortName string `json:"port_name,omitempty"`
|
||||
InterfaceClass []InterfaceClass `json:"interface_class,omitempty"`
|
||||
}
|
16
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go
generated
vendored
Normal file
16
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
|||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type DeviceNamespace struct {
|
||||
RequiresDriverstore bool `json:"requires_driverstore,omitempty"`
|
||||
DeviceCategory []DeviceCategory `json:"device_category,omitempty"`
|
||||
DeviceInstance []DeviceInstance `json:"device_instance,omitempty"`
|
||||
}
|
16
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go
generated
vendored
Normal file
16
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
|||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type InterfaceClass struct {
|
||||
Type_ string `json:"type,omitempty"`
|
||||
Identifier string `json:"identifier,omitempty"`
|
||||
Recurse bool `json:"recurse,omitempty"`
|
||||
}
|
15
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go
generated
vendored
Normal file
15
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type DeviceExtensionNamespace struct {
|
||||
Ob *ObjectNamespace `json:"ob,omitempty"`
|
||||
Device *DeviceNamespace `json:"device,omitempty"`
|
||||
}
|
18
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go
generated
vendored
Normal file
18
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type ObjectDirectory struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Clonesd string `json:"clonesd,omitempty"`
|
||||
Shadow string `json:"shadow,omitempty"`
|
||||
Symlink []ObjectSymlink `json:"symlink,omitempty"`
|
||||
Objdir []ObjectDirectory `json:"objdir,omitempty"`
|
||||
}
|
16
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go
generated
vendored
Normal file
16
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
|||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type ObjectNamespace struct {
|
||||
Shadow string `json:"shadow,omitempty"`
|
||||
Symlink []ObjectSymlink `json:"symlink,omitempty"`
|
||||
Objdir []ObjectDirectory `json:"objdir,omitempty"`
|
||||
}
|
18
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go
generated
vendored
Normal file
18
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type ObjectSymlink struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Path string `json:"path,omitempty"`
|
||||
Scope string `json:"scope,omitempty"`
|
||||
Pathtoclone string `json:"pathtoclone,omitempty"`
|
||||
AccessMask int32 `json:"access_mask,omitempty"`
|
||||
}
|
15
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go
generated
vendored
Normal file
15
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type VirtualPMemMapping struct {
|
||||
HostPath string `json:"HostPath,omitempty"`
|
||||
ImageFormat string `json:"ImageFormat,omitempty"`
|
||||
}
|
|
@ -20,6 +20,7 @@ type HNSEndpoint struct {
|
|||
IPv6Address net.IP `json:",omitempty"`
|
||||
DNSSuffix string `json:",omitempty"`
|
||||
DNSServerList string `json:",omitempty"`
|
||||
DNSDomain string `json:",omitempty"`
|
||||
GatewayAddress string `json:",omitempty"`
|
||||
GatewayAddressV6 string `json:",omitempty"`
|
||||
EnableInternalDNS bool `json:",omitempty"`
|
||||
|
|
|
@ -22,9 +22,9 @@ const (
|
|||
|
||||
type NatPolicy struct {
|
||||
Type PolicyType `json:"Type"`
|
||||
Protocol string
|
||||
InternalPort uint16
|
||||
ExternalPort uint16
|
||||
Protocol string `json:",omitempty"`
|
||||
InternalPort uint16 `json:",omitempty"`
|
||||
ExternalPort uint16 `json:",omitempty"`
|
||||
}
|
||||
|
||||
type QosPolicy struct {
|
||||
|
@ -88,20 +88,20 @@ const (
|
|||
type ACLPolicy struct {
|
||||
Type PolicyType `json:"Type"`
|
||||
Id string `json:"Id,omitempty"`
|
||||
Protocol uint16
|
||||
Protocols string `json:"Protocols,omitempty"`
|
||||
InternalPort uint16
|
||||
Protocol uint16 `json:",omitempty"`
|
||||
Protocols string `json:"Protocols,omitempty"`
|
||||
InternalPort uint16 `json:",omitempty"`
|
||||
Action ActionType
|
||||
Direction DirectionType
|
||||
LocalAddresses string
|
||||
RemoteAddresses string
|
||||
LocalPorts string `json:"LocalPorts,omitempty"`
|
||||
LocalPort uint16
|
||||
RemotePorts string `json:"RemotePorts,omitempty"`
|
||||
RemotePort uint16
|
||||
RuleType RuleType `json:"RuleType,omitempty"`
|
||||
Priority uint16
|
||||
ServiceName string
|
||||
LocalAddresses string `json:",omitempty"`
|
||||
RemoteAddresses string `json:",omitempty"`
|
||||
LocalPorts string `json:"LocalPorts,omitempty"`
|
||||
LocalPort uint16 `json:",omitempty"`
|
||||
RemotePorts string `json:"RemotePorts,omitempty"`
|
||||
RemotePort uint16 `json:",omitempty"`
|
||||
RuleType RuleType `json:"RuleType,omitempty"`
|
||||
Priority uint16 `json:",omitempty"`
|
||||
ServiceName string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Policy struct {
|
||||
|
|
|
@ -21,7 +21,7 @@ func ActivateLayer(ctx context.Context, path string) (err error) {
|
|||
|
||||
err = activateLayer(&stdDriverInfo, path)
|
||||
if err != nil {
|
||||
return hcserror.New(err, title+" - failed", "")
|
||||
return hcserror.New(err, title, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -21,7 +21,7 @@ func CreateLayer(ctx context.Context, path, parent string) (err error) {
|
|||
|
||||
err = createLayer(&stdDriverInfo, path, parent)
|
||||
if err != nil {
|
||||
return hcserror.New(err, title+" - failed", "")
|
||||
return hcserror.New(err, title, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -28,7 +28,7 @@ func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []str
|
|||
|
||||
err = createSandboxLayer(&stdDriverInfo, path, 0, layers)
|
||||
if err != nil {
|
||||
return hcserror.New(err, title+" - failed", "")
|
||||
return hcserror.New(err, title, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -19,7 +19,7 @@ func DestroyLayer(ctx context.Context, path string) (err error) {
|
|||
|
||||
err = destroyLayer(&stdDriverInfo, path)
|
||||
if err != nil {
|
||||
return hcserror.New(err, title+" - failed", "")
|
||||
return hcserror.New(err, title, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -25,7 +25,7 @@ func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error
|
|||
|
||||
err = expandSandboxSize(&stdDriverInfo, path, size)
|
||||
if err != nil {
|
||||
return hcserror.New(err, title+" - failed", "")
|
||||
return hcserror.New(err, title, "")
|
||||
}
|
||||
|
||||
// Manually expand the volume now in order to work around bugs in 19H1 and
|
||||
|
|
|
@ -35,7 +35,7 @@ func ExportLayer(ctx context.Context, path string, exportFolderPath string, pare
|
|||
|
||||
err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers)
|
||||
if err != nil {
|
||||
return hcserror.New(err, title+" - failed", "")
|
||||
return hcserror.New(err, title, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) {
|
|||
log.G(ctx).Debug("Calling proc (1)")
|
||||
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil)
|
||||
if err != nil {
|
||||
return "", hcserror.New(err, title+" - failed", "(first call)")
|
||||
return "", hcserror.New(err, title, "(first call)")
|
||||
}
|
||||
|
||||
// Allocate a mount path of the returned length.
|
||||
|
@ -41,7 +41,7 @@ func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) {
|
|||
log.G(ctx).Debug("Calling proc (2)")
|
||||
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0])
|
||||
if err != nil {
|
||||
return "", hcserror.New(err, title+" - failed", "(second call)")
|
||||
return "", hcserror.New(err, title, "(second call)")
|
||||
}
|
||||
|
||||
mountPath := syscall.UTF16ToString(mountPathp[0:])
|
||||
|
|
|
@ -21,7 +21,7 @@ func GetSharedBaseImages(ctx context.Context) (_ string, err error) {
|
|||
var buffer *uint16
|
||||
err = getBaseImages(&buffer)
|
||||
if err != nil {
|
||||
return "", hcserror.New(err, title+" - failed", "")
|
||||
return "", hcserror.New(err, title, "")
|
||||
}
|
||||
imageData := interop.ConvertAndFreeCoTaskMemString(buffer)
|
||||
span.AddAttributes(trace.StringAttribute("imageData", imageData))
|
||||
|
|
|
@ -20,7 +20,7 @@ func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error
|
|||
|
||||
err = grantVmAccess(vmid, filepath)
|
||||
if err != nil {
|
||||
return hcserror.New(err, title+" - failed", "")
|
||||
return hcserror.New(err, title, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -36,7 +36,7 @@ func ImportLayer(ctx context.Context, path string, importFolderPath string, pare
|
|||
|
||||
err = importLayer(&stdDriverInfo, path, importFolderPath, layers)
|
||||
if err != nil {
|
||||
return hcserror.New(err, title+" - failed", "")
|
||||
return hcserror.New(err, title, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -21,7 +21,7 @@ func LayerExists(ctx context.Context, path string) (_ bool, err error) {
|
|||
var exists uint32
|
||||
err = layerExists(&stdDriverInfo, path, &exists)
|
||||
if err != nil {
|
||||
return false, hcserror.New(err, title+" - failed", "")
|
||||
return false, hcserror.New(err, title, "")
|
||||
}
|
||||
span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0))
|
||||
return exists != 0, nil
|
||||
|
|
|
@ -76,7 +76,7 @@ func readTombstones(path string) (map[string]([]string), error) {
|
|||
defer tf.Close()
|
||||
s := bufio.NewScanner(tf)
|
||||
if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" {
|
||||
return nil, errors.New("Invalid tombstones file")
|
||||
return nil, errors.New("invalid tombstones file")
|
||||
}
|
||||
|
||||
ts := make(map[string]([]string))
|
||||
|
|
|
@ -17,12 +17,12 @@ func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) {
|
|||
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
|
||||
defer span.End()
|
||||
defer func() { oc.SetSpanStatus(span, err) }()
|
||||
span.AddAttributes(trace.StringAttribute("name", name))
|
||||
span.AddAttributes(trace.StringAttribute("objectName", name))
|
||||
|
||||
var id guid.GUID
|
||||
err = nameToGuid(name, &id)
|
||||
if err != nil {
|
||||
return guid.GUID{}, hcserror.New(err, title+" - failed", "")
|
||||
return guid.GUID{}, hcserror.New(err, title, "")
|
||||
}
|
||||
span.AddAttributes(trace.StringAttribute("guid", id.String()))
|
||||
return id, nil
|
||||
|
|
|
@ -38,7 +38,7 @@ func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (
|
|||
defer prepareLayerLock.Unlock()
|
||||
err = prepareLayer(&stdDriverInfo, path, layers)
|
||||
if err != nil {
|
||||
return hcserror.New(err, title+" - failed", "")
|
||||
return hcserror.New(err, title, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -19,7 +19,7 @@ func UnprepareLayer(ctx context.Context, path string) (err error) {
|
|||
|
||||
err = unprepareLayer(&stdDriverInfo, path)
|
||||
if err != nil {
|
||||
return hcserror.New(err, title+" - failed", "")
|
||||
return hcserror.New(err, title, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,44 @@
|
|||
package winapi
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1
|
||||
|
||||
// CreatePseudoConsole creates a windows pseudo console.
|
||||
func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error {
|
||||
// We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand.
|
||||
return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, 0, hpcon)
|
||||
}
|
||||
|
||||
// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`.
|
||||
func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error {
|
||||
// We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand.
|
||||
return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size))))
|
||||
}
|
||||
|
||||
// HRESULT WINAPI CreatePseudoConsole(
|
||||
// _In_ COORD size,
|
||||
// _In_ HANDLE hInput,
|
||||
// _In_ HANDLE hOutput,
|
||||
// _In_ DWORD dwFlags,
|
||||
// _Out_ HPCON* phPC
|
||||
// );
|
||||
//
|
||||
//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole
|
||||
|
||||
// void WINAPI ClosePseudoConsole(
|
||||
// _In_ HPCON hPC
|
||||
// );
|
||||
//
|
||||
//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole
|
||||
|
||||
// HRESULT WINAPI ResizePseudoConsole(
|
||||
// _In_ HPCON hPC ,
|
||||
// _In_ COORD size
|
||||
// );
|
||||
//
|
||||
//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
|
|
@ -1,27 +1,4 @@
|
|||
package winapi
|
||||
|
||||
// VOID RtlMoveMemory(
|
||||
// _Out_ VOID UNALIGNED *Destination,
|
||||
// _In_ const VOID UNALIGNED *Source,
|
||||
// _In_ SIZE_T Length
|
||||
// );
|
||||
//sys RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) = kernel32.RtlMoveMemory
|
||||
|
||||
//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc
|
||||
//sys LocalFree(ptr uintptr) = kernel32.LocalFree
|
||||
|
||||
// BOOL QueryWorkingSet(
|
||||
// HANDLE hProcess,
|
||||
// PVOID pv,
|
||||
// DWORD cb
|
||||
// );
|
||||
//sys QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSet
|
||||
|
||||
type PSAPI_WORKING_SET_INFORMATION struct {
|
||||
NumberOfEntries uintptr
|
||||
WorkingSetInfo [1]PSAPI_WORKING_SET_BLOCK
|
||||
}
|
||||
|
||||
type PSAPI_WORKING_SET_BLOCK struct {
|
||||
Flags uintptr
|
||||
}
|
||||
|
|
|
@ -2,9 +2,7 @@ package winapi
|
|||
|
||||
const PROCESS_ALL_ACCESS uint32 = 2097151
|
||||
|
||||
// DWORD GetProcessImageFileNameW(
|
||||
// HANDLE hProcess,
|
||||
// LPWSTR lpImageFileName,
|
||||
// DWORD nSize
|
||||
// );
|
||||
//sys GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) = kernel32.GetProcessImageFileNameW
|
||||
const (
|
||||
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
|
||||
PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D
|
||||
)
|
||||
|
|
|
@ -20,36 +20,41 @@ func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) {
|
|||
return
|
||||
}
|
||||
|
||||
// UnicodeString corresponds to UNICODE_STRING win32 struct defined here
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string
|
||||
type UnicodeString struct {
|
||||
Length uint16
|
||||
MaximumLength uint16
|
||||
Buffer *uint16
|
||||
}
|
||||
|
||||
// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. This value
|
||||
// denotes the maximum number of wide chars a path can have.
|
||||
const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767
|
||||
|
||||
//String converts a UnicodeString to a golang string
|
||||
func (uni UnicodeString) String() string {
|
||||
// UnicodeString is not guaranteed to be null terminated, therefore
|
||||
// use the UnicodeString's Length field
|
||||
return syscall.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2)))
|
||||
return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2)))
|
||||
}
|
||||
|
||||
// NewUnicodeString allocates a new UnicodeString and copies `s` into
|
||||
// the buffer of the new UnicodeString.
|
||||
func NewUnicodeString(s string) (*UnicodeString, error) {
|
||||
// Get length of original `s` to use in the UnicodeString since the `buf`
|
||||
// created later will have an additional trailing null character
|
||||
length := len(s)
|
||||
if length > 32767 {
|
||||
return nil, syscall.ENAMETOOLONG
|
||||
}
|
||||
|
||||
buf, err := windows.UTF16FromString(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH {
|
||||
return nil, syscall.ENAMETOOLONG
|
||||
}
|
||||
|
||||
uni := &UnicodeString{
|
||||
Length: uint16(length * 2),
|
||||
MaximumLength: uint16(length * 2),
|
||||
// The length is in bytes and should not include the trailing null character.
|
||||
Length: uint16((len(buf) - 1) * 2),
|
||||
MaximumLength: uint16((len(buf) - 1) * 2),
|
||||
Buffer: &buf[0],
|
||||
}
|
||||
return uni, nil
|
||||
|
|
|
@ -2,4 +2,4 @@
|
|||
// be thought of as an extension to golang.org/x/sys/windows.
|
||||
package winapi
|
||||
|
||||
//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
|
||||
//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
|
||||
|
|
|
@ -37,13 +37,15 @@ func errnoErr(e syscall.Errno) error {
|
|||
}
|
||||
|
||||
var (
|
||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
||||
modntdll = windows.NewLazySystemDLL("ntdll.dll")
|
||||
modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
|
||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
||||
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
|
||||
modpsapi = windows.NewLazySystemDLL("psapi.dll")
|
||||
modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll")
|
||||
|
||||
procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole")
|
||||
procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole")
|
||||
procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole")
|
||||
procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation")
|
||||
procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
|
||||
procSearchPathW = modkernel32.NewProc("SearchPathW")
|
||||
|
@ -57,11 +59,8 @@ var (
|
|||
procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject")
|
||||
procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject")
|
||||
procLogonUserW = modadvapi32.NewProc("LogonUserW")
|
||||
procRtlMoveMemory = modkernel32.NewProc("RtlMoveMemory")
|
||||
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
|
||||
procLocalFree = modkernel32.NewProc("LocalFree")
|
||||
procQueryWorkingSet = modpsapi.NewProc("QueryWorkingSet")
|
||||
procGetProcessImageFileNameW = modkernel32.NewProc("GetProcessImageFileNameW")
|
||||
procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
|
||||
procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
|
||||
procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
|
||||
|
@ -74,6 +73,33 @@ var (
|
|||
procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError")
|
||||
)
|
||||
|
||||
func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) {
|
||||
r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0)
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
}
|
||||
hr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ClosePseudoConsole(hpc windows.Handle) {
|
||||
syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0)
|
||||
return
|
||||
}
|
||||
|
||||
func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
|
||||
r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0)
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
}
|
||||
hr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) {
|
||||
r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
|
||||
status = uint32(r0)
|
||||
|
@ -219,18 +245,6 @@ func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uin
|
|||
return
|
||||
}
|
||||
|
||||
func RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procRtlMoveMemory.Addr(), 3, uintptr(unsafe.Pointer(destination)), uintptr(unsafe.Pointer(source)), uintptr(length))
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func LocalAlloc(flags uint32, size int) (ptr uintptr) {
|
||||
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0)
|
||||
ptr = uintptr(r0)
|
||||
|
@ -242,31 +256,6 @@ func LocalFree(ptr uintptr) {
|
|||
return
|
||||
}
|
||||
|
||||
func QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procQueryWorkingSet.Addr(), 3, uintptr(handle), uintptr(pv), uintptr(cb))
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameW.Addr(), 3, uintptr(hProcess), uintptr(unsafe.Pointer(imageFileName)), uintptr(nSize))
|
||||
size = uint32(r0)
|
||||
if size == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
|
||||
r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
|
||||
amount = uint32(r0)
|
||||
|
|
|
@ -35,4 +35,16 @@ const (
|
|||
|
||||
// V20H2 corresponds to Windows Server 20H2 (semi-annual channel).
|
||||
V20H2 = 19042
|
||||
|
||||
// V21H1 corresponds to Windows Server 21H1 (semi-annual channel).
|
||||
V21H1 = 19043
|
||||
|
||||
// V21H2Win10 corresponds to Windows 10 (November 2021 Update).
|
||||
V21H2Win10 = 19044
|
||||
|
||||
// V21H2Server corresponds to Windows Server 2022 (ltsc2022).
|
||||
V21H2Server = 20348
|
||||
|
||||
// V21H2Win11 corresponds to Windows 11 (original release).
|
||||
V21H2Win11 = 22000
|
||||
)
|
||||
|
|
|
@ -74,6 +74,15 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
|
|||
selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128))
|
||||
}
|
||||
|
||||
// We set CompressionNone as the preferred compression algorithm because
|
||||
// of compression side channel attacks, then append the configured
|
||||
// DefaultCompressionAlgo if any is set (to signal support for cases
|
||||
// where the application knows that using compression is safe).
|
||||
selfSignature.PreferredCompression = []uint8{uint8(packet.CompressionNone)}
|
||||
if config.Compression() != packet.CompressionNone {
|
||||
selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression()))
|
||||
}
|
||||
|
||||
// And for DefaultMode.
|
||||
selfSignature.PreferredAEAD = []uint8{uint8(config.AEAD().Mode())}
|
||||
if config.AEAD().Mode() != packet.AEADModeEAX {
|
||||
|
|
|
@ -37,15 +37,17 @@ type Identity struct {
|
|||
Name string // by convention, has the form "Full Name (comment) <email@example.com>"
|
||||
UserId *packet.UserId
|
||||
SelfSignature *packet.Signature
|
||||
Signatures []*packet.Signature
|
||||
Revocations []*packet.Signature
|
||||
Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures
|
||||
}
|
||||
|
||||
// A Subkey is an additional public key in an Entity. Subkeys can be used for
|
||||
// encryption.
|
||||
type Subkey struct {
|
||||
PublicKey *packet.PublicKey
|
||||
PrivateKey *packet.PrivateKey
|
||||
Sig *packet.Signature
|
||||
PublicKey *packet.PublicKey
|
||||
PrivateKey *packet.PrivateKey
|
||||
Sig *packet.Signature
|
||||
Revocations []*packet.Signature
|
||||
}
|
||||
|
||||
// A Key identifies a specific public key in an Entity. This is either the
|
||||
|
@ -55,6 +57,7 @@ type Key struct {
|
|||
PublicKey *packet.PublicKey
|
||||
PrivateKey *packet.PrivateKey
|
||||
SelfSignature *packet.Signature
|
||||
Revocations []*packet.Signature
|
||||
}
|
||||
|
||||
// A KeyRing provides access to public and private keys.
|
||||
|
@ -71,28 +74,53 @@ type KeyRing interface {
|
|||
DecryptionKeys() []Key
|
||||
}
|
||||
|
||||
// PrimaryIdentity returns the Identity marked as primary or the first identity
|
||||
// if none are so marked.
|
||||
// PrimaryIdentity returns an Identity, preferring non-revoked identities,
|
||||
// identities marked as primary, or the latest-created identity, in that order.
|
||||
func (e *Entity) PrimaryIdentity() *Identity {
|
||||
var firstIdentity *Identity
|
||||
var primaryIdentity *Identity
|
||||
for _, ident := range e.Identities {
|
||||
if firstIdentity == nil {
|
||||
firstIdentity = ident
|
||||
}
|
||||
if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
|
||||
return ident
|
||||
if shouldPreferIdentity(primaryIdentity, ident) {
|
||||
primaryIdentity = ident
|
||||
}
|
||||
}
|
||||
return firstIdentity
|
||||
return primaryIdentity
|
||||
}
|
||||
|
||||
func shouldPreferIdentity(existingId, potentialNewId *Identity) bool {
|
||||
if (existingId == nil) {
|
||||
return true
|
||||
}
|
||||
|
||||
if (len(existingId.Revocations) > len(potentialNewId.Revocations)) {
|
||||
return true
|
||||
}
|
||||
|
||||
if (len(existingId.Revocations) < len(potentialNewId.Revocations)) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId &&
|
||||
!(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId)) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (!(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) &&
|
||||
potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) {
|
||||
return true
|
||||
}
|
||||
|
||||
return potentialNewId.SelfSignature.CreationTime.After(existingId.SelfSignature.CreationTime)
|
||||
}
|
||||
|
||||
// EncryptionKey returns the best candidate Key for encrypting a message to the
|
||||
// given Entity.
|
||||
func (e *Entity) EncryptionKey(now time.Time) (Key, bool) {
|
||||
// Fail to find any encryption key if the primary key has expired.
|
||||
// Fail to find any encryption key if the...
|
||||
i := e.PrimaryIdentity()
|
||||
primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now)
|
||||
if primaryKeyExpired {
|
||||
if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired
|
||||
i.SelfSignature.SigExpired(now) || // user ID self-signature has expired
|
||||
e.Revoked(now) || // primary key has been revoked
|
||||
i.Revoked(now) { // user ID has been revoked
|
||||
return Key{}, false
|
||||
}
|
||||
|
||||
|
@ -104,6 +132,8 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) {
|
|||
subkey.Sig.FlagEncryptCommunications &&
|
||||
subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
|
||||
!subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
|
||||
!subkey.Sig.SigExpired(now) &&
|
||||
!subkey.Revoked(now) &&
|
||||
(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
|
||||
candidateSubkey = i
|
||||
maxTime = subkey.Sig.CreationTime
|
||||
|
@ -112,17 +142,16 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) {
|
|||
|
||||
if candidateSubkey != -1 {
|
||||
subkey := e.Subkeys[candidateSubkey]
|
||||
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
|
||||
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
|
||||
}
|
||||
|
||||
// If we don't have any candidate subkeys for encryption and
|
||||
// the primary key doesn't have any usage metadata then we
|
||||
// assume that the primary key is ok. Or, if the primary key is
|
||||
// marked as ok to encrypt with, then we can obviously use it.
|
||||
// Also, check expiry again just to be safe.
|
||||
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&
|
||||
e.PrimaryKey.PubKeyAlgo.CanEncrypt() && !primaryKeyExpired {
|
||||
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
|
||||
e.PrimaryKey.PubKeyAlgo.CanEncrypt() {
|
||||
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true
|
||||
}
|
||||
|
||||
return Key{}, false
|
||||
|
@ -137,10 +166,12 @@ func (e *Entity) SigningKey(now time.Time) (Key, bool) {
|
|||
// SigningKeyById return the Key for signing a message with this
|
||||
// Entity and keyID.
|
||||
func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) {
|
||||
// Fail to find any signing key if the primary key has expired.
|
||||
// Fail to find any signing key if the...
|
||||
i := e.PrimaryIdentity()
|
||||
primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now)
|
||||
if primaryKeyExpired {
|
||||
if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired
|
||||
i.SelfSignature.SigExpired(now) || // user ID self-signature has expired
|
||||
e.Revoked(now) || // primary key has been revoked
|
||||
i.Revoked(now) { // user ID has been revoked
|
||||
return Key{}, false
|
||||
}
|
||||
|
||||
|
@ -152,8 +183,10 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) {
|
|||
subkey.Sig.FlagSign &&
|
||||
subkey.PublicKey.PubKeyAlgo.CanSign() &&
|
||||
!subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
|
||||
!subkey.Sig.SigExpired(now) &&
|
||||
!subkey.Revoked(now) &&
|
||||
(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) &&
|
||||
(id == 0 || subkey.PrivateKey.KeyId == id) {
|
||||
(id == 0 || subkey.PublicKey.KeyId == id) {
|
||||
candidateSubkey = idx
|
||||
maxTime = subkey.Sig.CreationTime
|
||||
}
|
||||
|
@ -161,22 +194,63 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) {
|
|||
|
||||
if candidateSubkey != -1 {
|
||||
subkey := e.Subkeys[candidateSubkey]
|
||||
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
|
||||
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
|
||||
}
|
||||
|
||||
// If we have no candidate subkey then we assume that it's ok to sign
|
||||
// with the primary key. Or, if the primary key is marked as ok to
|
||||
// sign with, then we can use it. Also, check expiry again just to be safe.
|
||||
// sign with, then we can use it.
|
||||
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign &&
|
||||
e.PrimaryKey.PubKeyAlgo.CanSign() && !primaryKeyExpired &&
|
||||
(id == 0 || e.PrivateKey.KeyId == id) {
|
||||
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
|
||||
e.PrimaryKey.PubKeyAlgo.CanSign() &&
|
||||
(id == 0 || e.PrimaryKey.KeyId == id) {
|
||||
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true
|
||||
}
|
||||
|
||||
// No keys with a valid Signing Flag or no keys matched the id passed in
|
||||
return Key{}, false
|
||||
}
|
||||
|
||||
func revoked(revocations []*packet.Signature, now time.Time) bool {
|
||||
for _, revocation := range revocations {
|
||||
if revocation.RevocationReason != nil && *revocation.RevocationReason == packet.KeyCompromised {
|
||||
// If the key is compromised, the key is considered revoked even before the revocation date.
|
||||
return true
|
||||
}
|
||||
if !revocation.SigExpired(now) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Revoked returns whether the entity has any direct key revocation signatures.
|
||||
// Note that third-party revocation signatures are not supported.
|
||||
// Note also that Identity and Subkey revocation should be checked separately.
|
||||
func (e *Entity) Revoked(now time.Time) bool {
|
||||
return revoked(e.Revocations, now)
|
||||
}
|
||||
|
||||
// Revoked returns whether the identity has been revoked by a self-signature.
|
||||
// Note that third-party revocation signatures are not supported.
|
||||
func (i *Identity) Revoked(now time.Time) bool {
|
||||
return revoked(i.Revocations, now)
|
||||
}
|
||||
|
||||
// Revoked returns whether the subkey has been revoked by a self-signature.
|
||||
// Note that third-party revocation signatures are not supported.
|
||||
func (s *Subkey) Revoked(now time.Time) bool {
|
||||
return revoked(s.Revocations, now)
|
||||
}
|
||||
|
||||
// Revoked returns whether the key or subkey has been revoked by a self-signature.
|
||||
// Note that third-party revocation signatures are not supported.
|
||||
// Note also that Identity revocation should be checked separately.
|
||||
// Normally, it's not necessary to call this function, except on keys returned by
|
||||
// KeysById or KeysByIdUsage.
|
||||
func (key *Key) Revoked(now time.Time) bool {
|
||||
return revoked(key.Revocations, now)
|
||||
}
|
||||
|
||||
// An EntityList contains one or more Entities.
|
||||
type EntityList []*Entity
|
||||
|
||||
|
@ -184,21 +258,14 @@ type EntityList []*Entity
|
|||
func (el EntityList) KeysById(id uint64) (keys []Key) {
|
||||
for _, e := range el {
|
||||
if e.PrimaryKey.KeyId == id {
|
||||
var selfSig *packet.Signature
|
||||
for _, ident := range e.Identities {
|
||||
if selfSig == nil {
|
||||
selfSig = ident.SelfSignature
|
||||
} else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
|
||||
selfSig = ident.SelfSignature
|
||||
break
|
||||
}
|
||||
}
|
||||
keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig})
|
||||
ident := e.PrimaryIdentity()
|
||||
selfSig := ident.SelfSignature
|
||||
keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations})
|
||||
}
|
||||
|
||||
for _, subKey := range e.Subkeys {
|
||||
if subKey.PublicKey.KeyId == id {
|
||||
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
|
||||
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -210,14 +277,6 @@ func (el EntityList) KeysById(id uint64) (keys []Key) {
|
|||
// the bitwise-OR of packet.KeyFlag* values.
|
||||
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
|
||||
for _, key := range el.KeysById(id) {
|
||||
if len(key.Entity.Revocations) > 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if key.SelfSignature.RevocationReason != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if key.SelfSignature.FlagsValid && requiredUsage != 0 {
|
||||
var usage byte
|
||||
if key.SelfSignature.FlagCertify {
|
||||
|
@ -247,7 +306,7 @@ func (el EntityList) DecryptionKeys() (keys []Key) {
|
|||
for _, e := range el {
|
||||
for _, subKey := range e.Subkeys {
|
||||
if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
|
||||
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
|
||||
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -442,11 +501,22 @@ func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error {
|
|||
break
|
||||
}
|
||||
|
||||
if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.CheckKeyIdOrFingerprint(e.PrimaryKey) {
|
||||
if sig.SigType != packet.SigTypeGenericCert &&
|
||||
sig.SigType != packet.SigTypePersonaCert &&
|
||||
sig.SigType != packet.SigTypeCasualCert &&
|
||||
sig.SigType != packet.SigTypePositiveCert &&
|
||||
sig.SigType != packet.SigTypeCertificationRevocation {
|
||||
return errors.StructuralError("user ID signature with wrong type")
|
||||
}
|
||||
|
||||
|
||||
if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) {
|
||||
if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
|
||||
return errors.StructuralError("user ID self-signature invalid: " + err.Error())
|
||||
}
|
||||
if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) {
|
||||
if sig.SigType == packet.SigTypeCertificationRevocation {
|
||||
identity.Revocations = append(identity.Revocations, sig)
|
||||
} else if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) {
|
||||
identity.SelfSignature = sig
|
||||
}
|
||||
identity.Signatures = append(identity.Signatures, sig)
|
||||
|
@ -488,9 +558,9 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p
|
|||
|
||||
switch sig.SigType {
|
||||
case packet.SigTypeSubkeyRevocation:
|
||||
subKey.Sig = sig
|
||||
subKey.Revocations = append(subKey.Revocations, sig)
|
||||
case packet.SigTypeSubkeyBinding:
|
||||
if shouldReplaceSubkeySig(subKey.Sig, sig) {
|
||||
if subKey.Sig == nil || sig.CreationTime.After(subKey.Sig.CreationTime) {
|
||||
subKey.Sig = sig
|
||||
}
|
||||
}
|
||||
|
@ -505,22 +575,6 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p
|
|||
return nil
|
||||
}
|
||||
|
||||
func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool {
|
||||
if potentialNewSig == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if existingSig == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if existingSig.SigType == packet.SigTypeSubkeyRevocation {
|
||||
return false // never override a revocation signature
|
||||
}
|
||||
|
||||
return potentialNewSig.CreationTime.After(existingSig.CreationTime)
|
||||
}
|
||||
|
||||
// SerializePrivate serializes an Entity, including private key material, but
|
||||
// excluding signatures from other entities, to the given Writer.
|
||||
// Identities and subkeys are re-signed in case they changed since NewEntry.
|
||||
|
@ -549,6 +603,12 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo
|
|||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, revocation := range e.Revocations {
|
||||
err := revocation.Serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, ident := range e.Identities {
|
||||
err = ident.UserId.Serialize(w)
|
||||
if err != nil {
|
||||
|
@ -560,6 +620,12 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo
|
|||
return
|
||||
}
|
||||
}
|
||||
for _, revocation := range ident.Revocations {
|
||||
err := revocation.Serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = ident.SelfSignature.Serialize(w)
|
||||
if err != nil {
|
||||
return
|
||||
|
@ -583,6 +649,12 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo
|
|||
}
|
||||
}
|
||||
}
|
||||
for _, revocation := range subkey.Revocations {
|
||||
err := revocation.Serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = subkey.Sig.Serialize(w)
|
||||
if err != nil {
|
||||
return
|
||||
|
@ -598,6 +670,12 @@ func (e *Entity) Serialize(w io.Writer) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, revocation := range e.Revocations {
|
||||
err := revocation.Serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, ident := range e.Identities {
|
||||
err = ident.UserId.Serialize(w)
|
||||
if err != nil {
|
||||
|
@ -615,6 +693,12 @@ func (e *Entity) Serialize(w io.Writer) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, revocation := range subkey.Revocations {
|
||||
err := revocation.Serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = subkey.Sig.Serialize(w)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -659,14 +743,13 @@ func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Co
|
|||
// specified reason code and text (RFC4880 section-5.2.3.23).
|
||||
// If config is nil, sensible defaults will be used.
|
||||
func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error {
|
||||
reasonCode := uint8(reason)
|
||||
revSig := &packet.Signature{
|
||||
Version: e.PrimaryKey.Version,
|
||||
CreationTime: config.Now(),
|
||||
SigType: packet.SigTypeKeyRevocation,
|
||||
PubKeyAlgo: packet.PubKeyAlgoRSA,
|
||||
PubKeyAlgo: e.PrimaryKey.PubKeyAlgo,
|
||||
Hash: config.Hash(),
|
||||
RevocationReason: &reasonCode,
|
||||
RevocationReason: &reason,
|
||||
RevocationReasonText: reasonText,
|
||||
IssuerKeyId: &e.PrimaryKey.KeyId,
|
||||
}
|
||||
|
@ -686,22 +769,21 @@ func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, rea
|
|||
return errors.InvalidArgumentError("given subkey is not associated with this key")
|
||||
}
|
||||
|
||||
reasonCode := uint8(reason)
|
||||
revSig := &packet.Signature{
|
||||
Version: e.PrimaryKey.Version,
|
||||
CreationTime: config.Now(),
|
||||
SigType: packet.SigTypeSubkeyRevocation,
|
||||
PubKeyAlgo: packet.PubKeyAlgoRSA,
|
||||
PubKeyAlgo: e.PrimaryKey.PubKeyAlgo,
|
||||
Hash: config.Hash(),
|
||||
RevocationReason: &reasonCode,
|
||||
RevocationReason: &reason,
|
||||
RevocationReasonText: reasonText,
|
||||
IssuerKeyId: &e.PrimaryKey.KeyId,
|
||||
}
|
||||
|
||||
if err := revSig.RevokeKey(sk.PublicKey, e.PrivateKey, config); err != nil {
|
||||
if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sk.Sig = revSig
|
||||
sk.Revocations = append(sk.Revocations, revSig)
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -138,6 +138,69 @@ heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB
|
|||
=IKnw
|
||||
-----END PGP PUBLIC KEY BLOCK-----`
|
||||
|
||||
const keyWithFirstUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
Version: OpenPGP.js v4.10.10
|
||||
Comment: https://openpgpjs.org
|
||||
|
||||
xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0q
|
||||
lX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN
|
||||
91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xO
|
||||
XO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBb
|
||||
naIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX
|
||||
8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEB
|
||||
AAHNIkdvbGFuZyBHb3BoZXIgPHJldm9rZWRAZ29sYW5nLmNvbT7CwI0EMAEK
|
||||
ACAWIQTkiTkktw3HqXqtl/DWgXL0jpxSgwUCWyA79wIdAAAhCRDWgXL0jpxS
|
||||
gxYhBOSJOSS3Dcepeq2X8NaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT
|
||||
6bC1JttG0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZ
|
||||
q8KxHn/KvN6Ns85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy
|
||||
+I0sGyI/Inro0Pzbtvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarY
|
||||
bYB2idtGRci4b9tObOK0BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8j
|
||||
SwEr2O2sUR0yjbgUAXbTxDVE/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3Fazk
|
||||
kSYQD6b97+dkWwb1iWHNI0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFu
|
||||
Zy5jb20+wsCrBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy9I6cUoMFAlsgO5EC
|
||||
GwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AAIQkQ1oFy9I6cUoMW
|
||||
IQTkiTkktw3HqXqtl/DWgXL0jpxSgwiTB/wM094PbeLiNHB3+nKVu/HBmKe1
|
||||
mXV9LBMlbXFw5rV6ZdoS1fZ16m6qE/Th+OVFAZ+xgBCHtf2M4nEAeNOaGoUG
|
||||
LmwPtC8pTTRw8Vhsn8lPHQHjVuVpedJsaFE+HrdC0RkvsAICz6yHC++iMmrK
|
||||
zHuTJVG7QRbbCqNd0fBH9Ik7qeE0FrYNfNKI5T9JQDjaaYb7mSMXwBpur3A/
|
||||
BP3COtodKETB416s0yY6okTEE7LfIV7IOlpfARkXMF84qjEU2QhpV/kZJ0hQ
|
||||
aEUQKQa8EwH3fmSF+2aBHwA/F1TgETtetd7EUlTxEK49eiebhZA7BNZHS9CD
|
||||
rilvZYoDNnweHBMZzsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFm
|
||||
yL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2
|
||||
UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxb
|
||||
YmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmt
|
||||
amHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuM
|
||||
u5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/
|
||||
Y5UWmEdBG6IzABEBAAHCwJMEGAEKACYWIQTkiTkktw3HqXqtl/DWgXL0jpxS
|
||||
gwUCWyA7kQIbDAUJA8JnAAAhCRDWgXL0jpxSgxYhBOSJOSS3Dcepeq2X8NaB
|
||||
cvSOnFKDkFMIAIt64bVZ8x7+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2N
|
||||
nDyf1cLOSimSTILpwLIuv9Uft5PbOraQbYt3xi9yrqdKqGLv80bxqK0NuryN
|
||||
kvh9yyx5WoG1iKqMj9/FjGghuPrRaT4lQinNAghGVkEy1+aXGFrG2DsOC1FF
|
||||
I51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2VyJl9bD5R4SUNy8oQmhOxi+gb
|
||||
hD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+UheiQvzkApQup5c+BhH5z
|
||||
FDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB7qTZOahrETw=
|
||||
=+2T8
|
||||
-----END PGP PUBLIC KEY BLOCK-----
|
||||
`
|
||||
|
||||
const keyWithOnlyUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
|
||||
mDMEYYwB7RYJKwYBBAHaRw8BAQdARimqhPPzyGAXmfQJjcqM1QVPzLtURJSzNVll
|
||||
JV4tEaW0KVJldm9rZWQgUHJpbWFyeSBVc2VyIElEIDxyZXZva2VkQGtleS5jb20+
|
||||
iHgEMBYIACAWIQSpyJZAXYqVEFkjyKutFcS0yeB0LQUCYYwCtgIdAAAKCRCtFcS0
|
||||
yeB0LbSsAQD8OYMaaBjrdzzpwIkP1stgmPd4/kzN/ZG28Ywl6a5F5QEA5Xg7aq4e
|
||||
/t6Fsb4F5iqB956kSPe6YJrikobD/tBbMwSIkAQTFggAOBYhBKnIlkBdipUQWSPI
|
||||
q60VxLTJ4HQtBQJhjAHtAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEK0V
|
||||
xLTJ4HQtBaoBAPZL7luTCji+Tqhn7XNfFE/0QIahCt8k9wfO1cGlB3inAQDf8Tzw
|
||||
ZGR5fNluUcNoVxQT7bUSFStbaGo3k0BaOYPbCLg4BGGMAe0SCisGAQQBl1UBBQEB
|
||||
B0DLwSpveSrbIO/IVZD13yrs1XuB3FURZUnafGrRq7+jUAMBCAeIeAQYFggAIBYh
|
||||
BKnIlkBdipUQWSPIq60VxLTJ4HQtBQJhjAHtAhsMAAoJEK0VxLTJ4HQtZ1oA/j9u
|
||||
8+p3xTNzsmabTL6BkNbMeB/RUKCrlm6woM6AV+vxAQCcXTn3JC2sNoNrLoXuVzaA
|
||||
mcG3/TwG5GSQUUPkrDsGDA==
|
||||
=mFWy
|
||||
-----END PGP PUBLIC KEY BLOCK-----
|
||||
`
|
||||
|
||||
const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
|
||||
mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR
|
||||
|
@ -304,33 +367,34 @@ X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv
|
|||
qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb
|
||||
SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb
|
||||
vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w
|
||||
bGU+wsEUBBMBCgBIBYJfiWKKBYkBX+/UBQsJCAcCCRD7/MgqAV5zMAYVCgkICwIE
|
||||
FgIDAQIXgAIbAwIeARYhBNGmbhojsYLJmA94jPv8yCoBXnMwAAAf0wv/a++a3DkL
|
||||
CttbK4LRIiSry2wb97mxYQoWYzvkKYD1IP/KiamRwzjBKuRSE0qZ2uonQDRGc+zl
|
||||
1H9dwkDLL4T9uJngCCPCBgFW/hOFPvF+WYEKHtOzunqx6KwDHkpdH+hpzfFzIhDo
|
||||
aXiVnDGvJ3H/bVTKq1m2KPXO2ckkXPQXJX9Fx6kPHdvcq+3ZI75IzbD/ue5lBcsy
|
||||
OKLzVu+KLxzlzGui6v1V0fTvU/uqvHlvUxcDAqMnIsDUPakjK2RDeQ38qtN6+PQ5
|
||||
/dT7vx2Wtzesqn2eDbDf5uRfSgmp2hJLJniAKjMCBVAJiOgPb0LXUIcwCGxaiWOA
|
||||
g5ZvNWjX5bZ/FxqpLpOE9OReI5YY7ns7zqP4thLYYWe0Qdp9a2ezVrgzgrh/SLla
|
||||
d73x/S9TrmLtYGGlbVUByJW+GXjW2Tt6iaa/WDFzx8NvZ/wzIAdGSEfLcvS+JBSP
|
||||
2ppdY5Ac/2dK3PzYABkHvB/rhXIwlXnrFDU9efRHZfFqQqGauA64wfdIzsDNBF2l
|
||||
nPIBDADWML9cbGMrp12CtF9b2P6z9TTT74S8iyBOzaSvdGDQY/sUtZXRg21HWamX
|
||||
nn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3ofneYXoG+zeKc4dC86wa1TR2q9vW+RMX
|
||||
SO4uImA+Uzula/6k1DogDf28qhCxMwG/i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6
|
||||
rrd5y2AObaifV7wIhEJnvqgFXDN2RXGjLeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA
|
||||
0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/
|
||||
wGlQ01rh827KVZW4lXvqsge+wtnWlszcselGATyzqOK9LdHPdZGzROZYI2e8c+pa
|
||||
LNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5nTU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV
|
||||
8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwz
|
||||
j8sxH48AEQEAAcLA9gQYAQoAIBYhBNGmbhojsYLJmA94jPv8yCoBXnMwBQJdpZzy
|
||||
AhsMAAoJEPv8yCoBXnMw6f8L/26C34dkjBffTzMj5Bdzm8MtF67OYneJ4TQMw7+4
|
||||
1IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F66h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZ
|
||||
QanYmtSxcVV2PL9+QEiNN3tzluhaWO//rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zp
|
||||
f3u0k14itcv6alKY8+rLZvO1wIIeRZLmU0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn
|
||||
3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzRLV9EXkbhIMez0deCVdeo+wFFklh8/5VK
|
||||
2b0vk/+wqMJxfpa1lHvJLobzOP9fvrswsr92MA2+k901WeISR7qEzcI0Fdg8AyFA
|
||||
ExaEK6VyjP7SXGLwvfisw34OxuZr3qmx1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWi
|
||||
f9RSK4xjzRTe56iPeiSJJOIciMP9i2ldI+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj
|
||||
5KjhX2PVNEJd3XZRzaXZE2aAMQ==
|
||||
=522n
|
||||
bGU+wsFcBBMBCgCQBYJhesp/BYkEWQPJBQsJCAcCCRD7/MgqAV5zMEcUAAAAAAAe
|
||||
ACBzYWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmeEOQlNyTLFkc9I/elp+BpY
|
||||
495V7KatqtDmsyDr+zDAdwYVCgkICwIEFgIDAQIXgAIbAwIeARYhBNGmbhojsYLJ
|
||||
mA94jPv8yCoBXnMwAABSCQv/av8hKyynMtXVKFuWOGJw0mR8auDm84WdhMFRZg8t
|
||||
yTJ1L88+Ny4WUAFeqo2j7DU2yPGrm5rmuvzlEedFYFeOWt+A4adz+oumgRd0nsgG
|
||||
Lf3QYUWQhLWVlz+H7zubgKqSB2A2RqV65S7mTTVro42nb2Mng6rvGWiqeKG5nrXN
|
||||
/01p1mIBQGR/KnZSqYLzA2Pw2PiJoSkXT26PDz/kiEMXpjKMR6sicV4bKVlEdUvm
|
||||
pIImIPBHZq1EsKXEyWtWC41w/pc+FofGE+uSFs2aef1vvEHFkj3BHSK8gRcH3kfR
|
||||
eFroTET8C2q9V1AOELWm+Ys6PzGzF72URK1MKXlThuL4t4LjvXWGNA78IKW+/RQH
|
||||
DzK4U0jqSO0mL6qxqVS5Ij6jjL6OTrVEGdtDf5n0vI8tcUTBKtVqYAYk+t2YGT05
|
||||
ayxALtb7viVKo8f10WEcCuKshn0gdsEFMRZQzJ89uQIY3R3FbsdRCaE6OEaDgKMQ
|
||||
UTFROyfhthgzRKbRxfcplMUCzsDNBF2lnPIBDADWML9cbGMrp12CtF9b2P6z9TTT
|
||||
74S8iyBOzaSvdGDQY/sUtZXRg21HWamXnn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3
|
||||
ofneYXoG+zeKc4dC86wa1TR2q9vW+RMXSO4uImA+Uzula/6k1DogDf28qhCxMwG/
|
||||
i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6rrd5y2AObaifV7wIhEJnvqgFXDN2RXGj
|
||||
LeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/
|
||||
iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/wGlQ01rh827KVZW4lXvqsge+wtnWlszc
|
||||
selGATyzqOK9LdHPdZGzROZYI2e8c+paLNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5n
|
||||
TU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+
|
||||
Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwzj8sxH48AEQEAAcLA9gQYAQoAIBYhBNGm
|
||||
bhojsYLJmA94jPv8yCoBXnMwBQJdpZzyAhsMAAoJEPv8yCoBXnMw6f8L/26C34dk
|
||||
jBffTzMj5Bdzm8MtF67OYneJ4TQMw7+41IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F6
|
||||
6h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZQanYmtSxcVV2PL9+QEiNN3tzluhaWO//
|
||||
rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zpf3u0k14itcv6alKY8+rLZvO1wIIeRZLm
|
||||
U0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzR
|
||||
LV9EXkbhIMez0deCVdeo+wFFklh8/5VK2b0vk/+wqMJxfpa1lHvJLobzOP9fvrsw
|
||||
sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx
|
||||
1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWif9RSK4xjzRTe56iPeiSJJOIciMP9i2ld
|
||||
I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ==
|
||||
=AmgT
|
||||
-----END PGP PUBLIC KEY BLOCK-----`
|
||||
|
|
|
@ -395,6 +395,7 @@ const (
|
|||
SigTypeDirectSignature = 0x1F
|
||||
SigTypeKeyRevocation = 0x20
|
||||
SigTypeSubkeyRevocation = 0x28
|
||||
SigTypeCertificationRevocation = 0x30
|
||||
)
|
||||
|
||||
// PublicKeyAlgorithm represents the different public key system specified for
|
||||
|
|
|
@ -735,13 +735,13 @@ func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) {
|
|||
}
|
||||
|
||||
// VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature,
|
||||
// made by the passed in signingKey.
|
||||
func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signingKey *PublicKey) (err error) {
|
||||
h, err := keyRevocationHash(pk, sig.Hash)
|
||||
// made by this public key, of signed.
|
||||
func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) {
|
||||
h, err := keySignatureHash(pk, signed, sig.Hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return signingKey.VerifySignature(h, sig)
|
||||
return pk.VerifySignature(h, sig)
|
||||
}
|
||||
|
||||
// userIdSignatureHash returns a Hash of the message that needs to be signed
|
||||
|
|
|
@ -68,6 +68,11 @@ type Signature struct {
|
|||
IssuerFingerprint []byte
|
||||
IsPrimaryId *bool
|
||||
|
||||
// PolicyURI can be set to the URI of a document that describes the
|
||||
// policy under which the signature was issued. See RFC 4880, section
|
||||
// 5.2.3.20 for details.
|
||||
PolicyURI string
|
||||
|
||||
// FlagsValid is set if any flags were given. See RFC 4880, section
|
||||
// 5.2.3.21 for details.
|
||||
FlagsValid bool
|
||||
|
@ -75,7 +80,7 @@ type Signature struct {
|
|||
|
||||
// RevocationReason is set if this signature has been revoked.
|
||||
// See RFC 4880, section 5.2.3.23 for details.
|
||||
RevocationReason *uint8
|
||||
RevocationReason *ReasonForRevocation
|
||||
RevocationReasonText string
|
||||
|
||||
// In a self-signature, these flags are set there is a features subpacket
|
||||
|
@ -218,6 +223,7 @@ const (
|
|||
prefHashAlgosSubpacket signatureSubpacketType = 21
|
||||
prefCompressionSubpacket signatureSubpacketType = 22
|
||||
primaryUserIdSubpacket signatureSubpacketType = 25
|
||||
policyUriSubpacket signatureSubpacketType = 26
|
||||
keyFlagsSubpacket signatureSubpacketType = 27
|
||||
reasonForRevocationSubpacket signatureSubpacketType = 29
|
||||
featuresSubpacket signatureSubpacketType = 30
|
||||
|
@ -377,8 +383,8 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
|
|||
err = errors.StructuralError("empty revocation reason subpacket")
|
||||
return
|
||||
}
|
||||
sig.RevocationReason = new(uint8)
|
||||
*sig.RevocationReason = subpacket[0]
|
||||
sig.RevocationReason = new(ReasonForRevocation)
|
||||
*sig.RevocationReason = ReasonForRevocation(subpacket[0])
|
||||
sig.RevocationReasonText = string(subpacket[1:])
|
||||
case featuresSubpacket:
|
||||
// Features subpacket, section 5.2.3.24 specifies a very general
|
||||
|
@ -416,6 +422,12 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
|
|||
if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding {
|
||||
return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
|
||||
}
|
||||
case policyUriSubpacket:
|
||||
// Policy URI, section 5.2.3.20
|
||||
if !isHashed {
|
||||
return
|
||||
}
|
||||
sig.PolicyURI = string(subpacket)
|
||||
case issuerFingerprintSubpacket:
|
||||
v, l := subpacket[0], len(subpacket[1:])
|
||||
if v == 5 && l != 32 || v != 5 && l != 20 {
|
||||
|
@ -507,6 +519,9 @@ func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
|
|||
if subpacket.hashed == hashed {
|
||||
n := serializeSubpacketLength(to, len(subpacket.contents)+1)
|
||||
to[n] = byte(subpacket.subpacketType)
|
||||
if subpacket.isCritical {
|
||||
to[n] |= 0x80
|
||||
}
|
||||
to = to[1+n:]
|
||||
n = copy(to, subpacket.contents)
|
||||
to = to[n:]
|
||||
|
@ -714,6 +729,14 @@ func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config
|
|||
return sig.Sign(h, priv, config)
|
||||
}
|
||||
|
||||
// RevokeSubkey computes a subkey revocation signature of pub using priv.
|
||||
// On success, the signature is stored in sig. Call Serialize to write it out.
|
||||
// If config is nil, sensible defaults will be used.
|
||||
func (sig *Signature) RevokeSubkey(pub *PublicKey, priv *PrivateKey, config *Config) error {
|
||||
// Identical to a subkey binding signature
|
||||
return sig.SignKey(pub, priv, config)
|
||||
}
|
||||
|
||||
// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
|
||||
// called first.
|
||||
func (sig *Signature) Serialize(w io.Writer) (err error) {
|
||||
|
@ -892,6 +915,10 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
|
|||
subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression})
|
||||
}
|
||||
|
||||
if len(sig.PolicyURI) > 0 {
|
||||
subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)})
|
||||
}
|
||||
|
||||
if len(sig.PreferredAEAD) > 0 {
|
||||
subpackets = append(subpackets, outputSubpacket{true, prefAeadAlgosSubpacket, false, sig.PreferredAEAD})
|
||||
}
|
||||
|
@ -899,7 +926,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
|
|||
// Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23.
|
||||
if sig.RevocationReason != nil {
|
||||
subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true,
|
||||
append([]uint8{*sig.RevocationReason}, []uint8(sig.RevocationReasonText)...)})
|
||||
append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)})
|
||||
}
|
||||
|
||||
// EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26.
|
||||
|
|
|
@ -370,11 +370,25 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) {
|
|||
|
||||
// If signature KeyID matches
|
||||
if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId {
|
||||
scr.md.Signature = sig
|
||||
scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature)
|
||||
if scr.md.SignatureError == nil && scr.md.Signature.SigExpired(scr.config.Now()) {
|
||||
scr.md.SignatureError = errors.ErrSignatureExpired
|
||||
key := scr.md.SignedBy
|
||||
signatureError := key.PublicKey.VerifySignature(scr.h, sig)
|
||||
if signatureError == nil {
|
||||
now := scr.config.Now()
|
||||
if key.Revoked(now) ||
|
||||
key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key)
|
||||
key.Entity.PrimaryIdentity().Revoked(now) {
|
||||
signatureError = errors.ErrKeyRevoked
|
||||
}
|
||||
if sig.SigExpired(now) {
|
||||
signatureError = errors.ErrSignatureExpired
|
||||
}
|
||||
if key.PublicKey.KeyExpired(key.SelfSignature, now) ||
|
||||
key.SelfSignature.SigExpired(now) {
|
||||
signatureError = errors.ErrKeyExpired
|
||||
}
|
||||
}
|
||||
scr.md.Signature = sig
|
||||
scr.md.SignatureError = signatureError
|
||||
} else {
|
||||
scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig)
|
||||
}
|
||||
|
@ -483,10 +497,16 @@ func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader,
|
|||
err = key.PublicKey.VerifySignature(h, sig)
|
||||
if err == nil {
|
||||
now := config.Now()
|
||||
if key.Revoked(now) ||
|
||||
key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key)
|
||||
key.Entity.PrimaryIdentity().Revoked(now) {
|
||||
return key.Entity, errors.ErrKeyRevoked
|
||||
}
|
||||
if sig.SigExpired(now) {
|
||||
return key.Entity, errors.ErrSignatureExpired
|
||||
}
|
||||
if key.PublicKey.KeyExpired(key.SelfSignature, now) {
|
||||
if key.PublicKey.KeyExpired(key.SelfSignature, now) ||
|
||||
key.SelfSignature.SigExpired(now) {
|
||||
return key.Entity, errors.ErrKeyExpired
|
||||
}
|
||||
return key.Entity, nil
|
||||
|
|
|
@ -208,6 +208,15 @@ func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signe
|
|||
return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config)
|
||||
}
|
||||
|
||||
// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs
|
||||
// it. hints contains optional information, that is also encrypted, that aids
|
||||
// the recipients in processing the message. The resulting WriteCloser must
|
||||
// be closed after the contents of the file have been written.
|
||||
// If config is nil, sensible defaults will be used.
|
||||
func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
|
||||
return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config)
|
||||
}
|
||||
|
||||
// writeAndSign writes the data as a payload package and, optionally, signs
|
||||
// it. hints contains optional information, that is also encrypted,
|
||||
// that aids the recipients in processing the message. The resulting
|
||||
|
@ -358,7 +367,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En
|
|||
var ok bool
|
||||
encryptKeys[i], ok = to[i].EncryptionKey(config.Now())
|
||||
if !ok {
|
||||
return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys")
|
||||
return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys")
|
||||
}
|
||||
|
||||
sig := to[i].PrimaryIdentity().SelfSignature
|
||||
|
|
|
@ -1,26 +0,0 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
target
|
|
@ -1,37 +0,0 @@
|
|||
language: go
|
||||
|
||||
sudo: false
|
||||
|
||||
branches:
|
||||
except:
|
||||
- release
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- travis
|
||||
|
||||
go:
|
||||
- "1.11.x"
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
before_install:
|
||||
- if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi;
|
||||
- if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi;
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
before_script:
|
||||
- make deps
|
||||
|
||||
script:
|
||||
- make qa
|
||||
|
||||
after_failure:
|
||||
- cat ./target/test/report.xml
|
||||
|
||||
after_success:
|
||||
- if [ "$TRAVIS_GO_VERSION" = "1.11.1" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi;
|
|
@ -1,27 +0,0 @@
|
|||
Copyright (c) 2014 Will Fitzgerald. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -1,93 +0,0 @@
|
|||
# bitset
|
||||
|
||||
*Go language library to map between non-negative integers and boolean values*
|
||||
|
||||
[](https://github.com/willf/bitset/actions?query=workflow%3ATest)
|
||||
[](https://goreportcard.com/report/github.com/willf/bitset)
|
||||
[](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc)
|
||||
|
||||
|
||||
## Description
|
||||
|
||||
Package bitset implements bitsets, a mapping between non-negative integers and boolean values.
|
||||
It should be more efficient than map[uint] bool.
|
||||
|
||||
It provides methods for setting, clearing, flipping, and testing individual integers.
|
||||
|
||||
But it also provides set intersection, union, difference, complement, and symmetric operations, as well as tests to check whether any, all, or no bits are set, and querying a bitset's current length and number of positive bits.
|
||||
|
||||
BitSets are expanded to the size of the largest set bit; the memory allocation is approximately Max bits, where Max is the largest set bit. BitSets are never shrunk. On creation, a hint can be given for the number of bits that will be used.
|
||||
|
||||
Many of the methods, including Set, Clear, and Flip, return a BitSet pointer, which allows for chaining.
|
||||
|
||||
### Example use:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
"github.com/bits-and-blooms/bitset"
|
||||
)
|
||||
|
||||
func main() {
|
||||
fmt.Printf("Hello from BitSet!\n")
|
||||
var b bitset.BitSet
|
||||
// play some Go Fish
|
||||
for i := 0; i < 100; i++ {
|
||||
card1 := uint(rand.Intn(52))
|
||||
card2 := uint(rand.Intn(52))
|
||||
b.Set(card1)
|
||||
if b.Test(card2) {
|
||||
fmt.Println("Go Fish!")
|
||||
}
|
||||
b.Clear(card1)
|
||||
}
|
||||
|
||||
// Chaining
|
||||
b.Set(10).Set(11)
|
||||
|
||||
for i, e := b.NextSet(0); e; i, e = b.NextSet(i + 1) {
|
||||
fmt.Println("The following bit is set:", i)
|
||||
}
|
||||
if b.Intersection(bitset.New(100).Set(10)).Count() == 1 {
|
||||
fmt.Println("Intersection works.")
|
||||
} else {
|
||||
fmt.Println("Intersection doesn't work???")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets.
|
||||
|
||||
Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc
|
||||
|
||||
## Memory Usage
|
||||
|
||||
The memory usage of a bitset using N bits is at least N/8 bytes. The number of bits in a bitset is at least as large as one plus the greatest bit index you have accessed. Thus it is possible to run out of memory while using a bitset. If you have lots of bits, you might prefer compressed bitsets, like the [Roaring bitmaps](http://roaringbitmap.org) and its [Go implementation](https://github.com/RoaringBitmap/roaring).
|
||||
|
||||
## Implementation Note
|
||||
|
||||
Go 1.9 introduced a native `math/bits` library. We provide backward compatibility to Go 1.7, which might be removed.
|
||||
|
||||
It is possible that a later version will match the `math/bits` return signature for counts (which is `int`, rather than our library's `unit64`). If so, the version will be bumped.
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
go get github.com/bits-and-blooms/bitset
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)")
|
||||
|
||||
## Running all tests
|
||||
|
||||
Before committing the code, please check if it passes tests, has adequate coverage, etc.
|
||||
```bash
|
||||
go test
|
||||
go test -cover
|
||||
```
|
|
@ -1,39 +0,0 @@
|
|||
# Go
|
||||
# Build your Go project.
|
||||
# Add steps that test, save build artifacts, deploy, and more:
|
||||
# https://docs.microsoft.com/azure/devops/pipelines/languages/go
|
||||
|
||||
trigger:
|
||||
- master
|
||||
|
||||
pool:
|
||||
vmImage: 'Ubuntu-16.04'
|
||||
|
||||
variables:
|
||||
GOBIN: '$(GOPATH)/bin' # Go binaries path
|
||||
GOROOT: '/usr/local/go1.11' # Go installation path
|
||||
GOPATH: '$(system.defaultWorkingDirectory)/gopath' # Go workspace path
|
||||
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code
|
||||
|
||||
steps:
|
||||
- script: |
|
||||
mkdir -p '$(GOBIN)'
|
||||
mkdir -p '$(GOPATH)/pkg'
|
||||
mkdir -p '$(modulePath)'
|
||||
shopt -s extglob
|
||||
shopt -s dotglob
|
||||
mv !(gopath) '$(modulePath)'
|
||||
echo '##vso[task.prependpath]$(GOBIN)'
|
||||
echo '##vso[task.prependpath]$(GOROOT)/bin'
|
||||
displayName: 'Set up the Go workspace'
|
||||
|
||||
- script: |
|
||||
go version
|
||||
go get -v -t -d ./...
|
||||
if [ -f Gopkg.toml ]; then
|
||||
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
|
||||
dep ensure
|
||||
fi
|
||||
go build -v .
|
||||
workingDirectory: '$(modulePath)'
|
||||
displayName: 'Get dependencies, then build'
|
|
@ -1,952 +0,0 @@
|
|||
/*
|
||||
Package bitset implements bitsets, a mapping
|
||||
between non-negative integers and boolean values. It should be more
|
||||
efficient than map[uint] bool.
|
||||
|
||||
It provides methods for setting, clearing, flipping, and testing
|
||||
individual integers.
|
||||
|
||||
But it also provides set intersection, union, difference,
|
||||
complement, and symmetric operations, as well as tests to
|
||||
check whether any, all, or no bits are set, and querying a
|
||||
bitset's current length and number of positive bits.
|
||||
|
||||
BitSets are expanded to the size of the largest set bit; the
|
||||
memory allocation is approximately Max bits, where Max is
|
||||
the largest set bit. BitSets are never shrunk. On creation,
|
||||
a hint can be given for the number of bits that will be used.
|
||||
|
||||
Many of the methods, including Set,Clear, and Flip, return
|
||||
a BitSet pointer, which allows for chaining.
|
||||
|
||||
Example use:
|
||||
|
||||
import "bitset"
|
||||
var b BitSet
|
||||
b.Set(10).Set(11)
|
||||
if b.Test(1000) {
|
||||
b.Clear(1000)
|
||||
}
|
||||
if b.Intersection(bitset.New(100).Set(10)).Count() > 1 {
|
||||
fmt.Println("Intersection works.")
|
||||
}
|
||||
|
||||
As an alternative to BitSets, one should check out the 'big' package,
|
||||
which provides a (less set-theoretical) view of bitsets.
|
||||
|
||||
*/
|
||||
package bitset
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// the wordSize of a bit set
|
||||
const wordSize = uint(64)
|
||||
|
||||
// log2WordSize is lg(wordSize)
|
||||
const log2WordSize = uint(6)
|
||||
|
||||
// allBits has every bit set
|
||||
const allBits uint64 = 0xffffffffffffffff
|
||||
|
||||
// default binary BigEndian
|
||||
var binaryOrder binary.ByteOrder = binary.BigEndian
|
||||
|
||||
// default json encoding base64.URLEncoding
|
||||
var base64Encoding = base64.URLEncoding
|
||||
|
||||
// Base64StdEncoding Marshal/Unmarshal BitSet with base64.StdEncoding(Default: base64.URLEncoding)
|
||||
func Base64StdEncoding() { base64Encoding = base64.StdEncoding }
|
||||
|
||||
// LittleEndian Marshal/Unmarshal Binary as Little Endian(Default: binary.BigEndian)
|
||||
func LittleEndian() { binaryOrder = binary.LittleEndian }
|
||||
|
||||
// A BitSet is a set of bits. The zero value of a BitSet is an empty set of length 0.
|
||||
type BitSet struct {
|
||||
length uint
|
||||
set []uint64
|
||||
}
|
||||
|
||||
// Error is used to distinguish errors (panics) generated in this package.
|
||||
type Error string
|
||||
|
||||
// safeSet will fixup b.set to be non-nil and return the field value
|
||||
func (b *BitSet) safeSet() []uint64 {
|
||||
if b.set == nil {
|
||||
b.set = make([]uint64, wordsNeeded(0))
|
||||
}
|
||||
return b.set
|
||||
}
|
||||
|
||||
// From is a constructor used to create a BitSet from an array of integers
|
||||
func From(buf []uint64) *BitSet {
|
||||
return &BitSet{uint(len(buf)) * 64, buf}
|
||||
}
|
||||
|
||||
// Bytes returns the bitset as array of integers
|
||||
func (b *BitSet) Bytes() []uint64 {
|
||||
return b.set
|
||||
}
|
||||
|
||||
// wordsNeeded calculates the number of words needed for i bits
|
||||
func wordsNeeded(i uint) int {
|
||||
if i > (Cap() - wordSize + 1) {
|
||||
return int(Cap() >> log2WordSize)
|
||||
}
|
||||
return int((i + (wordSize - 1)) >> log2WordSize)
|
||||
}
|
||||
|
||||
// New creates a new BitSet with a hint that length bits will be required
|
||||
func New(length uint) (bset *BitSet) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
bset = &BitSet{
|
||||
0,
|
||||
make([]uint64, 0),
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
bset = &BitSet{
|
||||
length,
|
||||
make([]uint64, wordsNeeded(length)),
|
||||
}
|
||||
|
||||
return bset
|
||||
}
|
||||
|
||||
// Cap returns the total possible capacity, or number of bits
// (the maximum value representable by a uint on this platform).
func Cap() uint {
	const zero = uint(0)
	return ^zero
}
|
||||
|
||||
// Len returns the number of bits in the BitSet.
|
||||
// Note the difference to method Count, see example.
|
||||
func (b *BitSet) Len() uint {
|
||||
return b.length
|
||||
}
|
||||
|
||||
// extendSetMaybe adds additional words to incorporate new bits if needed
|
||||
func (b *BitSet) extendSetMaybe(i uint) {
|
||||
if i >= b.length { // if we need more bits, make 'em
|
||||
if i >= Cap() {
|
||||
panic("You are exceeding the capacity")
|
||||
}
|
||||
nsize := wordsNeeded(i + 1)
|
||||
if b.set == nil {
|
||||
b.set = make([]uint64, nsize)
|
||||
} else if cap(b.set) >= nsize {
|
||||
b.set = b.set[:nsize] // fast resize
|
||||
} else if len(b.set) < nsize {
|
||||
newset := make([]uint64, nsize, 2*nsize) // increase capacity 2x
|
||||
copy(newset, b.set)
|
||||
b.set = newset
|
||||
}
|
||||
b.length = i + 1
|
||||
}
|
||||
}
|
||||
|
||||
// Test whether bit i is set.
|
||||
func (b *BitSet) Test(i uint) bool {
|
||||
if i >= b.length {
|
||||
return false
|
||||
}
|
||||
return b.set[i>>log2WordSize]&(1<<(i&(wordSize-1))) != 0
|
||||
}
|
||||
|
||||
// Set bit i to 1, the capacity of the bitset is automatically
|
||||
// increased accordingly.
|
||||
// If i>= Cap(), this function will panic.
|
||||
// Warning: using a very large value for 'i'
|
||||
// may lead to a memory shortage and a panic: the caller is responsible
|
||||
// for providing sensible parameters in line with their memory capacity.
|
||||
func (b *BitSet) Set(i uint) *BitSet {
|
||||
b.extendSetMaybe(i)
|
||||
b.set[i>>log2WordSize] |= 1 << (i & (wordSize - 1))
|
||||
return b
|
||||
}
|
||||
|
||||
// Clear bit i to 0
|
||||
func (b *BitSet) Clear(i uint) *BitSet {
|
||||
if i >= b.length {
|
||||
return b
|
||||
}
|
||||
b.set[i>>log2WordSize] &^= 1 << (i & (wordSize - 1))
|
||||
return b
|
||||
}
|
||||
|
||||
// SetTo sets bit i to value.
|
||||
// If i>= Cap(), this function will panic.
|
||||
// Warning: using a very large value for 'i'
|
||||
// may lead to a memory shortage and a panic: the caller is responsible
|
||||
// for providing sensible parameters in line with their memory capacity.
|
||||
func (b *BitSet) SetTo(i uint, value bool) *BitSet {
|
||||
if value {
|
||||
return b.Set(i)
|
||||
}
|
||||
return b.Clear(i)
|
||||
}
|
||||
|
||||
// Flip bit at i.
|
||||
// If i>= Cap(), this function will panic.
|
||||
// Warning: using a very large value for 'i'
|
||||
// may lead to a memory shortage and a panic: the caller is responsible
|
||||
// for providing sensible parameters in line with their memory capacity.
|
||||
func (b *BitSet) Flip(i uint) *BitSet {
|
||||
if i >= b.length {
|
||||
return b.Set(i)
|
||||
}
|
||||
b.set[i>>log2WordSize] ^= 1 << (i & (wordSize - 1))
|
||||
return b
|
||||
}
|
||||
|
||||
// FlipRange bit in [start, end).
|
||||
// If end>= Cap(), this function will panic.
|
||||
// Warning: using a very large value for 'end'
|
||||
// may lead to a memory shortage and a panic: the caller is responsible
|
||||
// for providing sensible parameters in line with their memory capacity.
|
||||
func (b *BitSet) FlipRange(start, end uint) *BitSet {
|
||||
if start >= end {
|
||||
return b
|
||||
}
|
||||
|
||||
b.extendSetMaybe(end - 1)
|
||||
var startWord uint = start >> log2WordSize
|
||||
var endWord uint = end >> log2WordSize
|
||||
b.set[startWord] ^= ^(^uint64(0) << (start & (wordSize - 1)))
|
||||
for i := startWord; i < endWord; i++ {
|
||||
b.set[i] = ^b.set[i]
|
||||
}
|
||||
b.set[endWord] ^= ^uint64(0) >> (-end & (wordSize - 1))
|
||||
return b
|
||||
}
|
||||
|
||||
// Shrink shrinks BitSet so that the provided value is the last possible
|
||||
// set value. It clears all bits > the provided index and reduces the size
|
||||
// and length of the set.
|
||||
//
|
||||
// Note that the parameter value is not the new length in bits: it is the
|
||||
// maximal value that can be stored in the bitset after the function call.
|
||||
// The new length in bits is the parameter value + 1. Thus it is not possible
|
||||
// to use this function to set the length to 0, the minimal value of the length
|
||||
// after this function call is 1.
|
||||
//
|
||||
// A new slice is allocated to store the new bits, so you may see an increase in
|
||||
// memory usage until the GC runs. Normally this should not be a problem, but if you
|
||||
// have an extremely large BitSet its important to understand that the old BitSet will
|
||||
// remain in memory until the GC frees it.
|
||||
func (b *BitSet) Shrink(lastbitindex uint) *BitSet {
|
||||
length := lastbitindex + 1
|
||||
idx := wordsNeeded(length)
|
||||
if idx > len(b.set) {
|
||||
return b
|
||||
}
|
||||
shrunk := make([]uint64, idx)
|
||||
copy(shrunk, b.set[:idx])
|
||||
b.set = shrunk
|
||||
b.length = length
|
||||
b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1))))
|
||||
return b
|
||||
}
|
||||
|
||||
// Compact shrinks BitSet to so that we preserve all set bits, while minimizing
|
||||
// memory usage. Compact calls Shrink.
|
||||
func (b *BitSet) Compact() *BitSet {
|
||||
idx := len(b.set) - 1
|
||||
for ; idx >= 0 && b.set[idx] == 0; idx-- {
|
||||
}
|
||||
newlength := uint((idx + 1) << log2WordSize)
|
||||
if newlength >= b.length {
|
||||
return b // nothing to do
|
||||
}
|
||||
if newlength > 0 {
|
||||
return b.Shrink(newlength - 1)
|
||||
}
|
||||
// We preserve one word
|
||||
return b.Shrink(63)
|
||||
}
|
||||
|
||||
// InsertAt takes an index which indicates where a bit should be
|
||||
// inserted. Then it shifts all the bits in the set to the left by 1, starting
|
||||
// from the given index position, and sets the index position to 0.
|
||||
//
|
||||
// Depending on the size of your BitSet, and where you are inserting the new entry,
|
||||
// this method could be extremely slow and in some cases might cause the entire BitSet
|
||||
// to be recopied.
|
||||
func (b *BitSet) InsertAt(idx uint) *BitSet {
|
||||
insertAtElement := (idx >> log2WordSize)
|
||||
|
||||
// if length of set is a multiple of wordSize we need to allocate more space first
|
||||
if b.isLenExactMultiple() {
|
||||
b.set = append(b.set, uint64(0))
|
||||
}
|
||||
|
||||
var i uint
|
||||
for i = uint(len(b.set) - 1); i > insertAtElement; i-- {
|
||||
// all elements above the position where we want to insert can simply by shifted
|
||||
b.set[i] <<= 1
|
||||
|
||||
// we take the most significant bit of the previous element and set it as
|
||||
// the least significant bit of the current element
|
||||
b.set[i] |= (b.set[i-1] & 0x8000000000000000) >> 63
|
||||
}
|
||||
|
||||
// generate a mask to extract the data that we need to shift left
|
||||
// within the element where we insert a bit
|
||||
dataMask := ^(uint64(1)<<uint64(idx&(wordSize-1)) - 1)
|
||||
|
||||
// extract that data that we'll shift
|
||||
data := b.set[i] & dataMask
|
||||
|
||||
// set the positions of the data mask to 0 in the element where we insert
|
||||
b.set[i] &= ^dataMask
|
||||
|
||||
// shift data mask to the left and insert its data to the slice element
|
||||
b.set[i] |= data << 1
|
||||
|
||||
// add 1 to length of BitSet
|
||||
b.length++
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// String creates a string representation of the Bitmap
|
||||
func (b *BitSet) String() string {
|
||||
// follows code from https://github.com/RoaringBitmap/roaring
|
||||
var buffer bytes.Buffer
|
||||
start := []byte("{")
|
||||
buffer.Write(start)
|
||||
counter := 0
|
||||
i, e := b.NextSet(0)
|
||||
for e {
|
||||
counter = counter + 1
|
||||
// to avoid exhausting the memory
|
||||
if counter > 0x40000 {
|
||||
buffer.WriteString("...")
|
||||
break
|
||||
}
|
||||
buffer.WriteString(strconv.FormatInt(int64(i), 10))
|
||||
i, e = b.NextSet(i + 1)
|
||||
if e {
|
||||
buffer.WriteString(",")
|
||||
}
|
||||
}
|
||||
buffer.WriteString("}")
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
// DeleteAt deletes the bit at the given index position from
|
||||
// within the bitset
|
||||
// All the bits residing on the left of the deleted bit get
|
||||
// shifted right by 1
|
||||
// The running time of this operation may potentially be
|
||||
// relatively slow, O(length)
|
||||
func (b *BitSet) DeleteAt(i uint) *BitSet {
|
||||
// the index of the slice element where we'll delete a bit
|
||||
deleteAtElement := i >> log2WordSize
|
||||
|
||||
// generate a mask for the data that needs to be shifted right
|
||||
// within that slice element that gets modified
|
||||
dataMask := ^((uint64(1) << (i & (wordSize - 1))) - 1)
|
||||
|
||||
// extract the data that we'll shift right from the slice element
|
||||
data := b.set[deleteAtElement] & dataMask
|
||||
|
||||
// set the masked area to 0 while leaving the rest as it is
|
||||
b.set[deleteAtElement] &= ^dataMask
|
||||
|
||||
// shift the previously extracted data to the right and then
|
||||
// set it in the previously masked area
|
||||
b.set[deleteAtElement] |= (data >> 1) & dataMask
|
||||
|
||||
// loop over all the consecutive slice elements to copy each
|
||||
// lowest bit into the highest position of the previous element,
|
||||
// then shift the entire content to the right by 1
|
||||
for i := int(deleteAtElement) + 1; i < len(b.set); i++ {
|
||||
b.set[i-1] |= (b.set[i] & 1) << 63
|
||||
b.set[i] >>= 1
|
||||
}
|
||||
|
||||
b.length = b.length - 1
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// NextSet returns the next bit set from the specified index,
|
||||
// including possibly the current index
|
||||
// along with an error code (true = valid, false = no set bit found)
|
||||
// for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {...}
|
||||
//
|
||||
// Users concerned with performance may want to use NextSetMany to
|
||||
// retrieve several values at once.
|
||||
func (b *BitSet) NextSet(i uint) (uint, bool) {
|
||||
x := int(i >> log2WordSize)
|
||||
if x >= len(b.set) {
|
||||
return 0, false
|
||||
}
|
||||
w := b.set[x]
|
||||
w = w >> (i & (wordSize - 1))
|
||||
if w != 0 {
|
||||
return i + trailingZeroes64(w), true
|
||||
}
|
||||
x = x + 1
|
||||
for x < len(b.set) {
|
||||
if b.set[x] != 0 {
|
||||
return uint(x)*wordSize + trailingZeroes64(b.set[x]), true
|
||||
}
|
||||
x = x + 1
|
||||
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// NextSetMany returns many next bit sets from the specified index,
|
||||
// including possibly the current index and up to cap(buffer).
|
||||
// If the returned slice has len zero, then no more set bits were found
|
||||
//
|
||||
// buffer := make([]uint, 256) // this should be reused
|
||||
// j := uint(0)
|
||||
// j, buffer = bitmap.NextSetMany(j, buffer)
|
||||
// for ; len(buffer) > 0; j, buffer = bitmap.NextSetMany(j,buffer) {
|
||||
// for k := range buffer {
|
||||
// do something with buffer[k]
|
||||
// }
|
||||
// j += 1
|
||||
// }
|
||||
//
|
||||
//
|
||||
// It is possible to retrieve all set bits as follow:
|
||||
//
|
||||
// indices := make([]uint, bitmap.Count())
|
||||
// bitmap.NextSetMany(0, indices)
|
||||
//
|
||||
// However if bitmap.Count() is large, it might be preferable to
|
||||
// use several calls to NextSetMany, for performance reasons.
|
||||
func (b *BitSet) NextSetMany(i uint, buffer []uint) (uint, []uint) {
|
||||
myanswer := buffer
|
||||
capacity := cap(buffer)
|
||||
x := int(i >> log2WordSize)
|
||||
if x >= len(b.set) || capacity == 0 {
|
||||
return 0, myanswer[:0]
|
||||
}
|
||||
skip := i & (wordSize - 1)
|
||||
word := b.set[x] >> skip
|
||||
myanswer = myanswer[:capacity]
|
||||
size := int(0)
|
||||
for word != 0 {
|
||||
r := trailingZeroes64(word)
|
||||
t := word & ((^word) + 1)
|
||||
myanswer[size] = r + i
|
||||
size++
|
||||
if size == capacity {
|
||||
goto End
|
||||
}
|
||||
word = word ^ t
|
||||
}
|
||||
x++
|
||||
for idx, word := range b.set[x:] {
|
||||
for word != 0 {
|
||||
r := trailingZeroes64(word)
|
||||
t := word & ((^word) + 1)
|
||||
myanswer[size] = r + (uint(x+idx) << 6)
|
||||
size++
|
||||
if size == capacity {
|
||||
goto End
|
||||
}
|
||||
word = word ^ t
|
||||
}
|
||||
}
|
||||
End:
|
||||
if size > 0 {
|
||||
return myanswer[size-1], myanswer[:size]
|
||||
}
|
||||
return 0, myanswer[:0]
|
||||
}
|
||||
|
||||
// NextClear returns the next clear bit from the specified index,
|
||||
// including possibly the current index
|
||||
// along with an error code (true = valid, false = no bit found i.e. all bits are set)
|
||||
func (b *BitSet) NextClear(i uint) (uint, bool) {
|
||||
x := int(i >> log2WordSize)
|
||||
if x >= len(b.set) {
|
||||
return 0, false
|
||||
}
|
||||
w := b.set[x]
|
||||
w = w >> (i & (wordSize - 1))
|
||||
wA := allBits >> (i & (wordSize - 1))
|
||||
index := i + trailingZeroes64(^w)
|
||||
if w != wA && index < b.length {
|
||||
return index, true
|
||||
}
|
||||
x++
|
||||
for x < len(b.set) {
|
||||
index = uint(x)*wordSize + trailingZeroes64(^b.set[x])
|
||||
if b.set[x] != allBits && index < b.length {
|
||||
return index, true
|
||||
}
|
||||
x++
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// ClearAll clears the entire BitSet
|
||||
func (b *BitSet) ClearAll() *BitSet {
|
||||
if b != nil && b.set != nil {
|
||||
for i := range b.set {
|
||||
b.set[i] = 0
|
||||
}
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// wordCount returns the number of words used in a bit set
|
||||
func (b *BitSet) wordCount() int {
|
||||
return len(b.set)
|
||||
}
|
||||
|
||||
// Clone this BitSet
|
||||
func (b *BitSet) Clone() *BitSet {
|
||||
c := New(b.length)
|
||||
if b.set != nil { // Clone should not modify current object
|
||||
copy(c.set, b.set)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// Copy into a destination BitSet
|
||||
// Returning the size of the destination BitSet
|
||||
// like array copy
|
||||
func (b *BitSet) Copy(c *BitSet) (count uint) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
if b.set != nil { // Copy should not modify current object
|
||||
copy(c.set, b.set)
|
||||
}
|
||||
count = c.length
|
||||
if b.length < c.length {
|
||||
count = b.length
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Count (number of set bits).
|
||||
// Also known as "popcount" or "population count".
|
||||
func (b *BitSet) Count() uint {
|
||||
if b != nil && b.set != nil {
|
||||
return uint(popcntSlice(b.set))
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Equal tests the equivalence of two BitSets.
|
||||
// False if they are of different sizes, otherwise true
|
||||
// only if all the same bits are set
|
||||
func (b *BitSet) Equal(c *BitSet) bool {
|
||||
if c == nil || b == nil {
|
||||
return c == b
|
||||
}
|
||||
if b.length != c.length {
|
||||
return false
|
||||
}
|
||||
if b.length == 0 { // if they have both length == 0, then could have nil set
|
||||
return true
|
||||
}
|
||||
// testing for equality shoud not transform the bitset (no call to safeSet)
|
||||
|
||||
for p, v := range b.set {
|
||||
if c.set[p] != v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func panicIfNull(b *BitSet) {
|
||||
if b == nil {
|
||||
panic(Error("BitSet must not be null"))
|
||||
}
|
||||
}
|
||||
|
||||
// Difference of base set and other set
|
||||
// This is the BitSet equivalent of &^ (and not)
|
||||
func (b *BitSet) Difference(compare *BitSet) (result *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
result = b.Clone() // clone b (in case b is bigger than compare)
|
||||
l := int(compare.wordCount())
|
||||
if l > int(b.wordCount()) {
|
||||
l = int(b.wordCount())
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
result.set[i] = b.set[i] &^ compare.set[i]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DifferenceCardinality computes the cardinality of the difference
|
||||
func (b *BitSet) DifferenceCardinality(compare *BitSet) uint {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
l := int(compare.wordCount())
|
||||
if l > int(b.wordCount()) {
|
||||
l = int(b.wordCount())
|
||||
}
|
||||
cnt := uint64(0)
|
||||
cnt += popcntMaskSlice(b.set[:l], compare.set[:l])
|
||||
cnt += popcntSlice(b.set[l:])
|
||||
return uint(cnt)
|
||||
}
|
||||
|
||||
// InPlaceDifference computes the difference of base set and other set
|
||||
// This is the BitSet equivalent of &^ (and not)
|
||||
func (b *BitSet) InPlaceDifference(compare *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
l := int(compare.wordCount())
|
||||
if l > int(b.wordCount()) {
|
||||
l = int(b.wordCount())
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
b.set[i] &^= compare.set[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Convenience function: return two bitsets ordered by
|
||||
// increasing length. Note: neither can be nil
|
||||
func sortByLength(a *BitSet, b *BitSet) (ap *BitSet, bp *BitSet) {
|
||||
if a.length <= b.length {
|
||||
ap, bp = a, b
|
||||
} else {
|
||||
ap, bp = b, a
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Intersection of base set and other set
|
||||
// This is the BitSet equivalent of & (and)
|
||||
func (b *BitSet) Intersection(compare *BitSet) (result *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
b, compare = sortByLength(b, compare)
|
||||
result = New(b.length)
|
||||
for i, word := range b.set {
|
||||
result.set[i] = word & compare.set[i]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// IntersectionCardinality computes the cardinality of the union
|
||||
func (b *BitSet) IntersectionCardinality(compare *BitSet) uint {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
b, compare = sortByLength(b, compare)
|
||||
cnt := popcntAndSlice(b.set, compare.set)
|
||||
return uint(cnt)
|
||||
}
|
||||
|
||||
// InPlaceIntersection destructively computes the intersection of
|
||||
// base set and the compare set.
|
||||
// This is the BitSet equivalent of & (and)
|
||||
func (b *BitSet) InPlaceIntersection(compare *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
l := int(compare.wordCount())
|
||||
if l > int(b.wordCount()) {
|
||||
l = int(b.wordCount())
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
b.set[i] &= compare.set[i]
|
||||
}
|
||||
for i := l; i < len(b.set); i++ {
|
||||
b.set[i] = 0
|
||||
}
|
||||
if compare.length > 0 {
|
||||
b.extendSetMaybe(compare.length - 1)
|
||||
}
|
||||
}
|
||||
|
||||
// Union of base set and other set
|
||||
// This is the BitSet equivalent of | (or)
|
||||
func (b *BitSet) Union(compare *BitSet) (result *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
b, compare = sortByLength(b, compare)
|
||||
result = compare.Clone()
|
||||
for i, word := range b.set {
|
||||
result.set[i] = word | compare.set[i]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// UnionCardinality computes the cardinality of the union of the base set
|
||||
// and the compare set.
|
||||
func (b *BitSet) UnionCardinality(compare *BitSet) uint {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
b, compare = sortByLength(b, compare)
|
||||
cnt := popcntOrSlice(b.set, compare.set)
|
||||
if len(compare.set) > len(b.set) {
|
||||
cnt += popcntSlice(compare.set[len(b.set):])
|
||||
}
|
||||
return uint(cnt)
|
||||
}
|
||||
|
||||
// InPlaceUnion creates the destructive union of base set and compare set.
|
||||
// This is the BitSet equivalent of | (or).
|
||||
func (b *BitSet) InPlaceUnion(compare *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
l := int(compare.wordCount())
|
||||
if l > int(b.wordCount()) {
|
||||
l = int(b.wordCount())
|
||||
}
|
||||
if compare.length > 0 {
|
||||
b.extendSetMaybe(compare.length - 1)
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
b.set[i] |= compare.set[i]
|
||||
}
|
||||
if len(compare.set) > l {
|
||||
for i := l; i < len(compare.set); i++ {
|
||||
b.set[i] = compare.set[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SymmetricDifference of base set and other set
|
||||
// This is the BitSet equivalent of ^ (xor)
|
||||
func (b *BitSet) SymmetricDifference(compare *BitSet) (result *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
b, compare = sortByLength(b, compare)
|
||||
// compare is bigger, so clone it
|
||||
result = compare.Clone()
|
||||
for i, word := range b.set {
|
||||
result.set[i] = word ^ compare.set[i]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SymmetricDifferenceCardinality computes the cardinality of the symmetric difference
|
||||
func (b *BitSet) SymmetricDifferenceCardinality(compare *BitSet) uint {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
b, compare = sortByLength(b, compare)
|
||||
cnt := popcntXorSlice(b.set, compare.set)
|
||||
if len(compare.set) > len(b.set) {
|
||||
cnt += popcntSlice(compare.set[len(b.set):])
|
||||
}
|
||||
return uint(cnt)
|
||||
}
|
||||
|
||||
// InPlaceSymmetricDifference creates the destructive SymmetricDifference of base set and other set
|
||||
// This is the BitSet equivalent of ^ (xor)
|
||||
func (b *BitSet) InPlaceSymmetricDifference(compare *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
l := int(compare.wordCount())
|
||||
if l > int(b.wordCount()) {
|
||||
l = int(b.wordCount())
|
||||
}
|
||||
if compare.length > 0 {
|
||||
b.extendSetMaybe(compare.length - 1)
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
b.set[i] ^= compare.set[i]
|
||||
}
|
||||
if len(compare.set) > l {
|
||||
for i := l; i < len(compare.set); i++ {
|
||||
b.set[i] = compare.set[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Is the length an exact multiple of word sizes?
|
||||
func (b *BitSet) isLenExactMultiple() bool {
|
||||
return b.length%wordSize == 0
|
||||
}
|
||||
|
||||
// Clean last word by setting unused bits to 0
|
||||
func (b *BitSet) cleanLastWord() {
|
||||
if !b.isLenExactMultiple() {
|
||||
b.set[len(b.set)-1] &= allBits >> (wordSize - b.length%wordSize)
|
||||
}
|
||||
}
|
||||
|
||||
// Complement computes the (local) complement of a bitset (up to length bits)
|
||||
func (b *BitSet) Complement() (result *BitSet) {
|
||||
panicIfNull(b)
|
||||
result = New(b.length)
|
||||
for i, word := range b.set {
|
||||
result.set[i] = ^word
|
||||
}
|
||||
result.cleanLastWord()
|
||||
return
|
||||
}
|
||||
|
||||
// All returns true if all bits are set, false otherwise. Returns true for
|
||||
// empty sets.
|
||||
func (b *BitSet) All() bool {
|
||||
panicIfNull(b)
|
||||
return b.Count() == b.length
|
||||
}
|
||||
|
||||
// None returns true if no bit is set, false otherwise. Returns true for
|
||||
// empty sets.
|
||||
func (b *BitSet) None() bool {
|
||||
panicIfNull(b)
|
||||
if b != nil && b.set != nil {
|
||||
for _, word := range b.set {
|
||||
if word > 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Any returns true if any bit is set, false otherwise
|
||||
func (b *BitSet) Any() bool {
|
||||
panicIfNull(b)
|
||||
return !b.None()
|
||||
}
|
||||
|
||||
// IsSuperSet returns true if this is a superset of the other set
|
||||
func (b *BitSet) IsSuperSet(other *BitSet) bool {
|
||||
for i, e := other.NextSet(0); e; i, e = other.NextSet(i + 1) {
|
||||
if !b.Test(i) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// IsStrictSuperSet returns true if this is a strict superset of the other set
|
||||
func (b *BitSet) IsStrictSuperSet(other *BitSet) bool {
|
||||
return b.Count() > other.Count() && b.IsSuperSet(other)
|
||||
}
|
||||
|
||||
// DumpAsBits dumps a bit set as a string of bits
|
||||
func (b *BitSet) DumpAsBits() string {
|
||||
if b.set == nil {
|
||||
return "."
|
||||
}
|
||||
buffer := bytes.NewBufferString("")
|
||||
i := len(b.set) - 1
|
||||
for ; i >= 0; i-- {
|
||||
fmt.Fprintf(buffer, "%064b.", b.set[i])
|
||||
}
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
// BinaryStorageSize returns the binary storage requirements
|
||||
func (b *BitSet) BinaryStorageSize() int {
|
||||
return binary.Size(uint64(0)) + binary.Size(b.set)
|
||||
}
|
||||
|
||||
// WriteTo writes a BitSet to a stream
|
||||
func (b *BitSet) WriteTo(stream io.Writer) (int64, error) {
|
||||
length := uint64(b.length)
|
||||
|
||||
// Write length
|
||||
err := binary.Write(stream, binaryOrder, length)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Write set
|
||||
err = binary.Write(stream, binaryOrder, b.set)
|
||||
return int64(b.BinaryStorageSize()), err
|
||||
}
|
||||
|
||||
// ReadFrom reads a BitSet from a stream written using WriteTo
|
||||
func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) {
|
||||
var length uint64
|
||||
|
||||
// Read length first
|
||||
err := binary.Read(stream, binaryOrder, &length)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
newset := New(uint(length))
|
||||
|
||||
if uint64(newset.length) != length {
|
||||
return 0, errors.New("unmarshalling error: type mismatch")
|
||||
}
|
||||
|
||||
// Read remaining bytes as set
|
||||
err = binary.Read(stream, binaryOrder, newset.set)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
*b = *newset
|
||||
return int64(b.BinaryStorageSize()), nil
|
||||
}
|
||||
|
||||
// MarshalBinary encodes a BitSet into a binary form and returns the result.
|
||||
func (b *BitSet) MarshalBinary() ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
writer := bufio.NewWriter(&buf)
|
||||
|
||||
_, err := b.WriteTo(writer)
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
err = writer.Flush()
|
||||
|
||||
return buf.Bytes(), err
|
||||
}
|
||||
|
||||
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
|
||||
func (b *BitSet) UnmarshalBinary(data []byte) error {
|
||||
buf := bytes.NewReader(data)
|
||||
reader := bufio.NewReader(buf)
|
||||
|
||||
_, err := b.ReadFrom(reader)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalJSON marshals a BitSet as a JSON structure
|
||||
func (b *BitSet) MarshalJSON() ([]byte, error) {
|
||||
buffer := bytes.NewBuffer(make([]byte, 0, b.BinaryStorageSize()))
|
||||
_, err := b.WriteTo(buffer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// URLEncode all bytes
|
||||
return json.Marshal(base64Encoding.EncodeToString(buffer.Bytes()))
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals a BitSet from JSON created using MarshalJSON
|
||||
func (b *BitSet) UnmarshalJSON(data []byte) error {
|
||||
// Unmarshal as string
|
||||
var s string
|
||||
err := json.Unmarshal(data, &s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// URLDecode string
|
||||
buf, err := base64Encoding.DecodeString(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = b.ReadFrom(bytes.NewReader(buf))
|
||||
return err
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
module github.com/bits-and-blooms/bitset
|
||||
|
||||
go 1.14
|
|
@ -1,53 +0,0 @@
|
|||
package bitset
|
||||
|
||||
// bit population count, take from
|
||||
// https://code.google.com/p/go/issues/detail?id=4988#c11
|
||||
// credit: https://code.google.com/u/arnehormann/
|
||||
func popcount(x uint64) (n uint64) {
|
||||
x -= (x >> 1) & 0x5555555555555555
|
||||
x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
|
||||
x += x >> 4
|
||||
x &= 0x0f0f0f0f0f0f0f0f
|
||||
x *= 0x0101010101010101
|
||||
return x >> 56
|
||||
}
|
||||
|
||||
func popcntSliceGo(s []uint64) uint64 {
|
||||
cnt := uint64(0)
|
||||
for _, x := range s {
|
||||
cnt += popcount(x)
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
func popcntMaskSliceGo(s, m []uint64) uint64 {
|
||||
cnt := uint64(0)
|
||||
for i := range s {
|
||||
cnt += popcount(s[i] &^ m[i])
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
func popcntAndSliceGo(s, m []uint64) uint64 {
|
||||
cnt := uint64(0)
|
||||
for i := range s {
|
||||
cnt += popcount(s[i] & m[i])
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
func popcntOrSliceGo(s, m []uint64) uint64 {
|
||||
cnt := uint64(0)
|
||||
for i := range s {
|
||||
cnt += popcount(s[i] | m[i])
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
func popcntXorSliceGo(s, m []uint64) uint64 {
|
||||
cnt := uint64(0)
|
||||
for i := range s {
|
||||
cnt += popcount(s[i] ^ m[i])
|
||||
}
|
||||
return cnt
|
||||
}
|
|
@ -1,45 +0,0 @@
|
|||
// +build go1.9
|
||||
|
||||
package bitset
|
||||
|
||||
import "math/bits"
|
||||
|
||||
func popcntSlice(s []uint64) uint64 {
|
||||
var cnt int
|
||||
for _, x := range s {
|
||||
cnt += bits.OnesCount64(x)
|
||||
}
|
||||
return uint64(cnt)
|
||||
}
|
||||
|
||||
func popcntMaskSlice(s, m []uint64) uint64 {
|
||||
var cnt int
|
||||
for i := range s {
|
||||
cnt += bits.OnesCount64(s[i] &^ m[i])
|
||||
}
|
||||
return uint64(cnt)
|
||||
}
|
||||
|
||||
func popcntAndSlice(s, m []uint64) uint64 {
|
||||
var cnt int
|
||||
for i := range s {
|
||||
cnt += bits.OnesCount64(s[i] & m[i])
|
||||
}
|
||||
return uint64(cnt)
|
||||
}
|
||||
|
||||
func popcntOrSlice(s, m []uint64) uint64 {
|
||||
var cnt int
|
||||
for i := range s {
|
||||
cnt += bits.OnesCount64(s[i] | m[i])
|
||||
}
|
||||
return uint64(cnt)
|
||||
}
|
||||
|
||||
func popcntXorSlice(s, m []uint64) uint64 {
|
||||
var cnt int
|
||||
for i := range s {
|
||||
cnt += bits.OnesCount64(s[i] ^ m[i])
|
||||
}
|
||||
return uint64(cnt)
|
||||
}
|
|
@ -1,68 +0,0 @@
|
|||
// +build !go1.9
|
||||
// +build amd64,!appengine
|
||||
|
||||
package bitset
|
||||
|
||||
// *** the following functions are defined in popcnt_amd64.s
|
||||
|
||||
//go:noescape
|
||||
|
||||
func hasAsm() bool
|
||||
|
||||
// useAsm is a flag used to select the GO or ASM implementation of the popcnt function
|
||||
var useAsm = hasAsm()
|
||||
|
||||
//go:noescape
|
||||
|
||||
func popcntSliceAsm(s []uint64) uint64
|
||||
|
||||
//go:noescape
|
||||
|
||||
func popcntMaskSliceAsm(s, m []uint64) uint64
|
||||
|
||||
//go:noescape
|
||||
|
||||
func popcntAndSliceAsm(s, m []uint64) uint64
|
||||
|
||||
//go:noescape
|
||||
|
||||
func popcntOrSliceAsm(s, m []uint64) uint64
|
||||
|
||||
//go:noescape
|
||||
|
||||
func popcntXorSliceAsm(s, m []uint64) uint64
|
||||
|
||||
func popcntSlice(s []uint64) uint64 {
|
||||
if useAsm {
|
||||
return popcntSliceAsm(s)
|
||||
}
|
||||
return popcntSliceGo(s)
|
||||
}
|
||||
|
||||
func popcntMaskSlice(s, m []uint64) uint64 {
|
||||
if useAsm {
|
||||
return popcntMaskSliceAsm(s, m)
|
||||
}
|
||||
return popcntMaskSliceGo(s, m)
|
||||
}
|
||||
|
||||
func popcntAndSlice(s, m []uint64) uint64 {
|
||||
if useAsm {
|
||||
return popcntAndSliceAsm(s, m)
|
||||
}
|
||||
return popcntAndSliceGo(s, m)
|
||||
}
|
||||
|
||||
func popcntOrSlice(s, m []uint64) uint64 {
|
||||
if useAsm {
|
||||
return popcntOrSliceAsm(s, m)
|
||||
}
|
||||
return popcntOrSliceGo(s, m)
|
||||
}
|
||||
|
||||
func popcntXorSlice(s, m []uint64) uint64 {
|
||||
if useAsm {
|
||||
return popcntXorSliceAsm(s, m)
|
||||
}
|
||||
return popcntXorSliceGo(s, m)
|
||||
}
|
|
@ -1,104 +0,0 @@
|
|||
// +build !go1.9
|
||||
// +build amd64,!appengine
|
||||
|
||||
TEXT ·hasAsm(SB),4,$0-1
|
||||
MOVQ $1, AX
|
||||
CPUID
|
||||
SHRQ $23, CX
|
||||
ANDQ $1, CX
|
||||
MOVB CX, ret+0(FP)
|
||||
RET
|
||||
|
||||
#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2
|
||||
|
||||
TEXT ·popcntSliceAsm(SB),4,$0-32
|
||||
XORQ AX, AX
|
||||
MOVQ s+0(FP), SI
|
||||
MOVQ s_len+8(FP), CX
|
||||
TESTQ CX, CX
|
||||
JZ popcntSliceEnd
|
||||
popcntSliceLoop:
|
||||
BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX
|
||||
ADDQ DX, AX
|
||||
ADDQ $8, SI
|
||||
LOOP popcntSliceLoop
|
||||
popcntSliceEnd:
|
||||
MOVQ AX, ret+24(FP)
|
||||
RET
|
||||
|
||||
TEXT ·popcntMaskSliceAsm(SB),4,$0-56
|
||||
XORQ AX, AX
|
||||
MOVQ s+0(FP), SI
|
||||
MOVQ s_len+8(FP), CX
|
||||
TESTQ CX, CX
|
||||
JZ popcntMaskSliceEnd
|
||||
MOVQ m+24(FP), DI
|
||||
popcntMaskSliceLoop:
|
||||
MOVQ (DI), DX
|
||||
NOTQ DX
|
||||
ANDQ (SI), DX
|
||||
POPCNTQ_DX_DX
|
||||
ADDQ DX, AX
|
||||
ADDQ $8, SI
|
||||
ADDQ $8, DI
|
||||
LOOP popcntMaskSliceLoop
|
||||
popcntMaskSliceEnd:
|
||||
MOVQ AX, ret+48(FP)
|
||||
RET
|
||||
|
||||
TEXT ·popcntAndSliceAsm(SB),4,$0-56
|
||||
XORQ AX, AX
|
||||
MOVQ s+0(FP), SI
|
||||
MOVQ s_len+8(FP), CX
|
||||
TESTQ CX, CX
|
||||
JZ popcntAndSliceEnd
|
||||
MOVQ m+24(FP), DI
|
||||
popcntAndSliceLoop:
|
||||
MOVQ (DI), DX
|
||||
ANDQ (SI), DX
|
||||
POPCNTQ_DX_DX
|
||||
ADDQ DX, AX
|
||||
ADDQ $8, SI
|
||||
ADDQ $8, DI
|
||||
LOOP popcntAndSliceLoop
|
||||
popcntAndSliceEnd:
|
||||
MOVQ AX, ret+48(FP)
|
||||
RET
|
||||
|
||||
TEXT ·popcntOrSliceAsm(SB),4,$0-56
|
||||
XORQ AX, AX
|
||||
MOVQ s+0(FP), SI
|
||||
MOVQ s_len+8(FP), CX
|
||||
TESTQ CX, CX
|
||||
JZ popcntOrSliceEnd
|
||||
MOVQ m+24(FP), DI
|
||||
popcntOrSliceLoop:
|
||||
MOVQ (DI), DX
|
||||
ORQ (SI), DX
|
||||
POPCNTQ_DX_DX
|
||||
ADDQ DX, AX
|
||||
ADDQ $8, SI
|
||||
ADDQ $8, DI
|
||||
LOOP popcntOrSliceLoop
|
||||
popcntOrSliceEnd:
|
||||
MOVQ AX, ret+48(FP)
|
||||
RET
|
||||
|
||||
TEXT ·popcntXorSliceAsm(SB),4,$0-56
|
||||
XORQ AX, AX
|
||||
MOVQ s+0(FP), SI
|
||||
MOVQ s_len+8(FP), CX
|
||||
TESTQ CX, CX
|
||||
JZ popcntXorSliceEnd
|
||||
MOVQ m+24(FP), DI
|
||||
popcntXorSliceLoop:
|
||||
MOVQ (DI), DX
|
||||
XORQ (SI), DX
|
||||
POPCNTQ_DX_DX
|
||||
ADDQ DX, AX
|
||||
ADDQ $8, SI
|
||||
ADDQ $8, DI
|
||||
LOOP popcntXorSliceLoop
|
||||
popcntXorSliceEnd:
|
||||
MOVQ AX, ret+48(FP)
|
||||
RET
|
|
@ -1,24 +0,0 @@
|
|||
// +build !go1.9
|
||||
// +build !amd64 appengine
|
||||
|
||||
package bitset
|
||||
|
||||
func popcntSlice(s []uint64) uint64 {
|
||||
return popcntSliceGo(s)
|
||||
}
|
||||
|
||||
func popcntMaskSlice(s, m []uint64) uint64 {
|
||||
return popcntMaskSliceGo(s, m)
|
||||
}
|
||||
|
||||
func popcntAndSlice(s, m []uint64) uint64 {
|
||||
return popcntAndSliceGo(s, m)
|
||||
}
|
||||
|
||||
func popcntOrSlice(s, m []uint64) uint64 {
|
||||
return popcntOrSliceGo(s, m)
|
||||
}
|
||||
|
||||
func popcntXorSlice(s, m []uint64) uint64 {
|
||||
return popcntXorSliceGo(s, m)
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
// +build !go1.9
|
||||
|
||||
package bitset
|
||||
|
||||
var deBruijn = [...]byte{
|
||||
0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
|
||||
62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
|
||||
63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
|
||||
54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
|
||||
}
|
||||
|
||||
func trailingZeroes64(v uint64) uint {
|
||||
return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58])
|
||||
}
|
|
@ -1,9 +0,0 @@
|
|||
// +build go1.9
|
||||
|
||||
package bitset
|
||||
|
||||
import "math/bits"
|
||||
|
||||
func trailingZeroes64(v uint64) uint {
|
||||
return uint(bits.TrailingZeros64(v))
|
||||
}
|
|
@ -2,6 +2,7 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package keyctl
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface)
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package keyctl
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package keyctl
|
||||
|
|
25
vendor/github.com/containers/image/v5/internal/rootless/rootless.go
generated
vendored
Normal file
25
vendor/github.com/containers/image/v5/internal/rootless/rootless.go
generated
vendored
Normal file
|
@ -0,0 +1,25 @@
|
|||
package rootless
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// GetRootlessEUID returns the UID of the current user (in the parent userNS, if any)
|
||||
//
|
||||
// Podman and similar software, in “rootless” configuration, when run as a non-root
|
||||
// user, very early switches to a user namespace, where Geteuid() == 0 (but does not
|
||||
// switch to a limited mount namespace); so, code relying on Geteuid() would use
|
||||
// system-wide paths in e.g. /var, when the user is actually not privileged to write to
|
||||
// them, and expects state to be stored in the home directory.
|
||||
//
|
||||
// If Podman is setting up such a user namespace, it records the original UID in an
|
||||
// environment variable, allowing us to make choices based on the actual user’s identity.
|
||||
func GetRootlessEUID() int {
|
||||
euidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
|
||||
if euidEnv != "" {
|
||||
euid, _ := strconv.Atoi(euidEnv)
|
||||
return euid
|
||||
}
|
||||
return os.Geteuid()
|
||||
}
|
|
@ -4,7 +4,7 @@ import "io"
|
|||
|
||||
// CompressorFunc writes the compressed stream to the given writer using the specified compression level.
|
||||
// The caller must call Close() on the stream (even if the input stream does not need closing!).
|
||||
type CompressorFunc func(io.Writer, *int) (io.WriteCloser, error)
|
||||
type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error)
|
||||
|
||||
// DecompressorFunc returns the decompressed stream, given a compressed stream.
|
||||
// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
|
||||
|
@ -13,7 +13,8 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
|
|||
// Algorithm is a compression algorithm that can be used for CompressStream.
|
||||
type Algorithm struct {
|
||||
name string
|
||||
prefix []byte
|
||||
mime string
|
||||
prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
|
||||
decompressor DecompressorFunc
|
||||
compressor CompressorFunc
|
||||
}
|
||||
|
@ -21,9 +22,10 @@ type Algorithm struct {
|
|||
// NewAlgorithm creates an Algorithm instance.
|
||||
// This function exists so that Algorithm instances can only be created by code that
|
||||
// is allowed to import this internal subpackage.
|
||||
func NewAlgorithm(name string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
|
||||
func NewAlgorithm(name, mime string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
|
||||
return Algorithm{
|
||||
name: name,
|
||||
mime: mime,
|
||||
prefix: prefix,
|
||||
decompressor: decompressor,
|
||||
compressor: compressor,
|
||||
|
@ -35,6 +37,12 @@ func (c Algorithm) Name() string {
|
|||
return c.name
|
||||
}
|
||||
|
||||
// InternalUnstableUndocumentedMIMEQuestionMark ???
|
||||
// DO NOT USE THIS anywhere outside of c/image until it is properly documented.
|
||||
func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string {
|
||||
return c.mime
|
||||
}
|
||||
|
||||
// AlgorithmCompressor returns the compressor field of algo.
|
||||
// This is a function instead of a public method so that it is only callable from by code
|
||||
// that is allowed to import this internal subpackage.
|
||||
|
|
|
@ -11,3 +11,31 @@ type DecompressorFunc = internal.DecompressorFunc
|
|||
// Algorithm is a compression algorithm provided and supported by pkg/compression.
|
||||
// It can’t be supplied from the outside.
|
||||
type Algorithm = internal.Algorithm
|
||||
|
||||
const (
|
||||
// GzipAlgorithmName is the name used by pkg/compression.Gzip.
|
||||
// NOTE: Importing only this /types package does not inherently guarantee a Gzip algorithm
|
||||
// will actually be available. (In fact it is intended for this types package not to depend
|
||||
// on any of the implementations.)
|
||||
GzipAlgorithmName = "gzip"
|
||||
// Bzip2AlgorithmName is the name used by pkg/compression.Bzip2.
|
||||
// NOTE: Importing only this /types package does not inherently guarantee a Bzip2 algorithm
|
||||
// will actually be available. (In fact it is intended for this types package not to depend
|
||||
// on any of the implementations.)
|
||||
Bzip2AlgorithmName = "bzip2"
|
||||
// XzAlgorithmName is the name used by pkg/compression.Xz.
|
||||
// NOTE: Importing only this /types package does not inherently guarantee a Xz algorithm
|
||||
// will actually be available. (In fact it is intended for this types package not to depend
|
||||
// on any of the implementations.)
|
||||
XzAlgorithmName = "Xz"
|
||||
// ZstdAlgorithmName is the name used by pkg/compression.Zstd.
|
||||
// NOTE: Importing only this /types package does not inherently guarantee a Zstd algorithm
|
||||
// will actually be available. (In fact it is intended for this types package not to depend
|
||||
// on any of the implementations.)
|
||||
ZstdAlgorithmName = "zstd"
|
||||
// ZstdChunkedAlgorithmName is the name used by pkg/compression.ZstdChunked.
|
||||
// NOTE: Importing only this /types package does not inherently guarantee a ZstdChunked algorithm
|
||||
// will actually be available. (In fact it is intended for this types package not to depend
|
||||
// on any of the implementations.)
|
||||
ZstdChunkedAlgorithmName = "zstd:chunked"
|
||||
)
|
||||
|
|
|
@ -6,14 +6,18 @@ import (
|
|||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/pkg/sysregistriesv2"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/homedir"
|
||||
helperclient "github.com/docker/docker-credential-helpers/client"
|
||||
"github.com/docker/docker-credential-helpers/credentials"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
@ -41,12 +45,6 @@ var (
|
|||
dockerLegacyHomePath = ".dockercfg"
|
||||
nonLinuxAuthFilePath = filepath.FromSlash(".config/containers/auth.json")
|
||||
|
||||
// Note that the keyring support has been disabled as it was causing
|
||||
// regressions. Before enabling, please revisit TODO(keyring) comments
|
||||
// which need to be addressed if the need remerged to support the
|
||||
// kernel keyring.
|
||||
enableKeyring = false
|
||||
|
||||
// ErrNotLoggedIn is returned for users not logged into a registry
|
||||
// that they are trying to logout of
|
||||
ErrNotLoggedIn = errors.New("not logged in")
|
||||
|
@ -54,72 +52,151 @@ var (
|
|||
ErrNotSupported = errors.New("not supported")
|
||||
)
|
||||
|
||||
// SetAuthentication stores the username and password in the auth.json file
|
||||
func SetAuthentication(sys *types.SystemContext, registry, username, password string) error {
|
||||
return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
|
||||
if ch, exists := auths.CredHelpers[registry]; exists {
|
||||
return false, setAuthToCredHelper(ch, registry, username, password)
|
||||
}
|
||||
// SetCredentials stores the username and password in a location
|
||||
// appropriate for sys and the users’ configuration.
|
||||
// A valid key is a repository, a namespace within a registry, or a registry hostname;
|
||||
// using forms other than just a registry may fail depending on configuration.
|
||||
// Returns a human-redable description of the location that was updated.
|
||||
// NOTE: The return value is only intended to be read by humans; its form is not an API,
|
||||
// it may change (or new forms can be added) any time.
|
||||
func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
|
||||
isNamespaced, err := validateKey(key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Set the credentials to kernel keyring if enableKeyring is true.
|
||||
// The keyring might not work in all environments (e.g., missing capability) and isn't supported on all platforms.
|
||||
// Hence, we want to fall-back to using the authfile in case the keyring failed.
|
||||
// However, if the enableKeyring is false, we want adhere to the user specification and not use the keyring.
|
||||
if enableKeyring {
|
||||
err := setAuthToKernelKeyring(registry, username, password)
|
||||
if err == nil {
|
||||
logrus.Debugf("credentials for (%s, %s) were stored in the kernel keyring\n", registry, username)
|
||||
return false, nil
|
||||
helpers, err := sysregistriesv2.CredentialHelpers(sys)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Make sure to collect all errors.
|
||||
var multiErr error
|
||||
for _, helper := range helpers {
|
||||
var desc string
|
||||
var err error
|
||||
switch helper {
|
||||
// Special-case the built-in helpers for auth files.
|
||||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
desc, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
|
||||
if ch, exists := auths.CredHelpers[key]; exists {
|
||||
if isNamespaced {
|
||||
return false, unsupportedNamespaceErr(ch)
|
||||
}
|
||||
return false, setAuthToCredHelper(ch, key, username, password)
|
||||
}
|
||||
creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
|
||||
newCreds := dockerAuthConfig{Auth: creds}
|
||||
auths.AuthConfigs[key] = newCreds
|
||||
return true, nil
|
||||
})
|
||||
// External helpers.
|
||||
default:
|
||||
if isNamespaced {
|
||||
err = unsupportedNamespaceErr(helper)
|
||||
} else {
|
||||
desc = fmt.Sprintf("credential helper: %s", helper)
|
||||
err = setAuthToCredHelper(helper, key, username, password)
|
||||
}
|
||||
logrus.Debugf("failed to authenticate with the kernel keyring, falling back to authfiles. %v", err)
|
||||
}
|
||||
creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
|
||||
newCreds := dockerAuthConfig{Auth: creds}
|
||||
auths.AuthConfigs[registry] = newCreds
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
multiErr = multierror.Append(multiErr, err)
|
||||
logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
|
||||
continue
|
||||
}
|
||||
logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
|
||||
return desc, nil
|
||||
}
|
||||
return "", multiErr
|
||||
}
|
||||
|
||||
func unsupportedNamespaceErr(helper string) error {
|
||||
return errors.Errorf("namespaced key is not supported for credential helper %s", helper)
|
||||
}
|
||||
|
||||
// SetAuthentication stores the username and password in the credential helper or file
|
||||
// See the documentation of SetCredentials for format of "key"
|
||||
func SetAuthentication(sys *types.SystemContext, key, username, password string) error {
|
||||
_, err := SetCredentials(sys, key, username, password)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetAllCredentials returns the registry credentials for all registries stored
|
||||
// in either the auth.json file or the docker/config.json.
|
||||
// in any of the configured credential helpers.
|
||||
func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) {
|
||||
// Note: we need to read the auth files in the inverse order to prevent
|
||||
// a priority inversion when writing to the map.
|
||||
authConfigs := make(map[string]types.DockerAuthConfig)
|
||||
paths := getAuthFilePaths(sys, homedir.Get())
|
||||
for i := len(paths) - 1; i >= 0; i-- {
|
||||
path := paths[i]
|
||||
// readJSONFile returns an empty map in case the path doesn't exist.
|
||||
auths, err := readJSONFile(path.path, path.legacyFormat)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading JSON file %q", path.path)
|
||||
}
|
||||
// To keep things simple, let's first extract all registries from all
|
||||
// possible sources, and then call `GetCredentials` on them. That
|
||||
// prevents us from having to reverse engineer the logic in
|
||||
// `GetCredentials`.
|
||||
allKeys := make(map[string]bool)
|
||||
addKey := func(s string) {
|
||||
allKeys[s] = true
|
||||
}
|
||||
|
||||
for registry, data := range auths.AuthConfigs {
|
||||
conf, err := decodeDockerAuth(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
authConfigs[normalizeRegistry(registry)] = conf
|
||||
}
|
||||
// To use GetCredentials, we must at least convert the URL forms into host names.
|
||||
// While we're at it, we’ll also canonicalize docker.io to the standard format.
|
||||
normalizedDockerIORegistry := normalizeRegistry("docker.io")
|
||||
|
||||
// Credential helpers may override credentials from the auth file.
|
||||
for registry, credHelper := range auths.CredHelpers {
|
||||
username, password, err := getAuthFromCredHelper(credHelper, registry)
|
||||
if err != nil {
|
||||
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
|
||||
continue
|
||||
helpers, err := sysregistriesv2.CredentialHelpers(sys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, helper := range helpers {
|
||||
switch helper {
|
||||
// Special-case the built-in helper for auth files.
|
||||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
for _, path := range getAuthFilePaths(sys, homedir.Get()) {
|
||||
// readJSONFile returns an empty map in case the path doesn't exist.
|
||||
auths, err := readJSONFile(path.path, path.legacyFormat)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "reading JSON file %q", path.path)
|
||||
}
|
||||
// Credential helpers in the auth file have a
|
||||
// direct mapping to a registry, so we can just
|
||||
// walk the map.
|
||||
for registry := range auths.CredHelpers {
|
||||
addKey(registry)
|
||||
}
|
||||
for key := range auths.AuthConfigs {
|
||||
key := normalizeAuthFileKey(key, path.legacyFormat)
|
||||
if key == normalizedDockerIORegistry {
|
||||
key = "docker.io"
|
||||
}
|
||||
addKey(key)
|
||||
}
|
||||
}
|
||||
// External helpers.
|
||||
default:
|
||||
creds, err := listAuthsFromCredHelper(helper)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err)
|
||||
}
|
||||
switch errors.Cause(err) {
|
||||
case nil:
|
||||
for registry := range creds {
|
||||
addKey(registry)
|
||||
}
|
||||
case exec.ErrNotFound:
|
||||
// It's okay if the helper doesn't exist.
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conf := types.DockerAuthConfig{Username: username, Password: password}
|
||||
authConfigs[normalizeRegistry(registry)] = conf
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(keyring): if we ever re-enable the keyring support, we had to
|
||||
// query all credentials from the keyring here.
|
||||
// Now use `GetCredentials` to the specific auth configs for each
|
||||
// previously listed registry.
|
||||
authConfigs := make(map[string]types.DockerAuthConfig)
|
||||
for key := range allKeys {
|
||||
authConf, err := GetCredentials(sys, key)
|
||||
if err != nil {
|
||||
// Note: we rely on the logging in `GetCredentials`.
|
||||
return nil, err
|
||||
}
|
||||
if authConf != (types.DockerAuthConfig{}) {
|
||||
authConfigs[key] = authConf
|
||||
}
|
||||
}
|
||||
|
||||
return authConfigs, nil
|
||||
}
|
||||
|
@ -159,65 +236,124 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
|
|||
return paths
|
||||
}
|
||||
|
||||
// GetCredentials returns the registry credentials stored in either auth.json
|
||||
// file or .docker/config.json, including support for OAuth2 and IdentityToken.
|
||||
// GetCredentials returns the registry credentials matching key, appropriate for
|
||||
// sys and the users’ configuration.
|
||||
// If an entry is not found, an empty struct is returned.
|
||||
func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) {
|
||||
return getCredentialsWithHomeDir(sys, registry, homedir.Get())
|
||||
// A valid key is a repository, a namespace within a registry, or a registry hostname.
|
||||
//
|
||||
// GetCredentialsForRef should almost always be used in favor of this API.
|
||||
func GetCredentials(sys *types.SystemContext, key string) (types.DockerAuthConfig, error) {
|
||||
return getCredentialsWithHomeDir(sys, key, homedir.Get())
|
||||
}
|
||||
|
||||
// getCredentialsWithHomeDir is an internal implementation detail of GetCredentials,
|
||||
// it exists only to allow testing it with an artificial home directory.
|
||||
func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir string) (types.DockerAuthConfig, error) {
|
||||
// GetCredentialsForRef returns the registry credentials necessary for
|
||||
// accessing ref on the registry ref points to,
|
||||
// appropriate for sys and the users’ configuration.
|
||||
// If an entry is not found, an empty struct is returned.
|
||||
func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) {
|
||||
return getCredentialsWithHomeDir(sys, ref.Name(), homedir.Get())
|
||||
}
|
||||
|
||||
// getCredentialsWithHomeDir is an internal implementation detail of
|
||||
// GetCredentialsForRef and GetCredentials. It exists only to allow testing it
|
||||
// with an artificial home directory.
|
||||
func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (types.DockerAuthConfig, error) {
|
||||
_, err := validateKey(key)
|
||||
if err != nil {
|
||||
return types.DockerAuthConfig{}, err
|
||||
}
|
||||
|
||||
if sys != nil && sys.DockerAuthConfig != nil {
|
||||
logrus.Debug("Returning credentials from DockerAuthConfig")
|
||||
logrus.Debugf("Returning credentials for %s from DockerAuthConfig", key)
|
||||
return *sys.DockerAuthConfig, nil
|
||||
}
|
||||
|
||||
if enableKeyring {
|
||||
username, password, err := getAuthFromKernelKeyring(registry)
|
||||
if err == nil {
|
||||
logrus.Debug("returning credentials from kernel keyring")
|
||||
return types.DockerAuthConfig{
|
||||
Username: username,
|
||||
Password: password,
|
||||
}, nil
|
||||
}
|
||||
var registry string // We compute this once because it is used in several places.
|
||||
if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 {
|
||||
registry = key[:firstSlash]
|
||||
} else {
|
||||
registry = key
|
||||
}
|
||||
|
||||
for _, path := range getAuthFilePaths(sys, homeDir) {
|
||||
authConfig, err := findAuthentication(registry, path.path, path.legacyFormat)
|
||||
// Anonymous function to query credentials from auth files.
|
||||
getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) {
|
||||
for _, path := range getAuthFilePaths(sys, homeDir) {
|
||||
authConfig, err := findCredentialsInFile(key, registry, path.path, path.legacyFormat)
|
||||
if err != nil {
|
||||
return types.DockerAuthConfig{}, "", err
|
||||
}
|
||||
|
||||
if authConfig != (types.DockerAuthConfig{}) {
|
||||
return authConfig, path.path, nil
|
||||
}
|
||||
}
|
||||
return types.DockerAuthConfig{}, "", nil
|
||||
}
|
||||
|
||||
helpers, err := sysregistriesv2.CredentialHelpers(sys)
|
||||
if err != nil {
|
||||
return types.DockerAuthConfig{}, err
|
||||
}
|
||||
|
||||
var multiErr error
|
||||
for _, helper := range helpers {
|
||||
var (
|
||||
creds types.DockerAuthConfig
|
||||
helperKey string
|
||||
credHelperPath string
|
||||
err error
|
||||
)
|
||||
switch helper {
|
||||
// Special-case the built-in helper for auth files.
|
||||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
helperKey = key
|
||||
creds, credHelperPath, err = getCredentialsFromAuthFiles()
|
||||
// External helpers.
|
||||
default:
|
||||
// This intentionally uses "registry", not "key"; we don't support namespaced
|
||||
// credentials in helpers, but a "registry" is a valid parent of "key".
|
||||
helperKey = registry
|
||||
creds, err = getAuthFromCredHelper(helper, registry)
|
||||
}
|
||||
if err != nil {
|
||||
logrus.Debugf("Credentials not found")
|
||||
return types.DockerAuthConfig{}, err
|
||||
logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
|
||||
multiErr = multierror.Append(multiErr, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" {
|
||||
logrus.Debugf("Returning credentials from %s", path.path)
|
||||
return authConfig, nil
|
||||
if creds != (types.DockerAuthConfig{}) {
|
||||
msg := fmt.Sprintf("Found credentials for %s in credential helper %s", helperKey, helper)
|
||||
if credHelperPath != "" {
|
||||
msg = fmt.Sprintf("%s in file %s", msg, credHelperPath)
|
||||
}
|
||||
logrus.Debug(msg)
|
||||
return creds, nil
|
||||
}
|
||||
}
|
||||
if multiErr != nil {
|
||||
return types.DockerAuthConfig{}, multiErr
|
||||
}
|
||||
|
||||
logrus.Debugf("Credentials not found")
|
||||
logrus.Debugf("No credentials for %s found", key)
|
||||
return types.DockerAuthConfig{}, nil
|
||||
}
|
||||
|
||||
// GetAuthentication returns the registry credentials stored in
|
||||
// either auth.json file or .docker/config.json
|
||||
// If an entry is not found empty strings are returned for the username and password
|
||||
// GetAuthentication returns the registry credentials matching key, appropriate for
|
||||
// sys and the users’ configuration.
|
||||
// If an entry is not found, an empty struct is returned.
|
||||
// A valid key is a repository, a namespace within a registry, or a registry hostname.
|
||||
//
|
||||
// Deprecated: This API only has support for username and password. To get the
|
||||
// support for oauth2 in docker registry authentication, we added the new
|
||||
// GetCredentials API. The new API should be used and this API is kept to
|
||||
// support for oauth2 in container registry authentication, we added the new
|
||||
// GetCredentialsForRef and GetCredentials API. The new API should be used and this API is kept to
|
||||
// maintain backward compatibility.
|
||||
func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
|
||||
return getAuthenticationWithHomeDir(sys, registry, homedir.Get())
|
||||
func GetAuthentication(sys *types.SystemContext, key string) (string, string, error) {
|
||||
return getAuthenticationWithHomeDir(sys, key, homedir.Get())
|
||||
}
|
||||
|
||||
// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
|
||||
// it exists only to allow testing it with an artificial home directory.
|
||||
func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) {
|
||||
auth, err := getCredentialsWithHomeDir(sys, registry, homeDir)
|
||||
func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) {
|
||||
auth, err := getCredentialsWithHomeDir(sys, key, homeDir)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
@ -227,50 +363,138 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir st
|
|||
return auth.Username, auth.Password, nil
|
||||
}
|
||||
|
||||
// RemoveAuthentication deletes the credentials stored in auth.json
|
||||
func RemoveAuthentication(sys *types.SystemContext, registry string) error {
|
||||
return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
|
||||
// First try cred helpers.
|
||||
if ch, exists := auths.CredHelpers[registry]; exists {
|
||||
return false, deleteAuthFromCredHelper(ch, registry)
|
||||
}
|
||||
// RemoveAuthentication removes credentials for `key` from all possible
|
||||
// sources such as credential helpers and auth files.
|
||||
// A valid key is a repository, a namespace within a registry, or a registry hostname;
|
||||
// using forms other than just a registry may fail depending on configuration.
|
||||
func RemoveAuthentication(sys *types.SystemContext, key string) error {
|
||||
isNamespaced, err := validateKey(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Next if keyring is enabled try kernel keyring
|
||||
if enableKeyring {
|
||||
err := deleteAuthFromKernelKeyring(registry)
|
||||
if err == nil {
|
||||
logrus.Debugf("credentials for %s were deleted from the kernel keyring", registry)
|
||||
return false, nil
|
||||
}
|
||||
logrus.Debugf("failed to delete credentials from the kernel keyring, falling back to authfiles")
|
||||
}
|
||||
helpers, err := sysregistriesv2.CredentialHelpers(sys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, ok := auths.AuthConfigs[registry]; ok {
|
||||
delete(auths.AuthConfigs, registry)
|
||||
} else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok {
|
||||
delete(auths.AuthConfigs, normalizeRegistry(registry))
|
||||
var multiErr error
|
||||
isLoggedIn := false
|
||||
|
||||
removeFromCredHelper := func(helper string) {
|
||||
if isNamespaced {
|
||||
logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
|
||||
return
|
||||
} else {
|
||||
return false, ErrNotLoggedIn
|
||||
err := deleteAuthFromCredHelper(helper, key)
|
||||
if err == nil {
|
||||
logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
|
||||
isLoggedIn = true
|
||||
return
|
||||
}
|
||||
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
|
||||
logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
|
||||
return
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
multiErr = multierror.Append(multiErr, errors.Wrapf(err, "removing credentials for %s from credential helper %s", key, helper))
|
||||
}
|
||||
|
||||
for _, helper := range helpers {
|
||||
var err error
|
||||
switch helper {
|
||||
// Special-case the built-in helper for auth files.
|
||||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
_, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
|
||||
if innerHelper, exists := auths.CredHelpers[key]; exists {
|
||||
removeFromCredHelper(innerHelper)
|
||||
}
|
||||
if _, ok := auths.AuthConfigs[key]; ok {
|
||||
isLoggedIn = true
|
||||
delete(auths.AuthConfigs, key)
|
||||
}
|
||||
return true, multiErr
|
||||
})
|
||||
if err != nil {
|
||||
multiErr = multierror.Append(multiErr, err)
|
||||
}
|
||||
// External helpers.
|
||||
default:
|
||||
removeFromCredHelper(helper)
|
||||
}
|
||||
}
|
||||
|
||||
if multiErr != nil {
|
||||
return multiErr
|
||||
}
|
||||
if !isLoggedIn {
|
||||
return ErrNotLoggedIn
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveAllAuthentication deletes all the credentials stored in auth.json and kernel keyring
|
||||
// RemoveAllAuthentication deletes all the credentials stored in credential
|
||||
// helpers and auth files.
|
||||
func RemoveAllAuthentication(sys *types.SystemContext) error {
|
||||
return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
|
||||
if enableKeyring {
|
||||
err := removeAllAuthFromKernelKeyring()
|
||||
if err == nil {
|
||||
logrus.Debugf("removing all credentials from kernel keyring")
|
||||
return false, nil
|
||||
helpers, err := sysregistriesv2.CredentialHelpers(sys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var multiErr error
|
||||
for _, helper := range helpers {
|
||||
var err error
|
||||
switch helper {
|
||||
// Special-case the built-in helper for auth files.
|
||||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
_, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
|
||||
for registry, helper := range auths.CredHelpers {
|
||||
// Helpers in auth files are expected
|
||||
// to exist, so no special treatment
|
||||
// for them.
|
||||
if err := deleteAuthFromCredHelper(helper, registry); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
auths.CredHelpers = make(map[string]string)
|
||||
auths.AuthConfigs = make(map[string]dockerAuthConfig)
|
||||
return true, nil
|
||||
})
|
||||
// External helpers.
|
||||
default:
|
||||
var creds map[string]string
|
||||
creds, err = listAuthsFromCredHelper(helper)
|
||||
switch errors.Cause(err) {
|
||||
case nil:
|
||||
for registry := range creds {
|
||||
err = deleteAuthFromCredHelper(helper, registry)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
case exec.ErrNotFound:
|
||||
// It's okay if the helper doesn't exist.
|
||||
continue
|
||||
default:
|
||||
// fall through
|
||||
}
|
||||
logrus.Debugf("error removing credentials from kernel keyring")
|
||||
}
|
||||
auths.CredHelpers = make(map[string]string)
|
||||
auths.AuthConfigs = make(map[string]dockerAuthConfig)
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err)
|
||||
multiErr = multierror.Append(multiErr, err)
|
||||
continue
|
||||
}
|
||||
logrus.Debugf("All credentials removed from credential helper %s", helper)
|
||||
}
|
||||
|
||||
return multiErr
|
||||
}
|
||||
|
||||
func listAuthsFromCredHelper(credHelper string) (map[string]string, error) {
|
||||
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
|
||||
p := helperclient.NewShellProgramFunc(helperName)
|
||||
return helperclient.List(p)
|
||||
}
|
||||
|
||||
// getPathToAuth gets the path of the auth.json file used for reading and writing credentials
|
||||
|
@ -330,13 +554,13 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
|
|||
|
||||
if legacyFormat {
|
||||
if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
|
||||
return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
|
||||
return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path)
|
||||
}
|
||||
return auths, nil
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(raw, &auths); err != nil {
|
||||
return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
|
||||
return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path)
|
||||
}
|
||||
|
||||
if auths.AuthConfigs == nil {
|
||||
|
@ -349,52 +573,69 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
|
|||
return auths, nil
|
||||
}
|
||||
|
||||
// modifyJSON writes to auth.json if the dockerConfigFile has been updated
|
||||
func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error {
|
||||
// modifyJSON finds an auth.json file, calls editor on the contents, and
|
||||
// writes it back if editor returns true.
|
||||
// Returns a human-redable description of the file, to be returned by SetCredentials.
|
||||
func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) (string, error) {
|
||||
path, legacyFormat, err := getPathToAuth(sys)
|
||||
if err != nil {
|
||||
return err
|
||||
return "", err
|
||||
}
|
||||
if legacyFormat {
|
||||
return fmt.Errorf("writes to %s using legacy format are not supported", path)
|
||||
return "", fmt.Errorf("writes to %s using legacy format are not supported", path)
|
||||
}
|
||||
|
||||
dir := filepath.Dir(path)
|
||||
if err = os.MkdirAll(dir, 0700); err != nil {
|
||||
return err
|
||||
return "", err
|
||||
}
|
||||
|
||||
auths, err := readJSONFile(path, false)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading JSON file %q", path)
|
||||
return "", errors.Wrapf(err, "reading JSON file %q", path)
|
||||
}
|
||||
|
||||
updated, err := editor(&auths)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error updating %q", path)
|
||||
return "", errors.Wrapf(err, "updating %q", path)
|
||||
}
|
||||
if updated {
|
||||
newData, err := json.MarshalIndent(auths, "", "\t")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error marshaling JSON %q", path)
|
||||
return "", errors.Wrapf(err, "marshaling JSON %q", path)
|
||||
}
|
||||
|
||||
if err = ioutil.WriteFile(path, newData, 0600); err != nil {
|
||||
return errors.Wrapf(err, "error writing to file %q", path)
|
||||
return "", errors.Wrapf(err, "writing to file %q", path)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func getAuthFromCredHelper(credHelper, registry string) (string, string, error) {
|
||||
func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
|
||||
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
|
||||
p := helperclient.NewShellProgramFunc(helperName)
|
||||
creds, err := helperclient.Get(p, registry)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
|
||||
logrus.Debugf("Not logged in to %s with credential helper %s", registry, credHelper)
|
||||
err = nil
|
||||
}
|
||||
return types.DockerAuthConfig{}, err
|
||||
}
|
||||
|
||||
switch creds.Username {
|
||||
case "<token>":
|
||||
return types.DockerAuthConfig{
|
||||
IdentityToken: creds.Secret,
|
||||
}, nil
|
||||
default:
|
||||
return types.DockerAuthConfig{
|
||||
Username: creds.Username,
|
||||
Password: creds.Secret,
|
||||
}, nil
|
||||
}
|
||||
return creds.Username, creds.Secret, nil
|
||||
}
|
||||
|
||||
func setAuthToCredHelper(credHelper, registry, username, password string) error {
|
||||
|
@ -414,45 +655,82 @@ func deleteAuthFromCredHelper(credHelper, registry string) error {
|
|||
return helperclient.Erase(p, registry)
|
||||
}
|
||||
|
||||
// findAuthentication looks for auth of registry in path
|
||||
func findAuthentication(registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
|
||||
// findCredentialsInFile looks for credentials matching "key"
|
||||
// (which is "registry" or a namespace in "registry") in "path".
|
||||
func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
|
||||
auths, err := readJSONFile(path, legacyFormat)
|
||||
if err != nil {
|
||||
return types.DockerAuthConfig{}, errors.Wrapf(err, "error reading JSON file %q", path)
|
||||
return types.DockerAuthConfig{}, errors.Wrapf(err, "reading JSON file %q", path)
|
||||
}
|
||||
|
||||
// First try cred helpers. They should always be normalized.
|
||||
// This intentionally uses "registry", not "key"; we don't support namespaced
|
||||
// credentials in helpers.
|
||||
if ch, exists := auths.CredHelpers[registry]; exists {
|
||||
username, password, err := getAuthFromCredHelper(ch, registry)
|
||||
if err != nil {
|
||||
return types.DockerAuthConfig{}, err
|
||||
}
|
||||
|
||||
return types.DockerAuthConfig{
|
||||
Username: username,
|
||||
Password: password,
|
||||
}, nil
|
||||
logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path)
|
||||
return getAuthFromCredHelper(ch, registry)
|
||||
}
|
||||
|
||||
// I'm feeling lucky
|
||||
if val, exists := auths.AuthConfigs[registry]; exists {
|
||||
return decodeDockerAuth(val)
|
||||
// Support sub-registry namespaces in auth.
|
||||
// (This is not a feature of ~/.docker/config.json; we support it even for
|
||||
// those files as an extension.)
|
||||
var keys []string
|
||||
if !legacyFormat {
|
||||
keys = authKeysForKey(key)
|
||||
} else {
|
||||
keys = []string{registry}
|
||||
}
|
||||
|
||||
// Repo or namespace keys are only supported as exact matches. For registry
|
||||
// keys we prefer exact matches as well.
|
||||
for _, key := range keys {
|
||||
if val, exists := auths.AuthConfigs[key]; exists {
|
||||
return decodeDockerAuth(val)
|
||||
}
|
||||
}
|
||||
|
||||
// bad luck; let's normalize the entries first
|
||||
// This primarily happens for legacyFormat, which for a time used API URLs
|
||||
// (http[s:]//…/v1/) as keys.
|
||||
// Secondarily, (docker login) accepted URLs with no normalization for
|
||||
// several years, and matched registry hostnames against that, so support
|
||||
// those entries even in non-legacyFormat ~/.docker/config.json.
|
||||
// The docker.io registry still uses the /v1/ key with a special host name,
|
||||
// so account for that as well.
|
||||
registry = normalizeRegistry(registry)
|
||||
normalizedAuths := map[string]dockerAuthConfig{}
|
||||
for k, v := range auths.AuthConfigs {
|
||||
normalizedAuths[normalizeRegistry(k)] = v
|
||||
}
|
||||
|
||||
if val, exists := normalizedAuths[registry]; exists {
|
||||
return decodeDockerAuth(val)
|
||||
if normalizeAuthFileKey(k, legacyFormat) == registry {
|
||||
return decodeDockerAuth(v)
|
||||
}
|
||||
}
|
||||
|
||||
// Only log this if we found nothing; getCredentialsWithHomeDir logs the
|
||||
// source of found data.
|
||||
logrus.Debugf("No credentials matching %s found in %s", key, path)
|
||||
return types.DockerAuthConfig{}, nil
|
||||
}
|
||||
|
||||
// authKeysForKey returns the keys matching a provided auth file key, in order
|
||||
// from the best match to worst. For example,
|
||||
// when given a repository key "quay.io/repo/ns/image", it returns
|
||||
// - quay.io/repo/ns/image
|
||||
// - quay.io/repo/ns
|
||||
// - quay.io/repo
|
||||
// - quay.io
|
||||
func authKeysForKey(key string) (res []string) {
|
||||
for {
|
||||
res = append(res, key)
|
||||
|
||||
lastSlash := strings.LastIndex(key, "/")
|
||||
if lastSlash == -1 {
|
||||
break
|
||||
}
|
||||
key = key[:lastSlash]
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// decodeDockerAuth decodes the username and password, which is
|
||||
// encoded in base64.
|
||||
func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
|
||||
|
@ -476,27 +754,49 @@ func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
// convertToHostname converts a registry url which has http|https prepended
|
||||
// to just an hostname.
|
||||
// Copied from github.com/docker/docker/registry/auth.go
|
||||
func convertToHostname(url string) string {
|
||||
stripped := url
|
||||
if strings.HasPrefix(url, "http://") {
|
||||
stripped = strings.TrimPrefix(url, "http://")
|
||||
} else if strings.HasPrefix(url, "https://") {
|
||||
stripped = strings.TrimPrefix(url, "https://")
|
||||
// normalizeAuthFileKey takes a key, converts it to a host name and normalizes
|
||||
// the resulting registry.
|
||||
func normalizeAuthFileKey(key string, legacyFormat bool) string {
|
||||
stripped := strings.TrimPrefix(key, "http://")
|
||||
stripped = strings.TrimPrefix(stripped, "https://")
|
||||
|
||||
if legacyFormat || stripped != key {
|
||||
stripped = strings.SplitN(stripped, "/", 2)[0]
|
||||
}
|
||||
|
||||
nameParts := strings.SplitN(stripped, "/", 2)
|
||||
|
||||
return nameParts[0]
|
||||
return normalizeRegistry(stripped)
|
||||
}
|
||||
|
||||
// normalizeRegistry converts the provided registry if a known docker.io host
|
||||
// is provided.
|
||||
func normalizeRegistry(registry string) string {
|
||||
normalized := convertToHostname(registry)
|
||||
switch normalized {
|
||||
switch registry {
|
||||
case "registry-1.docker.io", "docker.io":
|
||||
return "index.docker.io"
|
||||
}
|
||||
return normalized
|
||||
return registry
|
||||
}
|
||||
|
||||
// validateKey verifies that the input key does not have a prefix that is not
|
||||
// allowed and returns an indicator if the key is namespaced.
|
||||
func validateKey(key string) (bool, error) {
|
||||
if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") {
|
||||
return false, errors.Errorf("key %s contains http[s]:// prefix", key)
|
||||
}
|
||||
|
||||
// Ideally this should only accept explicitly valid keys, compare
|
||||
// validateIdentityRemappingPrefix. For now, just reject values that look
|
||||
// like tagged or digested values.
|
||||
if strings.ContainsRune(key, '@') {
|
||||
return false, fmt.Errorf(`key %s contains a '@' character`, key)
|
||||
}
|
||||
|
||||
firstSlash := strings.IndexRune(key, '/')
|
||||
isNamespaced := firstSlash != -1
|
||||
// Reject host/repo:tag, but allow localhost:5000 and localhost:5000/foo.
|
||||
if isNamespaced && strings.ContainsRune(key[firstSlash+1:], ':') {
|
||||
return false, fmt.Errorf(`key %s contains a ':' character after host[:port]`, key)
|
||||
}
|
||||
// check if the provided key contains one or more subpaths.
|
||||
return isNamespaced, nil
|
||||
}
|
||||
|
|
|
@ -9,9 +9,13 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const keyDescribePrefix = "container-registry-login:"
|
||||
// NOTE: none of the functions here are currently used. If we ever want to
|
||||
// re-enable keyring support, we should introduce a similar built-in credential
|
||||
// helpers as for `sysregistriesv2.AuthenticationFileHelper`.
|
||||
|
||||
func getAuthFromKernelKeyring(registry string) (string, string, error) {
|
||||
const keyDescribePrefix = "container-registry-login:" //nolint:deadcode,unused
|
||||
|
||||
func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused
|
||||
userkeyring, err := keyctl.UserKeyring()
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
|
@ -31,7 +35,7 @@ func getAuthFromKernelKeyring(registry string) (string, string, error) {
|
|||
return parts[0], parts[1], nil
|
||||
}
|
||||
|
||||
func deleteAuthFromKernelKeyring(registry string) error {
|
||||
func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused
|
||||
userkeyring, err := keyctl.UserKeyring()
|
||||
|
||||
if err != nil {
|
||||
|
@ -44,7 +48,7 @@ func deleteAuthFromKernelKeyring(registry string) error {
|
|||
return key.Unlink()
|
||||
}
|
||||
|
||||
func removeAllAuthFromKernelKeyring() error {
|
||||
func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused
|
||||
keys, err := keyctl.ReadUserKeyring()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -69,7 +73,7 @@ func removeAllAuthFromKernelKeyring() error {
|
|||
if strings.HasPrefix(keyDescribe, keyDescribePrefix) {
|
||||
err := keyctl.Unlink(userkeyring, k)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error unlinking key %d", k.ID())
|
||||
return errors.Wrapf(err, "unlinking key %d", k.ID())
|
||||
}
|
||||
logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr)
|
||||
}
|
||||
|
@ -77,7 +81,7 @@ func removeAllAuthFromKernelKeyring() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func setAuthToKernelKeyring(registry, username, password string) error {
|
||||
func setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused
|
||||
keyring, err := keyctl.SessionKeyring()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -96,20 +100,20 @@ func setAuthToKernelKeyring(registry, username, password string) error {
|
|||
// link the key to userKeyring
|
||||
userKeyring, err := keyctl.UserKeyring()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error getting user keyring")
|
||||
return errors.Wrapf(err, "getting user keyring")
|
||||
}
|
||||
err = keyctl.Link(userKeyring, id)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error linking the key to user keyring")
|
||||
return errors.Wrapf(err, "linking the key to user keyring")
|
||||
}
|
||||
// unlink the key from session keyring
|
||||
err = keyctl.Unlink(keyring, id)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error unlinking the key from session keyring")
|
||||
return errors.Wrapf(err, "unlinking the key from session keyring")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func genDescription(registry string) string {
|
||||
func genDescription(registry string) string { //nolint:deadcode,unused
|
||||
return fmt.Sprintf("%s%s", keyDescribePrefix, registry)
|
||||
}
|
||||
|
|
|
@ -1,20 +1,21 @@
|
|||
//go:build !linux && (!386 || !amd64)
|
||||
// +build !linux
|
||||
// +build !386 !amd64
|
||||
|
||||
package config
|
||||
|
||||
func getAuthFromKernelKeyring(registry string) (string, string, error) {
|
||||
func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused
|
||||
return "", "", ErrNotSupported
|
||||
}
|
||||
|
||||
func deleteAuthFromKernelKeyring(registry string) error {
|
||||
func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func setAuthToKernelKeyring(registry, username, password string) error {
|
||||
func setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func removeAllAuthFromKernelKeyring() error {
|
||||
func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
|
347
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
generated
vendored
Normal file
347
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
generated
vendored
Normal file
|
@ -0,0 +1,347 @@
|
|||
package sysregistriesv2
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/rootless"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/homedir"
|
||||
"github.com/containers/storage/pkg/lockfile"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// defaultShortNameMode is the default mode of registries.conf files if the
|
||||
// corresponding field is left empty.
|
||||
const defaultShortNameMode = types.ShortNameModePermissive
|
||||
|
||||
// userShortNamesFile is the user-specific config file to store aliases.
|
||||
var userShortNamesFile = filepath.FromSlash("containers/short-name-aliases.conf")
|
||||
|
||||
// shortNameAliasesConfPath returns the path to the machine-generated
|
||||
// short-name-aliases.conf file.
|
||||
func shortNameAliasesConfPath(ctx *types.SystemContext) (string, error) {
|
||||
if ctx != nil && len(ctx.UserShortNameAliasConfPath) > 0 {
|
||||
return ctx.UserShortNameAliasConfPath, nil
|
||||
}
|
||||
|
||||
if rootless.GetRootlessEUID() == 0 {
|
||||
// Root user or in a non-conforming user NS
|
||||
return filepath.Join("/var/cache", userShortNamesFile), nil
|
||||
}
|
||||
|
||||
// Rootless user
|
||||
cacheRoot, err := homedir.GetCacheHome()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return filepath.Join(cacheRoot, userShortNamesFile), nil
|
||||
}
|
||||
|
||||
// shortNameAliasConf is a subset of the `V2RegistriesConf` format. It's used in the
|
||||
// software-maintained `userShortNamesFile`.
|
||||
type shortNameAliasConf struct {
|
||||
// A map for aliasing short names to their fully-qualified image
|
||||
// reference counter parts.
|
||||
// Note that Aliases is niled after being loaded from a file.
|
||||
Aliases map[string]string `toml:"aliases"`
|
||||
|
||||
// If you add any field, make sure to update nonempty() below.
|
||||
}
|
||||
|
||||
// nonempty returns true if config contains at least one configuration entry.
|
||||
func (c *shortNameAliasConf) nonempty() bool {
|
||||
copy := *c // A shallow copy
|
||||
if copy.Aliases != nil && len(copy.Aliases) == 0 {
|
||||
copy.Aliases = nil
|
||||
}
|
||||
return !reflect.DeepEqual(copy, shortNameAliasConf{})
|
||||
}
|
||||
|
||||
// alias combines the parsed value of an alias with the config file it has been
|
||||
// specified in. The config file is crucial for an improved user experience
|
||||
// such that users are able to resolve potential pull errors.
|
||||
type alias struct {
|
||||
// The parsed value of an alias. May be nil if set to "" in a config.
|
||||
value reference.Named
|
||||
// The config file the alias originates from.
|
||||
configOrigin string
|
||||
}
|
||||
|
||||
// shortNameAliasCache is the result of parsing shortNameAliasConf,
|
||||
// pre-processed for faster usage.
|
||||
type shortNameAliasCache struct {
|
||||
// Note that an alias value may be nil iff it's set as an empty string
|
||||
// in the config.
|
||||
namedAliases map[string]alias
|
||||
}
|
||||
|
||||
// ResolveShortNameAlias performs an alias resolution of the specified name.
|
||||
// The user-specific short-name-aliases.conf has precedence over aliases in the
|
||||
// assembled registries.conf. It returns the possibly resolved alias or nil, a
|
||||
// human-readable description of the config where the alias is specified, and
|
||||
// an error. The origin of the config file is crucial for an improved user
|
||||
// experience such that users are able to resolve potential pull errors.
|
||||
// Almost all callers should use pkg/shortnames instead.
|
||||
//
|
||||
// Note that it’s the caller’s responsibility to pass only a repository
|
||||
// (reference.IsNameOnly) as the short name.
|
||||
func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Named, string, error) {
|
||||
if err := validateShortName(name); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
confPath, lock, err := shortNameAliasesConfPathAndLock(ctx)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
// Acquire the lock as a reader to allow for multiple routines in the
|
||||
// same process space to read simultaneously.
|
||||
lock.RLock()
|
||||
defer lock.Unlock()
|
||||
|
||||
_, aliasCache, err := loadShortNameAliasConf(confPath)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
// First look up the short-name-aliases.conf. Note that a value may be
|
||||
// nil iff it's set as an empty string in the config.
|
||||
alias, resolved := aliasCache.namedAliases[name]
|
||||
if resolved {
|
||||
return alias.value, alias.configOrigin, nil
|
||||
}
|
||||
|
||||
config, err := getConfig(ctx)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
alias, resolved = config.aliasCache.namedAliases[name]
|
||||
if resolved {
|
||||
return alias.value, alias.configOrigin, nil
|
||||
}
|
||||
return nil, "", nil
|
||||
}
|
||||
|
||||
// editShortNameAlias loads the aliases.conf file and changes it. If value is
|
||||
// set, it adds the name-value pair as a new alias. Otherwise, it will remove
|
||||
// name from the config.
|
||||
func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error {
|
||||
if err := validateShortName(name); err != nil {
|
||||
return err
|
||||
}
|
||||
if value != nil {
|
||||
if _, err := parseShortNameValue(*value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
confPath, lock, err := shortNameAliasesConfPathAndLock(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Acquire the lock as a writer to prevent data corruption.
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
||||
// Load the short-name-alias.conf, add the specified name-value pair,
|
||||
// and write it back to the file.
|
||||
conf, _, err := loadShortNameAliasConf(confPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if conf.Aliases == nil { // Ensure we have a map to update.
|
||||
conf.Aliases = make(map[string]string)
|
||||
}
|
||||
if value != nil {
|
||||
conf.Aliases[name] = *value
|
||||
} else {
|
||||
// If the name does not exist, throw an error.
|
||||
if _, exists := conf.Aliases[name]; !exists {
|
||||
return errors.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath)
|
||||
}
|
||||
|
||||
delete(conf.Aliases, name)
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(confPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
encoder := toml.NewEncoder(f)
|
||||
return encoder.Encode(conf)
|
||||
}
|
||||
|
||||
// AddShortNameAlias adds the specified name-value pair as a new alias to the
|
||||
// user-specific aliases.conf. It may override an existing alias for `name`.
|
||||
//
|
||||
// Note that it’s the caller’s responsibility to pass only a repository
|
||||
// (reference.IsNameOnly) as the short name.
|
||||
func AddShortNameAlias(ctx *types.SystemContext, name string, value string) error {
|
||||
return editShortNameAlias(ctx, name, &value)
|
||||
}
|
||||
|
||||
// RemoveShortNameAlias clears the alias for the specified name. It throws an
|
||||
// error in case name does not exist in the machine-generated
|
||||
// short-name-alias.conf. In such case, the alias must be specified in one of
|
||||
// the registries.conf files, which is the users' responsibility.
|
||||
//
|
||||
// Note that it’s the caller’s responsibility to pass only a repository
|
||||
// (reference.IsNameOnly) as the short name.
|
||||
func RemoveShortNameAlias(ctx *types.SystemContext, name string) error {
|
||||
return editShortNameAlias(ctx, name, nil)
|
||||
}
|
||||
|
||||
// parseShortNameValue parses the specified alias into a reference.Named. The alias is
|
||||
// expected to not be tagged or carry a digest and *must* include a
|
||||
// domain/registry.
|
||||
//
|
||||
// Note that the returned reference is always normalized.
|
||||
func parseShortNameValue(alias string) (reference.Named, error) {
|
||||
ref, err := reference.Parse(alias)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "parsing alias %q", alias)
|
||||
}
|
||||
|
||||
if _, ok := ref.(reference.Digested); ok {
|
||||
return nil, errors.Errorf("invalid alias %q: must not contain digest", alias)
|
||||
}
|
||||
|
||||
if _, ok := ref.(reference.Tagged); ok {
|
||||
return nil, errors.Errorf("invalid alias %q: must not contain tag", alias)
|
||||
}
|
||||
|
||||
named, ok := ref.(reference.Named)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias)
|
||||
}
|
||||
|
||||
registry := reference.Domain(named)
|
||||
if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
|
||||
return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias)
|
||||
}
|
||||
|
||||
// A final parse to make sure that docker.io references are correctly
|
||||
// normalized (e.g., docker.io/alpine to docker.io/library/alpine.
|
||||
named, err = reference.ParseNormalizedNamed(alias)
|
||||
return named, err
|
||||
}
|
||||
|
||||
// validateShortName parses the specified `name` of an alias (i.e., the left-hand
|
||||
// side) and checks if it's a short name and does not include a tag or digest.
|
||||
func validateShortName(name string) error {
|
||||
repo, err := reference.Parse(name)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cannot parse short name: %q", name)
|
||||
}
|
||||
|
||||
if _, ok := repo.(reference.Digested); ok {
|
||||
return errors.Errorf("invalid short name %q: must not contain digest", name)
|
||||
}
|
||||
|
||||
if _, ok := repo.(reference.Tagged); ok {
|
||||
return errors.Errorf("invalid short name %q: must not contain tag", name)
|
||||
}
|
||||
|
||||
named, ok := repo.(reference.Named)
|
||||
if !ok {
|
||||
return errors.Errorf("invalid short name %q: no name", name)
|
||||
}
|
||||
|
||||
registry := reference.Domain(named)
|
||||
if strings.ContainsAny(registry, ".:") || registry == "localhost" {
|
||||
return errors.Errorf("invalid short name %q: must not contain registry", name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// newShortNameAliasCache parses shortNameAliasConf and returns the corresponding internal
|
||||
// representation.
|
||||
func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAliasCache, error) {
|
||||
res := shortNameAliasCache{
|
||||
namedAliases: make(map[string]alias),
|
||||
}
|
||||
errs := []error{}
|
||||
for name, value := range conf.Aliases {
|
||||
if err := validateShortName(name); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
// Empty right-hand side values in config files allow to reset
|
||||
// an alias in a previously loaded config. This way, drop-in
|
||||
// config files from registries.conf.d can reset potentially
|
||||
// malconfigured aliases.
|
||||
if value == "" {
|
||||
res.namedAliases[name] = alias{nil, path}
|
||||
continue
|
||||
}
|
||||
|
||||
named, err := parseShortNameValue(value)
|
||||
if err != nil {
|
||||
// We want to report *all* malformed entries to avoid a
|
||||
// whack-a-mole for the user.
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
res.namedAliases[name] = alias{named, path}
|
||||
}
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
err := errs[0]
|
||||
for i := 1; i < len(errs); i++ {
|
||||
err = errors.Wrapf(err, "%v\n", errs[i])
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
// updateWithConfigurationFrom updates c with configuration from updates.
|
||||
// In case of conflict, updates is preferred.
|
||||
func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) {
|
||||
for name, value := range updates.namedAliases {
|
||||
c.namedAliases[name] = value
|
||||
}
|
||||
}
|
||||
|
||||
func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) {
|
||||
conf := shortNameAliasConf{}
|
||||
|
||||
_, err := toml.DecodeFile(confPath, &conf)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
// It's okay if the config doesn't exist. Other errors are not.
|
||||
return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath)
|
||||
}
|
||||
|
||||
// Even if we don’t always need the cache, doing so validates the machine-generated config. The
|
||||
// file could still be corrupted by another process or user.
|
||||
cache, err := newShortNameAliasCache(confPath, &conf)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath)
|
||||
}
|
||||
|
||||
return &conf, cache, nil
|
||||
}
|
||||
|
||||
func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, lockfile.Locker, error) {
|
||||
shortNameAliasesConfPath, err := shortNameAliasesConfPath(ctx)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
// Make sure the path to file exists.
|
||||
if err := os.MkdirAll(filepath.Dir(shortNameAliasesConfPath), 0700); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
lockPath := shortNameAliasesConfPath + ".lock"
|
||||
locker, err := lockfile.GetLockfile(lockPath)
|
||||
return shortNameAliasesConfPath, locker, err
|
||||
}
|
1000
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
generated
vendored
Normal file
1000
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
|
@ -147,7 +147,7 @@ type BlobInfo struct {
|
|||
}
|
||||
|
||||
// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
|
||||
// BlobInfocache.RecordKnownLocations / BlobInfocache.CandidateLocations record data aboud blobs keyed by (scope, digest).
|
||||
// BlobInfocache.RecordKnownLocations / BlobInfocache.CandidateLocations record data about blobs keyed by (scope, digest).
|
||||
// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
|
||||
//
|
||||
// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
|
||||
|
@ -179,7 +179,7 @@ type BICReplacementCandidate struct {
|
|||
// It records two kinds of data:
|
||||
// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
|
||||
// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
|
||||
// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompresssion),
|
||||
// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
|
||||
// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload)/
|
||||
//
|
||||
// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
|
||||
|
@ -219,7 +219,7 @@ type BlobInfoCache interface {
|
|||
// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
|
||||
// within the specified (transport scope) (if they still exist, which is not guaranteed).
|
||||
//
|
||||
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
|
||||
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
|
||||
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
|
||||
// uncompressed digest.
|
||||
CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
|
||||
|
@ -299,7 +299,7 @@ type ImageDestination interface {
|
|||
IgnoresEmbeddedDockerReference() bool
|
||||
|
||||
// PutBlob writes contents of stream and returns data representing the result.
|
||||
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
|
||||
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
|
||||
// inputInfo.Size is the expected length of stream, if known.
|
||||
// inputInfo.MediaType describes the blob format, if known.
|
||||
// May update cache.
|
||||
|
@ -334,6 +334,9 @@ type ImageDestination interface {
|
|||
// MUST be called after PutManifest (signatures may reference manifest contents).
|
||||
PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error
|
||||
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
|
||||
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
|
||||
// original manifest list digest, if desired.
|
||||
// WARNING: This does not have any transactional semantics:
|
||||
// - Uploaded data MAY be visible to others before Commit() is called
|
||||
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||
|
@ -558,6 +561,11 @@ type SystemContext struct {
|
|||
UserShortNameAliasConfPath string
|
||||
// If set, short-name resolution in pkg/shortnames must follow the specified mode
|
||||
ShortNameMode *ShortNameMode
|
||||
// If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and
|
||||
// short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce
|
||||
// resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this
|
||||
// specific context.
|
||||
PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool
|
||||
// If not "", overrides the default path for the authentication file, but only new format files
|
||||
AuthFilePath string
|
||||
// if not "", overrides the default path for the authentication file, but with the legacy format;
|
||||
|
@ -582,7 +590,7 @@ type SystemContext struct {
|
|||
|
||||
// === OCI.Transport overrides ===
|
||||
// If not "", a directory containing a CA certificate (ending with ".crt"),
|
||||
// a client certificate (ending with ".cert") and a client ceritificate key
|
||||
// a client certificate (ending with ".cert") and a client certificate key
|
||||
// (ending with ".key") used when downloading OCI image layers.
|
||||
OCICertPath string
|
||||
// Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
|
||||
|
@ -594,13 +602,13 @@ type SystemContext struct {
|
|||
|
||||
// === docker.Transport overrides ===
|
||||
// If not "", a directory containing a CA certificate (ending with ".crt"),
|
||||
// a client certificate (ending with ".cert") and a client ceritificate key
|
||||
// (ending with ".key") used when talking to a Docker Registry.
|
||||
// a client certificate (ending with ".cert") and a client certificate key
|
||||
// (ending with ".key") used when talking to a container registry.
|
||||
DockerCertPath string
|
||||
// If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
|
||||
// Ignored if DockerCertPath is non-empty.
|
||||
DockerPerHostCertDirPath string
|
||||
// Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
|
||||
// Allow contacting container registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
|
||||
DockerInsecureSkipTLSVerify OptionalBool
|
||||
// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
|
||||
// Ignored if DockerBearerRegistryToken is non-empty.
|
||||
|
@ -619,6 +627,10 @@ type SystemContext struct {
|
|||
DockerLogMirrorChoice bool
|
||||
// Directory to use for OSTree temporary files
|
||||
OSTreeTmpDirPath string
|
||||
// If true, all blobs will have precomputed digests to ensure layers are not uploaded that already exist on the registry.
|
||||
// Note that this requires writing blobs to temporary files, and takes more time than the default behavior,
|
||||
// when the digest for a blob is unknown.
|
||||
DockerRegistryPushPrecomputeDigests bool
|
||||
|
||||
// === docker/daemon.Transport overrides ===
|
||||
// A directory containing a CA certificate (ending with ".crt"),
|
||||
|
@ -633,6 +645,8 @@ type SystemContext struct {
|
|||
// === dir.Transport overrides ===
|
||||
// DirForceCompress compresses the image layers if set to true
|
||||
DirForceCompress bool
|
||||
// DirForceDecompress decompresses the image layers if set to true
|
||||
DirForceDecompress bool
|
||||
|
||||
// CompressionFormat is the format to use for the compression of the blobs
|
||||
CompressionFormat *compression.Algorithm
|
||||
|
|
|
@ -703,6 +703,7 @@ Joost Cassee <joost@cassee.net>
|
|||
Jordan <jjn2009@users.noreply.github.com>
|
||||
Jordan Arentsen <blissdev@gmail.com>
|
||||
Jordan Sissel <jls@semicomplete.com>
|
||||
Jordan Williams <jordan@jwillikers.com>
|
||||
Jose Diaz-Gonzalez <josegonzalez@users.noreply.github.com>
|
||||
Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
|
||||
Joseph Hager <ajhager@gmail.com>
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue