Compare commits

master..v0.26.0-rc1

No commits in common. "master" and "v0.26.0-rc1" have entirely different histories.

207 changed files with 2806 additions and 16339 deletions
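For reference, the same comparison can be reproduced with plain git; a minimal sketch, assuming a clone that has both refs available locally (the ref names come from this page, the path filter is only an example):

# summary line: files changed, insertions, deletions
$ git diff --stat master v0.26.0-rc1
# a two-dot diff is used on purpose: the refs share no merge base, so a three-dot (merge-base) diff is not meaningful
$ git diff master v0.26.0-rc1 -- .github/workflows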

View File

@ -121,7 +121,7 @@ jobs:
fi
-
name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
-
@ -191,10 +191,10 @@ jobs:
git config --global core.eol lf
-
name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
-
name: Set up Go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
go-version: "${{ env.GO_VERSION }}"
-
@ -274,7 +274,7 @@ jobs:
echo "GO_VERSION=$goVersion" >> $GITHUB_ENV
-
name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
-
name: Cache Vagrant boxes
uses: actions/cache@v4
@ -353,7 +353,7 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
-
name: Create matrix
id: platforms
@ -380,7 +380,7 @@ jobs:
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
-
name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
@ -425,7 +425,7 @@ jobs:
swap-storage: true
-
name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
@ -513,10 +513,10 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
-
name: Download binaries
uses: actions/download-artifact@v5
uses: actions/download-artifact@v4
with:
path: ${{ env.DESTDIR }}
pattern: buildx-*

View File

@ -29,10 +29,10 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
-
name: Set up Go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
-

View File

@ -33,7 +33,7 @@ jobs:
steps:
-
name: Checkout docs repo
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
repository: docker/docs

View File

@ -111,14 +111,14 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
if: matrix.driver == 'docker' || matrix.driver == 'docker-container'
-
name: Install buildx
uses: actions/download-artifact@v5
uses: actions/download-artifact@v4
with:
name: binary
path: /home/runner/.docker/cli-plugins
@ -214,7 +214,7 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
-
name: Expose GitHub Runtime
uses: crazy-max/ghaction-github-runtime@v3
@ -230,7 +230,7 @@ jobs:
uses: docker/setup-qemu-action@v3
-
name: Install buildx
uses: actions/download-artifact@v5
uses: actions/download-artifact@v4
with:
name: binary
path: /home/runner/.docker/cli-plugins

View File

@ -27,6 +27,6 @@ jobs:
steps:
-
name: Run
uses: actions/labeler@v6
uses: actions/labeler@v5
with:
sync-labels: true

View File

@ -33,20 +33,51 @@ jobs:
prepare:
runs-on: ubuntu-24.04
outputs:
includes: ${{ steps.generate.outputs.matrix }}
includes: ${{ steps.matrix.outputs.includes }}
steps:
-
name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
-
name: Generate matrix
id: generate
uses: docker/bake-action/subaction/matrix@v6
name: Matrix
id: matrix
uses: actions/github-script@v7
with:
target: validate
fields: platforms
env:
GOLANGCI_LINT_MULTIPLATFORM: ${{ github.repository == 'docker/buildx' && '1' || '' }}
script: |
let def = {};
await core.group(`Parsing definition`, async () => {
const printEnv = Object.assign({}, process.env, {
GOLANGCI_LINT_MULTIPLATFORM: process.env.GITHUB_REPOSITORY === 'docker/buildx' ? '1' : ''
});
const resPrint = await exec.getExecOutput('docker', ['buildx', 'bake', 'validate', '--print'], {
ignoreReturnCode: true,
env: printEnv
});
if (resPrint.stderr.length > 0 && resPrint.exitCode != 0) {
throw new Error(resPrint.stderr);
}
def = JSON.parse(resPrint.stdout.trim());
});
await core.group(`Generating matrix`, async () => {
const includes = [];
for (const targetName of Object.keys(def.target)) {
const target = def.target[targetName];
if (target.platforms && target.platforms.length > 0) {
target.platforms.forEach(platform => {
includes.push({
target: targetName,
platform: platform
});
});
} else {
includes.push({
target: targetName
});
}
}
core.info(JSON.stringify(includes, null, 2));
core.setOutput('includes', JSON.stringify(includes));
});
validate:
runs-on: ubuntu-24.04
@ -57,6 +88,12 @@ jobs:
matrix:
include: ${{ fromJson(needs.prepare.outputs.includes) }}
steps:
-
name: Prepare
run: |
if [ "$GITHUB_REPOSITORY" = "docker/buildx" ]; then
echo "GOLANGCI_LINT_MULTIPLATFORM=1" >> $GITHUB_ENV
fi
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
@ -70,4 +107,4 @@ jobs:
with:
targets: ${{ matrix.target }}
set: |
*.platform=${{ matrix.platforms }}
*.platform=${{ matrix.platform }}
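The matrix step shown above replaces the docker/bake-action matrix subaction with an inline github-script that runs `docker buildx bake validate --print` and expands each target into one entry per platform. A rough command-line equivalent of that expansion, offered only as a sketch and assuming jq is installed (jq is not part of the workflow itself):

$ GOLANGCI_LINT_MULTIPLATFORM=1 docker buildx bake validate --print \
    | jq -c '[ .target | to_entries[]
               | if (.value.platforms | length) > 0
                 then .value.platforms[] as $p | {target: .key, platform: $p}
                 else {target: .key}
                 end ]'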

View File

@ -5,14 +5,13 @@ ARG ALPINE_VERSION=3.22
ARG XX_VERSION=1.6.1
# for testing
ARG DOCKER_VERSION=28.4
ARG DOCKER_VERSION=28.3.0
ARG DOCKER_VERSION_ALT_27=27.5.1
ARG DOCKER_VERSION_ALT_26=26.1.3
ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
ARG GOTESTSUM_VERSION=v1.12.0
ARG REGISTRY_VERSION=3.0.0
ARG BUILDKIT_VERSION=v0.23.2
ARG COMPOSE_VERSION=v2.39.1
ARG UNDOCK_VERSION=0.9.0
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
@ -25,7 +24,6 @@ FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_27 AS docker-cli-alt27
FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_26 AS docker-cli-alt26
FROM registry:$REGISTRY_VERSION AS registry
FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
FROM docker/compose-bin:$COMPOSE_VERSION AS compose
FROM crazymax/undock:$UNDOCK_VERSION AS undock
FROM golatest AS gobase
@ -139,10 +137,8 @@ COPY --link --from=docker-cli-alt27 / /opt/docker-alt-27/
COPY --link --from=docker-cli-alt26 / /opt/docker-alt-26/
COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/
COPY --link --from=buildkit /usr/bin/buildctl /usr/bin/
COPY --link --from=compose /docker-compose /usr/bin/compose
COPY --link --from=undock /usr/local/bin/undock /usr/bin/
COPY --link --from=binaries /buildx /usr/bin/
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
ENV TEST_DOCKER_EXTRA="docker@27.5=/opt/docker-alt-27,docker@26.1=/opt/docker-alt-26"
FROM integration-test-base AS integration-test

View File

@ -676,7 +676,7 @@ func (c Config) ResolveTarget(name string, overrides map[string]map[string]Overr
s := "."
t.Context = &s
}
if t.Dockerfile == nil || (t.Dockerfile != nil && *t.Dockerfile == "") {
if t.Dockerfile == nil {
s := "Dockerfile"
t.Dockerfile = &s
}
@ -1257,8 +1257,9 @@ func (t *Target) GetName(ectx *hcl.EvalContext, block *hcl.Block, loadDeps func(
func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
// make sure local credentials are loaded multiple times for different targets
dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
authProvider := authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
ConfigFile: config.LoadDefaultConfigFile(os.Stderr),
ConfigFile: dockerConfig,
})
m2 := make(map[string]build.Options, len(m))
@ -1544,12 +1545,12 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
return nil, err
}
bo.Annotations, err = buildflags.ParseAnnotations(t.Annotations)
annotations, err := buildflags.ParseAnnotations(t.Annotations)
if err != nil {
return nil, err
}
for _, e := range bo.Exports {
for k, v := range bo.Annotations {
for k, v := range annotations {
e.Attrs[k.String()] = v
}
}

View File

@ -2248,23 +2248,6 @@ target "app" {
require.Len(t, m["app"].Outputs, 0)
}
func TestEmptyDockerfile(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
Data: []byte(`
target "app" {
dockerfile = ""
}
`),
}
ctx := context.TODO()
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
require.NoError(t, err)
require.Contains(t, m, "app")
require.Equal(t, "Dockerfile", *m["app"].Dockerfile)
}
// https://github.com/docker/buildx/issues/2859
func TestGroupTargetsWithDefault(t *testing.T) {
t.Run("OnTarget", func(t *testing.T) {

View File

@ -17,7 +17,7 @@ import (
dockeropts "github.com/docker/cli/opts"
"github.com/docker/go-units"
"github.com/pkg/errors"
"go.yaml.in/yaml/v3"
"gopkg.in/yaml.v3"
)
func ParseComposeFiles(fs []File) (*Config, error) {
@ -76,7 +76,13 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
var additionalContexts map[string]string
if s.Build.AdditionalContexts != nil {
additionalContexts = composeToBuildkitNamedContexts(s.Build.AdditionalContexts)
additionalContexts = map[string]string{}
for k, v := range s.Build.AdditionalContexts {
if strings.HasPrefix(v, "service:") {
v = strings.Replace(v, "service:", "target:", 1)
}
additionalContexts[k] = v
}
}
var shmSize *string
@ -145,28 +151,6 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
return nil, err
}
var inAttests []string
if s.Build.SBOM != "" {
inAttests = append(inAttests, buildflags.CanonicalizeAttest("sbom", s.Build.SBOM))
}
if s.Build.Provenance != "" {
inAttests = append(inAttests, buildflags.CanonicalizeAttest("provenance", s.Build.Provenance))
}
attests, err := buildflags.ParseAttests(inAttests)
if err != nil {
return nil, err
}
var noCache *bool
if s.Build.NoCache {
noCache = &s.Build.NoCache
}
var pull *bool
if s.Build.Pull {
pull = &s.Build.Pull
}
g.Targets = append(g.Targets, targetName)
t := &Target{
Name: targetName,
@ -192,9 +176,6 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
ShmSize: shmSize,
Ulimits: ulimits,
ExtraHosts: extraHosts,
Attest: attests,
NoCache: noCache,
Pull: pull,
}
if err = t.composeExtTarget(s.Build.Extensions); err != nil {
return nil, err
@ -261,9 +242,6 @@ func loadComposeFiles(cfgs []composetypes.ConfigFile, envs map[string]string, op
filtered[key] = v
}
}
if len(filtered) == 0 {
return nil, errors.New("empty compose file")
}
if err := composeschema.Validate(filtered); err != nil {
return nil, err
@ -281,7 +259,7 @@ func loadComposeFiles(cfgs []composetypes.ConfigFile, envs map[string]string, op
func validateComposeFile(dt []byte, fn string) (bool, error) {
envs, err := composeEnv()
if err != nil {
return false, err
return true, err
}
fnl := strings.ToLower(fn)
if strings.HasSuffix(fnl, ".yml") || strings.HasSuffix(fnl, ".yaml") {
@ -477,7 +455,7 @@ func (t *Target) composeExtTarget(exts map[string]any) error {
t.NoCacheFilter = dedupSlice(append(t.NoCacheFilter, xb.NoCacheFilter...))
}
if len(xb.Contexts) > 0 {
t.Contexts = dedupMap(t.Contexts, composeToBuildkitNamedContexts(xb.Contexts))
t.Contexts = dedupMap(t.Contexts, xb.Contexts)
}
return nil
@ -512,16 +490,3 @@ func composeToBuildkitSSH(sshKey composetypes.SSHKey) *buildflags.SSH {
}
return bkssh
}
func composeToBuildkitNamedContexts(m map[string]string) map[string]string {
out := make(map[string]string, len(m))
for k, v := range m {
if strings.HasPrefix(v, "service:") || strings.HasPrefix(v, "target:") {
if parts := strings.SplitN(v, ":", 2); len(parts) == 2 {
v = "target:" + sanitizeTargetName(parts[1])
}
}
out[k] = v
}
return out
}

View File

@ -611,7 +611,6 @@ func TestValidateComposeFile(t *testing.T) {
fn string
dt []byte
isCompose bool
wantErr bool
}{
{
name: "empty service",
@ -621,7 +620,6 @@ services:
foo:
`),
isCompose: true,
wantErr: false,
},
{
name: "build",
@ -632,7 +630,6 @@ services:
build: .
`),
isCompose: true,
wantErr: false,
},
{
name: "image",
@ -643,7 +640,6 @@ services:
image: nginx
`),
isCompose: true,
wantErr: false,
},
{
name: "unknown ext",
@ -654,7 +650,6 @@ services:
image: nginx
`),
isCompose: true,
wantErr: false,
},
{
name: "hcl",
@ -665,64 +660,13 @@ target "default" {
}
`),
isCompose: false,
wantErr: false,
},
{
name: "json",
fn: "docker-bake.json",
dt: []byte(`
{
"group": [
{
"targets": [
"my-service"
]
}
],
"target": [
{
"context": ".",
"dockerfile": "Dockerfile"
}
]
}
`),
isCompose: false,
wantErr: false,
},
{
name: "json unknown ext",
fn: "docker-bake.foo",
dt: []byte(`
{
"group": [
{
"targets": [
"my-service"
]
}
],
"target": [
{
"context": ".",
"dockerfile": "Dockerfile"
}
]
}
`),
isCompose: false,
wantErr: true,
},
}
for _, tt := range cases {
t.Run(tt.name, func(t *testing.T) {
isCompose, err := validateComposeFile(tt.dt, tt.fn)
assert.Equal(t, tt.isCompose, isCompose)
if tt.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
require.NoError(t, err)
})
}
}
@ -893,44 +837,6 @@ services:
require.Equal(t, map[string]string{"base": "target:base"}, c.Targets[1].Contexts)
}
func TestServiceContextDot(t *testing.T) {
dt := []byte(`
services:
base.1:
build:
dockerfile: baseapp.Dockerfile
command: ./entrypoint.sh
foo.1:
build:
dockerfile: fooapp.Dockerfile
command: ./entrypoint.sh
webapp:
build:
context: ./dir
additional_contexts:
base: service:base.1
x-bake:
contexts:
foo: target:foo.1
`)
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(c.Groups))
require.Equal(t, "default", c.Groups[0].Name)
sort.Strings(c.Groups[0].Targets)
require.Equal(t, []string{"base_1", "foo_1", "webapp"}, c.Groups[0].Targets)
require.Equal(t, 3, len(c.Targets))
sort.Slice(c.Targets, func(i, j int) bool {
return c.Targets[i].Name < c.Targets[j].Name
})
require.Equal(t, "webapp", c.Targets[2].Name)
require.Equal(t, map[string]string{"base": "target:base_1", "foo": "target:foo_1"}, c.Targets[2].Contexts)
}
func TestDotEnvDir(t *testing.T) {
tmpdir := t.TempDir()
require.NoError(t, os.Mkdir(filepath.Join(tmpdir, ".env"), 0755))
@ -1007,108 +913,6 @@ services:
require.ErrorContains(t, err, `additional properties 'foo' not allowed`)
}
func TestEmptyComposeFile(t *testing.T) {
tmpdir := t.TempDir()
chdir(t, tmpdir)
_, err := ParseComposeFiles([]File{{Name: "compose.yml", Data: []byte(``)}})
require.Error(t, err)
require.ErrorContains(t, err, `empty compose file`) // https://github.com/compose-spec/compose-go/blob/a42e7579d813e64c0c1f598a666358bc0c0a0eb4/loader/loader.go#L542
}
func TestParseComposeAttests(t *testing.T) {
dt := []byte(`
services:
app:
build:
context: .
sbom: true
provenance: mode=max
`)
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
target := c.Targets[0]
require.Equal(t, "app", target.Name)
require.NotNil(t, target.Attest)
require.Len(t, target.Attest, 2)
attestMap := target.Attest.ToMap()
require.Contains(t, attestMap, "sbom")
require.Contains(t, attestMap, "provenance")
// Check the actual content - sbom=true should result in disabled=false (not disabled)
require.Equal(t, "type=sbom", *attestMap["sbom"])
require.Equal(t, "type=provenance,mode=max", *attestMap["provenance"])
}
func TestParseComposeAttestsDisabled(t *testing.T) {
dt := []byte(`
services:
app:
build:
context: .
sbom: false
provenance: false
`)
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
target := c.Targets[0]
require.Equal(t, "app", target.Name)
require.NotNil(t, target.Attest)
require.Len(t, target.Attest, 2)
attestMap := target.Attest.ToMap()
require.Contains(t, attestMap, "sbom")
require.Contains(t, attestMap, "provenance")
// When disabled=true, the value should be nil
require.Nil(t, attestMap["sbom"])
require.Nil(t, attestMap["provenance"])
}
func TestParseComposePull(t *testing.T) {
dt := []byte(`
services:
app:
build:
context: .
pull: true
`)
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
target := c.Targets[0]
require.Equal(t, "app", target.Name)
require.Equal(t, true, *target.Pull)
}
func TestParseComposeNoCache(t *testing.T) {
dt := []byte(`
services:
app:
build:
context: .
no_cache: true
`)
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
target := c.Targets[0]
require.Equal(t, "app", target.Name)
require.Equal(t, true, *target.NoCache)
}
// chdir changes the current working directory to the named directory,
// and then restore the original working directory at the end of the test.
func chdir(t *testing.T, dir string) {

View File

@ -2,15 +2,10 @@ package hclparser
import (
"errors"
"os"
"os/user"
"path"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/docker/cli/cli/config"
"github.com/hashicorp/go-cty-funcs/cidr"
"github.com/hashicorp/go-cty-funcs/crypto"
"github.com/hashicorp/go-cty-funcs/encoding"
@ -67,7 +62,6 @@ var stdlibFunctions = []funcDef{
{name: "greaterthan", fn: stdlib.GreaterThanFunc},
{name: "greaterthanorequalto", fn: stdlib.GreaterThanOrEqualToFunc},
{name: "hasindex", fn: stdlib.HasIndexFunc},
{name: "homedir", factory: homedirFunc},
{name: "indent", fn: stdlib.IndentFunc},
{name: "index", fn: stdlib.IndexFunc},
{name: "indexof", factory: indexOfFunc},
@ -260,27 +254,6 @@ func timestampFunc() function.Function {
})
}
// homedirFunc constructs a function that returns the current user's home directory.
func homedirFunc() function.Function {
return function.New(&function.Spec{
Description: `Returns the current user's home directory.`,
Params: []function.Parameter{},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
home, err := os.UserHomeDir()
if err != nil {
if home == "" && runtime.GOOS != "windows" {
if u, err := user.Current(); err == nil {
return cty.StringVal(u.HomeDir), nil
}
}
return cty.StringVal(filepath.Dir(config.Dir())), nil
}
return cty.StringVal(home), nil
},
})
}
func Stdlib() map[string]function.Function {
funcs := make(map[string]function.Function, len(stdlibFunctions))
for _, v := range stdlibFunctions {

View File

@ -1,7 +1,6 @@
package hclparser
import (
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
@ -198,10 +197,3 @@ func TestSanitize(t *testing.T) {
})
}
}
func TestHomedir(t *testing.T) {
home, err := homedirFunc().Call(nil)
require.NoError(t, err)
require.NotEmpty(t, home.AsString())
require.True(t, filepath.IsAbs(home.AsString()))
}

View File

@ -32,12 +32,8 @@ func ReadRemoteFiles(ctx context.Context, nodes []builder.Node, url string, name
var sessions []session.Attachable
var filename string
keepGitDir := false
st, ok, err := dockerui.DetectGitContext(url, &keepGitDir)
st, ok := dockerui.DetectGitContext(url, false)
if ok {
if err != nil {
return nil, nil, err
}
if ssh, err := build.CreateSSH([]*buildflags.SSH{{
ID: "default",
Paths: strings.Split(os.Getenv("BUILDX_BAKE_GIT_SSH"), ","),
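The ReadRemoteFiles hunk above still forwards an SSH agent (taken from BUILDX_BAKE_GIT_SSH) when the bake definition itself lives in a Git repository. A hedged usage sketch; the repository URL is purely illustrative:

# expose the local SSH agent so a private remote bake definition can be fetched
$ BUILDX_BAKE_GIT_SSH=$SSH_AUTH_SOCK docker buildx bake "git@github.com:acme/private-infra.git#main" --print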

View File

@ -93,7 +93,6 @@ type Options struct {
ProvenanceResponseMode confutil.MetadataProvenanceMode
SourcePolicy *spb.Policy
GroupRef string
Annotations map[exptypes.AnnotationKey]string // Not used during build, annotations are already set in Exports. Just used to check for support with drivers.
}
type CallFunc struct {

View File

@ -28,7 +28,6 @@ import (
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/ociindex"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
gateway "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
@ -38,7 +37,6 @@ import (
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/entitlements"
"github.com/moby/buildkit/util/gitutil"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
@ -118,13 +116,6 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *O
so.FrontendAttrs["cgroup-parent"] = opt.CgroupParent
}
if v, ok := opt.BuildArgs["BUILDKIT_SYNTAX"]; ok {
p := strings.SplitN(strings.TrimSpace(v), " ", 2)
so.Frontend = "gateway.v0"
so.FrontendAttrs["source"] = p[0]
so.FrontendAttrs["cmdline"] = v
}
if v, ok := opt.BuildArgs["BUILDKIT_MULTI_PLATFORM"]; ok {
if v, _ := strconv.ParseBool(v); v {
so.FrontendAttrs["multi-platform"] = "true"
@ -189,20 +180,6 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *O
}
}
// check if index annotations are supported by docker driver
if len(opt.Exports) > 0 && opt.CallFunc == nil && len(opt.Annotations) > 0 && nodeDriver.IsMobyDriver() && !nodeDriver.Features(ctx)[driver.MultiPlatform] {
for _, exp := range opt.Exports {
if exp.Type == "image" || exp.Type == "docker" {
for ak := range opt.Annotations {
switch ak.Type {
case exptypes.AnnotationIndex, exptypes.AnnotationIndexDescriptor:
return nil, nil, errors.New("index annotations not supported for single platform export")
}
}
}
}
}
// fill in image exporter names from tags
if len(opt.Tags) > 0 {
tags := make([]string, len(opt.Tags))
@ -405,7 +382,6 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp *Inputs, pw pro
dockerfileName = inp.DockerfilePath
dockerfileSrcName = inp.DockerfilePath
toRemove []string
caps = map[string]struct{}{}
)
switch {
@ -471,12 +447,6 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp *Inputs, pw pro
target.FrontendAttrs["dockerfilekey"] = "dockerfile"
}
target.FrontendAttrs["context"] = inp.ContextPath
gitRef, err := gitutil.ParseURL(inp.ContextPath)
if err == nil && len(gitRef.Query) > 0 {
caps["moby.buildkit.frontend.gitquerystring"] = struct{}{}
}
default:
return nil, errors.Errorf("unable to prepare context: path %q not found", inp.ContextPath)
}
@ -524,7 +494,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp *Inputs, pw pro
target.FrontendAttrs["filename"] = dockerfileName
for k, v := range inp.NamedContexts {
caps["moby.buildkit.frontend.contexts+forward"] = struct{}{}
target.FrontendAttrs["frontend.caps"] = "moby.buildkit.frontend.contexts+forward"
if v.State != nil {
target.FrontendAttrs["context:"+k] = "input:" + k
if target.FrontendInputs == nil {
@ -536,12 +506,6 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp *Inputs, pw pro
if IsRemoteURL(v.Path) || strings.HasPrefix(v.Path, "docker-image://") || strings.HasPrefix(v.Path, "target:") {
target.FrontendAttrs["context:"+k] = v.Path
gitRef, err := gitutil.ParseURL(v.Path)
if err == nil && len(gitRef.Query) > 0 {
if _, ok := caps["moby.buildkit.frontend.gitquerystring"]; !ok {
caps["moby.buildkit.frontend.gitquerystring+forward"] = struct{}{}
}
}
continue
}
@ -571,7 +535,6 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp *Inputs, pw pro
target.FrontendAttrs["context:"+k] = "oci-layout://" + storeName + ":" + tag + "@" + dig
continue
}
st, err := os.Stat(v.Path)
if err != nil {
return nil, errors.Wrapf(err, "failed to get build context %v", k)
@ -595,12 +558,6 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp *Inputs, pw pro
}
}
if len(caps) > 0 {
keys := slices.Collect(maps.Keys(caps))
slices.Sort(keys)
target.FrontendAttrs["frontend.caps"] = strings.Join(keys, ",")
}
inp.DockerfileMappingSrc = dockerfileSrcName
inp.DockerfileMappingDst = dockerfileName
return release, nil
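One of the blocks removed above handled the BUILDKIT_SYNTAX build argument, which switches the build to the gateway frontend with the given frontend image. For context, a short usage sketch; the frontend tag is only an example:

# force a specific Dockerfile frontend for this build
$ docker buildx build --build-arg BUILDKIT_SYNTAX=docker/dockerfile:1 .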

View File

@ -1,15 +1,10 @@
package build
import (
"cmp"
"context"
_ "crypto/sha256" // ensure digests can be computed
"encoding/json"
"io"
iofs "io/fs"
"path/filepath"
"slices"
"strings"
"sync"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
@ -19,7 +14,6 @@ import (
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/tonistiigi/fsutil/types"
)
// NewResultHandle stores a gateway client, gateway reference, and the error from
@ -81,40 +75,6 @@ func (r *ResultHandle) NewContainer(ctx context.Context, cfg *InvokeConfig) (gat
return r.gwClient.NewContainer(ctx, req)
}
func (r *ResultHandle) StatFile(ctx context.Context, fpath string, cfg *InvokeConfig) (*types.Stat, error) {
containerCfg, err := r.getContainerConfig(cfg)
if err != nil {
return nil, err
}
candidateMounts := make([]gateway.Mount, 0, len(containerCfg.Mounts))
for _, m := range containerCfg.Mounts {
if strings.HasPrefix(fpath, m.Dest) {
candidateMounts = append(candidateMounts, m)
}
}
if len(candidateMounts) == 0 {
return nil, iofs.ErrNotExist
}
slices.SortFunc(candidateMounts, func(a, b gateway.Mount) int {
return cmp.Compare(len(a.Dest), len(b.Dest))
})
m := candidateMounts[len(candidateMounts)-1]
relpath, err := filepath.Rel(m.Dest, fpath)
if err != nil {
return nil, err
}
if m.Ref == nil {
return nil, iofs.ErrNotExist
}
req := gateway.StatRequest{Path: filepath.ToSlash(relpath)}
return m.Ref.StatFile(ctx, req)
}
func (r *ResultHandle) getContainerConfig(cfg *InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
if r.ref != nil && r.solveErr == nil {
logrus.Debugf("creating container from successful build")

View File

@ -11,7 +11,7 @@ import (
"github.com/docker/buildx/driver"
"github.com/docker/cli/opts"
"github.com/moby/buildkit/frontend/dockerfile/dfgitutil"
"github.com/moby/buildkit/util/gitutil"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -36,7 +36,7 @@ func IsRemoteURL(c string) bool {
if isHTTPURL(c) {
return true
}
if _, ok, _ := dfgitutil.ParseGitRef(c); ok {
if _, err := gitutil.ParseGitRef(c); err == nil {
return true
}
return false

View File

@ -122,7 +122,6 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
Name: driver.BuilderName(n.Name),
EndpointAddr: n.Endpoint,
DockerAPI: dockerapi,
DockerContext: b.opts.dockerCli.CurrentContext(),
ContextStore: b.opts.dockerCli.ContextStore(),
BuildkitdFlags: n.BuildkitdFlags,
Files: n.Files,

View File

@ -492,8 +492,7 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
// Other common flags (noCache, pull and progress) are processed in runBake function.
return runBake(cmd.Context(), dockerCli, args, options, cFlags, filesFromEnv)
},
ValidArgsFunction: completion.BakeTargets(options.files),
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.BakeTargets(options.files),
}
flags := cmd.Flags()

View File

@ -490,7 +490,6 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugger debuggerOpt
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return nil, cobra.ShellCompDirectiveFilterDirs
},
DisableFlagsInUseLine: true,
}
var platformsDefault []string
@ -692,11 +691,6 @@ func wrapBuildError(err error, bake bool) error {
msg += " Named contexts are supported since Dockerfile v1.4. Use #syntax directive in Dockerfile or update to latest BuildKit."
return &wrapped{err, msg}
}
if st.Code() == codes.Unimplemented && strings.Contains(st.Message(), "unsupported frontend capability moby.buildkit.frontend.gitquerystring") {
msg := "current frontend does not support Git URLs with query string components."
msg += " Git URLs with query string are supported since Dockerfile v1.18 and BuildKit v0.24. Use BUILDKIT_SYNTAX build-arg, #syntax directive in Dockerfile or update to latest BuildKit."
return &wrapped{err, msg}
}
}
return err
}
@ -1009,8 +1003,9 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in *BuildOptions, inSt
}
opts.Platforms = platforms
dockerConfig := dockerCli.ConfigFile()
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
ConfigFile: dockerCli.ConfigFile(),
ConfigFile: dockerConfig,
}))
secrets, err := build.CreateSecrets(in.Secrets)
@ -1068,13 +1063,13 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in *BuildOptions, inSt
}
}
opts.Annotations, err = buildflags.ParseAnnotations(in.Annotations)
annotations, err := buildflags.ParseAnnotations(in.Annotations)
if err != nil {
return nil, nil, errors.Wrap(err, "parse annotations")
}
for _, o := range outputs {
for k, v := range opts.Annotations {
for k, v := range annotations {
o.Attrs[k.String()] = v
}
}
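The annotations hunk above parses --annotation values and copies them onto the attributes of every output. A hedged invocation sketch; the registry name is illustrative:

$ docker buildx build \
    --annotation "org.opencontainers.image.revision=$(git rev-parse HEAD)" \
    --output type=image,name=registry.example.com/app:dev,push=true .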

View File

@ -98,8 +98,7 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
return runCreate(cmd.Context(), dockerCli, options, args)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()

View File

@ -23,8 +23,6 @@ func dapCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
cmd := &cobra.Command{
Use: "dap",
Short: "Start debug adapter protocol compatible debugger",
DisableFlagsInUseLine: true,
}
cobrautil.MarkCommandExperimental(cmd)
@ -118,7 +116,6 @@ func dapAttachCmd() *cobra.Command {
}
return nil
},
DisableFlagsInUseLine: true,
}
return cmd
}

View File

@ -44,8 +44,6 @@ func debugCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
cmd := &cobra.Command{
Use: "debug",
Short: "Start debugger",
DisableFlagsInUseLine: true,
}
cobrautil.MarkCommandExperimental(cmd)

View File

@ -122,7 +122,6 @@ func dialStdioCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
opts.builder = rootOpts.builder
return runDialStdio(dockerCli, opts)
},
DisableFlagsInUseLine: true,
}
flags := cmd.Flags()

View File

@ -4,6 +4,8 @@ import (
"context"
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"time"
@ -11,77 +13,20 @@ import (
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/cli/opts"
"github.com/docker/go-units"
"github.com/moby/buildkit/client"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
const (
duIDHeader = "ID"
duParentsHeader = "PARENTS"
duCreatedAtHeader = "CREATED AT"
duMutableHeader = "MUTABLE"
duReclaimHeader = "RECLAIMABLE"
duSharedHeader = "SHARED"
duSizeHeader = "SIZE"
duDescriptionHeader = "DESCRIPTION"
duUsageHeader = "USAGE COUNT"
duLastUsedAtHeader = "LAST ACCESSED"
duTypeHeader = "TYPE"
duDefaultTableFormat = "table {{.ID}}\t{{.Reclaimable}}\t{{.Size}}\t{{.LastUsedAt}}"
duDefaultPrettyTemplate = `ID: {{.ID}}
{{- if .Parents }}
Parents:
{{- range .Parents }}
- {{.}}
{{- end }}
{{- end }}
Created at: {{.CreatedAt}}
Mutable: {{.Mutable}}
Reclaimable: {{.Reclaimable}}
Shared: {{.Shared}}
Size: {{.Size}}
{{- if .Description}}
Description: {{ .Description }}
{{- end }}
Usage count: {{.UsageCount}}
{{- if .LastUsedAt}}
Last used: {{ .LastUsedAt }}
{{- end }}
{{- if .Type}}
Type: {{ .Type }}
{{- end }}
`
)
type duOptions struct {
builder string
filter opts.FilterOpt
verbose bool
format string
}
func runDiskUsage(ctx context.Context, dockerCli command.Cli, opts duOptions) error {
if opts.format != "" && opts.verbose {
return errors.New("--format and --verbose cannot be used together")
} else if opts.format == "" {
if opts.verbose {
opts.format = duDefaultPrettyTemplate
} else {
opts.format = duDefaultTableFormat
}
} else if opts.format == formatter.PrettyFormatKey {
opts.format = duDefaultPrettyTemplate
} else if opts.format == formatter.TableFormatKey {
opts.format = duDefaultTableFormat
}
pi, err := toBuildkitPruneInfo(opts.filter.Value())
if err != nil {
return err
@ -129,53 +74,33 @@ func runDiskUsage(ctx context.Context, dockerCli command.Cli, opts duOptions) er
return err
}
fctx := formatter.Context{
Output: dockerCli.Out(),
Format: formatter.Format(opts.format),
}
var dus []*client.UsageInfo
tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
first := true
for _, du := range out {
if du != nil {
dus = append(dus, du...)
if du == nil {
continue
}
}
render := func(format func(subContext formatter.SubContext) error) error {
for _, du := range dus {
if err := format(&diskusageContext{
format: fctx.Format,
du: du,
}); err != nil {
return err
if opts.verbose {
printVerbose(tw, du)
} else {
if first {
printTableHeader(tw)
first = false
}
for _, di := range du {
printTableRow(tw, di)
}
tw.Flush()
}
return nil
}
duCtx := diskusageContext{}
duCtx.Header = formatter.SubHeaderContext{
"ID": duIDHeader,
"Parents": duParentsHeader,
"CreatedAt": duCreatedAtHeader,
"Mutable": duMutableHeader,
"Reclaimable": duReclaimHeader,
"Shared": duSharedHeader,
"Size": duSizeHeader,
"Description": duDescriptionHeader,
"UsageCount": duUsageHeader,
"LastUsedAt": duLastUsedAtHeader,
"Type": duTypeHeader,
if opts.filter.Value().Len() == 0 {
printSummary(tw, out)
}
defer func() {
if (fctx.Format != duDefaultTableFormat && fctx.Format != duDefaultPrettyTemplate) || fctx.Format.IsJSON() || opts.filter.Value().Len() > 0 {
return
}
printSummary(dockerCli.Out(), out)
}()
return fctx.Write(&duCtx, render)
tw.Flush()
return nil
}
func duCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
@ -189,84 +114,69 @@ func duCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
options.builder = rootOpts.builder
return runDiskUsage(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()
flags.Var(&options.filter, "filter", "Provide filter values")
flags.BoolVar(&options.verbose, "verbose", false, `Shorthand for "--format=pretty"`)
flags.StringVar(&options.format, "format", "", "Format the output")
flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
return cmd
}
type diskusageContext struct {
formatter.HeaderContext
format formatter.Format
du *client.UsageInfo
func printKV(w io.Writer, k string, v any) {
fmt.Fprintf(w, "%s:\t%v\n", k, v)
}
func (d *diskusageContext) MarshalJSON() ([]byte, error) {
return formatter.MarshalJSON(d)
func printVerbose(tw *tabwriter.Writer, du []*client.UsageInfo) {
for _, di := range du {
printKV(tw, "ID", di.ID)
if len(di.Parents) != 0 {
printKV(tw, "Parent", strings.Join(di.Parents, ","))
}
printKV(tw, "Created at", di.CreatedAt)
printKV(tw, "Mutable", di.Mutable)
printKV(tw, "Reclaimable", !di.InUse)
printKV(tw, "Shared", di.Shared)
printKV(tw, "Size", units.HumanSize(float64(di.Size)))
if di.Description != "" {
printKV(tw, "Description", di.Description)
}
printKV(tw, "Usage count", di.UsageCount)
if di.LastUsedAt != nil {
printKV(tw, "Last used", units.HumanDuration(time.Since(*di.LastUsedAt))+" ago")
}
if di.RecordType != "" {
printKV(tw, "Type", di.RecordType)
}
fmt.Fprintf(tw, "\n")
}
tw.Flush()
}
func (d *diskusageContext) ID() string {
id := d.du.ID
if d.format.IsTable() && d.du.Mutable {
func printTableHeader(tw *tabwriter.Writer) {
fmt.Fprintln(tw, "ID\tRECLAIMABLE\tSIZE\tLAST ACCESSED")
}
func printTableRow(tw *tabwriter.Writer, di *client.UsageInfo) {
id := di.ID
if di.Mutable {
id += "*"
}
return id
}
func (d *diskusageContext) Parents() []string {
return d.du.Parents
}
func (d *diskusageContext) CreatedAt() string {
return d.du.CreatedAt.String()
}
func (d *diskusageContext) Mutable() bool {
return d.du.Mutable
}
func (d *diskusageContext) Reclaimable() bool {
return !d.du.InUse
}
func (d *diskusageContext) Shared() bool {
return d.du.Shared
}
func (d *diskusageContext) Size() string {
size := units.HumanSize(float64(d.du.Size))
if d.format.IsTable() && d.du.Shared {
size := units.HumanSize(float64(di.Size))
if di.Shared {
size += "*"
}
return size
}
func (d *diskusageContext) Description() string {
return d.du.Description
}
func (d *diskusageContext) UsageCount() int {
return d.du.UsageCount
}
func (d *diskusageContext) LastUsedAt() string {
if d.du.LastUsedAt != nil {
return units.HumanDuration(time.Since(*d.du.LastUsedAt)) + " ago"
lastAccessed := ""
if di.LastUsedAt != nil {
lastAccessed = units.HumanDuration(time.Since(*di.LastUsedAt)) + " ago"
}
return ""
fmt.Fprintf(tw, "%-40s\t%-5v\t%-10s\t%s\n", id, !di.InUse, size, lastAccessed)
}
func (d *diskusageContext) Type() string {
return string(d.du.RecordType)
}
func printSummary(w io.Writer, dus [][]*client.UsageInfo) {
func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
total := int64(0)
reclaimable := int64(0)
shared := int64(0)
@ -285,11 +195,11 @@ func printSummary(w io.Writer, dus [][]*client.UsageInfo) {
}
}
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
if shared > 0 {
fmt.Fprintf(tw, "Shared:\t%s\n", units.HumanSize(float64(shared)))
fmt.Fprintf(tw, "Private:\t%s\n", units.HumanSize(float64(total-shared)))
}
fmt.Fprintf(tw, "Reclaimable:\t%s\n", units.HumanSize(float64(reclaimable)))
fmt.Fprintf(tw, "Total:\t%s\n", units.HumanSize(float64(total)))
tw.Flush()
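The rewritten du command above routes output through the docker CLI formatter and adds a --format flag alongside --verbose (which becomes shorthand for the pretty template). A few hedged invocations, assuming a buildx binary that includes this change:

# default table output (same as --format 'table {{.ID}}\t{{.Reclaimable}}\t{{.Size}}\t{{.LastUsedAt}}')
$ docker buildx du
# pick custom table columns from the fields in the header map above
$ docker buildx du --format 'table {{.ID}}\t{{.Type}}\t{{.Size}}\t{{.UsageCount}}'
# one JSON object per cache record
$ docker buildx du --format '{{json .}}'
# pretty, multi-line output per record
$ docker buildx du --verbose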

View File

@ -160,8 +160,7 @@ func exportCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
options.builder = *rootOpts.Builder
return runExport(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()

View File

@ -125,8 +125,7 @@ func importCmd(dockerCli command.Cli, _ RootOptions) *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
return runImport(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()

View File

@ -656,8 +656,7 @@ func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
options.builder = *rootOpts.Builder
return runInspect(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
cmd.AddCommand(

View File

@ -129,8 +129,7 @@ func attachmentCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
options.builder = *rootOpts.Builder
return runAttachment(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()

View File

@ -96,8 +96,7 @@ func logsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
options.builder = *rootOpts.Builder
return runLogs(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()

View File

@ -103,8 +103,7 @@ func lsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
options.builder = *rootOpts.Builder
return runLs(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()

View File

@ -55,8 +55,7 @@ func openCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
options.builder = *rootOpts.Builder
return runOpen(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
return cmd

View File

@ -129,8 +129,7 @@ func rmCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
options.builder = *rootOpts.Builder
return runRm(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()

View File

@ -16,8 +16,6 @@ func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *c
Short: "Commands to work on build records",
ValidArgsFunction: completion.Disable,
RunE: rootcmd.RunE,
DisableFlagsInUseLine: true,
}
cmd.AddCommand(

View File

@ -199,8 +199,7 @@ func traceCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
options.builder = *rootOpts.Builder
return runTrace(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()

View File

@ -18,7 +18,7 @@ import (
"github.com/docker/buildx/localstate"
"github.com/docker/cli/cli/command"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/frontend/dockerfile/dfgitutil"
"github.com/moby/buildkit/util/gitutil"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
@ -26,10 +26,6 @@ import (
const recordsLimit = 50
func buildName(fattrs map[string]string, ls *localstate.State) string {
if v, ok := fattrs["build-arg:BUILDKIT_BUILD_NAME"]; ok && v != "" {
return v
}
var res string
var target, contextPath, dockerfilePath, vcsSource string
@ -332,7 +328,7 @@ func valueFiler(key, value, sep string) matchFunc {
recValue = v
} else {
if context, ok := rec.FrontendAttrs["context"]; ok {
if ref, _, err := dfgitutil.ParseGitRef(context); err == nil {
if ref, err := gitutil.ParseGitRef(context); err == nil {
recValue = ref.Remote
}
}

View File

@ -279,8 +279,7 @@ func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
options.builder = *opts.Builder
return runCreate(cmd.Context(), dockerCli, options, args)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()

View File

@ -52,8 +52,7 @@ func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
options.builder = *rootOpts.Builder
return runInspect(cmd.Context(), dockerCli, options, args[0])
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()

View File

@ -12,11 +12,10 @@ type RootOptions struct {
func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *cobra.Command {
cmd := &cobra.Command{
Use: "imagetools",
Short: "Commands to work on images in registry",
ValidArgsFunction: completion.Disable,
RunE: rootcmd.RunE,
DisableFlagsInUseLine: true,
Use: "imagetools",
Short: "Commands to work on images in registry",
ValidArgsFunction: completion.Disable,
RunE: rootcmd.RunE,
}
cmd.AddCommand(

View File

@ -182,8 +182,7 @@ func inspectCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
}
return runInspect(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.BuilderNames(dockerCli),
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.BuilderNames(dockerCli),
}
flags := cmd.Flags()

View File

@ -47,9 +47,8 @@ func installCmd(dockerCli command.Cli) *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
return runInstall(dockerCli, options)
},
Hidden: true,
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
Hidden: true,
ValidArgsFunction: completion.Disable,
}
// hide builder persistent flag for this command

View File

@ -107,8 +107,7 @@ func lsCmd(dockerCli command.Cli) *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
return runLs(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()

View File

@ -3,7 +3,6 @@ package commands
import (
"context"
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
@ -170,13 +169,12 @@ func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
options.builder = rootOpts.builder
return runPrune(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()
flags.BoolVarP(&options.all, "all", "a", false, "Include internal/frontend images")
flags.Var(&options.filter, "filter", `Provide filter values`)
flags.Var(&options.filter, "filter", `Provide filter values (e.g., "until=24h")`)
flags.Var(&options.reservedSpace, "reserved-space", "Amount of disk space always allowed to keep for cache")
flags.Var(&options.minFreeSpace, "min-free-space", "Target amount of free disk space after pruning")
flags.Var(&options.maxUsedSpace, "max-used-space", "Maximum amount of disk space allowed to keep for cache")
@ -243,55 +241,3 @@ func toBuildkitPruneInfo(f filters.Args) (*client.PruneInfo, error) {
Filter: []string{strings.Join(filters, ",")},
}, nil
}
func printKV(w io.Writer, k string, v any) {
fmt.Fprintf(w, "%s:\t%v\n", k, v)
}
func printVerbose(tw *tabwriter.Writer, du []*client.UsageInfo) {
for _, di := range du {
printKV(tw, "ID", di.ID)
if len(di.Parents) != 0 {
printKV(tw, "Parent", strings.Join(di.Parents, ","))
}
printKV(tw, "Created at", di.CreatedAt)
printKV(tw, "Mutable", di.Mutable)
printKV(tw, "Reclaimable", !di.InUse)
printKV(tw, "Shared", di.Shared)
printKV(tw, "Size", units.HumanSize(float64(di.Size)))
if di.Description != "" {
printKV(tw, "Description", di.Description)
}
printKV(tw, "Usage count", di.UsageCount)
if di.LastUsedAt != nil {
printKV(tw, "Last used", units.HumanDuration(time.Since(*di.LastUsedAt))+" ago")
}
if di.RecordType != "" {
printKV(tw, "Type", di.RecordType)
}
fmt.Fprintf(tw, "\n")
}
tw.Flush()
}
func printTableHeader(tw *tabwriter.Writer) {
fmt.Fprintln(tw, "ID\tRECLAIMABLE\tSIZE\tLAST ACCESSED")
}
func printTableRow(tw *tabwriter.Writer, di *client.UsageInfo) {
id := di.ID
if di.Mutable {
id += "*"
}
size := units.HumanSize(float64(di.Size))
if di.Shared {
size += "*"
}
lastAccessed := ""
if di.LastUsedAt != nil {
lastAccessed = units.HumanDuration(time.Since(*di.LastUsedAt)) + " ago"
}
fmt.Fprintf(tw, "%-40s\t%-5v\t%-10s\t%s\n", id, !di.InUse, size, lastAccessed)
}
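The prune flags above take the usual filter syntax plus size-based thresholds. A hedged sketch of typical invocations; the values are examples:

# delete cache entries not used within the last 72 hours
$ docker buildx prune --filter "until=72h"
# include internal/frontend images and always allow up to 20GB of cache to be kept
$ docker buildx prune --all --reserved-space 20GB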

View File

@ -111,8 +111,7 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
}
return runRm(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.BuilderNames(dockerCli),
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.BuilderNames(dockerCli),
}
flags := cmd.Flags()

View File

@ -71,7 +71,6 @@ func NewRootCmd(name string, isPlugin bool, dockerCli *command.DockerCli) *cobra
Status: fmt.Sprintf("ERROR: unknown command: %q", args[0]),
}
},
DisableFlagsInUseLine: true,
}
if !isPlugin {
// match plugin behavior for standalone mode
@ -79,6 +78,8 @@ func NewRootCmd(name string, isPlugin bool, dockerCli *command.DockerCli) *cobra
cmd.SilenceUsage = true
cmd.SilenceErrors = true
cmd.TraverseChildren = true
cmd.DisableFlagsInUseLine = true
cli.DisableFlagsInUseLine(cmd)
if !confutil.IsExperimental() {
cmd.SetHelpTemplate(cmd.HelpTemplate() + "\n" + experimentalCommandHint + "\n")
}

View File

@ -1,33 +0,0 @@
package commands
import (
stderrs "errors"
"testing"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/stretchr/testify/require"
)
func TestDisableFlagsInUseLineIsSet(t *testing.T) {
cmd, err := command.NewDockerCli()
require.NoError(t, err)
rootCmd := NewRootCmd("buildx", true, cmd)
var errs []error
visitAll(rootCmd, func(c *cobra.Command) {
if !c.DisableFlagsInUseLine {
errs = append(errs, errors.New("DisableFlagsInUseLine is not set for "+c.CommandPath()))
}
})
err = stderrs.Join(errs...)
require.NoError(t, err)
}
func visitAll(root *cobra.Command, fn func(*cobra.Command)) {
for _, cmd := range root.Commands() {
visitAll(cmd, fn)
}
fn(root)
}

View File

@ -44,8 +44,7 @@ func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
}
return runStop(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.BuilderNames(dockerCli),
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.BuilderNames(dockerCli),
}
return cmd

View File

@ -53,9 +53,8 @@ func uninstallCmd(dockerCli command.Cli) *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
return runUninstall(dockerCli, options)
},
Hidden: true,
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
Hidden: true,
ValidArgsFunction: completion.Disable,
}
// hide builder persistent flag for this command

View File

@ -71,8 +71,7 @@ func useCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
}
return runUse(dockerCli, options)
},
ValidArgsFunction: completion.BuilderNames(dockerCli),
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.BuilderNames(dockerCli),
}
flags := cmd.Flags()

View File

@ -24,8 +24,7 @@ func versionCmd(dockerCli command.Cli) *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
return runVersion(dockerCli)
},
ValidArgsFunction: completion.Disable,
DisableFlagsInUseLine: true,
ValidArgsFunction: completion.Disable,
}
// hide builder persistent flag for this command

View File

@ -38,14 +38,9 @@ type Adapter[C LaunchConfig] struct {
threadsMu sync.RWMutex
nextThreadID int
sharedState
}
type sharedState struct {
breakpointMap *breakpointMap
sourceMap *sourceMap
sourceMap sourceMap
idPool *idPool
sh *shell
}
func New[C LaunchConfig]() *Adapter[C] {
@ -56,12 +51,8 @@ func New[C LaunchConfig]() *Adapter[C] {
evaluateReqCh: make(chan *evaluateRequest),
threads: make(map[int]*thread),
nextThreadID: 1,
sharedState: sharedState{
breakpointMap: newBreakpointMap(),
sourceMap: new(sourceMap),
idPool: new(idPool),
sh: newShell(),
},
breakpointMap: newBreakpointMap(),
idPool: new(idPool),
}
d.srv = NewServer(d.dapHandler())
return d
@ -170,21 +161,26 @@ func (d *Adapter[C]) Next(c Context, req *dap.NextRequest, resp *dap.NextRespons
}
func (d *Adapter[C]) StepIn(c Context, req *dap.StepInRequest, resp *dap.StepInResponse) error {
d.threadsMu.RLock()
t := d.threads[req.Arguments.ThreadId]
d.threadsMu.RUnlock()
var (
subReq dap.NextRequest
subResp dap.NextResponse
)
t.StepIn()
return nil
subReq.Arguments.ThreadId = req.Arguments.ThreadId
subReq.Arguments.SingleThread = req.Arguments.SingleThread
subReq.Arguments.Granularity = req.Arguments.Granularity
return d.Next(c, &subReq, &subResp)
}
func (d *Adapter[C]) StepOut(c Context, req *dap.StepOutRequest, resp *dap.StepOutResponse) error {
d.threadsMu.RLock()
t := d.threads[req.Arguments.ThreadId]
d.threadsMu.RUnlock()
var (
subReq dap.ContinueRequest
subResp dap.ContinueResponse
)
t.StepOut()
return nil
subReq.Arguments.ThreadId = req.Arguments.ThreadId
subReq.Arguments.SingleThread = req.Arguments.SingleThread
return d.Continue(c, &subReq, &subResp)
}
func (d *Adapter[C]) SetBreakpoints(c Context, req *dap.SetBreakpointsRequest, resp *dap.SetBreakpointsResponse) error {
@ -242,10 +238,12 @@ func (d *Adapter[C]) newThread(ctx Context, name string) (t *thread) {
d.threadsMu.Lock()
id := d.nextThreadID
t = &thread{
id: id,
name: name,
sharedState: d.sharedState,
variables: newVariableReferences(),
id: id,
name: name,
sourceMap: &d.sourceMap,
breakpointMap: d.breakpointMap,
idPool: d.idPool,
variables: newVariableReferences(),
}
d.threads[t.id] = t
d.nextThreadID++
@ -268,6 +266,20 @@ func (d *Adapter[C]) getThread(id int) (t *thread) {
return t
}
func (d *Adapter[C]) getFirstThread() (t *thread) {
d.threadsMu.Lock()
defer d.threadsMu.Unlock()
for _, thread := range d.threads {
if thread.isPaused() {
if t == nil || thread.id < t.id {
t = thread
}
}
}
return t
}
func (d *Adapter[C]) deleteThread(ctx Context, t *thread) {
d.threadsMu.Lock()
if t := d.threads[t.id]; t != nil {

View File

@ -81,14 +81,14 @@ func NewTestAdapter[C LaunchConfig](t *testing.T) (*Adapter[C], Conn, *Client) {
})
clientConn := logConn(t, "client", NewConn(rd2, wr1))
t.Cleanup(func() { clientConn.Close() })
t.Cleanup(func() {
clientConn.Close()
})
adapter := New[C]()
t.Cleanup(func() { adapter.Stop() })
client := NewClient(clientConn)
t.Cleanup(func() { client.Close() })
return adapter, srvConn, client
}

View File

@ -1,308 +0,0 @@
package dap
import (
"context"
"fmt"
"io"
"io/fs"
"net"
"os"
"path/filepath"
"strings"
"sync"
"github.com/docker/buildx/build"
"github.com/docker/buildx/util/ioset"
"github.com/docker/cli/cli-plugins/metadata"
"github.com/google/go-dap"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
type shell struct {
// SocketPath is set on the first time Init is invoked
// and stays that way.
SocketPath string
// Locks access to the session from the debug adapter.
// Only one debug thread can access the shell at a time.
sem *semaphore.Weighted
// Initialized once per shell and reused.
once sync.Once
err error
l net.Listener
eg *errgroup.Group
// For the specific session.
fwd *ioset.Forwarder
connected chan struct{}
mu sync.RWMutex
}
func newShell() *shell {
sh := &shell{
sem: semaphore.NewWeighted(1),
}
sh.resetSession()
return sh
}
func (s *shell) resetSession() {
s.mu.Lock()
defer s.mu.Unlock()
s.fwd = nil
s.connected = make(chan struct{})
}
// Init initializes the shell for connections on the client side.
// Attach will block until the terminal has been initialized.
func (s *shell) Init() error {
return s.listen()
}
func (s *shell) listen() error {
s.once.Do(func() {
var dir string
dir, s.err = os.MkdirTemp("", "buildx-dap-exec")
if s.err != nil {
return
}
defer func() {
if s.err != nil {
os.RemoveAll(dir)
}
}()
s.SocketPath = filepath.Join(dir, "s.sock")
s.l, s.err = net.Listen("unix", s.SocketPath)
if s.err != nil {
return
}
s.eg, _ = errgroup.WithContext(context.Background())
s.eg.Go(s.acceptLoop)
})
return s.err
}
func (s *shell) acceptLoop() error {
for {
if err := s.accept(); err != nil {
if errors.Is(err, net.ErrClosed) {
return nil
}
return err
}
}
}
func (s *shell) accept() error {
conn, err := s.l.Accept()
if err != nil {
return err
}
s.mu.Lock()
defer s.mu.Unlock()
if s.fwd != nil {
writeLine(conn, "Error: Already connected to exec instance.")
conn.Close()
return nil
}
// Set the input of the forwarder to the connection.
s.fwd = ioset.NewForwarder()
s.fwd.SetIn(&ioset.In{
Stdin: io.NopCloser(conn),
Stdout: conn,
Stderr: nopCloser{conn},
})
close(s.connected)
writeLine(conn, "Attached to build process.")
return nil
}
// Attach will attach the given thread to the shell.
// Only one container can attach to a shell at any given time.
// Other attaches will block until the context is canceled or it is
// able to reserve the shell for its own use.
//
// This method is intended to be called by paused threads.
func (s *shell) Attach(ctx context.Context, t *thread) {
rCtx := t.rCtx
if rCtx == nil {
return
}
var f dap.StackFrame
if len(t.stackTrace) > 0 {
f = t.frames[t.stackTrace[0]].StackFrame
}
cfg := &build.InvokeConfig{Tty: true}
if len(cfg.Entrypoint) == 0 && len(cfg.Cmd) == 0 {
cfg.Entrypoint = []string{"/bin/sh"} // launch shell by default
cfg.Cmd = []string{}
cfg.NoCmd = false
}
for {
if err := s.attach(ctx, f, rCtx, cfg); err != nil {
return
}
}
}
func (s *shell) wait(ctx context.Context) error {
s.mu.RLock()
connected := s.connected
s.mu.RUnlock()
select {
case <-connected:
return nil
case <-ctx.Done():
return context.Cause(ctx)
}
}
func (s *shell) attach(ctx context.Context, f dap.StackFrame, rCtx *build.ResultHandle, cfg *build.InvokeConfig) (retErr error) {
if err := s.wait(ctx); err != nil {
return err
}
in, out := ioset.Pipe()
defer in.Close()
defer out.Close()
s.mu.RLock()
fwd := s.fwd
s.mu.RUnlock()
fwd.SetOut(&out)
defer func() {
if retErr != nil {
fwd.SetOut(nil)
}
}()
// Check if the entrypoint is executable. If it isn't, don't bother
// trying to invoke.
if reason, ok := s.canInvoke(ctx, rCtx, cfg); !ok {
writeLineF(in.Stdout, "Build container is not executable. (reason: %s)", reason)
<-ctx.Done()
return context.Cause(ctx)
}
if err := s.sem.Acquire(ctx, 1); err != nil {
return err
}
defer s.sem.Release(1)
ctr, err := build.NewContainer(ctx, rCtx, cfg)
if err != nil {
return err
}
defer ctr.Cancel()
writeLineF(in.Stdout, "Running %s in build container from line %d.",
strings.Join(append(cfg.Entrypoint, cfg.Cmd...), " "),
f.Line,
)
writeLine(in.Stdout, "Changes to the container will be reset after the next step is executed.")
err = ctr.Exec(ctx, cfg, in.Stdin, in.Stdout, in.Stderr)
// Send newline to properly terminate the output.
writeLine(in.Stdout, "")
if err != nil {
return err
}
fwd.Close()
s.resetSession()
return nil
}
func (s *shell) canInvoke(ctx context.Context, rCtx *build.ResultHandle, cfg *build.InvokeConfig) (reason string, ok bool) {
var cmd string
if len(cfg.Entrypoint) > 0 {
cmd = cfg.Entrypoint[0]
} else if len(cfg.Cmd) > 0 {
cmd = cfg.Cmd[0]
}
if cmd == "" {
return "no command specified", false
}
st, err := rCtx.StatFile(ctx, cmd, cfg)
if err != nil {
return fmt.Sprintf("stat error: %s", err), false
}
mode := fs.FileMode(st.Mode)
if !mode.IsRegular() {
return fmt.Sprintf("%s: not a file", cmd), false
}
if mode&0111 == 0 {
return fmt.Sprintf("%s: not an executable", cmd), false
}
return "", true
}
// SendRunInTerminalRequest will send the request to the client to attach to
// the socket path that was created by Init. This is intended to be run
// from the adapter and interact directly with the client.
func (s *shell) SendRunInTerminalRequest(ctx Context) error {
// TODO: this should work in standalone mode too.
docker := os.Getenv(metadata.ReexecEnvvar)
req := &dap.RunInTerminalRequest{
Request: dap.Request{
Command: "runInTerminal",
},
Arguments: dap.RunInTerminalRequestArguments{
Kind: "integrated",
Args: []string{docker, "buildx", "dap", "attach", s.SocketPath},
Env: map[string]any{
"BUILDX_EXPERIMENTAL": "1",
},
},
}
resp := ctx.Request(req)
if !resp.GetResponse().Success {
return errors.New(resp.GetResponse().Message)
}
return nil
}
type nopCloser struct {
io.Writer
}
func (nopCloser) Close() error {
return nil
}
func writeLine(w io.Writer, msg string) {
if os.PathSeparator == '\\' {
fmt.Fprint(w, msg+"\r\n")
} else {
fmt.Fprintln(w, msg)
}
}
func writeLineF(w io.Writer, format string, a ...any) {
if os.PathSeparator == '\\' {
fmt.Fprintf(w, format+"\r\n", a...)
} else {
fmt.Fprintf(w, format+"\n", a...)
}
}
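The deleted shell.go above listened on a temporary Unix socket and asked the DAP client to run buildx dap attach against it. For reference, the command it launched looks roughly like this; the socket path is a made-up instance of the buildx-dap-exec temp-dir pattern in the code:

$ BUILDX_EXPERIMENTAL=1 docker buildx dap attach /tmp/buildx-dap-exec123456/s.sock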

View File

@ -29,69 +29,43 @@ func (d *Adapter[C]) Evaluate(ctx Context, req *dap.EvaluateRequest, resp *dap.E
return nil
}
var retErr error
cmd := d.replCommands(ctx, resp, &retErr)
var t *thread
if req.Arguments.FrameId > 0 {
if t = d.getThreadByFrameID(req.Arguments.FrameId); t == nil {
return errors.Errorf("no thread with frame id %d", req.Arguments.FrameId)
}
} else {
if t = d.getFirstThread(); t == nil {
return errors.New("no paused thread")
}
}
cmd := d.replCommands(ctx, t, resp)
cmd.SetArgs(args)
cmd.SetErr(d.Out())
if err := cmd.Execute(); err != nil {
// This error should only happen if there was something command
// related that malfunctioned as it will also print usage.
// Normal errors should set retErr from replCommands.
return err
fmt.Fprintf(d.Out(), "ERROR: %+v\n", err)
}
return retErr
return nil
}
func (d *Adapter[C]) replCommands(ctx Context, resp *dap.EvaluateResponse, retErr *error) *cobra.Command {
rootCmd := &cobra.Command{
SilenceErrors: true,
}
func (d *Adapter[C]) replCommands(ctx Context, t *thread, resp *dap.EvaluateResponse) *cobra.Command {
rootCmd := &cobra.Command{}
execCmd, _ := replCmd(ctx, "exec", resp, retErr, d.execCmd)
execCmd := &cobra.Command{
Use: "exec",
RunE: func(cmd *cobra.Command, args []string) error {
if !d.supportsExec {
return errors.New("cannot exec without runInTerminal client capability")
}
return t.Exec(ctx, args, resp)
},
}
rootCmd.AddCommand(execCmd)
return rootCmd
}
type execOptions struct{}
func (d *Adapter[C]) execCmd(ctx Context, _ []string, _ execOptions) (string, error) {
if !d.supportsExec {
return "", errors.New("cannot exec without runInTerminal client capability")
}
// Initialize the shell if it hasn't been done before. This will allow any
// containers that are attempting to attach to actually attach.
if err := d.sh.Init(); err != nil {
return "", err
}
// Send the request to attach to the terminal.
if err := d.sh.SendRunInTerminalRequest(ctx); err != nil {
return "", err
}
return fmt.Sprintf("Started process attached to %s.", d.sh.SocketPath), nil
}
func replCmd[Flags any, RetVal any](ctx Context, name string, resp *dap.EvaluateResponse, retErr *error, fn func(ctx Context, args []string, flags Flags) (RetVal, error)) (*cobra.Command, *Flags) {
flags := new(Flags)
return &cobra.Command{
Use: name,
Run: func(cmd *cobra.Command, args []string) {
v, err := fn(ctx, args, *flags)
if err != nil {
*retErr = err
return
}
resp.Body.Result = fmt.Sprint(v)
},
}, flags
}
func (t *thread) Exec(ctx Context, args []string) (message string, retErr error) {
if t.rCtx == nil {
return "", errors.New("no container context for exec")
}
func (t *thread) Exec(ctx Context, args []string, eresp *dap.EvaluateResponse) (retErr error) {
cfg := &build.InvokeConfig{Tty: true}
if len(cfg.Entrypoint) == 0 && len(cfg.Cmd) == 0 {
cfg.Entrypoint = []string{"/bin/sh"} // launch shell by default
@ -101,7 +75,7 @@ func (t *thread) Exec(ctx Context, args []string) (message string, retErr error)
ctr, err := build.NewContainer(ctx, t.rCtx, cfg)
if err != nil {
return "", err
return err
}
defer func() {
if retErr != nil {
@ -111,7 +85,7 @@ func (t *thread) Exec(ctx Context, args []string) (message string, retErr error)
dir, err := os.MkdirTemp("", "buildx-dap-exec")
if err != nil {
return "", err
return err
}
defer func() {
if retErr != nil {
@ -122,7 +96,7 @@ func (t *thread) Exec(ctx Context, args []string) (message string, retErr error)
socketPath := filepath.Join(dir, "s.sock")
l, err := net.Listen("unix", socketPath)
if err != nil {
return "", err
return err
}
go func() {
@ -147,11 +121,11 @@ func (t *thread) Exec(ctx Context, args []string) (message string, retErr error)
resp := ctx.Request(req)
if !resp.GetResponse().Success {
return "", errors.New(resp.GetResponse().Message)
return errors.New(resp.GetResponse().Message)
}
message = fmt.Sprintf("Started process attached to %s.", socketPath)
return message, nil
eresp.Body.Result = fmt.Sprintf("Started process attached to %s.", socketPath)
return nil
}
func (t *thread) runExec(l net.Listener, ctr *build.Container, cfg *build.InvokeConfig) {

View File

@ -2,8 +2,8 @@ package dap
import (
"context"
"path"
"path/filepath"
"slices"
"sync"
"github.com/docker/buildx/build"
@ -23,8 +23,10 @@ type thread struct {
name string
// Persistent state from the adapter.
sharedState
variables *variableReferences
idPool *idPool
sourceMap *sourceMap
breakpointMap *breakpointMap
variables *variableReferences
// Inputs to the evaluate call.
c gateway.Client
@ -38,21 +40,27 @@ type thread struct {
head digest.Digest
bps map[digest.Digest]int
frames map[int32]*frame
framesByDigest map[digest.Digest]*frame
// Runtime state for the evaluate call.
entrypoint *step
regions []*region
regionsByDigest map[digest.Digest]int
// Controls pause.
paused chan stepType
mu sync.Mutex
// Attributes set when a thread is paused.
cancel context.CancelCauseFunc // invoked when the thread is resumed
rCtx *build.ResultHandle
curPos digest.Digest
stackTrace []int32
frames map[int32]*frame
}
type region struct {
	// dependsOn means this region depends on the result of another region.
	dependsOn map[int]struct{}
	// digests is a set of digests associated with this region.
	digests []digest.Digest
}
type stepType int
@ -60,210 +68,51 @@ type stepType int
const (
stepContinue stepType = iota
stepNext
stepIn
stepOut
)
func (t *thread) Evaluate(ctx Context, c gateway.Client, headRef gateway.Reference, meta map[string][]byte, inputs build.Inputs, cfg common.Config) error {
if err := t.init(ctx, c, headRef, meta, inputs); err != nil {
func (t *thread) Evaluate(ctx Context, c gateway.Client, ref gateway.Reference, meta map[string][]byte, inputs build.Inputs, cfg common.Config) error {
if err := t.init(ctx, c, ref, meta, inputs); err != nil {
return err
}
defer t.reset()
action := stepContinue
step := stepContinue
if cfg.StopOnEntry {
action = stepNext
step = stepNext
}
var (
ref gateway.Reference
next = t.entrypoint
err error
)
for next != nil {
event := t.needsDebug(next, action, err)
if event.Reason != "" {
select {
case action = <-t.pause(ctx, ref, err, next, event):
// do nothing here
case <-ctx.Done():
return context.Cause(ctx)
}
for {
if step == stepContinue {
t.setBreakpoints(ctx)
}
ref, pos, err := t.seekNext(ctx, step)
if err != nil {
event := t.needsDebug(pos, step, err)
if event.Reason == "" {
return err
}
if action == stepContinue {
t.setBreakpoints(ctx)
select {
case step = <-t.pause(ctx, ref, err, event):
if err != nil {
return err
}
case <-ctx.Done():
return context.Cause(ctx)
}
ref, next, err = t.seekNext(ctx, next, action)
}
return nil
}
func (t *thread) init(ctx Context, c gateway.Client, ref gateway.Reference, meta map[string][]byte, inputs build.Inputs) error {
t.c = c
t.ref = ref
t.meta = meta
// Combine the dockerfile directory with the context path to find the
// real base path. The frontend will report the base path as the filename.
dir := path.Dir(inputs.DockerfilePath)
if !path.IsAbs(dir) {
dir = path.Join(inputs.ContextPath, dir)
}
t.sourcePath = dir
t.sourcePath = inputs.ContextPath
if err := t.getLLBState(ctx); err != nil {
return err
}
return t.createProgram()
}
type step struct {
// dgst holds the digest that should be resolved by this step.
// If this is empty, no digest should be resolved.
dgst digest.Digest
// in holds the next target when step in is used.
in *step
// out holds the next target when step out is used.
out *step
// next holds the next target when next is used.
next *step
// frame will hold the stack frame associated with this step.
frame *frame
}
func (t *thread) createProgram() error {
t.framesByDigest = make(map[digest.Digest]*frame)
t.frames = make(map[int32]*frame)
// Create the entrypoint by using the last node.
// We will build on top of that.
head := &step{
dgst: t.head,
frame: t.getStackFrame(t.head),
}
t.entrypoint = t.createBranch(head)
return nil
}
func (t *thread) createBranch(last *step) (first *step) {
first = last
for first.dgst != "" {
prev := &step{
// set to first temporarily until we determine
// if there are other inputs.
in: first,
// always first
next: first,
// exit point always matches the one set on first
out: first.out,
// always set to the same as next which is always first
frame: t.getStackFrame(first.dgst),
}
op := t.ops[first.dgst]
if len(op.Inputs) > 0 {
parent := t.determineParent(op)
for i := len(op.Inputs) - 1; i >= 0; i-- {
if i == parent {
// Skip the direct parent.
continue
}
inp := op.Inputs[i]
// Create a pseudo-step that acts as an exit point for this
// branch. This step exists so this branch has a place to go
// after it has finished that will advance to the next
// instruction.
exit := &step{
in: prev.in,
next: prev.next,
out: prev.out,
frame: prev.frame,
}
head := &step{
dgst: digest.Digest(inp.Digest),
in: exit,
next: exit,
out: exit,
frame: t.getStackFrame(digest.Digest(inp.Digest)),
}
prev.in = t.createBranch(head)
}
// Set the digest of the parent input on the first step associated
// with this step if it exists.
if parent >= 0 {
prev.dgst = digest.Digest(op.Inputs[parent].Digest)
}
}
// New first is the step we just created.
first = prev
}
return first
}
func (t *thread) getStackFrame(dgst digest.Digest) *frame {
if f := t.framesByDigest[dgst]; f != nil {
return f
}
f := &frame{
op: t.ops[dgst],
}
f.Id = int(t.idPool.Get())
if meta, ok := t.def.Metadata[dgst]; ok {
f.setNameFromMeta(meta)
}
if loc, ok := t.def.Source.Locations[string(dgst)]; ok {
f.fillLocation(t.def, loc, t.sourcePath)
}
t.frames[int32(f.Id)] = f
return f
}
func (t *thread) determineParent(op *pb.Op) int {
// Another section should have already checked this but
// double check here just in case we forget somewhere else.
// The rest of this method assumes there's at least one parent
// at index zero.
n := len(op.Inputs)
if n == 0 {
return -1
}
switch op := op.Op.(type) {
case *pb.Op_Exec:
for _, m := range op.Exec.Mounts {
if m.Dest == "/" {
return int(m.Input)
}
}
return -1
case *pb.Op_File:
// Use the first input where the index is from one of the inputs.
for _, action := range op.File.Actions {
if input := int(action.Input); input >= 0 && input < n {
return input
}
}
// Default to having no parent.
return -1
default:
// Default to index zero.
return 0
}
return t.createRegions()
}
func (t *thread) reset() {
@ -274,25 +123,23 @@ func (t *thread) reset() {
t.ops = nil
}
func (t *thread) needsDebug(cur *step, step stepType, err error) (e dap.StoppedEventBody) {
func (t *thread) needsDebug(target digest.Digest, step stepType, err error) (e dap.StoppedEventBody) {
if err != nil {
e.Reason = "exception"
e.Description = "Encountered an error during result evaluation"
} else if cur != nil {
if step != stepContinue {
e.Reason = "step"
} else if next := cur.in; next != nil {
if id, ok := t.bps[next.dgst]; ok {
e.Reason = "breakpoint"
e.Description = "Paused on breakpoint"
e.HitBreakpointIds = []int{id}
}
} else if step == stepNext && target != "" {
e.Reason = "step"
} else if step == stepContinue {
if id, ok := t.bps[target]; ok {
e.Reason = "breakpoint"
e.Description = "Paused on breakpoint"
e.HitBreakpointIds = []int{id}
}
}
return
}
func (t *thread) pause(c Context, ref gateway.Reference, err error, pos *step, event dap.StoppedEventBody) <-chan stepType {
func (t *thread) pause(c Context, ref gateway.Reference, err error, event dap.StoppedEventBody) <-chan stepType {
t.mu.Lock()
defer t.mu.Unlock()
@ -301,6 +148,7 @@ func (t *thread) pause(c Context, ref gateway.Reference, err error, pos *step, e
}
t.paused = make(chan stepType, 1)
t.rCtx = build.NewResultHandle(c, t.c, ref, t.meta, err)
if err != nil {
var solveErr *errdefs.SolveError
if errors.As(err, &solveErr) {
@ -309,14 +157,7 @@ func (t *thread) pause(c Context, ref gateway.Reference, err error, pos *step, e
}
}
}
ctx, cancel := context.WithCancelCause(c)
t.collectStackTrace(ctx, pos, ref)
t.cancel = cancel
if ref != nil || err != nil {
t.prepareResultHandle(c, ref, err)
}
t.collectStackTrace()
event.ThreadId = t.id
c.C() <- &dap.StoppedEvent{
@ -326,27 +167,6 @@ func (t *thread) pause(c Context, ref gateway.Reference, err error, pos *step, e
return t.paused
}
func (t *thread) prepareResultHandle(c Context, ref gateway.Reference, err error) {
// Create a context for cancellations and make the cancel function
// block on the wait group.
var wg sync.WaitGroup
ctx, cancel := context.WithCancelCause(c)
t.cancel = func(cause error) {
defer wg.Wait()
cancel(cause)
}
t.rCtx = build.NewResultHandle(ctx, t.c, ref, t.meta, err)
// Start the attach. Use the context we created and perform it in
// a goroutine. We aren't necessarily assuming this will actually work.
wg.Add(1)
go func() {
defer wg.Done()
t.sh.Attach(ctx, t)
}()
}
func (t *thread) Continue() {
t.resume(stepContinue)
}
@ -355,14 +175,6 @@ func (t *thread) Next() {
t.resume(stepNext)
}
func (t *thread) StepIn() {
t.resume(stepIn)
}
func (t *thread) StepOut() {
t.resume(stepOut)
}
func (t *thread) resume(step stepType) {
t.mu.Lock()
defer t.mu.Unlock()
@ -377,6 +189,13 @@ func (t *thread) resume(step stepType) {
t.paused = nil
}
func (t *thread) isPaused() bool {
t.mu.Lock()
defer t.mu.Unlock()
return t.paused != nil
}
func (t *thread) StackTrace() []dap.StackFrame {
t.mu.Lock()
defer t.mu.Unlock()
@ -442,92 +261,233 @@ func (t *thread) setBreakpoints(ctx Context) {
t.bps = t.breakpointMap.Intersect(ctx, t.def.Source, t.sourcePath)
}
func (t *thread) seekNext(ctx Context, from *step, action stepType) (gateway.Reference, *step, error) {
func (t *thread) findBacklinks() map[digest.Digest]map[digest.Digest]struct{} {
backlinks := make(map[digest.Digest]map[digest.Digest]struct{})
for dgst := range t.ops {
backlinks[dgst] = make(map[digest.Digest]struct{})
}
for dgst, op := range t.ops {
for _, inp := range op.Inputs {
if digest.Digest(inp.Digest) == t.head {
continue
}
backlinks[digest.Digest(inp.Digest)][dgst] = struct{}{}
}
}
return backlinks
}
func (t *thread) createRegions() error {
// Find the links going from inputs to their outputs.
	// This isn't represented in the LLB graph, but we need it to determine
	// whether an op has only one child and whether we are allowed to visit a node.
backlinks := t.findBacklinks()
// Create distinct regions whenever we have any branch (inputs or outputs).
t.regions = []*region{}
t.regionsByDigest = map[digest.Digest]int{}
determineRegion := func(dgst digest.Digest, children map[digest.Digest]struct{}) {
if len(children) == 1 {
var cDgst digest.Digest
for d := range children {
cDgst = d
}
childOp := t.ops[cDgst]
if len(childOp.Inputs) == 1 {
// We have one child and our child has one input so we can be merged
// into the same region as our child.
region := t.regionsByDigest[cDgst]
t.regions[region].digests = append(t.regions[region].digests, dgst)
t.regionsByDigest[dgst] = region
return
}
}
// We will require a new region for this digest because
// we weren't able to merge it in within the existing regions.
next := len(t.regions)
t.regions = append(t.regions, &region{
digests: []digest.Digest{dgst},
dependsOn: make(map[int]struct{}),
})
t.regionsByDigest[dgst] = next
// Mark each child as depending on this new region.
for child := range children {
region := t.regionsByDigest[child]
t.regions[region].dependsOn[next] = struct{}{}
}
}
canVisit := func(dgst digest.Digest) bool {
for dgst := range backlinks[dgst] {
if _, ok := t.regionsByDigest[dgst]; !ok {
// One of our outputs has not been categorized.
return false
}
}
return true
}
unvisited := []digest.Digest{t.head}
for len(unvisited) > 0 {
dgst := pop(&unvisited)
op := t.ops[dgst]
children := backlinks[dgst]
determineRegion(dgst, children)
// Determine which inputs we can now visit.
for _, inp := range op.Inputs {
indgst := digest.Digest(inp.Digest)
if canVisit(indgst) {
unvisited = append(unvisited, indgst)
}
}
}
// Reverse each of the digests so dependencies are first.
// It is currently in reverse topological order and it needs to be in
// topological order.
for _, r := range t.regions {
slices.Reverse(r.digests)
}
t.propagateRegionDependencies()
return nil
}
// propagateRegionDependencies will propagate the dependsOn attribute between
// different regions to make dependency lookups easier. If A depends on B
// and B depends on C, then A depends on C. But the algorithm before this will only
// record direct dependencies.
func (t *thread) propagateRegionDependencies() {
for _, r := range t.regions {
for {
n := len(r.dependsOn)
for i := range r.dependsOn {
for j := range t.regions[i].dependsOn {
r.dependsOn[j] = struct{}{}
}
}
if n == len(r.dependsOn) {
break
}
}
}
}
func (t *thread) seekNext(ctx Context, step stepType) (gateway.Reference, digest.Digest, error) {
// If we're at the end, return no digest to signal that
// we should conclude debugging.
var target *step
switch action {
if t.curPos == t.head {
return nil, "", nil
}
target := t.head
switch step {
case stepNext:
target = from.next
case stepIn:
target = from.in
case stepOut:
target = from.out
target = t.nextDigest(nil)
case stepContinue:
target = t.continueDigest(from)
target = t.continueDigest()
}
if target == "" {
return nil, "", nil
}
return t.seek(ctx, target)
}
func (t *thread) seek(ctx Context, target *step) (ref gateway.Reference, result *step, err error) {
if target != nil {
if target.dgst != "" {
ref, err = t.solve(ctx, target.dgst)
if err != nil {
return ref, nil, err
}
}
func (t *thread) seek(ctx Context, target digest.Digest) (gateway.Reference, digest.Digest, error) {
ref, err := t.solve(ctx, target)
if err != nil {
return ref, "", err
}
result = target
if err = ref.Evaluate(ctx); err != nil {
var solveErr *errdefs.SolveError
if errors.As(err, &solveErr) {
if dt, err := solveErr.Op.MarshalVT(); err == nil {
t.curPos = digest.FromBytes(dt)
}
} else {
t.curPos = ""
}
} else {
ref = t.ref
t.curPos = target
}
if ref != nil {
if err = ref.Evaluate(ctx); err != nil {
// If this is not a solve error, do not return the
// reference and target step.
var solveErr *errdefs.SolveError
if errors.As(err, &solveErr) {
if dt, err := solveErr.Op.MarshalVT(); err == nil {
// Find the error digest.
errDgst := digest.FromBytes(dt)
// Iterate from the first step to find the one
// we failed on.
result = t.entrypoint
for result != nil {
next := result.in
if next != nil && next.dgst == errDgst {
break
}
result = next
}
}
} else {
return nil, nil, err
}
}
}
return ref, result, err
return ref, t.curPos, err
}
func (t *thread) continueDigest(from *step) *step {
if len(t.bps) == 0 {
return nil
}
isBreakpoint := func(dgst digest.Digest) bool {
if dgst == "" {
func (t *thread) nextDigest(fn func(digest.Digest) bool) digest.Digest {
isValid := func(dgst digest.Digest) bool {
// Skip this digest because it has no locations in the source file.
if loc, ok := t.def.Source.Locations[string(dgst)]; !ok || len(loc.Locations) == 0 {
return false
}
// If a custom function has been set for validation, use it.
return fn == nil || fn(dgst)
}
// If we have no position, automatically select the first step.
if t.curPos == "" {
r := t.regions[len(t.regions)-1]
if isValid(r.digests[0]) {
return r.digests[0]
}
// We cannot use the first position. Treat the first position as our
// current position so we can iterate.
t.curPos = r.digests[0]
}
// Look up the region associated with our current position.
// If we can't find it, just pretend we're using step continue.
region, ok := t.regionsByDigest[t.curPos]
if !ok {
return t.head
}
r := t.regions[region]
i := slices.Index(r.digests, t.curPos) + 1
for {
if i >= len(r.digests) {
if region <= 0 {
// We're at the end of our execution. Should have been caught by
// t.head == t.curPos.
return ""
}
region--
r = t.regions[region]
i = 0
continue
}
next := r.digests[i]
if !isValid(next) {
i++
continue
}
return next
}
}
func (t *thread) continueDigest() digest.Digest {
if len(t.bps) == 0 {
return t.head
}
isValid := func(dgst digest.Digest) bool {
_, ok := t.bps[dgst]
return ok
}
next := func(s *step) *step {
cur := s.in
for cur != nil {
next := cur.in
if next != nil && isBreakpoint(next.dgst) {
return cur
}
cur = next
}
return nil
}
return next(from)
return t.nextDigest(isValid)
}
func (t *thread) solve(ctx context.Context, target digest.Digest) (gateway.Reference, error) {
@ -560,26 +520,38 @@ func (t *thread) releaseState() {
t.rCtx.Done()
t.rCtx = nil
}
for _, f := range t.frames {
f.ResetVars()
}
if t.cancel != nil {
t.cancel(context.Canceled)
t.cancel = nil
}
t.stackTrace = t.stackTrace[:0]
t.variables.Reset()
t.stackTrace = nil
t.frames = nil
}
func (t *thread) collectStackTrace(ctx context.Context, pos *step, ref gateway.Reference) {
for pos != nil {
frame := pos.frame
frame.ExportVars(ctx, ref, t.variables)
func (t *thread) collectStackTrace() {
region := t.regionsByDigest[t.curPos]
r := t.regions[region]
digests := r.digests
if index := slices.Index(digests, t.curPos); index >= 0 {
digests = digests[:index+1]
}
t.frames = make(map[int32]*frame)
for i := len(digests) - 1; i >= 0; i-- {
dgst := digests[i]
frame := &frame{}
frame.Id = int(t.idPool.Get())
if meta, ok := t.def.Metadata[dgst]; ok {
frame.setNameFromMeta(meta)
}
if loc, ok := t.def.Source.Locations[string(dgst)]; ok {
frame.fillLocation(t.def, loc, t.sourcePath)
}
if op := t.ops[dgst]; op != nil {
frame.fillVarsFromOp(op, t.variables)
}
t.stackTrace = append(t.stackTrace, int32(frame.Id))
pos, ref = pos.out, nil
t.frames[int32(frame.Id)] = frame
}
}
@ -594,3 +566,9 @@ func (t *thread) hasFrame(id int) bool {
_, ok := t.frames[int32(id)]
return ok
}
func pop[S ~[]E, E any](s *S) E {
e := (*s)[len(*s)-1]
*s = (*s)[:len(*s)-1]
return e
}

View File

@ -1,28 +1,20 @@
package dap
import (
"context"
"fmt"
"io/fs"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unicode/utf8"
"github.com/google/go-dap"
"github.com/moby/buildkit/client/llb"
gateway "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/solver/pb"
"github.com/tonistiigi/fsutil/types"
)
type frame struct {
dap.StackFrame
op *pb.Op
scopes []dap.Scope
}
@ -45,7 +37,6 @@ func (f *frame) fillLocation(def *llb.Definition, loc *pb.Locations, ws string)
info := def.Source.Infos[l.SourceIndex]
f.Source = &dap.Source{
Name: path.Base(info.Filename),
Path: filepath.Join(ws, info.Filename),
}
return
@ -53,34 +44,25 @@ func (f *frame) fillLocation(def *llb.Definition, loc *pb.Locations, ws string)
}
}
func (f *frame) ExportVars(ctx context.Context, ref gateway.Reference, refs *variableReferences) {
f.fillVarsFromOp(f.op, refs)
if ref != nil {
f.fillVarsFromResult(ctx, ref, refs)
}
}
func (f *frame) ResetVars() {
f.scopes = nil
}
func (f *frame) fillVarsFromOp(op *pb.Op, refs *variableReferences) {
f.scopes = append(f.scopes, dap.Scope{
Name: "Arguments",
PresentationHint: "arguments",
VariablesReference: refs.New(func() []dap.Variable {
var vars []dap.Variable
if op.Platform != nil {
vars = append(vars, platformVars(op.Platform, refs))
}
f.scopes = []dap.Scope{
{
Name: "Arguments",
PresentationHint: "arguments",
VariablesReference: refs.New(func() []dap.Variable {
var vars []dap.Variable
if op.Platform != nil {
vars = append(vars, platformVars(op.Platform, refs))
}
switch op := op.Op.(type) {
case *pb.Op_Exec:
vars = append(vars, execOpVars(op.Exec, refs))
}
return vars
}),
})
switch op := op.Op.(type) {
case *pb.Op_Exec:
vars = append(vars, execOpVars(op.Exec, refs))
}
return vars
}),
},
}
}
func platformVars(platform *pb.Platform, refs *variableReferences) dap.Variable {
@ -172,152 +154,7 @@ func execOpVars(exec *pb.ExecOp, refs *variableReferences) dap.Variable {
}
}
func (f *frame) fillVarsFromResult(ctx context.Context, ref gateway.Reference, refs *variableReferences) {
f.scopes = append(f.scopes, dap.Scope{
Name: "File Explorer",
PresentationHint: "locals",
VariablesReference: refs.New(func() []dap.Variable {
return fsVars(ctx, ref, "/", refs)
}),
Expensive: true,
})
}
func fsVars(ctx context.Context, ref gateway.Reference, path string, vars *variableReferences) []dap.Variable {
files, err := ref.ReadDir(ctx, gateway.ReadDirRequest{
Path: path,
})
if err != nil {
return []dap.Variable{
{
Name: "error",
Value: err.Error(),
},
}
}
paths := make([]dap.Variable, len(files))
for i, file := range files {
stat := statf(file)
fv := dap.Variable{
Name: file.Path,
}
fullpath := filepath.Join(path, file.Path)
if file.IsDir() {
fv.Name += "/"
fv.VariablesReference = vars.New(func() []dap.Variable {
dvar := dap.Variable{
Name: ".",
Value: statf(file),
VariablesReference: vars.New(func() []dap.Variable {
return statVars(file)
}),
}
return append([]dap.Variable{dvar}, fsVars(ctx, ref, fullpath, vars)...)
})
fv.Value = ""
} else {
fv.Value = stat
fv.VariablesReference = vars.New(func() (dvars []dap.Variable) {
if fs.FileMode(file.Mode).IsRegular() {
// Regular file so display a small blurb of the file.
dvars = append(dvars, fileVars(ctx, ref, fullpath)...)
}
return append(dvars, statVars(file)...)
})
}
paths[i] = fv
}
return paths
}
func statf(st *types.Stat) string {
mode := fs.FileMode(st.Mode)
modTime := time.Unix(0, st.ModTime).UTC()
return fmt.Sprintf("%s %d:%d %s", mode, st.Uid, st.Gid, modTime.Format("Jan 2 15:04:05 2006"))
}
func fileVars(ctx context.Context, ref gateway.Reference, fullpath string) []dap.Variable {
b, err := ref.ReadFile(ctx, gateway.ReadRequest{
Filename: fullpath,
Range: &gateway.FileRange{Length: 512},
})
var (
data string
dataErr error
)
if err != nil {
data = err.Error()
} else if isBinaryData(b) {
data = "binary data"
} else {
if len(b) == 512 {
// Get the remainder of the file.
remaining, err := ref.ReadFile(ctx, gateway.ReadRequest{
Filename: fullpath,
Range: &gateway.FileRange{Offset: 512},
})
if err != nil {
dataErr = err
} else {
b = append(b, remaining...)
}
}
data = string(b)
}
dvars := []dap.Variable{
{
Name: "data",
Value: data,
},
}
if dataErr != nil {
dvars = append(dvars, dap.Variable{
Name: "dataError",
Value: dataErr.Error(),
})
}
return dvars
}
func statVars(st *types.Stat) (vars []dap.Variable) {
if st.Linkname != "" {
vars = append(vars, dap.Variable{
Name: "linkname",
Value: st.Linkname,
})
}
mode := fs.FileMode(st.Mode)
modTime := time.Unix(0, st.ModTime).UTC()
vars = append(vars, []dap.Variable{
{
Name: "mode",
Value: mode.String(),
},
{
Name: "uid",
Value: strconv.FormatUint(uint64(st.Uid), 10),
},
{
Name: "gid",
Value: strconv.FormatUint(uint64(st.Gid), 10),
},
{
Name: "mtime",
Value: modTime.Format("Jan 2 15:04:05 2006"),
},
}...)
return vars
}
func (f *frame) Scopes() []dap.Scope {
if f.scopes == nil {
return []dap.Scope{}
}
return f.scopes
}
@ -368,34 +205,6 @@ func (v *variableReferences) Reset() {
v.nextID.Store(0)
}
// isBinaryData uses heuristics to determine if the file
// is binary. Algorithm taken from this blog post:
// https://eli.thegreenplace.net/2011/10/19/perls-guess-if-file-is-text-or-binary-implemented-in-python/
func isBinaryData(b []byte) bool {
odd := 0
for i := 0; i < len(b); i++ {
c := b[i]
if c == 0 {
return true
}
isHighBit := c&128 > 0
if !isHighBit {
if c < 32 && c != '\n' && c != '\t' {
odd++
}
} else {
r, sz := utf8.DecodeRune(b)
if r != utf8.RuneError && sz > 1 {
i += sz - 1
continue
}
odd++
}
}
return float64(odd)/float64(len(b)) > .3
}
func brief(s string) string {
if len(s) >= 64 {
return s[:60] + " ..."

View File

@ -43,7 +43,6 @@ title: Bake standard library functions
| `greaterthan` | Returns true if and only if the second number is greater than the first. |
| `greaterthanorequalto` | Returns true if and only if the second number is greater than or equal to the first. |
| `hasindex` | Returns true if the given collection can be indexed with the given key without producing an error, or false otherwise. |
| `homedir` | Returns the current user's home directory. |
| `indent` | Adds a given number of spaces after each newline character in the given string. |
| `index` | Returns the element with the given key from the given collection, or raises an error if there is no such element. |
| `indexof` | Finds the element index for a given value in a list. |

View File

@ -11,38 +11,72 @@ Many [popular editors](https://microsoft.github.io/debug-adapter-protocol/implem
- Pause on exception.
- Set breakpoints on instructions.
- Step next and continue.
- Open terminal in an intermediate container image.
- File explorer.
## Limitations
- The debugger cannot differentiate between identical `FROM` directives.
- **Step In** is the same as **Next**.
- **Step Out** is the same as **Continue**.
- **FROM** directives may have unintuitive breakpoint lines.
- Stack traces may not show the full sequence of events.
- Invalid `args` in launch request may not produce an error in the UI.
- Does not support arbitrary pausing.
- Output is always the plain text printer.
- File explorer does not work when pausing on an exception.
## Future Improvements
- Support for Bake.
- Open terminal in an intermediate container image.
- Backwards stepping.
- Better UI for errors with invalid arguments.
## We would like feedback on
- Stack traces.
- Step/pause locations.
- Variable inspections.
- Additional information that would be helpful while debugging.
- Annoyances or difficulties with the current implementation.
### Stack Traces
We would like feedback on whether the stack traces are easy to read and useful for debugging.
The goal was to include the parent commands in the stack trace to make it easier to understand which commands were used to reach the current step. Stack traces in conventional programming languages only have one parent per frame (the calling function).
A Dockerfile has no functions, so displaying a traditional call stack is not useful. Instead, we decided to show the input to a step as its "calling function" to make the preceding steps easier to see.
This method of showing a stack trace is not always clear. A step can have multiple parents, such as a `COPY --from` or a `RUN` with a bind mount, but only one of them can be the official "parent" in the stack trace. At the moment, we do not try to choose one and instead break the stack trace into two separate call stacks. The same applies when one step is used as the parent of multiple steps.
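As a purely illustrative sketch (not taken from a real project), the `COPY --from` step below has two parents and would therefore appear in two separate call stacks:
```dockerfile
FROM alpine AS assets
RUN echo hello > /hello

FROM busybox
# Two parents: the busybox base image and the assets stage.
COPY --from=assets /hello /hello
```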
### Step/pause Locations
Execution is paused **before** the step has been executed. Due to the way Dockerfiles are written, this sometimes creates
some unclear visuals regarding where the pause happened.
Execution is paused **after** the step has been executed rather than before.
For the last command in a stage, step **next** will highlight the same instruction twice: once before it executes and once after. Every other command is only highlighted before it executes. It is not currently possible to set a breakpoint at the end of a stage; set a breakpoint on the last step and then use step **next**.
For example:
When a command has multiple parents, step **into** will step into one of the parents, and step **out** will then return from that stage. This continues until there are no additional parents. There is currently no way to tell which parents have executed and which have not.
```dockerfile
FROM busybox
RUN echo hello > /hello
```
If you set a breakpoint on line 2, then execution will pause **after** the `RUN` has executed rather than before.
We thought this behavior would be more useful because it is more common to want to inspect the state after a step than before it.
There are also Dockerfiles where an instruction is an alias for an earlier instruction and does not get its own breakpoint location:
```dockerfile
FROM golang:1.24 AS golang-base
# Does not show up as a breakpoint since it refers to the instruction
# from earlier.
FROM golang-base
RUN go build ...
```
### Step into/out
A debug adapter is required to implement these, but we haven't determined how they map to Dockerfile execution. Feedback about how you would expect them to work would be helpful for future development.
For now, step into is implemented the same as next, while step out is implemented the same as continue. The reasoning is that the next step always goes into the next call, and stepping out returns from the current function, which is the same as building the final step.
### Variable Inspections

View File

@ -368,7 +368,6 @@ You can override the following fields:
* `args`
* `cache-from`
* `cache-to`
* `call`
* `context`
* `dockerfile`
* `entitlements`

View File

@ -75,15 +75,13 @@ The following [launch request arguments](https://microsoft.github.io/debug-adapt
Command line arguments may be passed to the debug adapter the same way they would be passed to the normal build command, and they set the corresponding values.
Launch request arguments that are set override any corresponding command line arguments.
A debug extension should include an `args` and `builder` entry in the launch configuration. These will modify the arguments passed to the binary for the tool invocation.
`builder` will add `--builder <arg>` directly after the executable and `args` will append to the end of the tool invocation.
A debug extension should include an `args` entry in the launch configuration and should append these arguments to the end of the tool invocation.
For example, a launch configuration in Visual Studio Code with the following:
```json
{
"args": ["--build-arg", "FOO=AAA"]
"builder": ["mybuilder"]
}
```
This should cause the debug adapter to be invoked as `docker buildx --builder mybuilder dap build --build-arg FOO=AAA`.
This should cause the debug adapter to be invoked as `docker buildx dap build --build-arg FOO=AAA`.

View File

@ -13,9 +13,8 @@ Disk usage
|:------------------------|:---------|:--------|:-----------------------------------------|
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| [`--filter`](#filter) | `filter` | | Provide filter values |
| [`--format`](#format) | `string` | | Format the output |
| [`--verbose`](#verbose) | `bool` | | Shorthand for `--format=pretty` |
| `--filter` | `filter` | | Provide filter values |
| [`--verbose`](#verbose) | `bool` | | Provide a more verbose output |
<!---MARKER_GEN_END-->
@ -51,7 +50,7 @@ If `RECLAIMABLE` is false, the `docker buildx du prune` command won't delete
the record, even if you use `--all`. That's because the record is actively in
use by some component of the builder.
The asterisks (\*) in the default output format indicate the following:
The asterisks (\*) in the default output indicate the following:
- An asterisk next to an ID (`zu7m6evdpebh5h8kfkpw9dlf2*`) indicates that the record
is mutable. The size of the record may change, or another build can take ownership of
@ -62,156 +61,33 @@ The asterisks (\*) in the default output format indicate the following:
If you prune such a record then you will lose build cache but only metadata
will be deleted as the image still needs the actual storage layers.
### <a name="filter"></a> Provide filter values (--filter)
Same as [`buildx prune --filter`](buildx_prune.md#filter).
### <a name="format"></a> Format the output (--format)
The formatting options (`--format`) pretty-prints usage information output
using a Go template.
Valid placeholders for the Go template are:
* `.ID`
* `.Parents`
* `.CreatedAt`
* `.Mutable`
* `.Reclaimable`
* `.Shared`
* `.Size`
* `.Description`
* `.UsageCount`
* `.LastUsedAt`
* `.Type`
When using the `--format` option, the `du` command will either output the data
exactly as the template declares or, when using the `table` directive, includes
column headers as well.
The `pretty` format is useful for inspecting the disk usage records in more
detail. It shows the mutable and shared states more clearly, as well as
additional information about the corresponding layer:
```console
$ docker buildx du --format=pretty
...
ID: 6wqu0v6hjdwvhh8yjozrepaof
Parents:
- bqx15bcewecz4wcg14b7iodvp
Created at: 2025-06-12 15:44:02.715795569 +0000 UTC
Mutable: false
Reclaimable: true
Shared: true
Size: 1.653GB
Description: [build-base 4/4] COPY . .
Usage count: 1
Last used: 2 months ago
Type: regular
Shared: 35.57GB
Private: 97.94GB
Reclaimable: 131.5GB
Total: 133.5GB
```
The following example uses a template without headers and outputs the
`ID` and `Size` entries separated by a colon (`:`):
```console
$ docker buildx du --format "{{.ID}}: {{.Size}}"
6wqu0v6hjdwvhh8yjozrepaof: 1.653GB
4m8061kctvjyh9qleus8rgpgx: 1.723GB
fcm9mlz2641u8r5eicjqdhy1l: 1.841GB
z2qu1swvo3afzd9mhihi3l5k0: 1.873GB
nmi6asc00aa3ja6xnt6o7wbrr: 2.027GB
0qlam41jxqsq6i27yqllgxed3: 2.495GB
3w9qhzzskq5jc262snfu90bfz: 2.617GB
```
The following example uses a `table` template and outputs the `ID` and
`Description`:
```console
$ docker buildx du --format "table {{.ID}} {{.Description}}"
ID DESCRIPTION
03bbhchaib8cygqs68um6hfnl [binaries-linux 2/5] LINK COPY --link --from=binfmt-filter /out/ /
2h8un0tyg57oj64xvbas6mzea [cni-plugins-export 2/4] LINK COPY --link --from=cni-plugins /opt/cni/bin/loopback /buildkit-cni-loopback
evckox33t07ob9dmollhn4h4j [cni-plugins-export 3/4] LINK COPY --link --from=cni-plugins /opt/cni/bin/host-local /buildkit-cni-host-local
jlxzwcw6xaomxj8irerow9bhb [binaries-linux 4/5] LINK COPY --link --from=buildctl /usr/bin/buildctl /
ov2oetgebkhpsw39rv1sbh5w1 [buildkit-linux 1/1] LINK COPY --link --from=binaries / /usr/bin/
ruoczhyq25n5v9ld7n231zalx [binaries-linux 3/5] LINK COPY --link --from=cni-plugins-export-squashed / /
ax7cov6kizxi9ufvcwsef4occ* local source for context
```
JSON output is also supported and will print as newline delimited JSON:
```console
$ docker buildx du --format=json
{"CreatedAt":"2025-07-29T12:36:01Z","Description":"pulled from docker.io/library/rust:1.85.1-bookworm@sha256:e51d0265072d2d9d5d320f6a44dde6b9ef13653b035098febd68cce8fa7c0bc4","ID":"ic1gfidvev5nciupzz53alel4","LastUsedAt":"2025-07-29T12:36:01Z","Mutable":false,"Parents":["hmpdhm4sjrfpmae4xm2y3m0ra"],"Reclaimable":true,"Shared":false,"Size":"829889526","Type":"regular","UsageCount":1}
{"CreatedAt":"2025-08-05T09:24:09Z","Description":"pulled from docker.io/library/node:22@sha256:3218f0d1b9e4b63def322e9ae362d581fbeac1ef21b51fc502ef91386667ce92","ID":"jsw7fx09l5zsda3bri1z4mwk5","LastUsedAt":"2025-08-05T09:24:09Z","Mutable":false,"Parents":["098jsj5ebbv1w47ikqigeuurs"],"Reclaimable":true,"Shared":true,"Size":"829898832","Type":"regular","UsageCount":1}
```
You can use `jq` to pretty-print the JSON output:
```console
$ docker buildx du --format=json | jq .
{
"CreatedAt": "2025-07-29T12:36:01Z",
"Description": "pulled from docker.io/library/rust:1.85.1-bookworm@sha256:e51d0265072d2d9d5d320f6a44dde6b9ef13653b035098febd68cce8fa7c0bc4",
"ID": "ic1gfidvev5nciupzz53alel4",
"LastUsedAt": "2025-07-29T12:36:01Z",
"Mutable": false,
"Parents": [
"hmpdhm4sjrfpmae4xm2y3m0ra"
],
"Reclaimable": true,
"Shared": false,
"Size": "829889526",
"Type": "regular",
"UsageCount": 1
}
{
"CreatedAt": "2025-08-05T09:24:09Z",
"Description": "pulled from docker.io/library/node:22@sha256:3218f0d1b9e4b63def322e9ae362d581fbeac1ef21b51fc502ef91386667ce92",
"ID": "jsw7fx09l5zsda3bri1z4mwk5",
"LastUsedAt": "2025-08-05T09:24:09Z",
"Mutable": false,
"Parents": [
"098jsj5ebbv1w47ikqigeuurs"
],
"Reclaimable": true,
"Shared": true,
"Size": "829898832",
"Type": "regular",
"UsageCount": 1
}
```
### <a name="verbose"></a> Use verbose output (--verbose)
Shorthand for [`--format=pretty`](#format):
The verbose output of the `docker buildx du` command is useful for inspecting
the disk usage records in more detail. The verbose output shows the mutable and
shared states more clearly, as well as additional information about the
corresponding layer.
```console
$ docker buildx du --verbose
...
ID: 6wqu0v6hjdwvhh8yjozrepaof
Parents:
- bqx15bcewecz4wcg14b7iodvp
Created at: 2025-06-12 15:44:02.715795569 +0000 UTC
Mutable: false
Reclaimable: true
Shared: true
Size: 1.653GB
Description: [build-base 4/4] COPY . .
Usage count: 1
Last used: 2 months ago
Type: regular
Last used: 2 days ago
Type: regular
Shared: 35.57GB
Private: 97.94GB
Reclaimable: 131.5GB
Total: 133.5GB
ID: 05d0elirb4mmvpmnzbrp3ssrg
Parent: e8sfdn4mygrg7msi9ak1dy6op
Created at: 2023-11-20 09:53:30.881558721 +0000 UTC
Mutable: false
Reclaimable: true
Shared: false
Size: 0B
Description: [gobase 3/3] WORKDIR /src
Usage count: 3
Last used: 24 hours ago
Type: regular
Reclaimable: 4.453GB
Total: 4.453GB
```
### <a name="builder"></a> Override the configured builder instance (--builder)
@ -219,7 +95,7 @@ Total: 133.5GB
Use the `--builder` flag to inspect the disk usage of a particular builder.
```console
$ docker buildx du --builder mybuilder
$ docker buildx du --builder youthful_shtern
ID RECLAIMABLE SIZE LAST ACCESSED
g41agepgdczekxg2mtw0dujsv* true 1.312GB 47 hours ago
e6ycrsa0bn9akigqgzu0sc6kr true 318MB 47 hours ago

View File

@ -9,17 +9,17 @@ Remove build cache
### Options
| Name | Type | Default | Description |
|:--------------------------------------|:---------|:--------|:-------------------------------------------------------|
| [`-a`](#all), [`--all`](#all) | `bool` | | Include internal/frontend images |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| [`--filter`](#filter) | `filter` | | Provide filter values |
| `-f`, `--force` | `bool` | | Do not prompt for confirmation |
| [`--max-used-space`](#max-used-space) | `bytes` | `0` | Maximum amount of disk space allowed to keep for cache |
| [`--min-free-space`](#min-free-space) | `bytes` | `0` | Target amount of free disk space after pruning |
| [`--reserved-space`](#reserved-space) | `bytes` | `0` | Amount of disk space always allowed to keep for cache |
| `--verbose` | `bool` | | Provide a more verbose output |
| Name | Type | Default | Description |
|:------------------------|:---------|:--------|:-------------------------------------------------------|
| `-a`, `--all` | `bool` | | Include internal/frontend images |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--filter` | `filter` | | Provide filter values (e.g., `until=24h`) |
| `-f`, `--force` | `bool` | | Do not prompt for confirmation |
| `--max-used-space` | `bytes` | `0` | Maximum amount of disk space allowed to keep for cache |
| `--min-free-space` | `bytes` | `0` | Target amount of free disk space after pruning |
| `--reserved-space` | `bytes` | `0` | Amount of disk space always allowed to keep for cache |
| `--verbose` | `bool` | | Provide a more verbose output |
<!---MARKER_GEN_END-->
@ -28,89 +28,24 @@ Remove build cache
Clears the build cache of the selected builder.
You can finely control what cache data is kept using:
- The `--filter=until=<duration>` flag to keep images that have been used within
  the last `<duration>`.
`<duration>` is a duration string, e.g. `24h` or `2h30m`, with allowable
units of `(h)ours`, `(m)inutes` and `(s)econds`.
- The `--keep-storage=<size>` flag to keep `<size>` bytes of data in the cache.
`<size>` is a human-readable memory string, e.g. `128mb`, `2gb`, etc. Units
are case-insensitive.
- The `--all` flag to allow clearing internal helper images and frontend images
set using the `#syntax=` directive or the `BUILDKIT_SYNTAX` build argument.
## Examples
### <a name="all"></a> Include internal/frontend images (--all)
The `--all` flag allows clearing internal helper images and frontend images
set using the `#syntax=` directive or the `BUILDKIT_SYNTAX` build argument.
### <a name="filter"></a> Provide filter values (--filter)
You can finely control which cache records to delete using the `--filter` flag.
The filter format is in the form of `<key><op><value>`, known as selectors. All
selectors must match the target object for the filter to be true. We define the
operators `=` for equality, `!=` for not equal and `~=` for a regular
expression.
Valid filter keys are:
- `until` to keep records that have been used within the given duration.
  The value is a duration string, e.g. `24h` or `2h30m`, with allowable units of
  `(h)ours`, `(m)inutes` and `(s)econds`.
- `id` to target a specific image ID.
- `parents` to target records that are parents of the
  specified image ID. Multiple parent IDs are separated by a semicolon (`;`).
- `description` to target records whose description contains the specified
  substring.
- `inuse` to target records that are actively in use and therefore not
  reclaimable.
- `mutable` to target records that are mutable.
- `immutable` to target records that are immutable.
- `shared` to target records that are shared with other resources,
  typically images.
- `private` to target records that are not shared.
- `type` to target records by type. Valid types are:
- `internal`
- `frontend`
- `source.local`
- `source.git.checkout`
- `exec.cachemount`
- `regular`
Examples:
```console
docker buildx prune --filter "until=24h"
docker buildx prune --filter "description~=golang"
docker buildx prune --filter "parents=dpetmoi6n0yqanxjqrbnofz9n;kgoj0q6g57i35gdyrv546alz7"
docker buildx prune --filter "type=source.local"
docker buildx prune --filter "type!=exec.cachemount"
```
> [!NOTE]
> Multiple `--filter` flags are ANDed together.
### <a name="max-used-space"></a> Maximum amount of disk space allowed to keep for cache (--max-used-space)
The `--max-used-space` flag allows setting a maximum amount of disk space
that the build cache can use. If the cache is using more disk space than this
value, the least recently used cache records are deleted until the total
used space is less than or equal to the specified value.
The value is specified in bytes. You can use a human-readable memory string,
e.g. `128mb`, `2gb`, etc. Units are case-insensitive.
### <a name="min-free-space"></a> Target amount of free disk space after pruning (--min-free-space)
The `--min-free-space` flag allows setting a target amount of free disk space
that should be available after pruning. If the available disk space is less
than this value, the least recently used cache records are deleted until
the available free space is greater than or equal to the specified value.
The value is specified in bytes. You can use a human-readable memory string,
e.g. `128mb`, `2gb`, etc. Units are case-insensitive.
### <a name="reserved-space"></a> Amount of disk space always allowed to keep for cache (--reserved-space)
The `--reserved-space` flag allows setting an amount of disk space that
should always be kept for the build cache. If the available disk space is less
than this value, the least recently used cache records are deleted until
the available free space is greater than or equal to the specified value.
The value is specified in bytes. You can use a human-readable memory string,
e.g. `128mb`, `2gb`, etc. Units are case-insensitive.
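As an illustrative example (the values are arbitrary, not recommendations), these thresholds can be combined in a single invocation:
```console
$ docker buildx prune --reserved-space=10gb --min-free-space=20gb --max-used-space=50gb
```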
### <a name="builder"></a> Override the configured builder instance (--builder)
Same as [`buildx --builder`](buildx.md#builder).

View File

@ -18,7 +18,6 @@ import (
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/context/docker"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
@ -126,38 +125,15 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
hc := &container.HostConfig{
Privileged: true,
RestartPolicy: d.restartPolicy,
Init: &useInit,
}
mounts := []mount.Mount{
{
Type: mount.TypeVolume,
Source: d.Name + volumeStateSuffix,
Target: confutil.DefaultBuildKitStateDir,
Mounts: []mount.Mount{
{
Type: mount.TypeVolume,
Source: d.Name + volumeStateSuffix,
Target: confutil.DefaultBuildKitStateDir,
},
},
Init: &useInit,
}
// Mount WSL libraries if running in a WSL environment and the Docker context
// is a local socket as requesting GPU on container builder creation
// is not enough when generating the CDI specification for GPU devices.
// https://github.com/docker/buildx/pull/3320
if os.Getenv("WSL_DISTRO_NAME") != "" {
if cm, err := d.ContextStore.GetMetadata(d.DockerContext); err == nil {
if epm, err := docker.EndpointFromContext(cm); err == nil && isSocket(epm.Host) {
wslLibPath := "/usr/lib/wsl"
if st, err := os.Stat(wslLibPath); err == nil && st.IsDir() {
mounts = append(mounts, mount.Mount{
Type: mount.TypeBind,
Source: wslLibPath,
Target: wslLibPath,
ReadOnly: true,
})
}
}
}
}
hc.Mounts = mounts
if d.netMode != "" {
hc.NetworkMode = container.NetworkMode(d.netMode)
}
@ -555,12 +531,3 @@ func getBuildkitFlags(initConfig driver.InitConfig) []string {
}
return flags
}
func isSocket(addr string) bool {
switch proto, _, _ := strings.Cut(addr, "://"); proto {
case "unix", "npipe", "fd":
return true
default:
return false
}
}

View File

@ -3,14 +3,13 @@ package context
import (
"net/url"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/context"
"github.com/docker/cli/cli/context/store"
"github.com/docker/docker/pkg/homedir"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
@ -100,7 +99,7 @@ func (c *Endpoint) KubernetesConfig() clientcmd.ClientConfig {
func (c *EndpointMeta) ResolveDefault() (any, *store.EndpointTLSData, error) {
kubeconfig := os.Getenv("KUBECONFIG")
if kubeconfig == "" {
kubeconfig = filepath.Join(getHomeDir(), ".kube/config")
kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
}
kubeEP, err := FromKubeConfig(kubeconfig, "", "")
if err != nil {
@ -157,7 +156,7 @@ func NewKubernetesConfig(configPath string) clientcmd.ClientConfig {
if config := os.Getenv("KUBECONFIG"); config != "" {
kubeConfig = config
} else {
kubeConfig = filepath.Join(getHomeDir(), ".kube/config")
kubeConfig = filepath.Join(homedir.Get(), ".kube/config")
}
}
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
@ -182,28 +181,3 @@ func ConfigFromEndpoint(endpointName string, s store.Reader) (clientcmd.ClientCo
}
return ConfigFromContext(endpointName, s)
}
// getHomeDir returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
//
// On non-Windows platforms, it falls back to nss lookups, if the home
// directory cannot be obtained from environment-variables.
//
// If linking statically with cgo enabled against glibc, ensure the
// osusergo build tag is used.
//
// If needing to do nss lookups, do not disable cgo or set osusergo.
//
// It's a local fork of [pkg/homedir].
//
// [pkg/homedir]: https://github.com/moby/moby/blob/v28.3.2/pkg/homedir/homedir.go#L9-L28
func getHomeDir() string {
home, _ := os.UserHomeDir()
if home == "" && runtime.GOOS != "windows" {
if u, err := user.Current(); err == nil {
return u.HomeDir
}
}
return home
}

View File

@ -176,36 +176,38 @@ func (f *factory) processDriverOpts(deploymentName string, namespace string, cfg
defaultLoad := false
timeout := defaultTimeout
deploymentOpt.Qemu.Image = bkimage.QemuImage
loadbalance := LoadbalanceSticky
var err error
for k, v := range cfg.DriverOpts {
switch {
case k == "image":
switch k {
case "image":
if v != "" {
deploymentOpt.Image = v
}
case k == "namespace":
case "namespace":
namespace = v
case k == "replicas":
case "replicas":
deploymentOpt.Replicas, err = strconv.Atoi(v)
if err != nil {
return nil, "", "", false, 0, err
}
case k == "requests.cpu":
case "requests.cpu":
deploymentOpt.RequestsCPU = v
case k == "requests.memory":
case "requests.memory":
deploymentOpt.RequestsMemory = v
case k == "requests.ephemeral-storage":
case "requests.ephemeral-storage":
deploymentOpt.RequestsEphemeralStorage = v
case k == "limits.cpu":
case "limits.cpu":
deploymentOpt.LimitsCPU = v
case k == "limits.memory":
case "limits.memory":
deploymentOpt.LimitsMemory = v
case k == "limits.ephemeral-storage":
case "limits.ephemeral-storage":
deploymentOpt.LimitsEphemeralStorage = v
case k == "rootless":
case "rootless":
deploymentOpt.Rootless, err = strconv.ParseBool(v)
if err != nil {
return nil, "", "", false, 0, err
@ -213,26 +215,26 @@ func (f *factory) processDriverOpts(deploymentName string, namespace string, cfg
if _, isImage := cfg.DriverOpts["image"]; !isImage {
deploymentOpt.Image = bkimage.DefaultRootlessImage
}
case k == "schedulername":
case "schedulername":
deploymentOpt.SchedulerName = v
case k == "serviceaccount":
case "serviceaccount":
deploymentOpt.ServiceAccountName = v
case k == "nodeselector":
case "nodeselector":
deploymentOpt.NodeSelector, err = splitMultiValues(v, ",", "=")
if err != nil {
return nil, "", "", false, 0, errors.Wrap(err, "cannot parse node selector")
}
case k == "annotations":
case "annotations":
deploymentOpt.CustomAnnotations, err = splitMultiValues(v, ",", "=")
if err != nil {
return nil, "", "", false, 0, errors.Wrap(err, "cannot parse annotations")
}
case k == "labels":
case "labels":
deploymentOpt.CustomLabels, err = splitMultiValues(v, ",", "=")
if err != nil {
return nil, "", "", false, 0, errors.Wrap(err, "cannot parse labels")
}
case k == "tolerations":
case "tolerations":
ts := strings.Split(v, ";")
deploymentOpt.Tolerations = []corev1.Toleration{}
for i := range ts {
@ -267,46 +269,42 @@ func (f *factory) processDriverOpts(deploymentName string, namespace string, cfg
deploymentOpt.Tolerations = append(deploymentOpt.Tolerations, t)
}
case k == "loadbalance":
case "loadbalance":
switch v {
case LoadbalanceSticky, LoadbalanceRandom:
loadbalance = v
case LoadbalanceSticky:
case LoadbalanceRandom:
default:
return nil, "", "", false, 0, errors.Errorf("invalid loadbalance %q", v)
}
case k == "qemu.install":
loadbalance = v
case "qemu.install":
deploymentOpt.Qemu.Install, err = strconv.ParseBool(v)
if err != nil {
return nil, "", "", false, 0, err
}
case k == "qemu.image":
case "qemu.image":
if v != "" {
deploymentOpt.Qemu.Image = v
}
case k == "buildkit-root-volume-memory":
case "buildkit-root-volume-memory":
if v != "" {
deploymentOpt.BuildKitRootVolumeMemory = v
}
case k == "default-load":
case "default-load":
defaultLoad, err = strconv.ParseBool(v)
if err != nil {
return nil, "", "", false, 0, err
}
case k == "timeout":
case "timeout":
timeout, err = time.ParseDuration(v)
if err != nil {
return nil, "", "", false, 0, errors.Wrap(err, "cannot parse timeout")
}
case strings.HasPrefix(k, "env."):
envName := strings.TrimPrefix(k, "env.")
if envName == "" {
return nil, "", "", false, 0, errors.Errorf("invalid env option %q, expecting env.FOO=bar", k)
}
deploymentOpt.Env = append(deploymentOpt.Env, corev1.EnvVar{Name: envName, Value: v})
default:
return nil, "", "", false, 0, errors.Errorf("invalid driver option %s for driver %s", k, DriverName)
}
}
return deploymentOpt, loadbalance, namespace, defaultLoad, timeout, nil
}

View File

@ -45,7 +45,6 @@ type DeploymentOpt struct {
LimitsMemory string
LimitsEphemeralStorage string
Platforms []ocispecs.Platform
Env []corev1.EnvVar // injected into main buildkitd container
}
const (
@ -271,10 +270,6 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
})
}
if len(opt.Env) > 0 {
d.Spec.Template.Spec.Containers[0].Env = append(d.Spec.Template.Spec.Containers[0].Env, opt.Env...)
}
return
}

View File

@ -30,7 +30,6 @@ type InitConfig struct {
Name string
EndpointAddr string
DockerAPI dockerclient.APIClient
DockerContext string
ContextStore store.Reader
BuildkitdFlags []string
Files map[string][]byte

24
go.mod
View File

@ -6,9 +6,9 @@ require (
github.com/Masterminds/semver/v3 v3.4.0
github.com/Microsoft/go-winio v0.6.2
github.com/aws/aws-sdk-go-v2/config v1.27.27
github.com/compose-spec/compose-go/v2 v2.8.1
github.com/compose-spec/compose-go/v2 v2.7.2-0.20250703132301-891fce532a51 // main
github.com/containerd/console v1.0.5
github.com/containerd/containerd/v2 v2.1.4
github.com/containerd/containerd/v2 v2.1.3
github.com/containerd/continuity v0.4.5
github.com/containerd/errdefs v1.0.0
github.com/containerd/log v0.1.0
@ -16,9 +16,9 @@ require (
github.com/creack/pty v1.1.24
github.com/davecgh/go-spew v1.1.1
github.com/distribution/reference v0.6.0
github.com/docker/cli v28.4.0+incompatible
github.com/docker/cli v28.3.2+incompatible
github.com/docker/cli-docs-tool v0.10.0
github.com/docker/docker v28.4.0+incompatible
github.com/docker/docker v28.3.2+incompatible
github.com/docker/go-units v0.5.0
github.com/gofrs/flock v0.12.1
github.com/google/go-dap v0.12.0
@ -29,7 +29,7 @@ require (
github.com/hashicorp/hcl/v2 v2.23.0
github.com/in-toto/in-toto-golang v0.9.0
github.com/mitchellh/hashstructure/v2 v2.0.2
github.com/moby/buildkit v0.24.0
github.com/moby/buildkit v0.23.0-rc1.0.20250618182037-9b91d20367db // master
github.com/moby/go-archive v0.1.0
github.com/moby/sys/atomicwriter v0.1.0
github.com/moby/sys/mountinfo v0.7.2
@ -43,8 +43,8 @@ require (
github.com/serialx/hashring v0.0.0-20200727003509-22c0c7ab6b1b
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.7
github.com/stretchr/testify v1.11.0
github.com/spf13/pflag v1.0.6
github.com/stretchr/testify v1.10.0
github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f
github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0
github.com/tonistiigi/jaeger-ui-rest v0.0.0-20250408171107-3dd17559e117
@ -55,9 +55,8 @@ require (
go.opentelemetry.io/otel/metric v1.35.0
go.opentelemetry.io/otel/sdk v1.35.0
go.opentelemetry.io/otel/trace v1.35.0
go.yaml.in/yaml/v3 v3.0.4
golang.org/x/mod v0.24.0
golang.org/x/sync v0.16.0
golang.org/x/sync v0.14.0
golang.org/x/sys v0.33.0
golang.org/x/term v0.31.0
golang.org/x/text v0.24.0
@ -65,6 +64,7 @@ require (
google.golang.org/grpc v1.72.2
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
google.golang.org/protobuf v1.36.6
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.32.3
k8s.io/apimachinery v0.32.3
k8s.io/client-go v0.32.3
@ -92,7 +92,7 @@ require (
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/ttrpc v1.2.7 // indirect
github.com/containerd/typeurl/v2 v2.2.3 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.9.3 // indirect
github.com/docker/go-connections v0.5.0 // indirect
@ -166,7 +166,6 @@ require (
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
@ -183,6 +182,3 @@ exclude (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
)
// restore junctions to have os.ModeSymlink flag set on Windows: https://github.com/docker/buildx/issues/3221
godebug winsymlink=0
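
The `godebug winsymlink=0` directive above pins the pre-Go 1.23 behavior in which Windows junctions are reported by `os.Lstat` with `os.ModeSymlink` set. A minimal sketch of the kind of check that relies on it (the path is hypothetical):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Hypothetical junction path. With `godebug winsymlink=0` in go.mod,
	// os.Lstat reports junctions with os.ModeSymlink set, as before Go 1.23.
	fi, err := os.Lstat(`C:\data\junction`)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("symlink bit set:", fi.Mode()&os.ModeSymlink != 0)
}
```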

36
go.sum
View File

@ -62,16 +62,16 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/compose-spec/compose-go/v2 v2.8.1 h1:27O4dzyhiS/UEUKp1zHOHCBWD1WbxGsYGMNNaSejTk4=
github.com/compose-spec/compose-go/v2 v2.8.1/go.mod h1:veko/VB7URrg/tKz3vmIAQDaz+CGiXH8vZsW79NmAww=
github.com/compose-spec/compose-go/v2 v2.7.2-0.20250703132301-891fce532a51 h1:AjI75N9METifYMZK7eNt8XIgY9Sryv+1w3XDA7X2vZQ=
github.com/compose-spec/compose-go/v2 v2.7.2-0.20250703132301-891fce532a51/go.mod h1:Zow/3eYNOnl2T4qLGZEizf8d/ht1qfy09G7WGOSzGOY=
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc=
github.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/containerd/api v1.9.0 h1:HZ/licowTRazus+wt9fM6r/9BQO7S0vD5lMcWspGIg0=
github.com/containerd/containerd/api v1.9.0/go.mod h1:GhghKFmTR3hNtyznBoQ0EMWr9ju5AqHjcZPsSpTKutI=
github.com/containerd/containerd/v2 v2.1.4 h1:/hXWjiSFd6ftrBOBGfAZ6T30LJcx1dBjdKEeI8xucKQ=
github.com/containerd/containerd/v2 v2.1.4/go.mod h1:8C5QV9djwsYDNhxfTCFjWtTBZrqjditQ4/ghHSYjnHM=
github.com/containerd/containerd/v2 v2.1.3 h1:eMD2SLcIQPdMlnlNF6fatlrlRLAeDaiGPGwmRKLZKNs=
github.com/containerd/containerd/v2 v2.1.3/go.mod h1:8C5QV9djwsYDNhxfTCFjWtTBZrqjditQ4/ghHSYjnHM=
github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
@ -95,9 +95,8 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq
github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
@ -109,15 +108,15 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY=
github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v28.3.2+incompatible h1:mOt9fcLE7zaACbxW1GeS65RI67wIJrTnqS3hP2huFsY=
github.com/docker/cli v28.3.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli-docs-tool v0.10.0 h1:bOD6mKynPQgojQi3s2jgcUWGp/Ebqy1SeCr9VfKQLLU=
github.com/docker/cli-docs-tool v0.10.0/go.mod h1:5EM5zPnT2E7yCLERZmrDA234Vwn09fzRHP4aX1qwp1U=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v28.4.0+incompatible h1:KVC7bz5zJY/4AZe/78BIvCnPsLaC9T/zh72xnlrTTOk=
github.com/docker/docker v28.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v28.3.2+incompatible h1:wn66NJ6pWB1vBZIilP8G3qQPqHy5XymfYn5vsqeA5oA=
github.com/docker/docker v28.3.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@ -255,8 +254,8 @@ github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZX
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/buildkit v0.24.0 h1:qYfTl7W1SIJzWDIDCcPT8FboHIZCYfi++wvySi3eyFE=
github.com/moby/buildkit v0.24.0/go.mod h1:4qovICAdR2H4C7+EGMRva5zgHW1gyhT4/flHI7F5F9k=
github.com/moby/buildkit v0.23.0-rc1.0.20250618182037-9b91d20367db h1:ZzrDuG9G1A/RwJvuogNplxCEKsIUQh1CqEnqbOGFgKE=
github.com/moby/buildkit v0.23.0-rc1.0.20250618182037-9b91d20367db/go.mod h1:v5jMDvQgUyidk3wu3NvVAAd5JJo83nfet9Gf/o0+EAQ=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
@ -362,9 +361,8 @@ github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@ -379,8 +377,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8=
github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323 h1:r0p7fK56l8WPequOaR3i9LBqfPtEdXIQbUTzT55iqT4=
@ -443,8 +441,6 @@ go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@ -470,8 +466,8 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

View File

@ -72,9 +72,9 @@ var bakeTests = []func(t *testing.T, sb integration.Sandbox){
testBakeMetadataWarningsDedup,
testBakeMultiExporters,
testBakeLoadPush,
testBakeListTargets,
testBakeListVariables,
testBakeListTypedVariables,
testListTargets,
testListVariables,
testListTypedVariables,
testBakeCallCheck,
testBakeCallCheckFlag,
testBakeCallMetadata,
@ -1691,7 +1691,7 @@ target "default" {
// TODO: test metadata file when supported by multi exporters https://github.com/docker/buildx/issues/2181
}
func testBakeListTargets(t *testing.T, sb integration.Sandbox) {
func testListTargets(t *testing.T, sb integration.Sandbox) {
bakefile := []byte(`
target "foo" {
description = "This builds foo"
@ -1714,7 +1714,7 @@ target "abc" {
require.Equal(t, "TARGET\tDESCRIPTION\nabc\t\nfoo\tThis builds foo", strings.TrimSpace(out))
}
func testBakeListVariables(t *testing.T, sb integration.Sandbox) {
func testListVariables(t *testing.T, sb integration.Sandbox) {
bakefile := []byte(`
variable "foo" {
default = "bar"
@ -1743,7 +1743,7 @@ target "default" {
require.Equal(t, "VARIABLE\tTYPE\tVALUE\tDESCRIPTION\nabc\t\t\t<null>\t\ndef\t\t\t\t\nfoo\t\t\tbar\tThis is foo", strings.TrimSpace(out))
}
func testBakeListTypedVariables(t *testing.T, sb integration.Sandbox) {
func testListTypedVariables(t *testing.T, sb integration.Sandbox) {
bakefile := []byte(`
variable "abc" {
type = string

View File

@ -76,9 +76,8 @@ var buildTests = []func(t *testing.T, sb integration.Sandbox){
testBuildSecret,
testBuildDefaultLoad,
testBuildCall,
testBuildCheckCallOutput,
testCheckCallOutput,
testBuildExtraHosts,
testBuildIndexAnnotationsLoadDocker,
}
func testBuild(t *testing.T, sb integration.Sandbox) {
@ -115,155 +114,28 @@ COPY --from=base /etc/bar /bar
}
func testBuildRemote(t *testing.T, sb integration.Sandbox) {
t.Run("default branch", func(t *testing.T) {
dockerfile := []byte(`
dockerfile := []byte(`
FROM busybox:latest
COPY foo /foo
`)
dir := tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foo"), 0600),
)
dirDest := t.TempDir()
dir := tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foo"), 0600),
)
dirDest := t.TempDir()
git, err := gitutil.New(gitutil.WithWorkingDir(dir))
require.NoError(t, err)
git, err := gitutil.New(gitutil.WithWorkingDir(dir))
require.NoError(t, err)
gittestutil.GitInit(git, t)
gittestutil.GitAdd(git, t, "Dockerfile", "foo")
gittestutil.GitCommit(git, t, "initial commit")
addr := gittestutil.GitServeHTTP(git, t)
gittestutil.GitInit(git, t)
gittestutil.GitAdd(git, t, "Dockerfile", "foo")
gittestutil.GitCommit(git, t, "initial commit")
addr := gittestutil.GitServeHTTP(git, t)
out, err := buildCmd(sb, withDir(dir), withArgs("--output=type=local,dest="+dirDest, addr))
require.NoError(t, err, out)
require.FileExists(t, filepath.Join(dirDest, "foo"))
})
t.Run("tag ref with url fragment", func(t *testing.T) {
dockerfile := []byte(`
FROM busybox:latest
COPY foo /foo
`)
dir := tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foo"), 0600),
)
dirDest := t.TempDir()
git, err := gitutil.New(gitutil.WithWorkingDir(dir))
require.NoError(t, err)
gittestutil.GitInit(git, t)
gittestutil.GitAdd(git, t, "Dockerfile", "foo")
gittestutil.GitCommit(git, t, "initial commit")
gittestutil.GitTag(git, t, "v0.1.0")
addr := gittestutil.GitServeHTTP(git, t)
addr = addr + "#v0.1.0" // tag
out, err := buildCmd(sb, withDir(dir), withArgs("--output=type=local,dest="+dirDest, addr))
require.NoError(t, err, out)
require.FileExists(t, filepath.Join(dirDest, "foo"))
})
t.Run("tag ref with query string", func(t *testing.T) {
dockerfile := []byte(`
FROM busybox:latest
COPY foo /foo
`)
dir := tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foo"), 0600),
)
dirDest := t.TempDir()
git, err := gitutil.New(gitutil.WithWorkingDir(dir))
require.NoError(t, err)
gittestutil.GitInit(git, t)
gittestutil.GitAdd(git, t, "Dockerfile", "foo")
gittestutil.GitCommit(git, t, "initial commit")
gittestutil.GitTag(git, t, "v0.1.0")
addr := gittestutil.GitServeHTTP(git, t)
addr = addr + "?tag=v0.1.0" // tag
out, err := buildCmd(sb, withDir(dir), withArgs("--output=type=local,dest="+dirDest, addr))
if matchesBuildKitVersion(t, sb, ">= 0.24.0-0") {
require.NoError(t, err, out)
require.FileExists(t, filepath.Join(dirDest, "foo"))
} else {
require.Error(t, err)
require.Contains(t, out, "current frontend does not support Git URLs with query string components")
}
})
t.Run("tag ref with query string frontend 1.17", func(t *testing.T) {
dockerfile := []byte(`
# syntax=docker/dockerfile:1.17
FROM busybox:latest
COPY foo /foo
`)
dir := tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foo"), 0600),
)
dirDest := t.TempDir()
git, err := gitutil.New(gitutil.WithWorkingDir(dir))
require.NoError(t, err)
gittestutil.GitInit(git, t)
gittestutil.GitAdd(git, t, "Dockerfile", "foo")
gittestutil.GitCommit(git, t, "initial commit")
gittestutil.GitTag(git, t, "v0.1.0")
addr := gittestutil.GitServeHTTP(git, t)
addr = addr + "?tag=v0.1.0" // tag
out, err := buildCmd(sb, withDir(dir), withArgs("--output=type=local,dest="+dirDest, addr))
if matchesBuildKitVersion(t, sb, ">= 0.24.0-0") {
require.NoError(t, err, out)
require.FileExists(t, filepath.Join(dirDest, "foo"))
} else {
require.Error(t, err)
require.Contains(t, out, "current frontend does not support Git URLs with query string components")
}
})
t.Run("tag ref with query string frontend 1.18.0", func(t *testing.T) {
dockerfile := []byte(`
# syntax=docker/dockerfile-upstream:1.18.0
FROM busybox:latest
COPY foo /foo
`)
dir := tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foo"), 0600),
)
dirDest := t.TempDir()
git, err := gitutil.New(gitutil.WithWorkingDir(dir))
require.NoError(t, err)
gittestutil.GitInit(git, t)
gittestutil.GitAdd(git, t, "Dockerfile", "foo")
gittestutil.GitCommit(git, t, "initial commit")
gittestutil.GitTag(git, t, "v0.1.0")
addr := gittestutil.GitServeHTTP(git, t)
addr = addr + "?tag=v0.1.0" // tag
out, err := buildCmd(sb, withDir(dir), withArgs("--output=type=local,dest="+dirDest, addr))
if matchesBuildKitVersion(t, sb, ">= 0.24.0-0") {
require.NoError(t, err, out)
require.FileExists(t, filepath.Join(dirDest, "foo"))
} else {
require.Error(t, err)
require.Contains(t, out, "current frontend does not support Git URLs with query string components")
}
})
out, err := buildCmd(sb, withDir(dir), withArgs("--output=type=local,dest="+dirDest, addr))
require.NoError(t, err, out)
require.FileExists(t, filepath.Join(dirDest, "foo"))
}
func testBuildLocalState(t *testing.T, sb integration.Sandbox) {
@ -1369,7 +1241,7 @@ COPy --from=base \
})
}
func testBuildCheckCallOutput(t *testing.T, sb integration.Sandbox) {
func testCheckCallOutput(t *testing.T, sb integration.Sandbox) {
t.Run("check for warning count msg in check without warnings", func(t *testing.T) {
dockerfile := []byte(`
FROM busybox AS base
@ -1469,17 +1341,6 @@ RUN cat /etc/hosts | grep myhostmulti | grep 162.242.195.82
require.NoError(t, err, string(out))
}
func testBuildIndexAnnotationsLoadDocker(t *testing.T, sb integration.Sandbox) {
if sb.DockerAddress() == "" {
t.Skip("only testing with docker available")
}
skipNoCompatBuildKit(t, sb, ">= 0.11.0-0", "annotations")
dir := createTestProject(t)
out, err := buildCmd(sb, withArgs("--annotation", "index:foo=bar", "--provenance", "false", "--output", "type=docker", dir))
require.Error(t, err, out)
require.Contains(t, out, "index annotations not supported for single platform export")
}
func createTestProject(t *testing.T) string {
dockerfile := []byte(`
FROM busybox:latest AS base

View File

@ -1,172 +0,0 @@
package tests
import (
"fmt"
"os"
"testing"
"github.com/containerd/continuity/fs/fstest"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/testutil"
"github.com/moby/buildkit/util/testutil/integration"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
var composeTests = []func(t *testing.T, sb integration.Sandbox){
testComposeBuildLocalStore,
testComposeBuildRegistry,
testComposeBuildMultiPlatform,
testComposeBuildCheck,
}
func testComposeBuildLocalStore(t *testing.T, sb integration.Sandbox) {
if !isDockerWorker(sb) && !isDockerContainerWorker(sb) {
t.Skip("only testing with docker and docker-container worker")
}
target := "buildx:local-" + identity.NewID()
dir := composeTestProject(target, t)
t.Cleanup(func() {
cmd := dockerCmd(sb, withArgs("image", "rm", target))
cmd.Stderr = os.Stderr
require.NoError(t, cmd.Run())
})
cmd := composeCmd(sb, withDir(dir), withArgs("build"))
out, err := cmd.CombinedOutput()
require.NoError(t, err, string(out))
cmd = dockerCmd(sb, withArgs("image", "inspect", target))
cmd.Stderr = os.Stderr
require.NoError(t, cmd.Run())
}
func testComposeBuildRegistry(t *testing.T, sb integration.Sandbox) {
registry, err := sb.NewRegistry()
if errors.Is(err, integration.ErrRequirements) {
t.Skip(err.Error())
}
require.NoError(t, err)
target := registry + "/buildx/registry:latest"
dir := composeTestProject(target, t)
cmd := composeCmd(sb, withDir(dir), withArgs("build", "--push"))
out, err := cmd.CombinedOutput()
require.NoError(t, err, string(out))
desc, provider, err := contentutil.ProviderFromRef(target)
require.NoError(t, err)
_, err = testutil.ReadImages(sb.Context(), provider, desc)
require.NoError(t, err)
}
func testComposeBuildMultiPlatform(t *testing.T, sb integration.Sandbox) {
registry, err := sb.NewRegistry()
if errors.Is(err, integration.ErrRequirements) {
t.Skip(err.Error())
}
require.NoError(t, err)
target := registry + "/buildx/registry:latest"
dockerfile := []byte(`
FROM busybox:latest
COPY foo /etc/foo
`)
composefile := fmt.Appendf([]byte{}, `
services:
bar:
build:
context: .
platforms:
- linux/amd64
- linux/arm64
image: %s
`, target)
dir := tmpdir(
t,
fstest.CreateFile("compose.yml", composefile, 0600),
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foo"), 0600),
)
cmd := composeCmd(sb, withDir(dir), withArgs("build", "--push"))
out, err := cmd.CombinedOutput()
if !isMobyWorker(sb) {
require.NoError(t, err, string(out))
desc, provider, err := contentutil.ProviderFromRef(target)
require.NoError(t, err)
imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
require.NoError(t, err)
img := imgs.Find("linux/amd64")
require.NotNil(t, img)
img = imgs.Find("linux/arm64")
require.NotNil(t, img)
} else {
require.Error(t, err, string(out))
require.Contains(t, string(out), "Multi-platform build is not supported")
}
}
func testComposeBuildCheck(t *testing.T, sb integration.Sandbox) {
dockerfile := []byte(`
frOM busybox as base
cOpy Dockerfile .
from scratch
COPy --from=base \
/Dockerfile \
/
`)
composefile := []byte(`
services:
bar:
build:
context: .
`)
dir := tmpdir(
t,
fstest.CreateFile("compose.yml", composefile, 0600),
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
cmd := composeCmd(sb, withDir(dir), withArgs("build", "--check"))
out, err := cmd.CombinedOutput()
require.Error(t, err, string(out))
require.Contains(t, string(out), "Check complete, 3 warnings have been found!")
}
func composeTestProject(imageName string, t *testing.T) string {
dockerfile := []byte(`
FROM busybox:latest AS base
COPY foo /etc/foo
RUN cp /etc/foo /etc/bar
FROM scratch
COPY --from=base /etc/bar /bar
`)
composefile := fmt.Appendf([]byte{}, `
services:
bar:
build:
context: .
image: %s
`, imageName)
return tmpdir(
t,
fstest.CreateFile("compose.yml", composefile, 0600),
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foo"), 0600),
)
}

View File

@ -1,51 +0,0 @@
package tests
import (
"testing"
"github.com/moby/buildkit/util/testutil/integration"
"github.com/stretchr/testify/require"
)
var diskusageTests = []func(t *testing.T, sb integration.Sandbox){
testDiskusage,
testDiskusageVerbose,
testDiskusageVerboseFormatError,
testDiskusageFormatJSON,
testDiskusageFormatGoTemplate,
}
func testDiskusage(t *testing.T, sb integration.Sandbox) {
buildTestProject(t, sb)
cmd := buildxCmd(sb, withArgs("du"))
out, err := cmd.Output()
require.NoError(t, err, string(out))
}
func testDiskusageVerbose(t *testing.T, sb integration.Sandbox) {
buildTestProject(t, sb)
cmd := buildxCmd(sb, withArgs("du", "--verbose"))
out, err := cmd.Output()
require.NoError(t, err, string(out))
}
func testDiskusageVerboseFormatError(t *testing.T, sb integration.Sandbox) {
buildTestProject(t, sb)
cmd := buildxCmd(sb, withArgs("du", "--verbose", "--format=json"))
out, err := cmd.Output()
require.Error(t, err, string(out))
}
func testDiskusageFormatJSON(t *testing.T, sb integration.Sandbox) {
buildTestProject(t, sb)
cmd := buildxCmd(sb, withArgs("du", "--format=json"))
out, err := cmd.Output()
require.NoError(t, err, string(out))
}
func testDiskusageFormatGoTemplate(t *testing.T, sb integration.Sandbox) {
buildTestProject(t, sb)
cmd := buildxCmd(sb, withArgs("du", "--format={{.ID}}: {{.Size}}"))
out, err := cmd.Output()
require.NoError(t, err, string(out))
}

View File

@ -20,7 +20,6 @@ var historyTests = []func(t *testing.T, sb integration.Sandbox){
testHistoryLs,
testHistoryRm,
testHistoryLsStoppedBuilder,
testHistoryBuildNameOverride,
}
func testHistoryExport(t *testing.T, sb integration.Sandbox) {
@ -137,45 +136,6 @@ func testHistoryLsStoppedBuilder(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err, string(bout))
}
func testHistoryBuildNameOverride(t *testing.T, sb integration.Sandbox) {
dir := createTestProject(t)
out, err := buildCmd(sb, withArgs("--build-arg=BUILDKIT_BUILD_NAME=foobar", "--metadata-file", filepath.Join(dir, "md.json"), dir))
require.NoError(t, err, string(out))
dt, err := os.ReadFile(filepath.Join(dir, "md.json"))
require.NoError(t, err)
type mdT struct {
BuildRef string `json:"buildx.build.ref"`
}
var md mdT
err = json.Unmarshal(dt, &md)
require.NoError(t, err)
refParts := strings.Split(md.BuildRef, "/")
require.Len(t, refParts, 3)
cmd := buildxCmd(sb, withArgs("history", "ls", "--filter=ref="+refParts[2], "--format=json"))
bout, err := cmd.Output()
require.NoError(t, err, string(bout))
type recT struct {
Ref string `json:"ref"`
Name string `json:"name"`
Status string `json:"status"`
CreatedAt *time.Time `json:"created_at"`
CompletedAt *time.Time `json:"completed_at"`
TotalSteps int32 `json:"total_steps"`
CompletedSteps int32 `json:"completed_steps"`
CachedSteps int32 `json:"cached_steps"`
}
var rec recT
err = json.Unmarshal(bout, &rec)
require.NoError(t, err)
require.Equal(t, md.BuildRef, rec.Ref)
require.Equal(t, "foobar", rec.Name)
}
type buildRef struct {
Builder string
Node string

View File

@ -75,30 +75,6 @@ func buildxCmd(sb integration.Sandbox, opts ...cmdOpt) *exec.Cmd {
return cmd
}
func composeCmd(sb integration.Sandbox, opts ...cmdOpt) *exec.Cmd {
cmd := exec.Command("compose")
cmd.Env = os.Environ()
for _, opt := range opts {
opt(cmd)
}
if builder := sb.Address(); builder != "" {
cmd.Env = append(cmd.Env,
"BUILDX_CONFIG="+buildxConfig(sb),
"BUILDX_BUILDER="+builder,
)
}
if context := sb.DockerAddress(); context != "" {
cmd.Env = append(cmd.Env, "DOCKER_CONTEXT="+context)
}
if v := os.Getenv("GO_TEST_COVERPROFILE"); v != "" {
coverDir := filepath.Join(filepath.Dir(v), "helpers")
cmd.Env = append(cmd.Env, "GOCOVERDIR="+coverDir)
}
cmd.Env = append(cmd.Env, "COMPOSE_BAKE=true")
return cmd
}
func dockerCmd(sb integration.Sandbox, opts ...cmdOpt) *exec.Cmd {
cmd := exec.Command("docker")
cmd.Env = os.Environ()

View File

@ -32,8 +32,6 @@ func TestIntegration(t *testing.T) {
tests = append(tests, createTests...)
tests = append(tests, rmTests...)
tests = append(tests, dialstdioTests...)
tests = append(tests, composeTests...)
tests = append(tests, diskusageTests...)
testIntegration(t, tests...)
}
@ -49,7 +47,6 @@ func testIntegration(t *testing.T, funcs ...func(t *testing.T, sb integration.Sa
}
}
mirroredImages["moby/buildkit:buildx-stable-1"] = buildkitImage
mirroredImages["docker/dockerfile-upstream:1.18.0"] = "docker.io/docker/dockerfile-upstream:1.18.0"
mirrors := integration.WithMirroredImages(mirroredImages)
tests := integration.TestFuncs(funcs...)

View File

@ -25,7 +25,7 @@ import (
"strings"
"github.com/sirupsen/logrus"
"go.yaml.in/yaml/v3"
"gopkg.in/yaml.v3"
"github.com/compose-spec/compose-go/v2/consts"
"github.com/compose-spec/compose-go/v2/dotenv"

View File

@ -56,7 +56,7 @@ func GetEnvFromFile(currentEnv map[string]string, filenames []string) (map[strin
return envMap, err
}
err = parseWithLookup(bytes.NewReader(b), envMap, func(k string) (string, bool) {
env, err := ParseWithLookup(bytes.NewReader(b), func(k string) (string, bool) {
v, ok := currentEnv[k]
if ok {
return v, true
@ -67,6 +67,9 @@ func GetEnvFromFile(currentEnv map[string]string, filenames []string) (map[strin
if err != nil {
return envMap, fmt.Errorf("failed to read %s: %w", dotEnvFile, err)
}
for k, v := range env {
envMap[k] = v
}
}
return envMap, nil
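
A usage sketch of the `ParseWithLookup` call shown above, assuming `github.com/compose-spec/compose-go/v2/dotenv` is available: lookups fall back from the process environment to previously accumulated values, and later files override earlier keys.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/compose-spec/compose-go/v2/dotenv"
)

func main() {
	currentEnv := map[string]string{"USER": "alice"}
	envMap := map[string]string{}

	// Resolve ${...} references against the real environment first, then
	// against values accumulated from earlier files — the same precedence
	// as the lookup function above.
	env, err := dotenv.ParseWithLookup(strings.NewReader(`GREETING="hello ${USER}"`+"\n"), func(k string) (string, bool) {
		if v, ok := currentEnv[k]; ok {
			return v, true
		}
		v, ok := envMap[k]
		return v, ok
	})
	if err != nil {
		panic(err)
	}
	for k, v := range env {
		envMap[k] = v // later files override earlier ones
	}
	fmt.Println(envMap) // map[GREETING:hello alice]
}
```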

View File

@ -43,7 +43,7 @@ import (
"github.com/compose-spec/compose-go/v2/validation"
"github.com/go-viper/mapstructure/v2"
"github.com/sirupsen/logrus"
"go.yaml.in/yaml/v3"
"gopkg.in/yaml.v3"
)
// Options supported by Load

View File

@ -22,7 +22,7 @@ import (
"strings"
"github.com/compose-spec/compose-go/v2/tree"
"go.yaml.in/yaml/v3"
"gopkg.in/yaml.v3"
)
type ResetProcessor struct {

View File

@ -123,8 +123,6 @@
"no_cache": {"type": ["boolean", "string"], "description": "Do not use cache when building the image."},
"additional_contexts": {"$ref": "#/definitions/list_or_dict", "description": "Additional build contexts to use, specified as a map of name to context path or URL."},
"network": {"type": "string", "description": "Network mode to use for the build. Options include 'default', 'none', 'host', or a network name."},
"provenance": {"type": ["string","boolean"], "description": "Add a provenance attestation"},
"sbom": {"type": ["string","boolean"], "description": "Add a SBOM attestation"},
"pull": {"type": ["boolean", "string"], "description": "Always attempt to pull a newer version of the image."},
"target": {"type": "string", "description": "Build stage to target in a multi-stage Dockerfile."},
"shm_size": {"type": ["integer", "string"], "description": "Size of /dev/shm for the build container. A string value can use suffix like '2g' for 2 gigabytes."},
@ -208,8 +206,7 @@
},
"container_name": {
"type": "string",
"description": "Specify a custom container name, rather than a generated default name.",
"pattern": "[a-zA-Z0-9][a-zA-Z0-9_.-]+"
"description": "Specify a custom container name, rather than a generated default name."
},
"cpu_count": {
"oneOf": [

View File

@ -57,9 +57,10 @@ func recurseExtract(value interface{}, pattern *regexp.Regexp) map[string]Variab
case []interface{}:
for _, elem := range value {
submap := recurseExtract(elem, pattern)
for key, value := range submap {
m[key] = value
if values, is := extractVariable(elem, pattern); is {
for _, v := range values {
m[v.Name] = v
}
}
}
}
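
The hunk above tweaks how interpolation variables are collected from nested values. A simplified, stdlib-only sketch of that recursion follows; the regexp and helper name are illustrative, not compose-go's actual pattern:

```go
package main

import (
	"fmt"
	"regexp"
)

// collectVars walks nested maps and slices (as decoded from YAML) and
// records every ${NAME} occurrence it finds — a simplified version of the
// recursion in extract.go above.
func collectVars(value any, pattern *regexp.Regexp, out map[string]struct{}) {
	switch v := value.(type) {
	case string:
		for _, m := range pattern.FindAllStringSubmatch(v, -1) {
			out[m[1]] = struct{}{}
		}
	case map[string]any:
		for _, elem := range v {
			collectVars(elem, pattern, out)
		}
	case []any:
		for _, elem := range v {
			collectVars(elem, pattern, out)
		}
	}
}

func main() {
	pattern := regexp.MustCompile(`\$\{([A-Za-z_][A-Za-z0-9_]*)\}`) // illustrative pattern
	vars := map[string]struct{}{}
	collectVars(map[string]any{
		"services": map[string]any{
			"app": map[string]any{
				"image":       "repo/app:${TAG}",
				"environment": []any{"MODE=${MODE}"},
			},
		},
	}, pattern, vars)
	fmt.Println(vars)
}
```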

View File

@ -17,21 +17,16 @@
package transform
import (
"fmt"
"github.com/compose-spec/compose-go/v2/tree"
)
// Func is a function that can transform data at a specific path
type Func func(data any, p tree.Path, ignoreParseError bool) (any, error)
type transformFunc func(data any, p tree.Path, ignoreParseError bool) (any, error)
var transformers = map[tree.Path]Func{}
var transformers = map[tree.Path]transformFunc{}
func init() {
transformers["services.*"] = transformService
transformers["services.*.build.secrets.*"] = transformFileMount
transformers["services.*.build.provenance"] = transformStringOrX
transformers["services.*.build.sbom"] = transformStringOrX
transformers["services.*.build.additional_contexts"] = transformKeyValue
transformers["services.*.depends_on"] = transformDependsOn
transformers["services.*.env_file"] = transformEnvFile
@ -126,12 +121,3 @@ func transformMapping(v map[string]any, p tree.Path, ignoreParseError bool) (map
}
return v, nil
}
func transformStringOrX(data any, _ tree.Path, _ bool) (any, error) {
switch v := data.(type) {
case string:
return v, nil
default:
return fmt.Sprint(v), nil
}
}

View File

@ -20,20 +20,14 @@ import (
"github.com/compose-spec/compose-go/v2/tree"
)
// DefaultValues contains the default value transformers for compose fields
var DefaultValues = map[tree.Path]Func{}
var defaultValues = map[tree.Path]transformFunc{}
func init() {
DefaultValues["services.*.build"] = defaultBuildContext
DefaultValues["services.*.secrets.*"] = defaultSecretMount
DefaultValues["services.*.ports.*"] = portDefaults
DefaultValues["services.*.deploy.resources.reservations.devices.*"] = deviceRequestDefaults
DefaultValues["services.*.gpus.*"] = deviceRequestDefaults
}
// RegisterDefaultValue registers a custom transformer for the given path pattern
func RegisterDefaultValue(path string, transformer Func) {
DefaultValues[tree.Path(path)] = transformer
defaultValues["services.*.build"] = defaultBuildContext
defaultValues["services.*.secrets.*"] = defaultSecretMount
defaultValues["services.*.ports.*"] = portDefaults
defaultValues["services.*.deploy.resources.reservations.devices.*"] = deviceRequestDefaults
defaultValues["services.*.gpus.*"] = deviceRequestDefaults
}
// SetDefaultValues transforms a compose model to set default values to missing attributes
@ -46,7 +40,7 @@ func SetDefaultValues(yaml map[string]any) (map[string]any, error) {
}
func setDefaults(data any, p tree.Path) (any, error) {
for pattern, transformer := range DefaultValues {
for pattern, transformer := range defaultValues {
if p.Matches(pattern) {
t, err := transformer(data, p, false)
if err != nil {

View File

@ -34,7 +34,7 @@ import "github.com/mattn/go-shellwords"
// preserved so that it can override any base value (e.g. container entrypoint).
//
// The different semantics between YAML and JSON are due to limitations with
// JSON marshaling + `omitempty` in the Go stdlib, while go.yaml.in/yaml/v3 gives
// JSON marshaling + `omitempty` in the Go stdlib, while gopkg.in/yaml.v3 gives
// us more flexibility via the yaml.IsZeroer interface.
//
// In the future, it might make sense to make fields of this type be
@ -58,7 +58,7 @@ func (s ShellCommand) IsZero() bool {
// accurately if the `omitempty` struct tag is omitted/forgotten.
//
// A similar MarshalJSON() implementation is not needed because the Go stdlib
// already serializes nil slices to `null`, whereas go.yaml.in/yaml/v3 by default
// already serializes nil slices to `null`, whereas gopkg.in/yaml.v3 by default
// serializes nil slices to `[]`.
func (s ShellCommand) MarshalYAML() (interface{}, error) {
if s == nil {

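
The comments above hinge on a marshaling asymmetry that is easy to demonstrate: encoding/json renders a nil slice as `null`, while yaml.v3 (under either import path) renders it as `[]` unless a custom `MarshalYAML`, such as `ShellCommand`'s, returns nil for the nil case. A small sketch with a made-up struct:

```go
package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/yaml.v3"
)

type svc struct {
	Entrypoint []string `yaml:"entrypoint" json:"entrypoint"`
}

func main() {
	j, _ := json.Marshal(svc{}) // nil slice -> {"entrypoint":null}
	y, _ := yaml.Marshal(svc{}) // nil slice -> entrypoint: []
	fmt.Println(string(j))
	fmt.Print(string(y))
}
```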
View File

@ -875,8 +875,6 @@ func deriveDeepCopy_6(dst, src *BuildConfig) {
} else {
dst.Args = nil
}
dst.Provenance = src.Provenance
dst.SBOM = src.SBOM
if src.SSH == nil {
dst.SSH = nil
} else {

View File

@ -95,7 +95,7 @@ func (m *MappingWithEquals) DecodeMapstructure(value interface{}) error {
mapping := make(MappingWithEquals, len(v))
for _, s := range v {
k, e, ok := strings.Cut(fmt.Sprint(s), "=")
if k != "" && unicode.IsSpace(rune(k[len(k)-1])) {
if unicode.IsSpace(rune(k[len(k)-1])) {
return fmt.Errorf("environment variable %s is declared with a trailing space", k)
}
if !ok {

View File

@ -32,8 +32,8 @@ import (
"github.com/compose-spec/compose-go/v2/utils"
"github.com/distribution/reference"
godigest "github.com/opencontainers/go-digest"
"go.yaml.in/yaml/v3"
"golang.org/x/sync/errgroup"
"gopkg.in/yaml.v3"
)
// Project is the result of loading a set of compose files
@ -118,16 +118,6 @@ func (p *Project) ConfigNames() []string {
return names
}
// ModelNames return names for all models in this Compose config
func (p *Project) ModelNames() []string {
var names []string
for k := range p.Models {
names = append(names, k)
}
sort.Strings(names)
return names
}
func (p *Project) ServicesWithBuild() []string {
servicesBuild := p.Services.Filter(func(s ServiceConfig) bool {
return s.Build != nil && s.Build.Context != ""
@ -149,11 +139,6 @@ func (p *Project) ServicesWithDependsOn() []string {
return slices.Collect(maps.Keys(servicesDependsOn))
}
func (p *Project) ServicesWithModels() []string {
servicesModels := p.Services.Filter(func(s ServiceConfig) bool { return len(s.Models) > 0 })
return slices.Collect(maps.Keys(servicesModels))
}
func (p *Project) ServicesWithCapabilities() ([]string, []string, []string) {
capabilities := []string{}
gpu := []string{}

View File

@ -309,8 +309,6 @@ type BuildConfig struct {
DockerfileInline string `yaml:"dockerfile_inline,omitempty" json:"dockerfile_inline,omitempty"`
Entitlements []string `yaml:"entitlements,omitempty" json:"entitlements,omitempty"`
Args MappingWithEquals `yaml:"args,omitempty" json:"args,omitempty"`
Provenance string `yaml:"provenance,omitempty" json:"provenance,omitempty"`
SBOM string `yaml:"sbom,omitempty" json:"sbom,omitempty"`
SSH SSHConfig `yaml:"ssh,omitempty" json:"ssh,omitempty"`
Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"`
CacheFrom StringList `yaml:"cache_from,omitempty" json:"cache_from,omitempty"`

View File

@ -474,18 +474,7 @@ func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string,
return nil, err
}
body := &fnOnClose{
BeforeClose: func() {
r.Release(1)
},
ReadCloser: resp.Body,
}
defer func() {
if retErr != nil {
body.Close()
}
}()
body := resp.Body
encoding := strings.FieldsFunc(resp.Header.Get("Content-Encoding"), func(r rune) bool {
return r == ' ' || r == '\t' || r == ','
})
@ -516,33 +505,29 @@ func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string,
for i := range numChunks {
readers[i], writers[i] = newPipeWriter(bufPool)
}
// keep reference of the initial body value to ensure it is closed
ibody := body
go func() {
for i := range numChunks {
select {
case queue <- i:
case <-done:
if i == 0 {
ibody.Close()
}
return // avoid leaking a goroutine if we exit early.
}
}
close(queue)
}()
r.Release(1)
for range parallelism {
go func() {
for i := range queue { // first in first out
copy := func() error {
if err := r.Acquire(ctx, 1); err != nil {
return err
}
defer r.Release(1)
var body io.ReadCloser
if i == 0 {
body = ibody
body = resp.Body
} else {
if err := r.Acquire(ctx, 1); err != nil {
return err
}
defer r.Release(1)
reqClone := req.clone()
reqClone.setOffset(offset + i*chunkSize)
nresp, err := reqClone.doWithRetries(ctx, lastHost, withErrorCheck)
@ -579,27 +564,32 @@ func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string,
},
ReadCloser: io.NopCloser(io.MultiReader(readers...)),
}
} else {
body = &fnOnClose{
BeforeClose: func() {
r.Release(1)
},
ReadCloser: body,
}
}
for i := len(encoding) - 1; i >= 0; i-- {
algorithm := strings.ToLower(encoding[i])
switch algorithm {
case "zstd":
r, err := zstd.NewReader(body.ReadCloser,
r, err := zstd.NewReader(body,
zstd.WithDecoderLowmem(false),
)
if err != nil {
return nil, err
}
body.ReadCloser = r.IOReadCloser()
body = r.IOReadCloser()
case "gzip":
r, err := gzip.NewReader(body.ReadCloser)
body, err = gzip.NewReader(body)
if err != nil {
return nil, err
}
body.ReadCloser = r
case "deflate":
body.ReadCloser = flate.NewReader(body.ReadCloser)
body = flate.NewReader(body)
case "identity", "":
// no content-encoding applied, use raw body
default:

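
The tail of this hunk walks the Content-Encoding list right-to-left and wraps the body in matching decoders. A stdlib-only sketch of that chain (zstd omitted here; the real fetcher uses `github.com/klauspost/compress/zstd` for it):

```go
package main

import (
	"bytes"
	"compress/flate"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

// wrapEncodings layers decoders over body for a Content-Encoding value such
// as "deflate, gzip", applied in reverse order like the loop above.
func wrapEncodings(body io.ReadCloser, contentEncoding string) (io.ReadCloser, error) {
	encodings := strings.FieldsFunc(contentEncoding, func(r rune) bool {
		return r == ' ' || r == '\t' || r == ','
	})
	for i := len(encodings) - 1; i >= 0; i-- {
		switch strings.ToLower(encodings[i]) {
		case "gzip":
			gr, err := gzip.NewReader(body)
			if err != nil {
				return nil, err
			}
			body = gr
		case "deflate":
			body = flate.NewReader(body)
		case "identity", "":
			// no content-encoding applied, use raw body
		default:
			return nil, fmt.Errorf("unsupported content encoding %q", encodings[i])
		}
	}
	return body, nil
}

func main() {
	var buf bytes.Buffer
	gw := gzip.NewWriter(&buf)
	_, _ = gw.Write([]byte("hello"))
	_ = gw.Close()

	rc, err := wrapEncodings(io.NopCloser(&buf), "gzip")
	if err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(rc)
	fmt.Println(string(out)) // hello
}
```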
View File

@ -24,7 +24,7 @@ var (
Package = "github.com/containerd/containerd/v2"
// Version holds the complete version number. Filled in at linking time.
Version = "2.1.4+unknown"
Version = "2.1.3+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.

View File

@ -1,4 +1,3 @@
// Package md2man aims in converting markdown into roff (man pages).
package md2man
import (

View File

@ -47,13 +47,13 @@ const (
tableStart = "\n.TS\nallbox;\n"
tableEnd = ".TE\n"
tableCellStart = "T{\n"
tableCellEnd = "\nT}"
tableCellEnd = "\nT}\n"
tablePreprocessor = `'\" t`
)
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
// from markdown
func NewRoffRenderer() *roffRenderer {
func NewRoffRenderer() *roffRenderer { // nolint: golint
return &roffRenderer{}
}
@ -316,8 +316,9 @@ func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, ente
} else if nodeLiteralSize(node) > 30 {
end = tableCellEnd
}
if node.Next == nil {
// Last cell: need to carriage return if we are at the end of the header row.
if node.Next == nil && end != tableCellEnd {
// Last cell: need to carriage return if we are at the end of the
// header row and content isn't wrapped in a "tablecell"
end += crTag
}
out(w, end)
@ -355,7 +356,7 @@ func countColumns(node *blackfriday.Node) int {
}
func out(w io.Writer, output string) {
io.WriteString(w, output) //nolint:errcheck
io.WriteString(w, output) // nolint: errcheck
}
func escapeSpecialChars(w io.Writer, text []byte) {
@ -394,7 +395,7 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) {
i++
}
if i > org {
w.Write(text[org:i]) //nolint:errcheck
w.Write(text[org:i]) // nolint: errcheck
}
// escape a character
@ -402,7 +403,7 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) {
break
}
w.Write([]byte{'\\', text[i]}) //nolint:errcheck
w.Write([]byte{'\\', text[i]}) // nolint: errcheck
}
}
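
For context, a usage sketch of the renderer this file implements, assuming the `github.com/cpuguy83/go-md2man/v2/md2man` package; the markdown table exercises the `.TS`/`.TE` macros and `T{`/`T}` cell delimiters whose handling is adjusted above:

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// Render a small markdown table to roff; long cells are wrapped in
	// T{ ... T} blocks by the table-cell logic shown in roff.go.
	doc := []byte("# demo 1\n\n| Flag | Description |\n|---|---|\n| --debug | enable verbose debug output so the cell is long enough to wrap |\n")
	fmt.Println(string(md2man.Render(doc)))
}
```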

View File

@ -175,24 +175,11 @@ func newPluginCommand(dockerCli *command.DockerCli, plugin *cobra.Command, meta
newMetadataSubcommand(plugin, meta),
)
visitAll(cmd,
// prevent adding "[flags]" to the end of the usage line.
func(c *cobra.Command) { c.DisableFlagsInUseLine = true },
)
cli.DisableFlagsInUseLine(cmd)
return cli.NewTopLevelCommand(cmd, dockerCli, opts, cmd.Flags())
}
// visitAll traverses all commands from the root.
func visitAll(root *cobra.Command, fns ...func(*cobra.Command)) {
for _, cmd := range root.Commands() {
visitAll(cmd, fns...)
}
for _, fn := range fns {
fn(root)
}
}
func newMetadataSubcommand(plugin *cobra.Command, meta metadata.Metadata) *cobra.Command {
if meta.ShortDescription == "" {
meta.ShortDescription = plugin.Short

View File

@ -168,30 +168,34 @@ func (tcmd *TopLevelCommand) Initialize(ops ...command.CLIOption) error {
}
// VisitAll will traverse all commands from the root.
//
// Deprecated: this utility was only used internally and will be removed in the next release.
// This is different from the VisitAll of cobra.Command where only parents
// are checked.
func VisitAll(root *cobra.Command, fn func(*cobra.Command)) {
visitAll(root, fn)
}
func visitAll(root *cobra.Command, fn func(*cobra.Command)) {
for _, cmd := range root.Commands() {
visitAll(cmd, fn)
VisitAll(cmd, fn)
}
fn(root)
}
// DisableFlagsInUseLine sets the DisableFlagsInUseLine flag on all
// commands within the tree rooted at cmd.
//
// Deprecated: this utility was only used internally and will be removed in the next release.
func DisableFlagsInUseLine(cmd *cobra.Command) {
visitAll(cmd, func(ccmd *cobra.Command) {
VisitAll(cmd, func(ccmd *cobra.Command) {
// do not add a `[flags]` to the end of the usage line.
ccmd.DisableFlagsInUseLine = true
})
}
// HasCompletionArg returns true if a cobra completion arg request is found.
func HasCompletionArg(args []string) bool {
for _, arg := range args {
if arg == cobra.ShellCompRequestCmd || arg == cobra.ShellCompNoDescRequestCmd {
return true
}
}
return false
}
var helpCommand = &cobra.Command{
Use: "help [command]",
Short: "Help about the command",

View File

@ -282,17 +282,6 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...CLIOption)
}
filterResourceAttributesEnvvar()
// early return if GODEBUG is already set or the docker context is
// the default context, i.e. is a virtual context where we won't override
// any GODEBUG values.
if v := os.Getenv("GODEBUG"); cli.currentContext == DefaultContextName || v != "" {
return nil
}
meta, err := cli.contextStore.GetMetadata(cli.currentContext)
if err == nil {
setGoDebug(meta)
}
return nil
}
@ -486,57 +475,6 @@ func (cli *DockerCli) getDockerEndPoint() (ep docker.Endpoint, err error) {
return resolveDockerEndpoint(cli.contextStore, cn)
}
// setGoDebug is an escape hatch that sets the GODEBUG environment
// variable value using docker context metadata.
//
// {
// "Name": "my-context",
// "Metadata": { "GODEBUG": "x509negativeserial=1" }
// }
//
// WARNING: Setting x509negativeserial=1 allows Go's x509 library to accept
// X.509 certificates with negative serial numbers.
// This behavior is deprecated and non-compliant with current security
// standards (RFC 5280). Accepting negative serial numbers can introduce
// serious security vulnerabilities, including the risk of certificate
// collision or bypass attacks.
// This option should only be used for legacy compatibility and never in
// production environments.
// Use at your own risk.
func setGoDebug(meta store.Metadata) {
fieldName := "GODEBUG"
godebugEnv := os.Getenv(fieldName)
// early return if GODEBUG is already set. We don't want to override what
// the user already sets.
if godebugEnv != "" {
return
}
var cfg any
var ok bool
switch m := meta.Metadata.(type) {
case DockerContext:
cfg, ok = m.AdditionalFields[fieldName]
if !ok {
return
}
case map[string]any:
cfg, ok = m[fieldName]
if !ok {
return
}
default:
return
}
v, ok := cfg.(string)
if !ok {
return
}
// set the GODEBUG environment variable with whatever was in the context
_ = os.Setenv(fieldName, v)
}
func (cli *DockerCli) initialize() error {
cli.init.Do(func() {
cli.dockerEndpoint, cli.initErr = cli.getDockerEndPoint()

View File

@ -7,6 +7,7 @@ import (
"time"
"github.com/docker/docker/api/types/build"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/go-units"
)
@ -114,7 +115,7 @@ func (c *buildCacheContext) MarshalJSON() ([]byte, error) {
func (c *buildCacheContext) ID() string {
id := c.v.ID
if c.trunc {
id = TruncateID(c.v.ID)
id = stringid.TruncateID(c.v.ID)
}
if c.v.InUse {
return id + "*"
@ -130,7 +131,7 @@ func (c *buildCacheContext) Parent() string {
parent = c.v.Parent //nolint:staticcheck // Ignore SA1019: Field was deprecated in API v1.42, but kept for backward compatibility
}
if c.trunc {
return TruncateID(parent)
return stringid.TruncateID(parent)
}
return parent
}

View File

@ -14,6 +14,7 @@ import (
"github.com/containerd/platforms"
"github.com/distribution/reference"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/go-units"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
@ -134,7 +135,7 @@ func (c *ContainerContext) MarshalJSON() ([]byte, error) {
// option being set, the full or truncated ID is returned.
func (c *ContainerContext) ID() string {
if c.trunc {
return TruncateID(c.c.ID)
return stringid.TruncateID(c.c.ID)
}
return c.c.ID
}
@ -171,7 +172,7 @@ func (c *ContainerContext) Image() string {
return "<no image>"
}
if c.trunc {
if trunc := TruncateID(c.c.ImageID); trunc == TruncateID(c.c.Image) {
if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) {
return trunc
}
// truncate digest if no-trunc option was not selected

View File

@ -11,7 +11,7 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/volume"
"github.com/docker/go-units"
units "github.com/docker/go-units"
)
const (

Some files were not shown because too many files have changed in this diff.