mirror of https://github.com/docker/buildx.git
Merge a9584cc532 into a04b7d8689
commit 9e58a56b36
251 build/build.go
@@ -138,75 +138,57 @@ func filterAvailableNodes(nodes []builder.Node) ([]builder.Node, error) {
return nil, err
}

func toRepoOnly(in string) (string, error) {
m := map[string]struct{}{}
p := strings.Split(in, ",")
for _, pp := range p {
n, err := reference.ParseNormalizedNamed(pp)
if err != nil {
return "", err
}
m[n.Name()] = struct{}{}
}
out := make([]string, 0, len(m))
for k := range m {
out = append(out, k)
}
return strings.Join(out, ","), nil
}

func Build(ctx context.Context, nodes []builder.Node, opts map[string]Options, docker *dockerutil.Client, cfg *confutil.Config, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
return BuildWithResultHandler(ctx, nodes, opts, docker, cfg, w, nil)
}

func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[string]Options, docker *dockerutil.Client, cfg *confutil.Config, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, err error) {
if len(nodes) == 0 {
return nil, errors.Errorf("driver required for build")
}

nodes, err = filterAvailableNodes(nodes)
if err != nil {
return nil, errors.Wrapf(err, "no valid drivers found")
}

var noMobyDriver *driver.DriverHandle
// findNonMobyDriver returns the first non-moby based driver.
func findNonMobyDriver(nodes []builder.Node) *driver.DriverHandle {
for _, n := range nodes {
if !n.Driver.IsMobyDriver() {
noMobyDriver = n.Driver
break
return n.Driver
}
}
return nil
}

// warnOnNoOutput will check if the given nodes and options would result in an output
// and prints a warning if it would not.
func warnOnNoOutput(ctx context.Context, nodes []builder.Node, opts map[string]Options) {
// Return immediately if default load is explicitly disabled or a call
// function is used.
if noDefaultLoad() || !noCallFunc(opts) {
return
}

// Find the first non-moby driver and return if it either doesn't exist
// or if the driver has default load enabled.
noMobyDriver := findNonMobyDriver(nodes)
if noMobyDriver == nil || noMobyDriver.Features(ctx)[driver.DefaultLoad] {
return
}

// Produce a warning describing the targets affected.
var noOutputTargets []string
for name, opt := range opts {
if !opt.Linked && len(opt.Exports) == 0 {
noOutputTargets = append(noOutputTargets, name)
}
}

if noMobyDriver != nil && !noDefaultLoad() && noCallFunc(opts) {
var noOutputTargets []string
for name, opt := range opts {
if noMobyDriver.Features(ctx)[driver.DefaultLoad] {
continue
}

if !opt.Linked && len(opt.Exports) == 0 {
noOutputTargets = append(noOutputTargets, name)
}
}
if len(noOutputTargets) > 0 {
var warnNoOutputBuf bytes.Buffer
warnNoOutputBuf.WriteString("No output specified ")
if len(noOutputTargets) == 1 && noOutputTargets[0] == "default" {
warnNoOutputBuf.WriteString(fmt.Sprintf("with %s driver", noMobyDriver.Factory().Name()))
} else {
warnNoOutputBuf.WriteString(fmt.Sprintf("for %s target(s) with %s driver", strings.Join(noOutputTargets, ", "), noMobyDriver.Factory().Name()))
}
logrus.Warnf("%s. Build result will only remain in the build cache. To push result image into registry use --push or to load image into docker use --load", warnNoOutputBuf.String())
}
if len(noOutputTargets) == 0 {
return
}

drivers, err := resolveDrivers(ctx, nodes, opts, w)
if err != nil {
return nil, err
var warnNoOutputBuf bytes.Buffer
warnNoOutputBuf.WriteString("No output specified ")
if len(noOutputTargets) == 1 && noOutputTargets[0] == "default" {
warnNoOutputBuf.WriteString(fmt.Sprintf("with %s driver", noMobyDriver.Factory().Name()))
} else {
warnNoOutputBuf.WriteString(fmt.Sprintf("for %s target(s) with %s driver", strings.Join(noOutputTargets, ", "), noMobyDriver.Factory().Name()))
}
logrus.Warnf("%s. Build result will only remain in the build cache. To push result image into registry use --push or to load image into docker use --load", warnNoOutputBuf.String())
}

func newBuildRequests(ctx context.Context, docker *dockerutil.Client, cfg *confutil.Config, drivers map[string][]*resolvedNode, w progress.Writer, opts map[string]Options) (map[string][]*reqForNode, func(), error) {
reqForNodes := make(map[string][]*reqForNode)
eg, ctx := errgroup.WithContext(ctx)
var releasers []func()

for k, opt := range opts {
multiDriver := len(drivers[k]) > 1

@@ -226,17 +208,17 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
opt.Platforms = np.platforms
gatewayOpts, err := np.BuildOpts(ctx)
if err != nil {
return nil, err
return nil, nil, err
}
localOpt := opt
so, release, err := toSolveOpt(ctx, np.Node(), multiDriver, &localOpt, gatewayOpts, cfg, w, docker)
opts[k] = localOpt
if err != nil {
return nil, err
return nil, nil, err
}
defer release()
releasers = append(releasers, release)
if err := saveLocalState(so, k, opt, np.Node(), cfg); err != nil {
return nil, err
return nil, nil, err
}
addGitAttrs(so)
reqn = append(reqn, &reqForNode{

@@ -261,7 +243,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
for _, e := range np.so.Exports {
if e.Type == "moby" {
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
return nil, errors.Errorf("multi-node push can't currently be performed with the docker driver, please switch to a different driver")
return nil, nil, errors.Errorf("multi-node push can't currently be performed with the docker driver, please switch to a different driver")
}
}
}

@@ -269,7 +251,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
}
}

// validate that all links between targets use same drivers
release := func() {
for _, fn := range releasers {
fn()
}
}
return reqForNodes, release, nil
}

func validateTargetLinks(reqForNodes map[string][]*reqForNode, drivers map[string][]*resolvedNode, opts map[string]Options) error {
for name := range opts {
dps := reqForNodes[name]
for i, dp := range dps {

@@ -279,8 +269,9 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
k2 := strings.TrimPrefix(v, "target:")
dps2, ok := drivers[k2]
if !ok {
return nil, errors.Errorf("failed to find target %s for context %s", k2, strings.TrimPrefix(k, "context:")) // should be validated before already
return errors.Errorf("failed to find target %s for context %s", k2, strings.TrimPrefix(k, "context:")) // should be validated before already
}

var found bool
for _, dp2 := range dps2 {
if dp2.driverIndex == dp.driverIndex {

@@ -289,12 +280,63 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
}
}
if !found {
return nil, errors.Errorf("failed to use %s as context %s for %s because targets build with different drivers", k2, strings.TrimPrefix(k, "context:"), name)
return errors.Errorf("failed to use %s as context %s for %s because targets build with different drivers", k2, strings.TrimPrefix(k, "context:"), name)
}
}
}
}
}
return nil
}

func toRepoOnly(in string) (string, error) {
m := map[string]struct{}{}
p := strings.Split(in, ",")
for _, pp := range p {
n, err := reference.ParseNormalizedNamed(pp)
if err != nil {
return "", err
}
m[n.Name()] = struct{}{}
}
out := make([]string, 0, len(m))
for k := range m {
out = append(out, k)
}
return strings.Join(out, ","), nil
}

func Build(ctx context.Context, nodes []builder.Node, opts map[string]Options, docker *dockerutil.Client, cfg *confutil.Config, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
return BuildWithResultHandler(ctx, nodes, opts, docker, cfg, w, nil)
}

func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[string]Options, docker *dockerutil.Client, cfg *confutil.Config, w progress.Writer, resultHandleFunc func(driverIdx int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, err error) {
if len(nodes) == 0 {
return nil, errors.Errorf("driver required for build")
}

nodes, err = filterAvailableNodes(nodes)
if err != nil {
return nil, errors.Wrapf(err, "no valid drivers found")
}
warnOnNoOutput(ctx, nodes, opts)

drivers, err := resolveDrivers(ctx, nodes, opts, w)
if err != nil {
return nil, err
}

eg, ctx := errgroup.WithContext(ctx)
reqForNodes, release, err := newBuildRequests(ctx, docker, cfg, drivers, w, opts)
if err != nil {
return nil, err
}
defer release()

// validate that all links between targets use same drivers
if err := validateTargetLinks(reqForNodes, drivers, opts); err != nil {
return nil, err
}

sharedSessions, err := detectSharedMounts(ctx, reqForNodes)
if err != nil {

@@ -311,7 +353,6 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[

for k, opt := range opts {
err := func(k string) (err error) {
opt := opt
dps := drivers[k]
multiDriver := len(drivers[k]) > 1

@@ -441,19 +482,11 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
req.FrontendOpt["requestid"] = "frontend." + opt.CallFunc.Name
}

res, err := c.Solve(ctx, req)
res, err := solve(ctx, c, req)
if err != nil {
req, ok := fallbackPrintError(err, req)
if ok {
res2, err2 := c.Solve(ctx, req)
if err2 != nil {
return nil, err
}
res = res2
} else {
return nil, err
}
return nil, err
}

if opt.CallFunc != nil {
callRes = res.Metadata
}

@@ -461,31 +494,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
rKey := resultKey(dp.driverIndex, k)
results.Set(rKey, res)

if children, ok := childTargets[rKey]; ok && len(children) > 0 {
// wait for the child targets to register their LLB before evaluating
_, err := results.Get(ctx, children...)
if err != nil {
return nil, err
}
// we need to wait until the child targets have completed before we can release
eg, ctx := errgroup.WithContext(ctx)
eg.Go(func() error {
return res.EachRef(func(ref gateway.Reference) error {
return ref.Evaluate(ctx)
})
})
eg.Go(func() error {
_, err := results.Get(ctx, children...)
return err
})
if err := eg.Wait(); err != nil {
if children := childTargets[rKey]; len(children) > 0 {
if err := waitForChildren(ctx, res, results, children); err != nil {
return nil, err
}
}

return res, nil
}
buildRef := fmt.Sprintf("%s/%s/%s", node.Builder, node.Name, so.Ref)

var rr *client.SolveResponse
if resultHandleFunc != nil {
var resultHandle *ResultHandle

@@ -496,6 +513,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
rr, err = c.Build(ctx, *so, "buildx", buildFunc, ch)
tracing.FinishWithError(span, err)
}

if !so.Internal && desktop.BuildBackendEnabled() && node.Driver.HistoryAPISupported(ctx) {
if err != nil {
return &desktop.ErrorWithBuildRef{

@@ -1146,3 +1164,40 @@ func ReadSourcePolicy() (*spb.Policy, error) {

return &pol, nil
}

func solve(ctx context.Context, c gateway.Client, req gateway.SolveRequest) (*gateway.Result, error) {
res, err := c.Solve(ctx, req)
if err != nil {
req, ok := fallbackPrintError(err, req)
if ok {
res2, err2 := c.Solve(ctx, req)
if err2 != nil {
return nil, err
}
res = res2
} else {
return nil, err
}
}
return res, nil
}

func waitForChildren(ctx context.Context, res *gateway.Result, results *waitmap.Map, children []string) error {
// wait for the child targets to register their LLB before evaluating
_, err := results.Get(ctx, children...)
if err != nil {
return err
}
// we need to wait until the child targets have completed before we can release
eg, ctx := errgroup.WithContext(ctx)
eg.Go(func() error {
return res.EachRef(func(ref gateway.Reference) error {
return ref.Evaluate(ctx)
})
})
eg.Go(func() error {
_, err := results.Get(ctx, children...)
return err
})
return eg.Wait()
}
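The newBuildRequests hunks above replace the old per-iteration `defer release()` with a `releasers` slice that is folded into a single release function returned to the caller, so solve options stay alive until the whole build has finished. The following is a minimal, self-contained sketch of that pattern; the `acquire` and `buildAll` names are illustrative only and are not part of buildx.

```go
package main

import "fmt"

// acquire is a stand-in for a helper like toSolveOpt: it returns a value plus
// a cleanup function that must run once the caller is completely done with it.
func acquire(name string) (string, func()) {
	return name, func() { fmt.Println("released", name) }
}

// buildAll collects every per-item cleanup into one release function instead
// of deferring inside the loop, so the caller decides when cleanup happens.
func buildAll(names []string) ([]string, func()) {
	var out []string
	var releasers []func()
	for _, n := range names {
		v, release := acquire(n)
		releasers = append(releasers, release)
		out = append(out, v)
	}
	release := func() {
		for _, fn := range releasers {
			fn()
		}
	}
	return out, release
}

func main() {
	items, release := buildAll([]string{"a", "b"})
	defer release() // runs after the whole build, not per iteration
	fmt.Println(items)
}
```

Deferring inside the extracted helper would run the cleanups as soon as newBuildRequests returned, before the build had used the prepared requests; returning one combined closure keeps the original lifetime while still giving the caller a single cleanup point.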
@@ -21,10 +21,9 @@ import (
"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/commands/debug"
"github.com/docker/buildx/controller"
cbuild "github.com/docker/buildx/controller/build"
"github.com/docker/buildx/controller/control"
controllererrors "github.com/docker/buildx/controller/errdefs"
"github.com/docker/buildx/controller/local"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/monitor"
"github.com/docker/buildx/store"

@@ -431,28 +430,18 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild
// stdin must be usable for monitor
return nil, nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
}
c := controller.NewController(ctx, dockerCli)
defer func() {
if err := c.Close(); err != nil {
logrus.Warnf("failed to close server connection %v", err)
}
}()

// NOTE: buildx server has the current working directory different from the client
// so we need to resolve paths to abosolute ones in the client.
opts, err := cbuild.ResolveOptionPaths(opts)
if err != nil {
return nil, nil, err
}
c := local.NewController(ctx, dockerCli)

var ref string
var retErr error
var resp *client.SolveResponse
var inputs *build.Inputs
var (
ref string
retErr error

f *ioset.SingleForwarder
pr io.ReadCloser
pw io.WriteCloser
)

var f *ioset.SingleForwarder
var pr io.ReadCloser
var pw io.WriteCloser
if options.invokeConfig == nil {
pr = dockerCli.In()
} else {

@@ -466,7 +455,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild
})
}

resp, inputs, err = c.Build(ctx, opts, pr, printer)
resp, inputs, err := c.Build(ctx, opts, pr, printer)
if err != nil {
var be *controllererrors.BuildError
if errors.As(err, &be) {

@@ -508,10 +497,6 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild
// Update return values with the last build result from monitor
resp, retErr = monitorBuildResult.Resp, monitorBuildResult.Err
}
} else {
if err := c.Close(); err != nil {
logrus.Warnf("close error: %v", err)
}
}

return resp, inputs, retErr

@@ -1003,13 +988,9 @@ func (cfg *invokeConfig) needsDebug(retErr error) bool {
}
}

func (cfg *invokeConfig) runDebug(ctx context.Context, ref string, options *cbuild.Options, c control.BuildxController, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress *progress.Printer) (*monitor.MonitorBuildResult, error) {
func (cfg *invokeConfig) runDebug(ctx context.Context, ref string, options *cbuild.Options, c *local.Controller, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress *progress.Printer) (*monitor.MonitorBuildResult, error) {
con := console.Current()
if err := con.SetRaw(); err != nil {
// TODO: run disconnect in build command (on error case)
if err := c.Close(); err != nil {
logrus.Warnf("close error: %v", err)
}
return nil, errors.Errorf("failed to configure terminal: %v", err)
}
defer con.Reset()
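runDebug above switches the terminal into raw mode before handing control to the interactive monitor and restores it with a deferred Reset. Below is a minimal sketch of that pattern using github.com/containerd/console; it assumes the process is attached to a real TTY and is not the buildx code itself.

```go
package main

import (
	"fmt"

	"github.com/containerd/console"
)

func main() {
	// console.Current() expects the process to be attached to a terminal
	// and will not work under a plain pipe, matching how runDebug uses it.
	con := console.Current()
	if err := con.SetRaw(); err != nil {
		fmt.Println("failed to configure terminal:", err)
		return
	}
	// Restore cooked mode even if the interactive session returns early.
	defer con.Reset()

	// ... run the interactive monitor session here ...
}
```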
@@ -5,7 +5,7 @@ import (
"os"

"github.com/containerd/console"
"github.com/docker/buildx/controller"
"github.com/docker/buildx/controller/local"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/monitor"
"github.com/docker/buildx/util/cobrautil"

@@ -13,7 +13,6 @@ import (
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

@@ -47,12 +46,8 @@ func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
}

ctx := context.TODO()
c := controller.NewController(ctx, dockerCli)
defer func() {
if err := c.Close(); err != nil {
logrus.Warnf("failed to close server connection %v", err)
}
}()
c := local.NewController(ctx, dockerCli)

con := console.Current()
if err := con.SetRaw(); err != nil {
return errors.Errorf("failed to configure terminal: %v", err)
@@ -1,26 +0,0 @@
package control

import (
"context"
"io"

"github.com/docker/buildx/build"
cbuild "github.com/docker/buildx/controller/build"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"
)

type BuildxController interface {
Build(ctx context.Context, options *cbuild.Options, in io.ReadCloser, progress progress.Writer) (resp *client.SolveResponse, inputs *build.Inputs, err error)
// Invoke starts an IO session into the specified process.
// If pid doesn't match to any running processes, it starts a new process with the specified config.
// If there is no container running or InvokeConfig.Rollback is specified, the process will start in a newly created container.
// NOTE: If needed, in the future, we can split this API into three APIs (NewContainer, NewProcess and Attach).
Invoke(ctx context.Context, pid string, options *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error
Close() error
ListProcesses(ctx context.Context) (infos []*processes.ProcessInfo, retErr error)
DisconnectProcess(ctx context.Context, pid string) error
Inspect(ctx context.Context) *cbuild.Options
}
@@ -1,13 +0,0 @@
package controller

import (
"context"

"github.com/docker/buildx/controller/control"
"github.com/docker/buildx/controller/local"
"github.com/docker/cli/cli/command"
)

func NewController(ctx context.Context, dockerCli command.Cli) control.BuildxController {
return local.NewLocalBuildxController(ctx, dockerCli)
}
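The two deleted files above drop the `control.BuildxController` interface and the `controller.NewController` wrapper that returned it; later hunks show callers constructing the concrete controller with `local.NewController` instead. A small, hypothetical sketch of this "remove the single-implementation interface" refactor follows; the names are illustrative and are not the buildx types.

```go
package main

import "fmt"

// Controller is the concrete implementation. The commit removes the
// single-implementation interface and the wrapper constructor that
// returned it, so callers hold the concrete type directly.
type Controller struct{ name string }

func NewController(name string) *Controller { return &Controller{name: name} }

func (c *Controller) Inspect() string { return "controller: " + c.name }

func main() {
	// Call sites keep the same shape, but one package and one layer of
	// indirection disappear.
	c := NewController("local")
	fmt.Println(c.Inspect())
}
```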
@@ -7,7 +7,6 @@ import (

"github.com/docker/buildx/build"
cbuild "github.com/docker/buildx/controller/build"
"github.com/docker/buildx/controller/control"
controllererrors "github.com/docker/buildx/controller/errdefs"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"

@@ -18,10 +17,9 @@ import (
"github.com/pkg/errors"
)

func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli) control.BuildxController {
return &localController{
func NewController(ctx context.Context, dockerCli command.Cli) *Controller {
return &Controller{
dockerCli: dockerCli,
processes: processes.NewManager(),
}
}

@@ -32,15 +30,14 @@ type buildConfig struct {
buildOptions *cbuild.Options
}

type localController struct {
type Controller struct {
dockerCli command.Cli
buildConfig buildConfig
processes *processes.Manager

buildOnGoing atomic.Bool
}

func (b *localController) Build(ctx context.Context, options *cbuild.Options, in io.ReadCloser, progress progress.Writer) (*client.SolveResponse, *build.Inputs, error) {
func (b *Controller) Build(ctx context.Context, options *cbuild.Options, in io.ReadCloser, progress progress.Writer) (*client.SolveResponse, *build.Inputs, error) {
if !b.buildOnGoing.CompareAndSwap(false, true) {
return nil, nil, errors.New("build ongoing")
}

@@ -63,27 +60,15 @@ func (b *localController) Build(ctx context.Context, options *cbuild.Options, in
return resp, dockerfileMappings, nil
}

func (b *localController) ListProcesses(ctx context.Context) (infos []*processes.ProcessInfo, retErr error) {
return b.processes.ListProcesses(), nil
}

func (b *localController) DisconnectProcess(ctx context.Context, pid string) error {
return b.processes.DeleteProcess(pid)
}

func (b *localController) cancelRunningProcesses() {
b.processes.CancelRunningProcesses()
}

func (b *localController) Invoke(ctx context.Context, pid string, cfg *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error {
proc, ok := b.processes.Get(pid)
func (b *Controller) Invoke(ctx context.Context, processes *processes.Manager, pid string, cfg *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error {
proc, ok := processes.Get(pid)
if !ok {
// Start a new process.
if b.buildConfig.resultCtx == nil {
return errors.New("no build result is registered")
}
var err error
proc, err = b.processes.StartProcess(pid, b.buildConfig.resultCtx, cfg)
proc, err = processes.StartProcess(pid, b.buildConfig.resultCtx, cfg)
if err != nil {
return err
}

@@ -103,15 +88,6 @@ func (b *localController) Invoke(ctx context.Context, pid string, cfg *controlle
}
}

func (b *localController) Close() error {
b.cancelRunningProcesses()
if b.buildConfig.resultCtx != nil {
b.buildConfig.resultCtx.Done()
}
// TODO: cancel ongoing builds?
return nil
}

func (b *localController) Inspect(ctx context.Context) *cbuild.Options {
func (b *Controller) Inspect(ctx context.Context) *cbuild.Options {
return b.buildConfig.buildOptions
}
@@ -12,8 +12,9 @@ import (
"github.com/containerd/console"
"github.com/docker/buildx/build"
cbuild "github.com/docker/buildx/controller/build"
"github.com/docker/buildx/controller/control"
"github.com/docker/buildx/controller/local"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/monitor/commands"
"github.com/docker/buildx/monitor/types"
"github.com/docker/buildx/util/ioset"

@@ -32,13 +33,7 @@ type MonitorBuildResult struct {
}

// RunMonitor provides an interactive session for running and managing containers via specified IO.
func RunMonitor(ctx context.Context, curRef string, options *cbuild.Options, invokeConfig *controllerapi.InvokeConfig, c control.BuildxController, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress *progress.Printer) (*MonitorBuildResult, error) {
defer func() {
if err := c.Close(); err != nil {
logrus.Warnf("close error: %v", err)
}
}()

func RunMonitor(ctx context.Context, curRef string, options *cbuild.Options, invokeConfig *controllerapi.InvokeConfig, c *local.Controller, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress *progress.Printer) (*MonitorBuildResult, error) {
if err := progress.Pause(); err != nil {
return nil, err
}

@@ -70,8 +65,9 @@ func RunMonitor(ctx context.Context, curRef string, options *cbuild.Options, inv
invokeForwarder := ioset.NewForwarder()
invokeForwarder.SetIn(&containerIn)
m := &monitor{
BuildxController: c,
invokeIO: invokeForwarder,
c: c,
processes: processes.NewManager(),
invokeIO: invokeForwarder,
muxIO: ioset.NewMuxIO(ioset.In{
Stdin: io.NopCloser(stdin),
Stdout: nopCloser{stdout},

@@ -84,6 +80,12 @@ func RunMonitor(ctx context.Context, curRef string, options *cbuild.Options, inv
return "Switched IO\n"
}),
}

defer func() {
if err := m.Close(); err != nil {
logrus.Warnf("close error: %v", err)
}
}()
m.ref.Store(curRef)

// Start container automatically

@@ -231,7 +233,7 @@ type readWriter struct {
}

type monitor struct {
control.BuildxController
c *local.Controller
ref atomic.Value

muxIO *ioset.MuxIO

@@ -240,14 +242,24 @@ type monitor struct {
attachedPid atomic.Value

lastBuildResult *MonitorBuildResult

processes *processes.Manager
}

func (m *monitor) Build(ctx context.Context, options *cbuild.Options, in io.ReadCloser, progress progress.Writer) (resp *client.SolveResponse, input *build.Inputs, err error) {
resp, _, err = m.BuildxController.Build(ctx, options, in, progress)
resp, _, err = m.c.Build(ctx, options, in, progress)
m.lastBuildResult = &MonitorBuildResult{Resp: resp, Err: err} // Record build result
return
}

func (m *monitor) Invoke(ctx context.Context, pid string, cfg *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error {
return m.c.Invoke(ctx, m.processes, pid, cfg, ioIn, ioOut, ioErr)
}

func (m *monitor) Inspect(ctx context.Context) *cbuild.Options {
return m.c.Inspect(ctx)
}

func (m *monitor) Rollback(ctx context.Context, cfg *controllerapi.InvokeConfig) string {
pid := identity.NewID()
cfg1 := cfg

@@ -332,6 +344,27 @@ func (m *monitor) invoke(ctx context.Context, pid string, cfg *controllerapi.Inv
return err
}

func (m *monitor) Close() error {
m.cancelRunningProcesses()
// if m.buildConfig.resultCtx != nil {
// b.buildConfig.resultCtx.Done()
// }
// TODO: cancel ongoing builds?
return nil
}

func (m *monitor) ListProcesses(ctx context.Context) (infos []*processes.ProcessInfo, retErr error) {
return m.processes.ListProcesses(), nil
}

func (m *monitor) DisconnectProcess(ctx context.Context, pid string) error {
return m.processes.DeleteProcess(pid)
}

func (m *monitor) cancelRunningProcesses() {
m.processes.CancelRunningProcesses()
}

type nopCloser struct {
io.Writer
}
@@ -2,14 +2,31 @@ package types

import (
"context"
"io"

"github.com/docker/buildx/controller/control"
"github.com/docker/buildx/build"
cbuild "github.com/docker/buildx/controller/build"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"
)

// Monitor provides APIs for attaching and controlling the buildx server.
type Monitor interface {
control.BuildxController
Build(ctx context.Context, options *cbuild.Options, in io.ReadCloser, progress progress.Writer) (resp *client.SolveResponse, inputs *build.Inputs, err error)

Inspect(ctx context.Context) *cbuild.Options

// Invoke starts an IO session into the specified process.
// If pid doesn't match to any running processes, it starts a new process with the specified config.
// If there is no container running or InvokeConfig.Rollback is specified, the process will start in a newly created container.
// NOTE: If needed, in the future, we can split this API into three APIs (NewContainer, NewProcess and Attach).
Invoke(ctx context.Context, pid string, options *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error

ListProcesses(ctx context.Context) (infos []*processes.ProcessInfo, retErr error)

DisconnectProcess(ctx context.Context, pid string) error

// Rollback re-runs the interactive container with initial rootfs contents.
Rollback(ctx context.Context, cfg *controllerapi.InvokeConfig) string

@@ -25,6 +42,8 @@ type Monitor interface {

// Detach detaches IO from the container.
Detach()

io.Closer
}

// CommandInfo is information about a command.