mirror of https://github.com/docker/compose.git
Compare commits
54 Commits
Author | SHA1 | Date |
---|---|---|
|
0dc9852c67 | |
|
a478702236 | |
|
2c12ad19db | |
|
038ea8441a | |
|
9e98e6101e | |
|
52f04229c0 | |
|
28895d0322 | |
|
a926f7d717 | |
|
fe046915eb | |
|
adbd61e5d6 | |
|
e37ac04329 | |
|
cab2c2a44e | |
|
1946de598d | |
|
8e29a138aa | |
|
3c8da0afee | |
|
1b12c867c5 | |
|
1a4fc55fd7 | |
|
efc939dcee | |
|
d6e9f79ba6 | |
|
b4c44a431f | |
|
fb5a8644c3 | |
|
95660c5e5a | |
|
f6ddd6ae88 | |
|
4ae7066955 | |
|
fd954f266c | |
|
d62e21025c | |
|
6a2d16bd10 | |
|
4d47da6dc2 | |
|
8f91793fb5 | |
|
1d2223fb23 | |
|
d4f6000712 | |
|
c50d16cd78 | |
|
3875e13fad | |
|
c89f30170d | |
|
41a9b91887 | |
|
5fc2b2a71c | |
|
b1cd40c316 | |
|
362ab0733f | |
|
f35d2cfb3b | |
|
17ba6c7188 | |
|
1c37f1abb6 | |
|
485b6200ee | |
|
8c17a35609 | |
|
6b9667401a | |
|
9a1e589ce8 | |
|
5e147e852e | |
|
29308cb97e | |
|
0b0242d0ac | |
|
5a704004d3 | |
|
cb95910018 | |
|
f42226e352 | |
|
0cc3c7a550 | |
|
f7ee9c8a0c | |
|
35efa97b7d |
|
@ -12,6 +12,12 @@ body:
|
|||
Include both the current behavior (what you are seeing) as well as what you expected to happen.
|
||||
validations:
|
||||
required: true
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
[Docker Swarm](https://www.mirantis.com/software/swarm/) uses a distinct compose file parser and
|
||||
as such doesn't support some of the recent features of Docker Compose. Please contact Mirantis
|
||||
if you need assistance with compose file support in Docker Swarm.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Steps To Reproduce
|
||||
|
|
|
@ -30,6 +30,8 @@ linters:
|
|||
deny:
|
||||
- pkg: io/ioutil
|
||||
desc: io/ioutil package has been deprecated
|
||||
- pkg: github.com/docker/docker/errdefs
|
||||
desc: use github.com/containerd/errdefs instead.
|
||||
- pkg: golang.org/x/exp/maps
|
||||
desc: use stdlib maps package
|
||||
- pkg: golang.org/x/exp/slices
|
||||
|
|
|
@ -23,6 +23,12 @@ your application are configured.
|
|||
Once you have a Compose file, you can create and start your application with a
|
||||
single command: `docker compose up`.
|
||||
|
||||
> **Note**: About Docker Swarm
|
||||
> Docker Swarm used to rely on the legacy compose file format but did not adopted the compose specification
|
||||
> so is missing some of the recent enhancements in the compose syntax. After
|
||||
> [acquisition by Mirantis](https://www.mirantis.com/software/swarm/) swarm isn't maintained by Docker Inc, and
|
||||
> as such some Docker Compose features aren't accessible to swarm users.
|
||||
|
||||
# Where to get Docker Compose
|
||||
|
||||
### Windows and macOS
|
||||
|
|
|
@ -55,8 +55,10 @@ func Setup(cmd *cobra.Command, dockerCli command.Cli, args []string) error {
|
|||
ctx,
|
||||
"cli/"+strings.Join(commandName(cmd), "-"),
|
||||
)
|
||||
cmdSpan.SetAttributes(attribute.StringSlice("cli.flags", getFlags(cmd.Flags())))
|
||||
cmdSpan.SetAttributes(attribute.Bool("cli.isatty", dockerCli.In().IsTerminal()))
|
||||
cmdSpan.SetAttributes(
|
||||
attribute.StringSlice("cli.flags", getFlags(cmd.Flags())),
|
||||
attribute.Bool("cli.isatty", dockerCli.In().IsTerminal()),
|
||||
)
|
||||
|
||||
cmd.SetContext(ctx)
|
||||
wrapRunE(cmd, cmdSpan, tracingShutdown)
|
||||
|
|
|
@ -45,7 +45,8 @@ type buildOptions struct {
|
|||
deps bool
|
||||
print bool
|
||||
check bool
|
||||
provenance bool
|
||||
sbom string
|
||||
provenance string
|
||||
}
|
||||
|
||||
func (opts buildOptions) toAPIBuildOptions(services []string) (api.BuildOptions, error) {
|
||||
|
@ -84,6 +85,7 @@ func (opts buildOptions) toAPIBuildOptions(services []string) (api.BuildOptions,
|
|||
Check: opts.check,
|
||||
SSHs: SSHKeys,
|
||||
Builder: builderName,
|
||||
SBOM: opts.sbom,
|
||||
Provenance: opts.provenance,
|
||||
}, nil
|
||||
}
|
||||
|
@ -119,12 +121,14 @@ func buildCommand(p *ProjectOptions, dockerCli command.Cli, backend api.Service)
|
|||
}
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVar(&opts.push, "push", false, "Push service images")
|
||||
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Don't print anything to STDOUT")
|
||||
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the build output")
|
||||
flags.BoolVar(&opts.pull, "pull", false, "Always attempt to pull a newer version of the image")
|
||||
flags.StringArrayVar(&opts.args, "build-arg", []string{}, "Set build-time variables for services")
|
||||
flags.StringVar(&opts.ssh, "ssh", "", "Set SSH authentications used when building service images. (use 'default' for using your default SSH Agent)")
|
||||
flags.StringVar(&opts.builder, "builder", "", "Set builder to use")
|
||||
flags.BoolVar(&opts.deps, "with-dependencies", false, "Also build dependencies (transitively)")
|
||||
flags.StringVar(&opts.provenance, "provenance", "", `Add a provenance attestation`)
|
||||
flags.StringVar(&opts.sbom, "sbom", "", `Add a SBOM attestation`)
|
||||
|
||||
flags.Bool("parallel", true, "Build images in parallel. DEPRECATED")
|
||||
flags.MarkHidden("parallel") //nolint:errcheck
|
||||
|
@ -156,7 +160,7 @@ func runBuild(ctx context.Context, dockerCli command.Cli, backend api.Service, o
|
|||
}
|
||||
|
||||
apiBuildOptions, err := opts.toAPIBuildOptions(services)
|
||||
apiBuildOptions.Provenance = true
|
||||
apiBuildOptions.Attestations = true
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -51,6 +51,7 @@ type configOptions struct {
|
|||
services bool
|
||||
volumes bool
|
||||
networks bool
|
||||
models bool
|
||||
profiles bool
|
||||
images bool
|
||||
hash string
|
||||
|
@ -115,6 +116,9 @@ func configCommand(p *ProjectOptions, dockerCli command.Cli) *cobra.Command {
|
|||
if opts.networks {
|
||||
return runNetworks(ctx, dockerCli, opts)
|
||||
}
|
||||
if opts.models {
|
||||
return runModels(ctx, dockerCli, opts)
|
||||
}
|
||||
if opts.hash != "" {
|
||||
return runHash(ctx, dockerCli, opts)
|
||||
}
|
||||
|
@ -152,6 +156,7 @@ func configCommand(p *ProjectOptions, dockerCli command.Cli) *cobra.Command {
|
|||
flags.BoolVar(&opts.services, "services", false, "Print the service names, one per line.")
|
||||
flags.BoolVar(&opts.volumes, "volumes", false, "Print the volume names, one per line.")
|
||||
flags.BoolVar(&opts.networks, "networks", false, "Print the network names, one per line.")
|
||||
flags.BoolVar(&opts.models, "models", false, "Print the model names, one per line.")
|
||||
flags.BoolVar(&opts.profiles, "profiles", false, "Print the profile names, one per line.")
|
||||
flags.BoolVar(&opts.images, "images", false, "Print the image names, one per line.")
|
||||
flags.StringVar(&opts.hash, "hash", "", "Print the service config hash, one per line.")
|
||||
|
@ -383,6 +388,19 @@ func runNetworks(ctx context.Context, dockerCli command.Cli, opts configOptions)
|
|||
return nil
|
||||
}
|
||||
|
||||
func runModels(ctx context.Context, dockerCli command.Cli, opts configOptions) error {
|
||||
project, err := opts.ToProject(ctx, dockerCli, nil, cli.WithoutEnvironmentResolution)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, model := range project.Models {
|
||||
if model.Model != "" {
|
||||
_, _ = fmt.Fprintln(dockerCli.Out(), model.Model)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func runHash(ctx context.Context, dockerCli command.Cli, opts configOptions) error {
|
||||
var services []string
|
||||
if opts.hash != "*" {
|
||||
|
|
|
@ -29,7 +29,9 @@ import (
|
|||
|
||||
type eventsOpts struct {
|
||||
*composeOptions
|
||||
json bool
|
||||
json bool
|
||||
since string
|
||||
until string
|
||||
}
|
||||
|
||||
func eventsCommand(p *ProjectOptions, dockerCli command.Cli, backend api.Service) *cobra.Command {
|
||||
|
@ -48,6 +50,8 @@ func eventsCommand(p *ProjectOptions, dockerCli command.Cli, backend api.Service
|
|||
}
|
||||
|
||||
cmd.Flags().BoolVar(&opts.json, "json", false, "Output events as a stream of json objects")
|
||||
cmd.Flags().StringVar(&opts.since, "since", "", "Show all events created since timestamp")
|
||||
cmd.Flags().StringVar(&opts.until, "until", "", "Stream events until this timestamp")
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
@ -59,6 +63,8 @@ func runEvents(ctx context.Context, dockerCli command.Cli, backend api.Service,
|
|||
|
||||
return backend.Events(ctx, name, api.EventsOptions{
|
||||
Services: services,
|
||||
Since: opts.since,
|
||||
Until: opts.until,
|
||||
Consumer: func(event api.Event) error {
|
||||
if opts.json {
|
||||
marshal, err := json.Marshal(map[string]interface{}{
|
||||
|
|
|
@ -165,6 +165,7 @@ func upCommand(p *ProjectOptions, dockerCli command.Cli, backend api.Service) *c
|
|||
flags.BoolVar(&create.recreateDeps, "always-recreate-deps", false, "Recreate dependent containers. Incompatible with --no-recreate.")
|
||||
flags.BoolVarP(&create.noInherit, "renew-anon-volumes", "V", false, "Recreate anonymous volumes instead of retrieving data from the previous containers")
|
||||
flags.BoolVar(&create.quietPull, "quiet-pull", false, "Pull without printing progress information")
|
||||
flags.BoolVar(&build.quiet, "quiet-build", false, "Suppress the build output")
|
||||
flags.StringArrayVar(&up.attach, "attach", []string{}, "Restrict attaching to the specified services. Incompatible with --attach-dependencies.")
|
||||
flags.StringArrayVar(&up.noAttach, "no-attach", []string{}, "Do not attach (stream logs) to the specified services")
|
||||
flags.BoolVar(&up.attachDependencies, "attach-dependencies", false, "Automatically attach to log output of dependent services")
|
||||
|
@ -223,6 +224,7 @@ func validateFlags(up *upOptions, create *createOptions) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
//nolint:gocyclo
|
||||
func runUp(
|
||||
ctx context.Context,
|
||||
dockerCli command.Cli,
|
||||
|
@ -330,7 +332,7 @@ func runUp(
|
|||
WaitTimeout: timeout,
|
||||
Watch: upOptions.watch,
|
||||
Services: services,
|
||||
NavigationMenu: upOptions.navigationMenu && ui.Mode != "plain",
|
||||
NavigationMenu: upOptions.navigationMenu && ui.Mode != "plain" && dockerCli.In().IsTerminal(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
|
|
@ -28,49 +28,42 @@ func ansi(code string) string {
|
|||
return fmt.Sprintf("\033%s", code)
|
||||
}
|
||||
|
||||
func SaveCursor() {
|
||||
func saveCursor() {
|
||||
if disableAnsi {
|
||||
return
|
||||
}
|
||||
fmt.Print(ansi("7"))
|
||||
}
|
||||
|
||||
func RestoreCursor() {
|
||||
func restoreCursor() {
|
||||
if disableAnsi {
|
||||
return
|
||||
}
|
||||
fmt.Print(ansi("8"))
|
||||
}
|
||||
|
||||
func HideCursor() {
|
||||
if disableAnsi {
|
||||
return
|
||||
}
|
||||
fmt.Print(ansi("[?25l"))
|
||||
}
|
||||
|
||||
func ShowCursor() {
|
||||
func showCursor() {
|
||||
if disableAnsi {
|
||||
return
|
||||
}
|
||||
fmt.Print(ansi("[?25h"))
|
||||
}
|
||||
|
||||
func MoveCursor(y, x int) {
|
||||
func moveCursor(y, x int) {
|
||||
if disableAnsi {
|
||||
return
|
||||
}
|
||||
fmt.Print(ansi(fmt.Sprintf("[%d;%dH", y, x)))
|
||||
}
|
||||
|
||||
func MoveCursorX(pos int) {
|
||||
func carriageReturn() {
|
||||
if disableAnsi {
|
||||
return
|
||||
}
|
||||
fmt.Print(ansi(fmt.Sprintf("[%dG", pos)))
|
||||
fmt.Print(ansi(fmt.Sprintf("[%dG", 0)))
|
||||
}
|
||||
|
||||
func ClearLine() {
|
||||
func clearLine() {
|
||||
if disableAnsi {
|
||||
return
|
||||
}
|
||||
|
@ -78,7 +71,7 @@ func ClearLine() {
|
|||
fmt.Print(ansi("[2K"))
|
||||
}
|
||||
|
||||
func MoveCursorUp(lines int) {
|
||||
func moveCursorUp(lines int) {
|
||||
if disableAnsi {
|
||||
return
|
||||
}
|
||||
|
@ -86,7 +79,7 @@ func MoveCursorUp(lines int) {
|
|||
fmt.Print(ansi(fmt.Sprintf("[%dA", lines)))
|
||||
}
|
||||
|
||||
func MoveCursorDown(lines int) {
|
||||
func moveCursorDown(lines int) {
|
||||
if disableAnsi {
|
||||
return
|
||||
}
|
||||
|
@ -94,7 +87,7 @@ func MoveCursorDown(lines int) {
|
|||
fmt.Print(ansi(fmt.Sprintf("[%dB", lines)))
|
||||
}
|
||||
|
||||
func NewLine() {
|
||||
func newLine() {
|
||||
// Like \n
|
||||
fmt.Print("\012")
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@ package formatter
|
|||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
|
@ -58,6 +59,9 @@ const (
|
|||
Auto = "auto"
|
||||
)
|
||||
|
||||
// ansiColorOffset is the offset for basic foreground colors in ANSI escape codes.
|
||||
const ansiColorOffset = 30
|
||||
|
||||
// SetANSIMode configure formatter for colored output on ANSI-compliant console
|
||||
func SetANSIMode(streams command.Streams, ansi string) {
|
||||
if !useAnsi(streams, ansi) {
|
||||
|
@ -91,11 +95,15 @@ func ansiColor(code, s string, formatOpts ...string) string {
|
|||
|
||||
// Everything about ansiColorCode color https://hyperskill.org/learn/step/18193
|
||||
func ansiColorCode(code string, formatOpts ...string) string {
|
||||
res := "\033["
|
||||
var sb strings.Builder
|
||||
sb.WriteString("\033[")
|
||||
for _, c := range formatOpts {
|
||||
res = fmt.Sprintf("%s%s;", res, c)
|
||||
sb.WriteString(c)
|
||||
sb.WriteString(";")
|
||||
}
|
||||
return fmt.Sprintf("%s%sm", res, code)
|
||||
sb.WriteString(code)
|
||||
sb.WriteString("m")
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func makeColorFunc(code string) colorFunc {
|
||||
|
@ -122,8 +130,8 @@ func rainbowColor() colorFunc {
|
|||
func init() {
|
||||
colors := map[string]colorFunc{}
|
||||
for i, name := range names {
|
||||
colors[name] = makeColorFunc(strconv.Itoa(30 + i))
|
||||
colors["intense_"+name] = makeColorFunc(strconv.Itoa(30+i) + ";1")
|
||||
colors[name] = makeColorFunc(strconv.Itoa(ansiColorOffset + i))
|
||||
colors["intense_"+name] = makeColorFunc(strconv.Itoa(ansiColorOffset+i) + ";1")
|
||||
}
|
||||
rainbow = []colorFunc{
|
||||
colors["cyan"],
|
||||
|
|
|
@ -56,10 +56,6 @@ func NewLogConsumer(ctx context.Context, stdout, stderr io.Writer, color, prefix
|
|||
}
|
||||
}
|
||||
|
||||
func (l *logConsumer) Register(name string) {
|
||||
l.register(name)
|
||||
}
|
||||
|
||||
func (l *logConsumer) register(name string) *presenter {
|
||||
var p *presenter
|
||||
root, _, found := strings.Cut(name, " ")
|
||||
|
@ -73,9 +69,12 @@ func (l *logConsumer) register(name string) *presenter {
|
|||
} else {
|
||||
cf := monochrome
|
||||
if l.color {
|
||||
if name == api.WatchLogger {
|
||||
switch name {
|
||||
case "":
|
||||
cf = monochrome
|
||||
case api.WatchLogger:
|
||||
cf = makeColorFunc("92")
|
||||
} else {
|
||||
default:
|
||||
cf = nextColor()
|
||||
}
|
||||
}
|
||||
|
@ -122,7 +121,7 @@ func (l *logConsumer) write(w io.Writer, container, message string) {
|
|||
timestamp := time.Now().Format(jsonmessage.RFC3339NanoFixed)
|
||||
for _, line := range strings.Split(message, "\n") {
|
||||
if l.timestamp {
|
||||
_, _ = fmt.Fprintf(w, "%s%s%s\n", p.prefix, timestamp, line)
|
||||
_, _ = fmt.Fprintf(w, "%s%s %s\n", p.prefix, timestamp, line)
|
||||
} else {
|
||||
_, _ = fmt.Fprintf(w, "%s%s\n", p.prefix, line)
|
||||
}
|
||||
|
@ -184,7 +183,3 @@ func (l logDecorator) Status(container, msg string) {
|
|||
l.decorated.Status(container, msg)
|
||||
l.After()
|
||||
}
|
||||
|
||||
func (l logDecorator) Register(container string) {
|
||||
l.decorated.Register(container)
|
||||
}
|
||||
|
|
|
@ -48,8 +48,8 @@ func (ke *KeyboardError) printError(height int, info string) {
|
|||
if ke.shouldDisplay() {
|
||||
errMessage := ke.err.Error()
|
||||
|
||||
MoveCursor(height-1-extraLines(info)-extraLines(errMessage), 0)
|
||||
ClearLine()
|
||||
moveCursor(height-1-extraLines(info)-extraLines(errMessage), 0)
|
||||
clearLine()
|
||||
|
||||
fmt.Print(errMessage)
|
||||
}
|
||||
|
@ -133,7 +133,7 @@ func (lk *LogKeyboard) createBuffer(lines int) {
|
|||
|
||||
if lines > 0 {
|
||||
allocateSpace(lines)
|
||||
MoveCursorUp(lines)
|
||||
moveCursorUp(lines)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -146,17 +146,17 @@ func (lk *LogKeyboard) printNavigationMenu() {
|
|||
height := goterm.Height()
|
||||
menu := lk.navigationMenu()
|
||||
|
||||
MoveCursorX(0)
|
||||
SaveCursor()
|
||||
carriageReturn()
|
||||
saveCursor()
|
||||
|
||||
lk.kError.printError(height, menu)
|
||||
|
||||
MoveCursor(height-extraLines(menu), 0)
|
||||
ClearLine()
|
||||
moveCursor(height-extraLines(menu), 0)
|
||||
clearLine()
|
||||
fmt.Print(menu)
|
||||
|
||||
MoveCursorX(0)
|
||||
RestoreCursor()
|
||||
carriageReturn()
|
||||
restoreCursor()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -188,15 +188,15 @@ func (lk *LogKeyboard) navigationMenu() string {
|
|||
|
||||
func (lk *LogKeyboard) clearNavigationMenu() {
|
||||
height := goterm.Height()
|
||||
MoveCursorX(0)
|
||||
SaveCursor()
|
||||
carriageReturn()
|
||||
saveCursor()
|
||||
|
||||
// ClearLine()
|
||||
// clearLine()
|
||||
for i := 0; i < height; i++ {
|
||||
MoveCursorDown(1)
|
||||
ClearLine()
|
||||
moveCursorDown(1)
|
||||
clearLine()
|
||||
}
|
||||
RestoreCursor()
|
||||
restoreCursor()
|
||||
}
|
||||
|
||||
func (lk *LogKeyboard) openDockerDesktop(ctx context.Context, project *types.Project) {
|
||||
|
@ -316,13 +316,13 @@ func (lk *LogKeyboard) HandleKeyEvents(ctx context.Context, event keyboard.KeyEv
|
|||
case keyboard.KeyCtrlC:
|
||||
_ = keyboard.Close()
|
||||
lk.clearNavigationMenu()
|
||||
ShowCursor()
|
||||
showCursor()
|
||||
|
||||
lk.logLevel = NONE
|
||||
// will notify main thread to kill and will handle gracefully
|
||||
lk.signalChannel <- syscall.SIGINT
|
||||
case keyboard.KeyEnter:
|
||||
NewLine()
|
||||
newLine()
|
||||
lk.printNavigationMenu()
|
||||
}
|
||||
}
|
||||
|
@ -336,9 +336,9 @@ func (lk *LogKeyboard) EnableWatch(enabled bool, watcher Feature) {
|
|||
|
||||
func allocateSpace(lines int) {
|
||||
for i := 0; i < lines; i++ {
|
||||
ClearLine()
|
||||
NewLine()
|
||||
MoveCursorX(0)
|
||||
clearLine()
|
||||
newLine()
|
||||
carriageReturn()
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -22,9 +22,11 @@ run `docker compose build` to rebuild it.
|
|||
| `-m`, `--memory` | `bytes` | `0` | Set memory limit for the build container. Not supported by BuildKit. |
|
||||
| `--no-cache` | `bool` | | Do not use cache when building the image |
|
||||
| `--print` | `bool` | | Print equivalent bake file |
|
||||
| `--provenance` | `string` | | Add a provenance attestation |
|
||||
| `--pull` | `bool` | | Always attempt to pull a newer version of the image |
|
||||
| `--push` | `bool` | | Push service images |
|
||||
| `-q`, `--quiet` | `bool` | | Don't print anything to STDOUT |
|
||||
| `-q`, `--quiet` | `bool` | | Suppress the build output |
|
||||
| `--sbom` | `string` | | Add a SBOM attestation |
|
||||
| `--ssh` | `string` | | Set SSH authentications used when building service images. (use 'default' for using your default SSH Agent) |
|
||||
| `--with-dependencies` | `bool` | | Also build dependencies (transitively) |
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@ the canonical format.
|
|||
| `--hash` | `string` | | Print the service config hash, one per line. |
|
||||
| `--images` | `bool` | | Print the image names, one per line. |
|
||||
| `--lock-image-digests` | `bool` | | Produces an override file with image digests |
|
||||
| `--models` | `bool` | | Print the model names, one per line. |
|
||||
| `--networks` | `bool` | | Print the network names, one per line. |
|
||||
| `--no-consistency` | `bool` | | Don't check model consistency - warning: may produce invalid Compose output |
|
||||
| `--no-env-resolution` | `bool` | | Don't resolve service env files |
|
||||
|
|
|
@ -23,10 +23,12 @@ The events that can be received using this can be seen [here](/reference/cli/doc
|
|||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:------------|:-------|:--------|:------------------------------------------|
|
||||
| `--dry-run` | `bool` | | Execute command in dry run mode |
|
||||
| `--json` | `bool` | | Output events as a stream of json objects |
|
||||
| Name | Type | Default | Description |
|
||||
|:------------|:---------|:--------|:------------------------------------------|
|
||||
| `--dry-run` | `bool` | | Execute command in dry run mode |
|
||||
| `--json` | `bool` | | Output events as a stream of json objects |
|
||||
| `--since` | `string` | | Show all events created since timestamp |
|
||||
| `--until` | `string` | | Stream events until this timestamp |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
|
|
@ -44,6 +44,7 @@ If the process is interrupted using `SIGINT` (ctrl + C) or `SIGTERM`, the contai
|
|||
| `--no-recreate` | `bool` | | If containers already exist, don't recreate them. Incompatible with --force-recreate. |
|
||||
| `--no-start` | `bool` | | Don't start the services after creating them |
|
||||
| `--pull` | `string` | `policy` | Pull image before running ("always"\|"missing"\|"never") |
|
||||
| `--quiet-build` | `bool` | | Suppress the build output |
|
||||
| `--quiet-pull` | `bool` | | Pull without printing progress information |
|
||||
| `--remove-orphans` | `bool` | | Remove containers for services not defined in the Compose file |
|
||||
| `-V`, `--renew-anon-volumes` | `bool` | | Recreate anonymous volumes instead of retrieving data from the previous containers |
|
||||
|
|
|
@ -125,6 +125,15 @@ options:
|
|||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: provenance
|
||||
value_type: string
|
||||
description: Add a provenance attestation
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: pull
|
||||
value_type: bool
|
||||
default_value: "false"
|
||||
|
@ -149,7 +158,16 @@ options:
|
|||
shorthand: q
|
||||
value_type: bool
|
||||
default_value: "false"
|
||||
description: Don't print anything to STDOUT
|
||||
description: Suppress the build output
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: sbom
|
||||
value_type: string
|
||||
description: Add a SBOM attestation
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
|
|
|
@ -56,6 +56,16 @@ options:
|
|||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: models
|
||||
value_type: bool
|
||||
default_value: "false"
|
||||
description: Print the model names, one per line.
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: networks
|
||||
value_type: bool
|
||||
default_value: "false"
|
||||
|
|
|
@ -34,6 +34,24 @@ options:
|
|||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: since
|
||||
value_type: string
|
||||
description: Show all events created since timestamp
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: until
|
||||
value_type: string
|
||||
description: Stream events until this timestamp
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
inherited_options:
|
||||
- option: dry-run
|
||||
value_type: bool
|
||||
|
|
|
@ -211,6 +211,16 @@ options:
|
|||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: quiet-build
|
||||
value_type: bool
|
||||
default_value: "false"
|
||||
description: Suppress the build output
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: quiet-pull
|
||||
value_type: bool
|
||||
default_value: "false"
|
||||
|
|
53
go.mod
53
go.mod
|
@ -8,16 +8,16 @@ require (
|
|||
github.com/Microsoft/go-winio v0.6.2
|
||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
|
||||
github.com/buger/goterm v1.0.4
|
||||
github.com/compose-spec/compose-go/v2 v2.7.1
|
||||
github.com/compose-spec/compose-go/v2 v2.8.1
|
||||
github.com/containerd/containerd/v2 v2.1.3
|
||||
github.com/containerd/errdefs v1.0.0
|
||||
github.com/containerd/platforms v1.0.0-rc.1
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/distribution/reference v0.6.0
|
||||
github.com/docker/buildx v0.25.0
|
||||
github.com/docker/cli v28.3.1+incompatible
|
||||
github.com/docker/buildx v0.26.1
|
||||
github.com/docker/cli v28.3.3+incompatible
|
||||
github.com/docker/cli-docs-tool v0.10.0
|
||||
github.com/docker/docker v28.3.1+incompatible
|
||||
github.com/docker/docker v28.3.3+incompatible
|
||||
github.com/docker/go-connections v0.5.0
|
||||
github.com/docker/go-units v0.5.0
|
||||
github.com/eiannone/keyboard v0.0.0-20220611211555-0d226195f203
|
||||
|
@ -29,7 +29,7 @@ require (
|
|||
github.com/mattn/go-shellwords v1.0.12
|
||||
github.com/mitchellh/go-ps v1.0.0
|
||||
github.com/mitchellh/mapstructure v1.5.0
|
||||
github.com/moby/buildkit v0.23.2
|
||||
github.com/moby/buildkit v0.23.0-rc1.0.20250618182037-9b91d20367db // master
|
||||
github.com/moby/go-archive v0.1.0
|
||||
github.com/moby/patternmatcher v0.6.0
|
||||
github.com/moby/sys/atomicwriter v0.1.0
|
||||
|
@ -41,21 +41,21 @@ require (
|
|||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/spf13/pflag v1.0.6
|
||||
github.com/spf13/pflag v1.0.7
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0
|
||||
go.opentelemetry.io/otel v1.35.0
|
||||
go.opentelemetry.io/otel v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0
|
||||
go.opentelemetry.io/otel/metric v1.35.0
|
||||
go.opentelemetry.io/otel/sdk v1.35.0
|
||||
go.opentelemetry.io/otel/trace v1.35.0
|
||||
go.opentelemetry.io/otel/metric v1.36.0
|
||||
go.opentelemetry.io/otel/sdk v1.36.0
|
||||
go.opentelemetry.io/otel/trace v1.36.0
|
||||
go.uber.org/goleak v1.3.0
|
||||
go.uber.org/mock v0.5.2
|
||||
golang.org/x/sync v0.15.0
|
||||
golang.org/x/sys v0.33.0
|
||||
google.golang.org/grpc v1.73.0
|
||||
golang.org/x/sync v0.16.0
|
||||
golang.org/x/sys v0.34.0
|
||||
google.golang.org/grpc v1.74.2
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gotest.tools/v3 v3.5.2
|
||||
tags.cncf.io/container-device-interface v1.0.1
|
||||
|
@ -64,7 +64,7 @@ require (
|
|||
require (
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/Masterminds/semver/v3 v3.2.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.4.0 // indirect
|
||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.30.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.27 // indirect
|
||||
|
@ -98,7 +98,7 @@ require (
|
|||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fvbommel/sortorder v1.1.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
|
@ -175,20 +175,21 @@ require (
|
|||
github.com/zclconf/go-cty v1.16.2 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
|
||||
golang.org/x/crypto v0.37.0 // indirect
|
||||
golang.org/x/net v0.39.0 // indirect
|
||||
golang.org/x/oauth2 v0.29.0 // indirect
|
||||
golang.org/x/term v0.31.0 // indirect
|
||||
golang.org/x/text v0.24.0 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.38.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.25.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
|
|
105
go.sum
105
go.sum
|
@ -10,8 +10,8 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
|
|||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/DefangLabs/secret-detector v0.0.0-20250403165618-22662109213e h1:rd4bOvKmDIx0WeTv9Qz+hghsgyjikFiPrseXHlKepO0=
|
||||
github.com/DefangLabs/secret-detector v0.0.0-20250403165618-22662109213e/go.mod h1:blbwPQh4DTlCZEfk1BLU4oMIhLda2U+A840Uag9DsZw=
|
||||
github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
|
||||
github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
|
||||
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/Microsoft/hcsshim v0.13.0 h1:/BcXOiS6Qi7N9XqUcv27vkIuVOkBEcWstd2pMlWSeaA=
|
||||
|
@ -80,8 +80,8 @@ github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e
|
|||
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
|
||||
github.com/compose-spec/compose-go/v2 v2.7.1 h1:EUIbuaD0R/J1KA+FbJMNbcS9+jt/CVudbp5iHqUllSs=
|
||||
github.com/compose-spec/compose-go/v2 v2.7.1/go.mod h1:TmjkIB9W73fwVxkYY+u2uhMbMUakjiif79DlYgXsyvU=
|
||||
github.com/compose-spec/compose-go/v2 v2.8.1 h1:27O4dzyhiS/UEUKp1zHOHCBWD1WbxGsYGMNNaSejTk4=
|
||||
github.com/compose-spec/compose-go/v2 v2.8.1/go.mod h1:veko/VB7URrg/tKz3vmIAQDaz+CGiXH8vZsW79NmAww=
|
||||
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
|
||||
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
|
||||
github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc=
|
||||
|
@ -127,17 +127,17 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
|
|||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
|
||||
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/docker/buildx v0.25.0 h1:qs5WxBo0wQKSXcQ+v6UhWaeM2Pu+95ZCymaimRzInaE=
|
||||
github.com/docker/buildx v0.25.0/go.mod h1:xJcOeBhz49tgqN174MMGuOU4bxNmgfaLnZn7Gm641EE=
|
||||
github.com/docker/cli v28.3.1+incompatible h1:ZUdwOLDEBoE3TE5rdC9IXGY5HPHksJK3M+hJEWhh2mc=
|
||||
github.com/docker/cli v28.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/buildx v0.26.1 h1:nlj3bVhHK9fV7g6floRvGhPcR0u2hxCPMmObCS1ZKL4=
|
||||
github.com/docker/buildx v0.26.1/go.mod h1:oxMC30cSHPaCCkY2j+EqN7uxFikjSzVC0c44lo9b4Fo=
|
||||
github.com/docker/cli v28.3.3+incompatible h1:fp9ZHAr1WWPGdIWBM1b3zLtgCF+83gRdVMTJsUeiyAo=
|
||||
github.com/docker/cli v28.3.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli-docs-tool v0.10.0 h1:bOD6mKynPQgojQi3s2jgcUWGp/Ebqy1SeCr9VfKQLLU=
|
||||
github.com/docker/cli-docs-tool v0.10.0/go.mod h1:5EM5zPnT2E7yCLERZmrDA234Vwn09fzRHP4aX1qwp1U=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v28.3.1+incompatible h1:20+BmuA9FXlCX4ByQ0vYJcUEnOmRM6XljDnFWR+jCyY=
|
||||
github.com/docker/docker v28.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
|
||||
github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
|
||||
github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
|
||||
|
@ -171,8 +171,8 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
|
|||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
|
@ -317,8 +317,8 @@ github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/z
|
|||
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/buildkit v0.23.2 h1:gt/dkfcpgTXKx+B9I310kV767hhVqTvEyxGgI3mqsGQ=
|
||||
github.com/moby/buildkit v0.23.2/go.mod h1:iEjAfPQKIuO+8y6OcInInvzqTMiKMbb2RdJz1K/95a0=
|
||||
github.com/moby/buildkit v0.23.0-rc1.0.20250618182037-9b91d20367db h1:ZzrDuG9G1A/RwJvuogNplxCEKsIUQh1CqEnqbOGFgKE=
|
||||
github.com/moby/buildkit v0.23.0-rc1.0.20250618182037-9b91d20367db/go.mod h1:v5jMDvQgUyidk3wu3NvVAAd5JJo83nfet9Gf/o0+EAQ=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
|
||||
|
@ -450,8 +450,9 @@ github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wx
|
|||
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431 h1:XTHrT015sxHyJ5FnQ0AeemSspZWaDq7DoTRW0EVsDCE=
|
||||
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
|
||||
github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c h1:2EejZtjFjKJGk71ANb+wtFK5EjUzUkEM3R0xnp559xg=
|
||||
github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
|
@ -508,36 +509,38 @@ go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJyS
|
|||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 h1:0tY123n7CdWMem7MOVdKOt0YfshufLCwfE5Bob+hQuM=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0/go.mod h1:CosX/aS4eHnG9D7nESYpV753l4j9q5j3SL/PUYd2lR8=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
|
||||
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
|
||||
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 h1:FZ6ei8GFW7kyPYdxJaV2rgI6M+4tvZzhYsQ2wgyVC08=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0/go.mod h1:MdEu/mC6j3D+tTEfvI15b5Ci2Fn7NneJ71YMoiS3tpI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 h1:ZsXq73BERAiNuuFXYqP4MR5hBrjXfMGSO+Cx7qoOZiM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0/go.mod h1:hg1zaDMpyZJuUzjFxFsRYBoccE86tM9Uf4IqNMUxvrY=
|
||||
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
|
||||
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0/go.mod h1:CXIWhUomyWBG/oY2/r/kLp6K/cmx9e/7DLpBuuGdLCA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 h1:0NIXxOCFx+SKbhCVxwl3ETG8ClLPAa0KuKV6p3yhxP8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0/go.mod h1:ChZSJbbfbl/DcRZNc9Gqh6DYGlfjw4PvO1pEOZH1ZsE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
|
||||
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
|
||||
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
|
||||
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
|
||||
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
|
||||
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
|
||||
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
|
||||
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
|
||||
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
|
@ -546,8 +549,8 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
|
|||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
|
||||
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
|
||||
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
|
||||
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
|
@ -562,10 +565,10 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
|
||||
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
|
||||
golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
|
||||
golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
|
||||
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -573,8 +576,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
|
||||
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -596,19 +599,19 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
|
||||
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
|
||||
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
|
||||
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
||||
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
||||
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
|
||||
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
@ -622,13 +625,13 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
|||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a h1:SGktgSolFCo75dnHJF2yMvnns6jCmHFJ0vE4Vn2JKvQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a/go.mod h1:a77HrdMjoeKbnd2jmgcWdaS++ZLZAEq3orIOAEIKiVw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
|
||||
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
|
||||
google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4=
|
||||
google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//go:build linux || openbsd
|
||||
//go:build linux || openbsd || freebsd
|
||||
|
||||
/*
|
||||
Copyright 2020 Docker Compose CLI authors
|
||||
|
|
|
@ -77,11 +77,13 @@ func ProjectOptions(ctx context.Context, proj *types.Project) SpanOptions {
|
|||
attribute.StringSlice("project.networks", proj.NetworkNames()),
|
||||
attribute.StringSlice("project.secrets", proj.SecretNames()),
|
||||
attribute.StringSlice("project.configs", proj.ConfigNames()),
|
||||
attribute.StringSlice("project.models", proj.ModelNames()),
|
||||
attribute.StringSlice("project.extensions", keys(proj.Extensions)),
|
||||
attribute.StringSlice("project.services.active", proj.ServiceNames()),
|
||||
attribute.StringSlice("project.services.disabled", proj.DisabledServiceNames()),
|
||||
attribute.StringSlice("project.services.build", proj.ServicesWithBuild()),
|
||||
attribute.StringSlice("project.services.depends_on", proj.ServicesWithDependsOn()),
|
||||
attribute.StringSlice("project.services.models", proj.ServicesWithModels()),
|
||||
attribute.StringSlice("project.services.capabilities", capabilities),
|
||||
attribute.StringSlice("project.services.capabilities.gpu", gpu),
|
||||
attribute.StringSlice("project.services.capabilities.tpu", tpu),
|
||||
|
@ -110,6 +112,7 @@ func ServiceOptions(service types.ServiceConfig) SpanOptions {
|
|||
attribute.String("service.name", service.Name),
|
||||
attribute.String("service.image", service.Image),
|
||||
attribute.StringSlice("service.networks", keys(service.Networks)),
|
||||
attribute.StringSlice("service.models", keys(service.Models)),
|
||||
}
|
||||
|
||||
configNames := make([]string, len(service.Configs))
|
||||
|
|
|
@ -19,6 +19,7 @@ package api
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -170,8 +171,14 @@ type BuildOptions struct {
|
|||
Print bool
|
||||
// Check let builder validate build configuration
|
||||
Check bool
|
||||
// Provenance
|
||||
Provenance bool
|
||||
// Attestations allows to enable attestations generation
|
||||
Attestations bool
|
||||
// Provenance generate a provenance attestation
|
||||
Provenance string
|
||||
// SBOM generate a SBOM attestation
|
||||
SBOM string
|
||||
// Out is the stream to write build progress
|
||||
Out io.Writer
|
||||
}
|
||||
|
||||
// Apply mutates project according to build options
|
||||
|
@ -398,6 +405,8 @@ type AttachOptions struct {
|
|||
type EventsOptions struct {
|
||||
Services []string
|
||||
Consumer func(event Event) error
|
||||
Since string
|
||||
Until string
|
||||
}
|
||||
|
||||
// Event is a container runtime event served by Events API
|
||||
|
@ -647,7 +656,6 @@ type LogConsumer interface {
|
|||
Log(containerName, message string)
|
||||
Err(containerName, message string)
|
||||
Status(container, msg string)
|
||||
Register(container string)
|
||||
}
|
||||
|
||||
// ContainerEventListener is a callback to process ContainerEvent from services
|
||||
|
@ -655,16 +663,18 @@ type ContainerEventListener func(event ContainerEvent)
|
|||
|
||||
// ContainerEvent notify an event has been collected on source container implementing Service
|
||||
type ContainerEvent struct {
|
||||
Type int
|
||||
// Container is the name of the container _without the project prefix_.
|
||||
Type int
|
||||
Time int64
|
||||
Container *ContainerSummary
|
||||
// Source is the name of the container _without the project prefix_.
|
||||
//
|
||||
// This is only suitable for display purposes within Compose, as it's
|
||||
// not guaranteed to be unique across services.
|
||||
Container string
|
||||
ID string
|
||||
Service string
|
||||
Line string
|
||||
// ContainerEventExit only
|
||||
Source string
|
||||
ID string
|
||||
Service string
|
||||
Line string
|
||||
// ExitCode is only set on ContainerEventExited events
|
||||
ExitCode int
|
||||
Restarting bool
|
||||
}
|
||||
|
@ -674,17 +684,19 @@ const (
	ContainerEventLog = iota
	// ContainerEventErr is a ContainerEvent of type log on stderr. Line is set
	ContainerEventErr
	// ContainerEventAttach is a ContainerEvent of type attach. First event sent about a container
	ContainerEventAttach
	// ContainerEventStarted let consumer know a container has been started
	ContainerEventStarted
	// ContainerEventRestarted let consumer know a container has been restarted
	ContainerEventRestarted
	// ContainerEventStopped is a ContainerEvent of type stopped.
	ContainerEventStopped
	// ContainerEventCreated let consumer know a new container has been created
	ContainerEventCreated
	// ContainerEventRecreated let consumer know container stopped but his being replaced
	ContainerEventRecreated
	// ContainerEventExit is a ContainerEvent of type exit. ExitCode is set
	ContainerEventExit
	// ContainerEventExited is a ContainerEvent of type exit. ExitCode is set
	ContainerEventExited
	// UserCancel user cancelled compose up, we are stopping containers
	UserCancel
	// HookEventLog is a ContainerEvent of type log on stdout by service hook
	HookEventLog
)

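A hedged sketch of how a ContainerEventListener might dispatch on the renamed event types (ContainerEventExited and friends); the event struct is reduced to the fields the sketch uses, so it is not the repository type itself.

```go
package main

import "fmt"

// Reduced local copies of the constants and event shape shown above.
const (
	containerEventLog = iota
	containerEventErr
	containerEventExited
)

type containerEvent struct {
	Type     int
	Source   string
	Line     string
	ExitCode int
}

// dispatch is an illustrative listener: log lines are printed with their
// source name, exit events report the code carried by the event.
func dispatch(e containerEvent) {
	switch e.Type {
	case containerEventLog:
		fmt.Printf("%s | %s\n", e.Source, e.Line)
	case containerEventErr:
		fmt.Printf("%s | (stderr) %s\n", e.Source, e.Line)
	case containerEventExited:
		fmt.Printf("%s exited with code %d\n", e.Source, e.ExitCode)
	}
}

func main() {
	dispatch(containerEvent{Type: containerEventLog, Source: "web-1", Line: "ready"})
	dispatch(containerEvent{Type: containerEventExited, Source: "web-1", ExitCode: 0})
}
```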
@ -20,6 +20,7 @@ import (
	"github.com/docker/cli/cli/streams"
)

// Streams defines the standard streams (stdin, stdout, stderr) used by the CLI.
type Streams interface {
	Out() *streams.Out
	Err() *streams.Out

@ -17,8 +17,6 @@
package api

import (
	"fmt"

	"github.com/hashicorp/go-version"

	"github.com/docker/compose/v2/internal"

@ -65,9 +63,6 @@ var ComposeVersion string
func init() {
	v, err := version.NewVersion(internal.Version)
	if err == nil {
		segments := v.Segments()
		if len(segments) > 2 {
			ComposeVersion = fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2])
		}
		ComposeVersion = v.Core().String()
	}
}

@ -0,0 +1,35 @@
/*
   Copyright 2020 Docker Compose CLI authors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package api

import (
	"testing"

	"github.com/docker/compose/v2/internal"
	"github.com/hashicorp/go-version"
	"gotest.tools/v3/assert"
)

func TestComposeVersionInitialization(t *testing.T) {
	v, err := version.NewVersion(internal.Version)
	if err != nil {
		assert.Equal(t, "", ComposeVersion, "ComposeVersion should be empty for a non-semver internal version (e.g. 'devel')")
	} else {
		expected := v.Core().String()
		assert.Equal(t, expected, ComposeVersion, "ComposeVersion should be the core of internal.Version")
	}
}

@ -23,10 +23,11 @@ import (
	"os"
	"os/user"
	"path/filepath"
	"runtime"
	"strconv"

	"github.com/compose-spec/compose-go/v2/types"
	cerrdefs "github.com/containerd/errdefs"
	"github.com/containerd/errdefs"
	"github.com/docker/cli/cli/command"
	cli "github.com/docker/cli/cli/command/container"
	"github.com/docker/compose/v2/pkg/api"

@ -112,15 +113,20 @@ func convert(ctx context.Context, dockerCli command.Cli, model map[string]any, o
		return err
	}

	usr, err := user.Current()
	if err != nil {
		return err
	}
	created, err := dockerCli.Client().ContainerCreate(ctx, &container.Config{
	containerConfig := &container.Config{
		Image: transformation,
		Env:   []string{"LICENSE_AGREEMENT=true"},
		User:  usr.Uid,
	}, &container.HostConfig{
	}
	// On POSIX systems, this is a decimal number representing the uid.
	// On Windows, this is a security identifier (SID) in a string format and the engine isn't able to manage it
	if runtime.GOOS != "windows" {
		usr, err := user.Current()
		if err != nil {
			return err
		}
		containerConfig.User = usr.Uid
	}
	created, err := dockerCli.Client().ContainerCreate(ctx, containerConfig, &container.HostConfig{
		AutoRemove: true,
		Binds:      binds,
	}, &network.NetworkingConfig{}, nil, "")

@ -198,7 +204,7 @@ func loadFileObject(conf types.FileObjectConfig) (types.FileObjectConfig, error)

func inspectWithPull(ctx context.Context, dockerCli command.Cli, imageName string) (image.InspectResponse, error) {
	inspect, err := dockerCli.Client().ImageInspect(ctx, imageName)
	if cerrdefs.IsNotFound(err) {
	if errdefs.IsNotFound(err) {
		var stream io.ReadCloser
		stream, err = dockerCli.Client().ImagePull(ctx, imageName, image.PullOptions{})
		if err != nil {
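A standalone sketch of the same POSIX/Windows guard: user.Current().Uid is a numeric uid on POSIX, but a SID string on Windows that the engine cannot use as a container user. The helper name below is local to the sketch, not part of the diff.

```go
package main

import (
	"fmt"
	"os/user"
	"runtime"
)

// containerUser returns a value suitable for container.Config.User:
// the numeric uid on POSIX systems, and an empty string on Windows,
// where user.Current().Uid is a SID the engine cannot interpret.
func containerUser() (string, error) {
	if runtime.GOOS == "windows" {
		return "", nil
	}
	usr, err := user.Current()
	if err != nil {
		return "", err
	}
	return usr.Uid, nil
}

func main() {
	u, err := containerUser()
	if err != nil {
		panic(err)
	}
	fmt.Printf("container user: %q\n", u)
}
```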
@ -33,6 +33,8 @@ import (
const (
	TransformerLabel        = "com.docker.compose.bridge"
	DefaultTransformerImage = "docker/compose-bridge-kubernetes"

	templatesPath = "/templates"
)

type CreateTransformerOptions struct {

@ -73,7 +75,7 @@ func CreateTransformer(ctx context.Context, dockerCli command.Cli, options Creat
	if err != nil {
		return err
	}
	content, stat, err := dockerCli.Client().CopyFromContainer(ctx, created.ID, "/templates")
	content, stat, err := dockerCli.Client().CopyFromContainer(ctx, created.ID, templatesPath)
	if err != nil {
		return err
	}

@ -82,7 +84,7 @@ func CreateTransformer(ctx context.Context, dockerCli command.Cli, options Creat
	}()

	srcInfo := archive.CopyInfo{
		Path:   "/templates",
		Path:   templatesPath,
		Exists: true,
		IsDir:  stat.Mode.IsDir(),
	}

@ -61,41 +61,37 @@ func (s *composeService) attach(ctx context.Context, project *types.Project, lis
	}

func (s *composeService) attachContainer(ctx context.Context, container containerType.Summary, listener api.ContainerEventListener) error {
	serviceName := container.Labels[api.ServiceLabel]
	containerName := getContainerNameWithoutProject(container)
	service := container.Labels[api.ServiceLabel]
	name := getContainerNameWithoutProject(container)
	return s.doAttachContainer(ctx, service, container.ID, name, listener)
}

	listener(api.ContainerEvent{
		Type:      api.ContainerEventAttach,
		Container: containerName,
		ID:        container.ID,
		Service:   serviceName,
	})

	wOut := utils.GetWriter(func(line string) {
		listener(api.ContainerEvent{
			Type:      api.ContainerEventLog,
			Container: containerName,
			ID:        container.ID,
			Service:   serviceName,
			Line:      line,
		})
	})
	wErr := utils.GetWriter(func(line string) {
		listener(api.ContainerEvent{
			Type:      api.ContainerEventErr,
			Container: containerName,
			ID:        container.ID,
			Service:   serviceName,
			Line:      line,
		})
	})

	inspect, err := s.apiClient().ContainerInspect(ctx, container.ID)
func (s *composeService) doAttachContainer(ctx context.Context, service, id, name string, listener api.ContainerEventListener) error {
	inspect, err := s.apiClient().ContainerInspect(ctx, id)
	if err != nil {
		return err
	}

	_, _, err = s.attachContainerStreams(ctx, container.ID, inspect.Config.Tty, nil, wOut, wErr)
	wOut := utils.GetWriter(func(line string) {
		listener(api.ContainerEvent{
			Type:    api.ContainerEventLog,
			Source:  name,
			ID:      id,
			Service: service,
			Line:    line,
		})
	})
	wErr := utils.GetWriter(func(line string) {
		listener(api.ContainerEvent{
			Type:    api.ContainerEventErr,
			Source:  name,
			ID:      id,
			Service: service,
			Line:    line,
		})
	})

	_, _, err = s.attachContainerStreams(ctx, id, inspect.Config.Tty, nil, wOut, wErr)
	return err
}

@ -21,6 +21,7 @@ import (
	"errors"
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"

@ -397,6 +398,7 @@ func resolveAndMergeBuildArgs(dockerCli command.Cli, project *types.Project, ser
	return result
}

//nolint:gocyclo
func (s *composeService) toBuildOptions(project *types.Project, service types.ServiceConfig, options api.BuildOptions) (build.Options, error) {
	plats, err := parsePlatforms(service)
	if err != nil {

@ -471,8 +473,19 @@ func (s *composeService) toBuildOptions(project *types.Project, service types.Se
	}

	attests := map[string]*string{}
	if !options.Provenance {
		attests["provenance"] = nil
	if options.Attestations {
		if service.Build.Provenance != "" {
			attests["provenance"] = attestation(service.Build.Provenance, "provenance")
		}
		if service.Build.SBOM != "" {
			attests["sbom"] = attestation(service.Build.SBOM, "sbom")
		}
	}
	if options.Provenance != "" {
		attests["provenance"] = attestation(options.Provenance, "provenance")
	}
	if options.SBOM != "" {
		attests["sbom"] = attestation(options.SBOM, "sbom")
	}

	return build.Options{

@ -502,6 +515,16 @@ func (s *composeService) toBuildOptions(project *types.Project, service types.Se
	}, nil
}

func attestation(attest string, val string) *string {
	if b, err := strconv.ParseBool(val); err == nil {
		s := fmt.Sprintf("type=%s,disabled=%t", attest, b)
		return &s
	} else {
		s := fmt.Sprintf("type=%s,%s", attest, val)
		return &s
	}
}

func toUlimitOpt(ulimits map[string]*types.UlimitsConfig) *cliopts.UlimitOpt {
	ref := map[string]*container.Ulimit{}
	for _, limit := range toUlimits(ulimits) {
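A rough sketch of the attestation string shapes handed to buildx: a boolean value maps to a "disabled" toggle, anything else is appended verbatim. The helper below is a local illustration, not the repository function, and the boolean polarity is copied from the diff above without asserting its intended semantics.

```go
package main

import (
	"fmt"
	"strconv"
)

// attest mirrors the formatting used by the attestation() helper above,
// re-declared locally so this sketch runs on its own.
func attest(kind, val string) string {
	if b, err := strconv.ParseBool(val); err == nil {
		return fmt.Sprintf("type=%s,disabled=%t", kind, b)
	}
	return fmt.Sprintf("type=%s,%s", kind, val)
}

func main() {
	fmt.Println(attest("provenance", "mode=max")) // type=provenance,mode=max
	fmt.Println(attest("sbom", "true"))           // type=sbom,disabled=true
}
```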
@ -20,6 +20,7 @@ import (
|
|||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
@ -57,6 +58,9 @@ func buildWithBake(dockerCli command.Cli) (bool, error) {
|
|||
return false, err
|
||||
}
|
||||
if !bake {
|
||||
if ok {
|
||||
logrus.Warnf("COMPOSE_BAKE=false is deprecated, support for internal compose builder will be removed in next release")
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
|
@ -126,7 +130,18 @@ type buildStatus struct {
|
|||
func (s *composeService) doBuildBake(ctx context.Context, project *types.Project, serviceToBeBuild types.Services, options api.BuildOptions) (map[string]string, error) { //nolint:gocyclo
|
||||
eg := errgroup.Group{}
|
||||
ch := make(chan *client.SolveStatus)
|
||||
display, err := progressui.NewDisplay(os.Stdout, progressui.DisplayMode(options.Progress))
|
||||
if options.Progress == progress.ModeAuto {
|
||||
options.Progress = os.Getenv("BUILDKIT_PROGRESS")
|
||||
}
|
||||
displayMode := progressui.DisplayMode(options.Progress)
|
||||
out := options.Out
|
||||
if out == nil {
|
||||
if !s.dockerCli.Out().IsTerminal() {
|
||||
displayMode = progressui.PlainMode
|
||||
}
|
||||
out = os.Stdout // should be s.dockerCli.Out(), but NewDisplay require access to the underlying *File
|
||||
}
|
||||
display, err := progressui.NewDisplay(out, displayMode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -140,10 +155,11 @@ func (s *composeService) doBuildBake(ctx context.Context, project *types.Project
|
|||
Targets: map[string]bakeTarget{},
|
||||
}
|
||||
var (
|
||||
group bakeGroup
|
||||
privileged bool
|
||||
read []string
|
||||
targets = make(map[string]string, len(serviceToBeBuild)) // service name -> build target
|
||||
group bakeGroup
|
||||
privileged bool
|
||||
read []string
|
||||
expectedImages = make(map[string]string, len(serviceToBeBuild)) // service name -> expected image
|
||||
targets = make(map[string]string, len(serviceToBeBuild)) // service name -> build target
|
||||
)
|
||||
|
||||
// produce a unique ID for service used as bake target
|
||||
|
@ -163,6 +179,7 @@ func (s *composeService) doBuildBake(ctx context.Context, project *types.Project
|
|||
continue
|
||||
}
|
||||
build := *service.Build
|
||||
labels := getImageBuildLabels(project, service)
|
||||
|
||||
args := types.Mapping{}
|
||||
for k, v := range resolveAndMergeBuildArgs(s.dockerCli, project, service, options) {
|
||||
|
@ -190,7 +207,11 @@ func (s *composeService) doBuildBake(ctx context.Context, project *types.Project
|
|||
case len(service.Build.Platforms) > 1:
|
||||
outputs = []string{fmt.Sprintf("type=image,push=%t", push)}
|
||||
default:
|
||||
outputs = []string{fmt.Sprintf("type=docker,load=true,push=%t", push)}
|
||||
if push {
|
||||
outputs = []string{"type=registry"}
|
||||
} else {
|
||||
outputs = []string{"type=docker"}
|
||||
}
|
||||
}
|
||||
|
||||
read = append(read, build.Context)
|
||||
|
@ -201,6 +222,9 @@ func (s *composeService) doBuildBake(ctx context.Context, project *types.Project
|
|||
}
|
||||
}
|
||||
|
||||
image := api.GetImageNameOrDefault(service, project.Name)
|
||||
expectedImages[serviceName] = image
|
||||
|
||||
target := targets[serviceName]
|
||||
cfg.Targets[target] = bakeTarget{
|
||||
Context: build.Context,
|
||||
|
@ -208,8 +232,8 @@ func (s *composeService) doBuildBake(ctx context.Context, project *types.Project
|
|||
Dockerfile: dockerFilePath(build.Context, build.Dockerfile),
|
||||
DockerfileInline: strings.ReplaceAll(build.DockerfileInline, "${", "$${"),
|
||||
Args: args,
|
||||
Labels: build.Labels,
|
||||
Tags: append(build.Tags, api.GetImageNameOrDefault(service, project.Name)),
|
||||
Labels: labels,
|
||||
Tags: append(build.Tags, image),
|
||||
|
||||
CacheFrom: build.CacheFrom,
|
||||
CacheTo: build.CacheTo,
|
||||
|
@ -290,9 +314,12 @@ func (s *composeService) doBuildBake(ctx context.Context, project *types.Project
|
|||
|
||||
logrus.Debugf("Executing bake with args: %v", args)
|
||||
|
||||
if s.dryRun {
|
||||
return dryRunBake(ctx, cfg), nil
|
||||
}
|
||||
cmd := exec.CommandContext(ctx, buildx.Path, args...)
|
||||
|
||||
err = s.prepareShellOut(ctx, project, cmd)
|
||||
err = s.prepareShellOut(ctx, project.Environment, cmd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -358,13 +385,14 @@ func (s *composeService) doBuildBake(ctx context.Context, project *types.Project
|
|||
cw := progress.ContextWriter(ctx)
|
||||
results := map[string]string{}
|
||||
for name := range serviceToBeBuild {
|
||||
image := expectedImages[name]
|
||||
target := targets[name]
|
||||
built, ok := md[target]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("build result not found in Bake metadata for service %s", name)
|
||||
}
|
||||
results[name] = built.Digest
|
||||
cw.Event(progress.BuiltEvent(name))
|
||||
results[image] = built.Digest
|
||||
cw.Event(progress.BuiltEvent(image))
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
@ -443,3 +471,30 @@ func dockerFilePath(ctxName string, dockerfile string) string {
|
|||
}
|
||||
return dockerfile
|
||||
}
|
||||
|
||||
func dryRunBake(ctx context.Context, cfg bakeConfig) map[string]string {
|
||||
w := progress.ContextWriter(ctx)
|
||||
bakeResponse := map[string]string{}
|
||||
for name, target := range cfg.Targets {
|
||||
dryRunUUID := fmt.Sprintf("dryRun-%x", sha1.Sum([]byte(name)))
|
||||
displayDryRunBuildEvent(w, name, dryRunUUID, target.Tags[0])
|
||||
bakeResponse[name] = dryRunUUID
|
||||
}
|
||||
for name := range bakeResponse {
|
||||
w.Event(progress.BuiltEvent(name))
|
||||
}
|
||||
return bakeResponse
|
||||
}
|
||||
|
||||
func displayDryRunBuildEvent(w progress.Writer, name string, dryRunUUID, tag string) {
|
||||
w.Event(progress.Event{
|
||||
ID: name + " ==>",
|
||||
Status: progress.Done,
|
||||
Text: fmt.Sprintf("==> writing image %s", dryRunUUID),
|
||||
})
|
||||
w.Event(progress.Event{
|
||||
ID: name + " ==> ==>",
|
||||
Status: progress.Done,
|
||||
Text: fmt.Sprintf(`naming to %s`, tag),
|
||||
})
|
||||
}
|
||||
|
|
|
@ -70,16 +70,7 @@ func (s composeService) dryRunBuildResponse(ctx context.Context, name string, op
	w := progress.ContextWriter(ctx)
	buildResponse := map[string]*client.SolveResponse{}
	dryRunUUID := fmt.Sprintf("dryRun-%x", sha1.Sum([]byte(name)))
	w.Event(progress.Event{
		ID:     "==>",
		Status: progress.Done,
		Text:   fmt.Sprintf("==> writing image %s", dryRunUUID),
	})
	w.Event(progress.Event{
		ID:     "==> ==>",
		Status: progress.Done,
		Text:   fmt.Sprintf(`naming to %s`, options.Tags[0]),
	})
	displayDryRunBuildEvent(w, name, dryRunUUID, options.Tags[0])
	buildResponse[name] = &client.SolveResponse{ExporterResponse: map[string]string{
		"containerimage.digest": dryRunUUID,
	}}

@ -128,12 +128,6 @@ func isService(services ...string) containerPredicate {
	}
}

func isRunning() containerPredicate {
	return func(c container.Summary) bool {
		return c.State == "running"
	}
}

// isOrphaned is a predicate to select containers without a matching service definition in compose project
func isOrphaned(project *types.Project) containerPredicate {
	services := append(project.ServiceNames(), project.DisabledServiceNames()...)
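For context on the predicate style used by isService/isOrphaned (and the removed isRunning), here is a self-contained sketch of filtering container summaries through such predicates; the types are simplified stand-ins, not the engine's container.Summary.

```go
package main

import "fmt"

// summary is a stripped-down stand-in for the engine's container.Summary.
type summary struct {
	Name  string
	State string
}

// predicate mirrors the containerPredicate idea: a boolean test over a summary.
type predicate func(summary) bool

func isRunning() predicate {
	return func(c summary) bool { return c.State == "running" }
}

// filter keeps only the summaries matching the predicate.
func filter(in []summary, p predicate) []summary {
	var out []summary
	for _, c := range in {
		if p(c) {
			out = append(out, c)
		}
	}
	return out
}

func main() {
	all := []summary{{"web-1", "running"}, {"db-1", "exited"}}
	fmt.Println(filter(all, isRunning())) // [{web-1 running}]
}
```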
@ -236,7 +236,7 @@ func (c *convergence) stopDependentContainers(ctx context.Context, project *type
	err := c.service.stop(ctx, project.Name, api.StopOptions{
		Services: dependents,
		Project:  project,
	})
	}, nil)
	if err != nil {
		return err
	}

@ -635,13 +635,18 @@ func (s *composeService) recreateContainer(ctx context.Context, project *types.P
	if inherit {
		inherited = &replaced
	}

	replacedContainerName := service.ContainerName
	if replacedContainerName == "" {
		replacedContainerName = service.Name + api.Separator + strconv.Itoa(number)
	}
	name := getContainerName(project.Name, service, number)
	tmpName := fmt.Sprintf("%s_%s", replaced.ID[:12], name)
	opts := createOptions{
		AutoRemove:        false,
		AttachStdin:       false,
		UseNetworkAliases: true,
		Labels:            mergeLabels(service.Labels, service.CustomLabels).Add(api.ContainerReplaceLabel, replaced.ID),
		Labels:            mergeLabels(service.Labels, service.CustomLabels).Add(api.ContainerReplaceLabel, replacedContainerName),
	}
	created, err = s.createMobyContainer(ctx, project, service, tmpName, number, inherited, opts, w)
	if err != nil {

@ -659,7 +664,7 @@ func (s *composeService) recreateContainer(ctx context.Context, project *types.P
		return created, err
	}

	err = s.apiClient().ContainerRename(ctx, created.ID, name)
	err = s.apiClient().ContainerRename(ctx, tmpName, name)
	if err != nil {
		return created, err
	}

@ -31,7 +31,6 @@ import (
	"github.com/docker/cli/cli/command"
	"github.com/docker/compose/v2/pkg/api"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/pkg/system"
	"github.com/moby/go-archive"
)

@ -161,7 +160,7 @@ func (s *composeService) copyToContainer(ctx context.Context, containerID string
	// If the destination is a symbolic link, we should evaluate it.
	if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
		linkTarget := dstStat.LinkTarget
		if !system.IsAbs(linkTarget) {
		if !isAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := archive.SplitPathDirEntry(dstPath)
			linkTarget = filepath.Join(dstParent, linkTarget)

@ -264,7 +263,7 @@ func (s *composeService) copyFromContainer(ctx context.Context, containerID, src
	// If the destination is a symbolic link, we should follow it.
	if err == nil && srcStat.Mode&os.ModeSymlink != 0 {
		linkTarget := srcStat.LinkTarget
		if !system.IsAbs(linkTarget) {
		if !isAbs(linkTarget) {
			// Join with the parent directory.
			srcParent, _ := archive.SplitPathDirEntry(srcPath)
			linkTarget = filepath.Join(srcParent, linkTarget)

@ -302,8 +301,20 @@ func (s *composeService) copyFromContainer(ctx context.Context, containerID, src
	return archive.CopyTo(preArchive, srcInfo, dstPath)
}

// IsAbs is a platform-agnostic wrapper for filepath.IsAbs.
//
// On Windows, golang filepath.IsAbs does not consider a path \windows\system32
// as absolute as it doesn't start with a drive-letter/colon combination. However,
// in docker we need to verify things such as WORKDIR /windows/system32 in
// a Dockerfile (which gets translated to \windows\system32 when being processed
// by the daemon). This SHOULD be treated as absolute from a docker processing
// perspective.
func isAbs(path string) bool {
	return filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator))
}

func splitCpArg(arg string) (ctr, path string) {
	if system.IsAbs(arg) {
	if isAbs(arg) {
		// Explicit local absolute path, e.g., `C:\foo` or `/foo`.
		return "", arg
	}

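A small sketch of why the local isAbs wrapper exists: on Windows, filepath.IsAbs rejects rooted paths that lack a drive letter, while copy handling should still treat paths like \windows\system32 as absolute. The wrapper below copies the one-liner above; the printed results describe a POSIX build.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// isAbs copies the wrapper above: accept anything filepath.IsAbs accepts,
// plus paths that are merely rooted at the path separator (the Windows
// drive-less case that filepath.IsAbs alone would reject).
func isAbs(path string) bool {
	return filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator))
}

func main() {
	// On a POSIX build both report true; on Windows the second case is the
	// one filepath.IsAbs would reject on its own.
	fmt.Println(isAbs("/templates"))
	fmt.Println(isAbs(filepath.Join(string(os.PathSeparator), "windows", "system32")))
}
```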
@ -30,7 +30,7 @@ import (

	"github.com/compose-spec/compose-go/v2/paths"
	"github.com/compose-spec/compose-go/v2/types"
	cerrdefs "github.com/containerd/errdefs"
	"github.com/containerd/errdefs"
	"github.com/docker/docker/api/types/blkiodev"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"

@ -1262,7 +1262,7 @@ func (s *composeService) ensureNetwork(ctx context.Context, project *types.Proje
	}

	id, err := s.resolveOrCreateNetwork(ctx, project, name, n)
	if cerrdefs.IsConflict(err) {
	if errdefs.IsConflict(err) {
		// Maybe another execution of `docker compose up|run` created same network
		// let's retry once
		return s.resolveOrCreateNetwork(ctx, project, name, n)

@ -1428,7 +1428,7 @@ func (s *composeService) removeDivergedNetwork(ctx context.Context, project *typ
	err := s.stop(ctx, project.Name, api.StopOptions{
		Services: services,
		Project:  project,
	})
	}, nil)
	if err != nil {
		return nil, err
	}

@ -1497,7 +1497,7 @@ func (s *composeService) resolveExternalNetwork(ctx context.Context, n *types.Ne
	sn, err := s.apiClient().NetworkInspect(ctx, n.Name, network.InspectOptions{})
	if err == nil {
		networks = append(networks, sn)
	} else if !cerrdefs.IsNotFound(err) {
	} else if !errdefs.IsNotFound(err) {
		return "", err
	}

@ -1534,7 +1534,7 @@ func (s *composeService) resolveExternalNetwork(ctx context.Context, n *types.Ne
func (s *composeService) ensureVolume(ctx context.Context, name string, volume types.VolumeConfig, project *types.Project, assumeYes bool) (string, error) {
	inspected, err := s.apiClient().VolumeInspect(ctx, volume.Name)
	if err != nil {
		if !cerrdefs.IsNotFound(err) {
		if !errdefs.IsNotFound(err) {
			return "", err
		}
		if volume.External {

@ -1599,7 +1599,7 @@ func (s *composeService) removeDivergedVolume(ctx context.Context, name string,
	err := s.stop(ctx, project.Name, api.StopOptions{
		Services: services,
		Project:  project,
	})
	}, nil)
	if err != nil {
		return err
	}

@ -23,7 +23,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/compose-spec/compose-go/v2/types"
|
||||
cerrdefs "github.com/containerd/errdefs"
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/docker/compose/v2/pkg/api"
|
||||
"github.com/docker/compose/v2/pkg/progress"
|
||||
"github.com/docker/compose/v2/pkg/utils"
|
||||
|
@ -219,7 +219,7 @@ func (s *composeService) removeNetwork(ctx context.Context, composeNetworkName s
|
|||
continue
|
||||
}
|
||||
nw, err := s.apiClient().NetworkInspect(ctx, net.ID, network.InspectOptions{})
|
||||
if cerrdefs.IsNotFound(err) {
|
||||
if errdefs.IsNotFound(err) {
|
||||
w.Event(progress.NewEvent(eventName, progress.Warning, "No resource found to remove"))
|
||||
return nil
|
||||
}
|
||||
|
@ -233,7 +233,7 @@ func (s *composeService) removeNetwork(ctx context.Context, composeNetworkName s
|
|||
}
|
||||
|
||||
if err := s.apiClient().NetworkRemove(ctx, net.ID); err != nil {
|
||||
if cerrdefs.IsNotFound(err) {
|
||||
if errdefs.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
w.Event(progress.ErrorEvent(eventName))
|
||||
|
@ -261,11 +261,11 @@ func (s *composeService) removeImage(ctx context.Context, image string, w progre
|
|||
w.Event(progress.NewEvent(id, progress.Done, "Removed"))
|
||||
return nil
|
||||
}
|
||||
if cerrdefs.IsConflict(err) {
|
||||
if errdefs.IsConflict(err) {
|
||||
w.Event(progress.NewEvent(id, progress.Warning, "Resource is still in use"))
|
||||
return nil
|
||||
}
|
||||
if cerrdefs.IsNotFound(err) {
|
||||
if errdefs.IsNotFound(err) {
|
||||
w.Event(progress.NewEvent(id, progress.Done, "Warning: No resource found to remove"))
|
||||
return nil
|
||||
}
|
||||
|
@ -276,7 +276,7 @@ func (s *composeService) removeVolume(ctx context.Context, id string, w progress
|
|||
resource := fmt.Sprintf("Volume %s", id)
|
||||
|
||||
_, err := s.apiClient().VolumeInspect(ctx, id)
|
||||
if cerrdefs.IsNotFound(err) {
|
||||
if errdefs.IsNotFound(err) {
|
||||
// Already gone
|
||||
return nil
|
||||
}
|
||||
|
@ -287,27 +287,31 @@ func (s *composeService) removeVolume(ctx context.Context, id string, w progress
|
|||
w.Event(progress.NewEvent(resource, progress.Done, "Removed"))
|
||||
return nil
|
||||
}
|
||||
if cerrdefs.IsConflict(err) {
|
||||
if errdefs.IsConflict(err) {
|
||||
w.Event(progress.NewEvent(resource, progress.Warning, "Resource is still in use"))
|
||||
return nil
|
||||
}
|
||||
if cerrdefs.IsNotFound(err) {
|
||||
if errdefs.IsNotFound(err) {
|
||||
w.Event(progress.NewEvent(resource, progress.Done, "Warning: No resource found to remove"))
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *composeService) stopContainer(ctx context.Context, w progress.Writer, service *types.ServiceConfig, ctr containerType.Summary, timeout *time.Duration) error {
|
||||
func (s *composeService) stopContainer(
|
||||
ctx context.Context, w progress.Writer,
|
||||
service *types.ServiceConfig, ctr containerType.Summary,
|
||||
timeout *time.Duration, listener api.ContainerEventListener,
|
||||
) error {
|
||||
eventName := getContainerProgressName(ctr)
|
||||
w.Event(progress.StoppingEvent(eventName))
|
||||
|
||||
if service != nil {
|
||||
for _, hook := range service.PreStop {
|
||||
err := s.runHook(ctx, ctr, *service, hook, nil)
|
||||
err := s.runHook(ctx, ctr, *service, hook, listener)
|
||||
if err != nil {
|
||||
// Ignore errors indicating that some containers were already stopped or removed.
|
||||
if cerrdefs.IsNotFound(err) || cerrdefs.IsConflict(err) {
|
||||
if errdefs.IsNotFound(err) || errdefs.IsConflict(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
|
@ -325,11 +329,15 @@ func (s *composeService) stopContainer(ctx context.Context, w progress.Writer, s
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *composeService) stopContainers(ctx context.Context, w progress.Writer, serv *types.ServiceConfig, containers []containerType.Summary, timeout *time.Duration) error {
|
||||
func (s *composeService) stopContainers(
|
||||
ctx context.Context, w progress.Writer,
|
||||
serv *types.ServiceConfig, containers []containerType.Summary,
|
||||
timeout *time.Duration, listener api.ContainerEventListener,
|
||||
) error {
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
for _, ctr := range containers {
|
||||
eg.Go(func() error {
|
||||
return s.stopContainer(ctx, w, serv, ctr, timeout)
|
||||
return s.stopContainer(ctx, w, serv, ctr, timeout, listener)
|
||||
})
|
||||
}
|
||||
return eg.Wait()
|
||||
|
@ -348,8 +356,8 @@ func (s *composeService) removeContainers(ctx context.Context, containers []cont
|
|||
func (s *composeService) stopAndRemoveContainer(ctx context.Context, ctr containerType.Summary, service *types.ServiceConfig, timeout *time.Duration, volumes bool) error {
|
||||
w := progress.ContextWriter(ctx)
|
||||
eventName := getContainerProgressName(ctr)
|
||||
err := s.stopContainer(ctx, w, service, ctr, timeout)
|
||||
if cerrdefs.IsNotFound(err) {
|
||||
err := s.stopContainer(ctx, w, service, ctr, timeout, nil)
|
||||
if errdefs.IsNotFound(err) {
|
||||
w.Event(progress.RemovedEvent(eventName))
|
||||
return nil
|
||||
}
|
||||
|
@ -361,7 +369,7 @@ func (s *composeService) stopAndRemoveContainer(ctx context.Context, ctr contain
|
|||
Force: true,
|
||||
RemoveVolumes: volumes,
|
||||
})
|
||||
if err != nil && !cerrdefs.IsNotFound(err) && !cerrdefs.IsConflict(err) {
|
||||
if err != nil && !errdefs.IsNotFound(err) && !errdefs.IsConflict(err) {
|
||||
w.Event(progress.ErrorMessageEvent(eventName, "Error while Removing"))
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -24,13 +24,13 @@ import (
	"testing"

	"github.com/compose-spec/compose-go/v2/types"
	"github.com/containerd/errdefs"
	"github.com/docker/cli/cli/streams"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/errdefs"
	"go.uber.org/mock/gomock"
	"gotest.tools/v3/assert"

@ -326,7 +326,7 @@ func TestDownRemoveImages(t *testing.T) {
		if exists {
			resp.RepoTags = []string{img}
		} else {
			err = errdefs.NotFound(fmt.Errorf("test specified that image %q should not exist", img))
			err = errdefs.ErrNotFound.WithMessage(fmt.Sprintf("test specified that image %q should not exist", img))
		}

		api.EXPECT().ImageInspect(gomock.Any(), img).

@ -32,6 +32,8 @@ func (s *composeService) Events(ctx context.Context, projectName string, options
	projectName = strings.ToLower(projectName)
	evts, errors := s.apiClient().Events(ctx, events.ListOptions{
		Filters: filters.NewArgs(projectFilter(projectName)),
		Since:   options.Since,
		Until:   options.Until,
	})
	for {
		select {
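A hedged sketch of how the new Since/Until fields might be filled by a caller before they are forwarded to the engine's events filter above; the option type is a local stand-in and the timestamp format is one the events endpoint accepts (RFC3339 or Unix seconds).

```go
package main

import (
	"fmt"
	"time"
)

// eventsOptions is a local stand-in for the api.EventsOptions shown earlier.
type eventsOptions struct {
	Services []string
	Since    string
	Until    string
}

func main() {
	// Hypothetical window: everything from the last hour up to now.
	now := time.Now()
	opts := eventsOptions{
		Services: []string{"web"},
		Since:    now.Add(-time.Hour).Format(time.RFC3339),
		Until:    now.Format(time.RFC3339),
	}
	fmt.Printf("events for %v between %s and %s\n", opts.Services, opts.Since, opts.Until)
}
```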
@ -32,11 +32,11 @@ import (
func (s composeService) runHook(ctx context.Context, ctr container.Summary, service types.ServiceConfig, hook types.ServiceHook, listener api.ContainerEventListener) error {
	wOut := utils.GetWriter(func(line string) {
		listener(api.ContainerEvent{
			Type:      api.HookEventLog,
			Container: getContainerNameWithoutProject(ctr) + " ->",
			ID:        ctr.ID,
			Service:   service.Name,
			Line:      line,
			Type:    api.HookEventLog,
			Source:  getContainerNameWithoutProject(ctr) + " ->",
			ID:      ctr.ID,
			Service: service.Name,
			Line:    line,
		})
	})
	defer wOut.Close() //nolint:errcheck

@ -23,7 +23,7 @@ import (
	"sync"

	"github.com/compose-spec/compose-go/v2/types"
	cerrdefs "github.com/containerd/errdefs"
	"github.com/containerd/errdefs"
	"github.com/distribution/reference"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/image"

@ -204,7 +204,7 @@ func (p *ImagePruner) filterImagesByExistence(ctx context.Context, imageNames []
	for _, img := range imageNames {
		eg.Go(func() error {
			_, err := p.client.ImageInspect(ctx, img)
			if cerrdefs.IsNotFound(err) {
			if errdefs.IsNotFound(err) {
				// err on the side of caution: only skip if we successfully
				// queried the API and got back a definitive "not exists"
				return nil

@ -23,7 +23,7 @@ import (
	"strings"
	"sync"

	cerrdefs "github.com/containerd/errdefs"
	"github.com/containerd/errdefs"
	"github.com/containerd/platforms"
	"github.com/distribution/reference"
	"github.com/docker/docker/api/types/container"

@ -121,7 +121,7 @@ func (s *composeService) getImageSummaries(ctx context.Context, repoTags []strin
		eg.Go(func() error {
			inspect, err := s.apiClient().ImageInspect(ctx, repoTag)
			if err != nil {
				if cerrdefs.IsNotFound(err) {
				if errdefs.IsNotFound(err) {
					return nil
				}
				return fmt.Errorf("unable to get image '%s': %w", repoTag, err)

@ -19,7 +19,6 @@ package compose
|
|||
import (
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
|
@ -62,7 +61,7 @@ func (s *composeService) Logs(
|
|||
eg, ctx := errgroup.WithContext(ctx)
|
||||
for _, ctr := range containers {
|
||||
eg.Go(func() error {
|
||||
err := s.logContainers(ctx, consumer, ctr, options)
|
||||
err := s.logContainer(ctx, consumer, ctr, options)
|
||||
if errdefs.IsNotImplemented(err) {
|
||||
logrus.Warnf("Can't retrieve logs for %q: %s", getCanonicalContainerName(ctr), err.Error())
|
||||
return nil
|
||||
|
@ -72,34 +71,26 @@ func (s *composeService) Logs(
|
|||
}
|
||||
|
||||
if options.Follow {
|
||||
containers = containers.filter(isRunning())
|
||||
printer := newLogPrinter(consumer)
|
||||
eg.Go(func() error {
|
||||
_, err := printer.Run(api.CascadeIgnore, "", nil)
|
||||
return err
|
||||
})
|
||||
|
||||
for _, c := range containers {
|
||||
printer.HandleEvent(api.ContainerEvent{
|
||||
Type: api.ContainerEventAttach,
|
||||
Container: getContainerNameWithoutProject(c),
|
||||
ID: c.ID,
|
||||
Service: c.Labels[api.ServiceLabel],
|
||||
})
|
||||
monitor := newMonitor(s.apiClient(), projectName)
|
||||
if len(options.Services) > 0 {
|
||||
monitor.withServices(options.Services)
|
||||
} else if options.Project != nil {
|
||||
monitor.withServices(options.Project.ServiceNames())
|
||||
}
|
||||
|
||||
eg.Go(func() error {
|
||||
err := s.watchContainers(ctx, projectName, options.Services, nil, printer.HandleEvent, containers, func(c container.Summary, t time.Time) error {
|
||||
printer.HandleEvent(api.ContainerEvent{
|
||||
Type: api.ContainerEventAttach,
|
||||
Container: getContainerNameWithoutProject(c),
|
||||
ID: c.ID,
|
||||
Service: c.Labels[api.ServiceLabel],
|
||||
})
|
||||
monitor.withListener(printer.HandleEvent)
|
||||
monitor.withListener(func(event api.ContainerEvent) {
|
||||
if event.Type == api.ContainerEventStarted {
|
||||
eg.Go(func() error {
|
||||
err := s.logContainers(ctx, consumer, c, api.LogOptions{
|
||||
ctr, err := s.apiClient().ContainerInspect(ctx, event.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.doLogContainer(ctx, consumer, event.Source, ctr, api.LogOptions{
|
||||
Follow: options.Follow,
|
||||
Since: t.Format(time.RFC3339Nano),
|
||||
Since: ctr.State.StartedAt,
|
||||
Until: options.Until,
|
||||
Tail: options.Tail,
|
||||
Timestamps: options.Timestamps,
|
||||
|
@ -110,31 +101,27 @@ func (s *composeService) Logs(
|
|||
}
|
||||
return err
|
||||
})
|
||||
return nil
|
||||
}, func(c container.Summary, t time.Time) error {
|
||||
printer.HandleEvent(api.ContainerEvent{
|
||||
Type: api.ContainerEventAttach,
|
||||
Container: "", // actual name will be set by start event
|
||||
ID: c.ID,
|
||||
Service: c.Labels[api.ServiceLabel],
|
||||
})
|
||||
return nil
|
||||
})
|
||||
printer.Stop()
|
||||
return err
|
||||
}
|
||||
})
|
||||
eg.Go(func() error {
|
||||
return monitor.Start(ctx)
|
||||
})
|
||||
}
|
||||
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
func (s *composeService) logContainers(ctx context.Context, consumer api.LogConsumer, c container.Summary, options api.LogOptions) error {
|
||||
cnt, err := s.apiClient().ContainerInspect(ctx, c.ID)
|
||||
func (s *composeService) logContainer(ctx context.Context, consumer api.LogConsumer, c container.Summary, options api.LogOptions) error {
|
||||
ctr, err := s.apiClient().ContainerInspect(ctx, c.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
name := getContainerNameWithoutProject(c)
|
||||
return s.doLogContainer(ctx, consumer, name, ctr, options)
|
||||
}
|
||||
|
||||
r, err := s.apiClient().ContainerLogs(ctx, cnt.ID, container.LogsOptions{
|
||||
func (s *composeService) doLogContainer(ctx context.Context, consumer api.LogConsumer, name string, ctr container.InspectResponse, options api.LogOptions) error {
|
||||
r, err := s.apiClient().ContainerLogs(ctx, ctr.ID, container.LogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Follow: options.Follow,
|
||||
|
@ -148,11 +135,10 @@ func (s *composeService) logContainers(ctx context.Context, consumer api.LogCons
|
|||
}
|
||||
defer r.Close() //nolint:errcheck
|
||||
|
||||
name := getContainerNameWithoutProject(c)
|
||||
w := utils.GetWriter(func(line string) {
|
||||
consumer.Log(name, line)
|
||||
})
|
||||
if cnt.Config.Tty {
|
||||
if ctr.Config.Tty {
|
||||
_, err = io.Copy(w, r)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(w, w, r)
|
||||
|
|
|
@ -189,8 +189,6 @@ func (l *testLogConsumer) Err(containerName, message string) {
|
|||
|
||||
func (l *testLogConsumer) Status(containerName, msg string) {}
|
||||
|
||||
func (l *testLogConsumer) Register(containerName string) {}
|
||||
|
||||
func (l *testLogConsumer) LogsForContainer(containerName string) []string {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
|
|
|
@ -39,73 +39,67 @@ func (s *composeService) ensureModels(ctx context.Context, project *types.Projec
|
|||
return nil
|
||||
}
|
||||
|
||||
dockerModel, err := manager.GetPlugin("model", s.dockerCli, &cobra.Command{})
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
return fmt.Errorf("'models' support requires Docker Model plugin")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, dockerModel.Path, "ls", "--json")
|
||||
err = s.prepareShellOut(ctx, project, cmd)
|
||||
api, err := s.newModelAPI(project)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
availableModels, err := api.ListModels(ctx)
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking available models: %w", err)
|
||||
}
|
||||
|
||||
type AvailableModel struct {
|
||||
Id string `json:"id"`
|
||||
Tags []string `json:"tags"`
|
||||
Created int `json:"created"`
|
||||
}
|
||||
|
||||
models := []AvailableModel{}
|
||||
err = json.Unmarshal(output, &models)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error unmarshalling available models: %w", err)
|
||||
}
|
||||
var availableModels []string
|
||||
for _, model := range models {
|
||||
availableModels = append(availableModels, model.Tags...)
|
||||
}
|
||||
|
||||
eg, gctx := errgroup.WithContext(ctx)
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
eg.Go(func() error {
|
||||
return s.setModelVariables(gctx, dockerModel, project)
|
||||
return api.SetModelVariables(ctx, project)
|
||||
})
|
||||
|
||||
w := progress.ContextWriter(ctx)
|
||||
for name, config := range project.Models {
|
||||
if config.Name == "" {
|
||||
config.Name = name
|
||||
}
|
||||
eg.Go(func() error {
|
||||
w := progress.ContextWriter(gctx)
|
||||
if !slices.Contains(availableModels, config.Model) {
|
||||
err = s.pullModel(gctx, dockerModel, project, config, quietPull, w)
|
||||
err = api.PullModel(ctx, config, quietPull, w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return s.configureModel(gctx, dockerModel, project, config, w)
|
||||
return api.ConfigureModel(ctx, config, w)
|
||||
})
|
||||
}
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
func (s *composeService) pullModel(ctx context.Context, dockerModel *manager.Plugin, project *types.Project, model types.ModelConfig, quietPull bool, w progress.Writer) error {
|
||||
type modelAPI struct {
|
||||
path string
|
||||
env []string
|
||||
prepare func(ctx context.Context, cmd *exec.Cmd) error
|
||||
}
|
||||
|
||||
func (s *composeService) newModelAPI(project *types.Project) (*modelAPI, error) {
|
||||
dockerModel, err := manager.GetPlugin("model", s.dockerCli, &cobra.Command{})
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
return nil, fmt.Errorf("'models' support requires Docker Model plugin")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return &modelAPI{
|
||||
path: dockerModel.Path,
|
||||
prepare: func(ctx context.Context, cmd *exec.Cmd) error {
|
||||
return s.prepareShellOut(ctx, project.Environment, cmd)
|
||||
},
|
||||
env: project.Environment.Values(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *modelAPI) PullModel(ctx context.Context, model types.ModelConfig, quietPull bool, w progress.Writer) error {
|
||||
w.Event(progress.Event{
|
||||
ID: model.Name,
|
||||
Status: progress.Working,
|
||||
Text: "Pulling",
|
||||
})
|
||||
|
||||
cmd := exec.CommandContext(ctx, dockerModel.Path, "pull", model.Model)
|
||||
err := s.prepareShellOut(ctx, project, cmd)
|
||||
cmd := exec.CommandContext(ctx, m.path, "pull", model.Model)
|
||||
err := m.prepare(ctx, cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -148,7 +142,7 @@ func (s *composeService) pullModel(ctx context.Context, dockerModel *manager.Plu
|
|||
return err
|
||||
}
|
||||
|
||||
func (s *composeService) configureModel(ctx context.Context, dockerModel *manager.Plugin, project *types.Project, config types.ModelConfig, w progress.Writer) error {
|
||||
func (m *modelAPI) ConfigureModel(ctx context.Context, config types.ModelConfig, w progress.Writer) error {
|
||||
w.Event(progress.Event{
|
||||
ID: config.Name,
|
||||
Status: progress.Working,
|
||||
|
@ -164,17 +158,17 @@ func (s *composeService) configureModel(ctx context.Context, dockerModel *manage
|
|||
args = append(args, "--")
|
||||
args = append(args, config.RuntimeFlags...)
|
||||
}
|
||||
cmd := exec.CommandContext(ctx, dockerModel.Path, args...)
|
||||
err := s.prepareShellOut(ctx, project, cmd)
|
||||
cmd := exec.CommandContext(ctx, m.path, args...)
|
||||
err := m.prepare(ctx, cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func (s *composeService) setModelVariables(ctx context.Context, dockerModel *manager.Plugin, project *types.Project) error {
|
||||
cmd := exec.CommandContext(ctx, dockerModel.Path, "status", "--json")
|
||||
err := s.prepareShellOut(ctx, project, cmd)
|
||||
func (m *modelAPI) SetModelVariables(ctx context.Context, project *types.Project) error {
|
||||
cmd := exec.CommandContext(ctx, m.path, "status", "--json")
|
||||
err := m.prepare(ctx, cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -201,7 +195,7 @@ func (s *composeService) setModelVariables(ctx context.Context, dockerModel *man
|
|||
if modelConfig != nil && modelConfig.ModelVariable != "" {
|
||||
variable = modelConfig.ModelVariable
|
||||
} else {
|
||||
variable = varPrefix
|
||||
variable = varPrefix + "_MODEL"
|
||||
}
|
||||
service.Environment[variable] = &model.Model
|
||||
|
||||
|
@ -228,3 +222,33 @@ type Model struct {
|
|||
Size string `json:"size"`
|
||||
} `json:"config"`
|
||||
}
|
||||
|
||||
func (m *modelAPI) ListModels(ctx context.Context) ([]string, error) {
|
||||
cmd := exec.CommandContext(ctx, m.path, "ls", "--json")
|
||||
err := m.prepare(ctx, cmd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error checking available models: %w", err)
|
||||
}
|
||||
|
||||
type AvailableModel struct {
|
||||
Id string `json:"id"`
|
||||
Tags []string `json:"tags"`
|
||||
Created int `json:"created"`
|
||||
}
|
||||
|
||||
models := []AvailableModel{}
|
||||
err = json.Unmarshal(output, &models)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling available models: %w", err)
|
||||
}
|
||||
var availableModels []string
|
||||
for _, model := range models {
|
||||
availableModels = append(availableModels, model.Tags...)
|
||||
}
|
||||
return availableModels, nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,218 @@
|
|||
/*
|
||||
Copyright 2020 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package compose
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/docker/compose/v2/pkg/api"
|
||||
"github.com/docker/compose/v2/pkg/utils"
|
||||
)
|
||||
|
||||
type monitor struct {
|
||||
api client.APIClient
|
||||
project string
|
||||
// services tells us which service to consider and those we can ignore, maybe ran by a concurrent compose command
|
||||
services map[string]bool
|
||||
listeners []api.ContainerEventListener
|
||||
}
|
||||
|
||||
func newMonitor(api client.APIClient, project string) *monitor {
|
||||
return &monitor{
|
||||
api: api,
|
||||
project: project,
|
||||
services: map[string]bool{},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *monitor) withServices(services []string) {
|
||||
for _, name := range services {
|
||||
c.services[name] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Start runs monitor to detect application events and return after termination
|
||||
//
|
||||
//nolint:gocyclo
|
||||
func (c *monitor) Start(ctx context.Context) error {
|
||||
// collect initial application container
|
||||
initialState, err := c.api.ContainerList(ctx, container.ListOptions{
|
||||
All: true,
|
||||
Filters: filters.NewArgs(
|
||||
projectFilter(c.project),
|
||||
oneOffFilter(false),
|
||||
hasConfigHashLabel(),
|
||||
),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// containers is the set if container IDs the application is based on
|
||||
containers := utils.Set[string]{}
|
||||
for _, ctr := range initialState {
|
||||
if len(c.services) == 0 || c.services[ctr.Labels[api.ServiceLabel]] {
|
||||
containers.Add(ctr.ID)
|
||||
}
|
||||
}
|
||||
restarting := utils.Set[string]{}
|
||||
|
||||
evtCh, errCh := c.api.Events(context.Background(), events.ListOptions{
|
||||
Filters: filters.NewArgs(
|
||||
filters.Arg("type", "container"),
|
||||
projectFilter(c.project)),
|
||||
})
|
||||
for {
|
||||
if len(containers) == 0 {
|
||||
return nil
|
||||
}
|
||||
select {
|
||||
case err := <-errCh:
|
||||
return err
|
||||
case event := <-evtCh:
|
||||
if len(c.services) > 0 && !c.services[event.Actor.Attributes[api.ServiceLabel]] {
|
||||
continue
|
||||
}
|
||||
ctr, err := c.getContainerSummary(event)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch event.Action {
|
||||
case events.ActionCreate:
|
||||
if len(c.services) == 0 || c.services[ctr.Labels[api.ServiceLabel]] {
|
||||
containers.Add(ctr.ID)
|
||||
}
|
||||
evtType := api.ContainerEventCreated
|
||||
if _, ok := ctr.Labels[api.ContainerReplaceLabel]; ok {
|
||||
evtType = api.ContainerEventRecreated
|
||||
}
|
||||
for _, listener := range c.listeners {
|
||||
listener(newContainerEvent(event.TimeNano, ctr, evtType))
|
||||
}
|
||||
logrus.Debugf("container %s created", ctr.Name)
|
||||
case events.ActionStart:
|
||||
restarted := restarting.Has(ctr.ID)
|
||||
if restarted {
|
||||
logrus.Debugf("container %s restarted", ctr.Name)
|
||||
for _, listener := range c.listeners {
|
||||
listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventStarted, func(e *api.ContainerEvent) {
|
||||
e.Restarting = restarted
|
||||
}))
|
||||
}
|
||||
} else {
|
||||
logrus.Debugf("container %s started", ctr.Name)
|
||||
for _, listener := range c.listeners {
|
||||
listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventStarted))
|
||||
}
|
||||
}
|
||||
if len(c.services) == 0 || c.services[ctr.Labels[api.ServiceLabel]] {
|
||||
containers.Add(ctr.ID)
|
||||
}
|
||||
case events.ActionRestart:
|
||||
for _, listener := range c.listeners {
|
||||
listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventRestarted))
|
||||
}
|
||||
logrus.Debugf("container %s restarted", ctr.Name)
|
||||
case events.ActionStop:
|
||||
// when a container is in restarting phase, and we stop the application (abort-on-container-exit)
|
||||
// we won't get any additional start+die events, just this stop as a proof container is down
|
||||
logrus.Debugf("container %s stopped", ctr.Name)
|
||||
containers.Remove(ctr.ID)
|
||||
case events.ActionDie:
|
||||
logrus.Debugf("container %s exited with code %d", ctr.Name, ctr.ExitCode)
|
||||
inspect, err := c.api.ContainerInspect(ctx, event.Actor.ID)
|
||||
if errdefs.IsNotFound(err) {
|
||||
// Source is already removed
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if inspect.State != nil && inspect.State.Restarting || inspect.State.Running {
|
||||
// State.Restarting is set by engine when container is configured to restart on exit
|
||||
// on ContainerRestart it doesn't (see https://github.com/moby/moby/issues/45538)
|
||||
// container state still is reported as "running"
|
||||
logrus.Debugf("container %s is restarting", ctr.Name)
|
||||
restarting.Add(ctr.ID)
|
||||
for _, listener := range c.listeners {
|
||||
listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventExited, func(e *api.ContainerEvent) {
|
||||
e.Restarting = true
|
||||
}))
|
||||
}
|
||||
} else {
|
||||
for _, listener := range c.listeners {
|
||||
listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventExited))
|
||||
}
|
||||
containers.Remove(ctr.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newContainerEvent(timeNano int64, ctr *api.ContainerSummary, eventType int, opts ...func(e *api.ContainerEvent)) api.ContainerEvent {
|
||||
name := ctr.Name
|
||||
defaultName := getDefaultContainerName(ctr.Project, ctr.Labels[api.ServiceLabel], ctr.Labels[api.ContainerNumberLabel])
|
||||
if name == defaultName {
|
||||
// remove project- prefix
|
||||
name = name[len(ctr.Project)+1:]
|
||||
}
|
||||
|
||||
event := api.ContainerEvent{
|
||||
Type: eventType,
|
||||
Container: ctr,
|
||||
Time: timeNano,
|
||||
Source: name,
|
||||
ID: ctr.ID,
|
||||
Service: ctr.Service,
|
||||
ExitCode: ctr.ExitCode,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(&event)
|
||||
}
|
||||
return event
|
||||
}
|
||||
|
||||
func (c *monitor) getContainerSummary(event events.Message) (*api.ContainerSummary, error) {
|
||||
ctr := &api.ContainerSummary{
|
||||
ID: event.Actor.ID,
|
||||
Name: event.Actor.Attributes["name"],
|
||||
Project: c.project,
|
||||
Service: event.Actor.Attributes[api.ServiceLabel],
|
||||
Labels: event.Actor.Attributes, // More than just labels, but that'c the closest the API gives us
|
||||
}
|
||||
if ec, ok := event.Actor.Attributes["exitCode"]; ok {
|
||||
exitCode, err := strconv.Atoi(ec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ctr.ExitCode = exitCode
|
||||
}
|
||||
return ctr, nil
|
||||
}
|
||||
|
||||
func (c *monitor) withListener(listener api.ContainerEventListener) {
|
||||
c.listeners = append(c.listeners, listener)
|
||||
}
|
|
@ -197,7 +197,7 @@ func (s *composeService) setupPluginCommand(ctx context.Context, project *types.

	cmd := exec.CommandContext(ctx, path, args...)

	err := s.prepareShellOut(ctx, project, cmd)
	err := s.prepareShellOut(ctx, project.Environment, cmd)
	if err != nil {
		return nil, err
	}

@ -206,7 +206,7 @@ func (s *composeService) setupPluginCommand(ctx context.Context, project *types.

func (s *composeService) getPluginMetadata(path, command string, project *types.Project) ProviderMetadata {
	cmd := exec.Command(path, "compose", "metadata")
	err := s.prepareShellOut(context.Background(), project, cmd)
	err := s.prepareShellOut(context.Background(), project.Environment, cmd)
	if err != nil {
		logrus.Debugf("failed to prepare plugin metadata command: %v", err)
		return ProviderMetadata{}

@ -18,7 +18,6 @@ package compose
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/compose/v2/pkg/api"
|
||||
)
|
||||
|
@ -26,137 +25,29 @@ import (
|
|||
// logPrinter watch application containers and collect their logs
|
||||
type logPrinter interface {
|
||||
HandleEvent(event api.ContainerEvent)
|
||||
Run(cascade api.Cascade, exitCodeFrom string, stopFn func() error) (int, error)
Cancel()
Stop()
}

type printer struct {
queue chan api.ContainerEvent
consumer api.LogConsumer
stopCh chan struct{} // stopCh is a signal channel for producers to stop sending events to the queue
stop sync.Once
}

// newLogPrinter builds a LogPrinter passing container logs to LogConsumer
func newLogPrinter(consumer api.LogConsumer) logPrinter {
printer := printer{
consumer: consumer,
queue: make(chan api.ContainerEvent),
stopCh: make(chan struct{}),
stop: sync.Once{},
}
return &printer
}

func (p *printer) Cancel() {
// note: HandleEvent is used to ensure this doesn't deadlock
p.HandleEvent(api.ContainerEvent{Type: api.UserCancel})
}

func (p *printer) Stop() {
p.stop.Do(func() {
close(p.stopCh)
for {
select {
case <-p.queue:
// purge the queue to free producer goroutines
// p.queue will be garbage collected
default:
return
}
}
})
}

func (p *printer) HandleEvent(event api.ContainerEvent) {
select {
case <-p.stopCh:
return
default:
p.queue <- event
}
}

//nolint:gocyclo
func (p *printer) Run(cascade api.Cascade, exitCodeFrom string, stopFn func() error) (int, error) {
var (
aborting bool
exitCode int
)
defer p.Stop()

// containers we are tracking. Use true when container is running, false after we receive a stop|die signal
containers := map[string]bool{}
for {
select {
case <-p.stopCh:
return exitCode, nil
case event := <-p.queue:
container, id := event.Container, event.ID
switch event.Type {
case api.UserCancel:
aborting = true
case api.ContainerEventAttach:
if attached, ok := containers[id]; ok && attached {
continue
}
containers[id] = true
p.consumer.Register(container)
case api.ContainerEventExit, api.ContainerEventStopped, api.ContainerEventRecreated:
if !aborting && containers[id] {
p.consumer.Status(container, fmt.Sprintf("exited with code %d", event.ExitCode))
if event.Type == api.ContainerEventRecreated {
p.consumer.Status(container, "has been recreated")
}
}
containers[id] = false
if !event.Restarting {
delete(containers, id)
}

if cascade == api.CascadeStop {
if !aborting {
aborting = true
err := stopFn()
if err != nil {
return 0, err
}
}
}
if event.Type == api.ContainerEventExit {
if cascade == api.CascadeFail && event.ExitCode != 0 {
exitCodeFrom = event.Service
if !aborting {
aborting = true
err := stopFn()
if err != nil {
return 0, err
}
}
}
if cascade == api.CascadeStop && exitCodeFrom == "" {
exitCodeFrom = event.Service
}
}

if exitCodeFrom == event.Service && (event.Type == api.ContainerEventExit || event.Type == api.ContainerEventStopped) {
// Container was interrupted or exited, let's capture exit code
exitCode = event.ExitCode
}
if len(containers) == 0 {
// Last container terminated, done
return exitCode, nil
}
case api.ContainerEventLog, api.HookEventLog:
if !aborting {
p.consumer.Log(container, event.Line)
}
case api.ContainerEventErr:
if !aborting {
p.consumer.Err(container, event.Line)
}
}
}
switch event.Type {
case api.ContainerEventExited:
p.consumer.Status(event.Source, fmt.Sprintf("exited with code %d", event.ExitCode))
case api.ContainerEventRecreated:
p.consumer.Status(event.Container.Labels[api.ContainerReplaceLabel], "has been recreated")
case api.ContainerEventLog, api.HookEventLog:
p.consumer.Log(event.Source, event.Line)
case api.ContainerEventErr:
p.consumer.Err(event.Source, event.Line)
}
}

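
The printer above fans container events out to an api.LogConsumer via Register, Log, Err, and Status. A minimal sketch (not part of the diff) of a stdout-backed consumer follows, mirroring the stdLogger used in the watch tests later in this changeset; the exact method signatures are assumed from the calls shown above.

	package main

	import (
		"fmt"
		"os"
	)

	// stdoutConsumer mirrors the method set the printer calls on api.LogConsumer.
	type stdoutConsumer struct{}

	func (stdoutConsumer) Register(container string) {
		fmt.Fprintf(os.Stdout, "attached to %s\n", container)
	}

	func (stdoutConsumer) Log(container, message string) {
		fmt.Fprintf(os.Stdout, "%s | %s\n", container, message)
	}

	func (stdoutConsumer) Err(container, message string) {
		fmt.Fprintf(os.Stderr, "%s | %s\n", container, message)
	}

	func (stdoutConsumer) Status(container, msg string) {
		fmt.Fprintf(os.Stdout, "%s %s\n", container, msg)
	}

	func main() {
		var c stdoutConsumer
		c.Register("web-1")
		c.Log("web-1", "listening on :8080")
		c.Status("web-1", "exited with code 0")
	}
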
@ -29,10 +29,8 @@ import (
)

// prepareShellOut prepares a shell-out command to be run by Compose
func (s *composeService) prepareShellOut(gctx context.Context, project *types.Project, cmd *exec.Cmd) error {
// exec command with same environment Compose is running
env := types.NewMapping(project.Environment.Values())

func (s *composeService) prepareShellOut(gctx context.Context, env types.Mapping, cmd *exec.Cmd) error {
env = env.Clone()
// remove DOCKER_CLI_PLUGIN... variable so a docker-cli plugin will detect it runs standalone
delete(env, manager.ReexecEnvvar)

@ -20,19 +20,14 @@ import (
"context"
"errors"
"fmt"
"slices"
"strings"
"time"

cerrdefs "github.com/containerd/errdefs"
"github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/progress"
"github.com/docker/compose/v2/pkg/utils"
containerType "github.com/docker/docker/api/types/container"

"github.com/compose-spec/compose-go/v2/types"
"github.com/docker/docker/api/types/filters"
"golang.org/x/sync/errgroup"
)

func (s *composeService) Start(ctx context.Context, projectName string, options api.StartOptions) error {
@ -56,60 +51,6 @@ func (s *composeService) start(ctx context.Context, projectName string, options
}
}

// use an independent context tied to the errgroup for background attach operations
// the primary context is still used for other operations
// this means that once any attach operation fails, all other attaches are cancelled,
// but an attach failing won't interfere with the rest of the start
eg, attachCtx := errgroup.WithContext(ctx)
if listener != nil {
_, err := s.attach(attachCtx, project, listener, options.AttachTo)
if err != nil {
return err
}

eg.Go(func() error {
// it's possible to have a required service whose log output is not desired
// (i.e. it's not in the attach set), so watch everything and then filter
// calls to attach; this ensures that `watchContainers` blocks until all
// required containers have exited, even if their output is not being shown
attachTo := utils.NewSet[string](options.AttachTo...)
required := utils.NewSet[string](options.Services...)
toWatch := attachTo.Union(required).Elements()

containers, err := s.getContainers(ctx, projectName, oneOffExclude, true, toWatch...)
if err != nil {
return err
}

// N.B. this uses the parent context (instead of attachCtx) so that the watch itself can
// continue even if one of the log streams fails
return s.watchContainers(ctx, project.Name, toWatch, required.Elements(), listener, containers,
func(ctr containerType.Summary, _ time.Time) error {
svc := ctr.Labels[api.ServiceLabel]
if attachTo.Has(svc) {
return s.attachContainer(attachCtx, ctr, listener)
}

// HACK: simulate an "attach" event
listener(api.ContainerEvent{
Type: api.ContainerEventAttach,
Container: getContainerNameWithoutProject(ctr),
ID: ctr.ID,
Service: svc,
})
return nil
}, func(ctr containerType.Summary, _ time.Time) error {
listener(api.ContainerEvent{
Type: api.ContainerEventAttach,
Container: "", // actual name will be set by start event
ID: ctr.ID,
Service: ctr.Labels[api.ServiceLabel],
})
return nil
})
})
}

var containers Containers
containers, err := s.apiClient().ContainerList(ctx, containerType.ListOptions{
Filters: filters.NewArgs(
@ -157,7 +98,7 @@ func (s *composeService) start(ctx context.Context, projectName string, options
}
}

return eg.Wait()
return nil
}

// getDependencyCondition checks if service is depended on by other services
@ -173,182 +114,3 @@ func getDependencyCondition(service types.ServiceConfig, project *types.Project)
}
return ServiceConditionRunningOrHealthy
}

type containerWatchFn func(ctr containerType.Summary, t time.Time) error

// watchContainers uses engine events to capture container start/die and notify ContainerEventListener
func (s *composeService) watchContainers(ctx context.Context, //nolint:gocyclo
projectName string, services, required []string,
listener api.ContainerEventListener, containers Containers, onStart, onRecreate containerWatchFn,
) error {
if len(containers) == 0 {
return nil
}
if len(required) == 0 {
required = services
}

unexpected := utils.NewSet[string](required...).Diff(utils.NewSet[string](services...))
if len(unexpected) != 0 {
return fmt.Errorf(`required service(s) "%s" not present in watched service(s) "%s"`,
strings.Join(unexpected.Elements(), ", "),
strings.Join(services, ", "))
}

// predicate to tell if a container we receive event for should be considered or ignored
ofInterest := func(c containerType.Summary) bool {
if len(services) > 0 {
// we only watch some services
return slices.Contains(services, c.Labels[api.ServiceLabel])
}
return true
}

// predicate to tell if a container we receive event for should be watched until termination
isRequired := func(c containerType.Summary) bool {
if len(services) > 0 && len(required) > 0 {
// we only watch some services
return slices.Contains(required, c.Labels[api.ServiceLabel])
}
return true
}

var (
expected = utils.NewSet[string]()
watched = map[string]int{}
replaced []string
)
for _, c := range containers {
if isRequired(c) {
expected.Add(c.ID)
}
watched[c.ID] = 0
}

ctx, stop := context.WithCancel(ctx)
err := s.Events(ctx, projectName, api.EventsOptions{
Services: services,
Consumer: func(event api.Event) error {
defer func() {
// after consuming each event, check to see if we're done
if len(expected) == 0 {
stop()
}
}()
inspected, err := s.apiClient().ContainerInspect(ctx, event.Container)
if err != nil {
if cerrdefs.IsNotFound(err) {
// it's possible to get "destroy" or "kill" events but not
// be able to inspect in time before they're gone from the
// API, so just remove the watch without erroring
delete(watched, event.Container)
expected.Remove(event.Container)
return nil
}
return err
}
container := containerType.Summary{
ID: inspected.ID,
Names: []string{inspected.Name},
Labels: inspected.Config.Labels,
}
name := getContainerNameWithoutProject(container)
service := container.Labels[api.ServiceLabel]
switch event.Status {
case "stop":
if inspected.State.Running {
// on sync+restart action the container stops -> dies -> start -> restart
// we do not want to stop the current container, we want to restart it
return nil
}
if _, ok := watched[container.ID]; ok {
eType := api.ContainerEventStopped
if slices.Contains(replaced, container.ID) {
replaced = slices.DeleteFunc(replaced, func(e string) bool { return e == container.ID })
eType = api.ContainerEventRecreated
}
listener(api.ContainerEvent{
Type: eType,
Container: name,
ID: container.ID,
Service: service,
ExitCode: inspected.State.ExitCode,
})
}

delete(watched, container.ID)
expected.Remove(container.ID)
case "die":
restarted := watched[container.ID]
watched[container.ID] = restarted + 1
// Container terminated.
willRestart := inspected.State.Restarting
if inspected.State.Running {
// on sync+restart action inspected.State.Restarting is false,
// however the container is already running before it restarts
willRestart = true
}

eType := api.ContainerEventExit
if slices.Contains(replaced, container.ID) {
replaced = slices.DeleteFunc(replaced, func(e string) bool { return e == container.ID })
eType = api.ContainerEventRecreated
}

listener(api.ContainerEvent{
Type: eType,
Container: name,
ID: container.ID,
Service: service,
ExitCode: inspected.State.ExitCode,
Restarting: willRestart,
})

if !willRestart {
// we're done with this one
delete(watched, container.ID)
expected.Remove(container.ID)
}
case "start":
count, ok := watched[container.ID]
mustAttach := ok && count > 0 // Container restarted, need to re-attach
if !ok {
// A new container has just been added to service by scale
watched[container.ID] = 0
expected.Add(container.ID)
mustAttach = true
}
if mustAttach {
// Container restarted, need to re-attach
err := onStart(container, event.Timestamp)
if err != nil {
return err
}
}
case "create":
if id, ok := container.Labels[api.ContainerReplaceLabel]; ok {
replaced = append(replaced, id)
err = onRecreate(container, event.Timestamp)
if err != nil {
return err
}
if expected.Has(id) {
expected.Add(inspected.ID)
expected.Add(container.ID)
}
watched[container.ID] = 1
} else if ofInterest(container) {
watched[container.ID] = 1
if isRequired(container) {
expected.Add(container.ID)
}
}
}
return nil
},
})
if errors.Is(ctx.Err(), context.Canceled) {
return nil
}
return err
}

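
watchContainers rejects a required set that is not a subset of the watched services before subscribing to engine events. A standalone sketch of that guard follows, using plain stdlib sets in place of the internal utils.Set helper used above; the service names are illustrative.

	package main

	import (
		"fmt"
		"strings"
	)

	// diff returns the elements of a that are not present in b.
	func diff(a, b []string) []string {
		seen := make(map[string]struct{}, len(b))
		for _, s := range b {
			seen[s] = struct{}{}
		}
		var out []string
		for _, s := range a {
			if _, ok := seen[s]; !ok {
				out = append(out, s)
			}
		}
		return out
	}

	func main() {
		services := []string{"web", "db"}
		required := []string{"web", "worker"} // "worker" is not watched

		if unexpected := diff(required, services); len(unexpected) != 0 {
			fmt.Printf("required service(s) %q not present in watched service(s) %q\n",
				strings.Join(unexpected, ", "), strings.Join(services, ", "))
		}
	}
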
@ -27,11 +27,11 @@ import (

func (s *composeService) Stop(ctx context.Context, projectName string, options api.StopOptions) error {
return progress.RunWithTitle(ctx, func(ctx context.Context) error {
return s.stop(ctx, strings.ToLower(projectName), options)
return s.stop(ctx, strings.ToLower(projectName), options, nil)
}, s.stdinfo(), "Stopping")
}

func (s *composeService) stop(ctx context.Context, projectName string, options api.StopOptions) error {
func (s *composeService) stop(ctx context.Context, projectName string, options api.StopOptions, event api.ContainerEventListener) error {
containers, err := s.getContainers(ctx, projectName, oneOffExclude, true)
if err != nil {
return err
@ -55,6 +55,6 @@ func (s *composeService) stop(ctx context.Context, projectName string, options a
return nil
}
serv := project.Services[service]
return s.stopContainers(ctx, w, &serv, containers.filter(isService(service)).filter(isNotOneOff), options.Timeout)
return s.stopContainers(ctx, w, &serv, containers.filter(isService(service)).filter(isNotOneOff), options.Timeout, event)
})
}

@ -21,11 +21,12 @@ import (
"fmt"
"os"
"os/signal"
"slices"
"sync/atomic"
"syscall"

"github.com/compose-spec/compose-go/v2/types"
cerrdefs "github.com/containerd/errdefs"
"github.com/containerd/errdefs"
"github.com/docker/cli/cli"
"github.com/docker/compose/v2/cmd/formatter"
"github.com/docker/compose/v2/internal/tracing"
@ -34,6 +35,7 @@ import (
"github.com/eiannone/keyboard"
"github.com/hashicorp/go-multierror"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)

func (s *composeService) Up(ctx context.Context, project *types.Project, options api.UpOptions) error { //nolint:gocyclo
@ -105,17 +107,17 @@ func (s *composeService) Up(ctx context.Context, project *types.Project, options
eg.Go(func() error {
first := true
gracefulTeardown := func() {
printer.Cancel()
_, _ = fmt.Fprintln(s.stdinfo(), "Gracefully stopping... (press Ctrl+C again to force)")
eg.Go(func() error {
err := s.Stop(context.WithoutCancel(ctx), project.Name, api.StopOptions{
Services: options.Create.Services,
Project: project,
})
isTerminated.Store(true)
return err
})
first = false
fmt.Println("Gracefully Stopping... press Ctrl+C again to force")
eg.Go(func() error {
return progress.RunWithLog(context.WithoutCancel(ctx), func(ctx context.Context) error {
return s.stop(ctx, project.Name, api.StopOptions{
Services: options.Create.Services,
Project: project,
}, printer.HandleEvent)
}, s.stdinfo(), logConsumer)
})
isTerminated.Store(true)
}

for {
@ -142,7 +144,7 @@ func (s *composeService) Up(ctx context.Context, project *types.Project, options
All: true,
})
// Ignore errors indicating that some of the containers were already stopped or removed.
if cerrdefs.IsNotFound(err) || cerrdefs.IsConflict(err) {
if errdefs.IsNotFound(err) || errdefs.IsConflict(err) {
return nil
}

@ -155,21 +157,6 @@ func (s *composeService) Up(ctx context.Context, project *types.Project, options
}
})

var exitCode int
eg.Go(func() error {
code, err := printer.Run(options.Start.OnExit, options.Start.ExitCodeFrom, func() error {
_, _ = fmt.Fprintln(s.stdinfo(), "Aborting on container exit...")
return progress.Run(ctx, func(ctx context.Context) error {
return s.Stop(ctx, project.Name, api.StopOptions{
Services: options.Create.Services,
Project: project,
})
}, s.stdinfo())
})
exitCode = code
return err
})

if options.Start.Watch && watcher != nil {
err = watcher.Start(ctx)
if err != nil {
@ -177,17 +164,102 @@ func (s *composeService) Up(ctx context.Context, project *types.Project, options
}
}

monitor := newMonitor(s.apiClient(), project.Name)
if len(options.Start.Services) > 0 {
monitor.withServices(options.Start.Services)
} else {
monitor.withServices(project.ServiceNames())
}
monitor.withListener(printer.HandleEvent)

var exitCode int
if options.Start.OnExit != api.CascadeIgnore {
once := true
// detect first container to exit to trigger application shutdown
monitor.withListener(func(event api.ContainerEvent) {
if once && event.Type == api.ContainerEventExited {
if options.Start.OnExit == api.CascadeFail && event.ExitCode == 0 {
return
}
once = false
exitCode = event.ExitCode
_, _ = fmt.Fprintln(s.stdinfo(), progress.ErrorColor("Aborting on container exit..."))
eg.Go(func() error {
return progress.RunWithLog(context.WithoutCancel(ctx), func(ctx context.Context) error {
return s.stop(ctx, project.Name, api.StopOptions{
Services: options.Create.Services,
Project: project,
}, printer.HandleEvent)
}, s.stdinfo(), logConsumer)
})
}
})
}

if options.Start.ExitCodeFrom != "" {
once := true
// capture exit code from first container to exit with selected service
monitor.withListener(func(event api.ContainerEvent) {
if once && event.Type == api.ContainerEventExited && event.Service == options.Start.ExitCodeFrom {
exitCode = event.ExitCode
once = false
}
})
}

// use an independent context tied to the errgroup for background attach operations
// the primary context is still used for other operations
// this means that once any attach operation fails, all other attaches are cancelled,
// but an attach failing won't interfere with the rest of the start
_, attachCtx := errgroup.WithContext(ctx)
containers, err := s.attach(attachCtx, project, printer.HandleEvent, options.Start.AttachTo)
if err != nil {
return err
}
attached := make([]string, len(containers))
for i, ctr := range containers {
attached[i] = ctr.ID
}

monitor.withListener(func(event api.ContainerEvent) {
if event.Type != api.ContainerEventStarted {
return
}
if slices.Contains(attached, event.ID) {
return
}
eg.Go(func() error {
ctr, err := s.apiClient().ContainerInspect(ctx, event.ID)
if err != nil {
return err
}

err = s.doLogContainer(ctx, options.Start.Attach, event.Source, ctr, api.LogOptions{
Follow: true,
Since: ctr.State.StartedAt,
})
if errdefs.IsNotImplemented(err) {
// container may be configured with logging_driver: none
// as container already started, we might miss the very first logs. But still better than none
return s.doAttachContainer(ctx, event.Service, event.ID, event.Source, printer.HandleEvent)
}
return err
})
})

eg.Go(func() error {
err := monitor.Start(context.Background())
// Signal for the signal-handler goroutines to stop
close(doneCh)
return err
})

// We use the parent context without cancellation as we manage sigterm to stop the stack
err = s.start(context.WithoutCancel(ctx), project.Name, options.Start, printer.HandleEvent)
if err != nil && !isTerminated.Load() { // Ignore error if the process is terminated
return err
}

// Signal for the signal-handler goroutines to stop
close(doneCh)

printer.Stop()

err = eg.Wait().ErrorOrNil()
if exitCode != 0 {
errMsg := ""

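
The new Up flow registers several independent listeners on the monitor (printer forwarding, cascade shutdown, exit-code capture, late attach for scaled containers). A simplified, hypothetical sketch of that fan-out pattern follows; containerEvent and monitor here are illustrative stand-ins, not the real compose types.

	package main

	import "fmt"

	type containerEvent struct {
		Type     string
		Service  string
		ExitCode int
	}

	type monitor struct {
		listeners []func(containerEvent)
	}

	// withListener registers an additional callback; every listener sees every event.
	func (m *monitor) withListener(l func(containerEvent)) {
		m.listeners = append(m.listeners, l)
	}

	func (m *monitor) dispatch(e containerEvent) {
		for _, l := range m.listeners {
			l(e)
		}
	}

	func main() {
		m := &monitor{}

		// capture the exit code of the first "exited" event for a selected service
		var exitCode int
		once := true
		m.withListener(func(e containerEvent) {
			if once && e.Type == "exited" && e.Service == "web" {
				exitCode = e.ExitCode
				once = false
			}
		})

		// a second, independent listener could trigger a cascade stop here
		m.withListener(func(e containerEvent) {
			if e.Type == "exited" {
				fmt.Println("Aborting on container exit...")
			}
		})

		m.dispatch(containerEvent{Type: "exited", Service: "web", ExitCode: 3})
		fmt.Println("exit code:", exitCode)
	}
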
@ -29,14 +29,17 @@ import (
gsync "sync"
"time"

"github.com/compose-spec/compose-go/v2/types"
"github.com/compose-spec/compose-go/v2/utils"
ccli "github.com/docker/cli/cli/command/container"
pathutil "github.com/docker/compose/v2/internal/paths"
"github.com/docker/compose/v2/internal/sync"
"github.com/docker/compose/v2/internal/tracing"
"github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/progress"
cutils "github.com/docker/compose/v2/pkg/utils"
"github.com/docker/compose/v2/pkg/watch"

"github.com/compose-spec/compose-go/v2/types"
"github.com/compose-spec/compose-go/v2/utils"
ccli "github.com/docker/cli/cli/command/container"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
@ -61,7 +64,6 @@ func NewWatcher(project *types.Project, options api.UpOptions, w WatchFunc, cons

if service.Develop != nil && service.Develop.Watch != nil {
build := options.Create.Build
build.Quiet = true
return &Watcher{
project: project,
options: api.WatchOptions{
@ -192,7 +194,6 @@ func (s *composeService) watch(ctx context.Context, project *types.Project, opti
return nil, err
}
eg, ctx := errgroup.WithContext(ctx)
options.LogTo.Register(api.WatchLogger)

var (
rules []watchRule
@ -599,6 +600,10 @@ func (s *composeService) rebuild(ctx context.Context, project *types.Project, se
options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Rebuilding service(s) %q after changes were detected...", services))
// restrict the build to ONLY this service, not any of its dependencies
options.Build.Services = services
options.Build.Progress = progress.ModePlain
options.Build.Out = cutils.GetWriter(func(line string) {
options.LogTo.Log(api.WatchLogger, line)
})

var (
imageNameToIdMap map[string]string

@ -71,9 +71,6 @@ func (s stdLogger) Status(containerName, msg string) {
fmt.Printf("%s: %s\n", containerName, msg)
}

func (s stdLogger) Register(containerName string) {
}

func TestWatch_Sync(t *testing.T) {
mockCtrl := gomock.NewController(t)
cli := mocks.NewMockCli(mockCtrl)

@ -3,6 +3,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: bridge
name: {{ .Values.namespace }}
labels:
com.docker.compose.project: bridge

@ -3,8 +3,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: bridge
namespace: bridge
name: {{ .Values.projectName }}
namespace: {{ .Values.namespace }}
labels:
com.docker.compose.project: bridge
data:

@ -10,13 +10,13 @@ metadata:
com.docker.compose.service: serviceA
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
replicas: {{ .Values.deployment.defaultReplicas }}
selector:
matchLabels:
com.docker.compose.project: bridge
com.docker.compose.service: serviceA
strategy:
type: Recreate
type: {{ .Values.deployment.strategy }}
template:
metadata:
labels:
@ -28,6 +28,10 @@ spec:
- name: servicea
image: {{ .Values.serviceA.image }}
imagePullPolicy: {{ .Values.serviceA.imagePullPolicy }}
resources:
limits:
cpu: {{ .Values.resources.defaultCpuLimit }}
memory: {{ .Values.resources.defaultMemoryLimit }}
ports:
- name: servicea-8080
containerPort: 8080
@ -39,7 +43,7 @@ spec:
volumes:
- name: etc-my-config1-txt
configMap:
name: bridge
name: {{ .Values.projectName }}
items:
- key: my-config
path: my-config

@ -12,7 +12,7 @@ metadata:
com.docker.compose.service: serviceA
app.kubernetes.io/managed-by: Helm
spec:
type: LoadBalancer
type: {{ .Values.service.type }}
selector:
com.docker.compose.project: bridge
com.docker.compose.service: serviceA

@ -10,13 +10,13 @@ metadata:
com.docker.compose.service: serviceB
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
replicas: {{ .Values.deployment.defaultReplicas }}
selector:
matchLabels:
com.docker.compose.project: bridge
com.docker.compose.service: serviceB
strategy:
type: Recreate
type: {{ .Values.deployment.strategy }}
template:
metadata:
labels:
@ -29,6 +29,10 @@ spec:
- name: serviceb
image: {{ .Values.serviceB.image }}
imagePullPolicy: {{ .Values.serviceB.imagePullPolicy }}
resources:
limits:
cpu: {{ .Values.resources.defaultCpuLimit }}
memory: {{ .Values.resources.defaultMemoryLimit }}
ports:
- name: serviceb-8082
containerPort: 8082

@ -10,7 +10,7 @@ metadata:
com.docker.compose.service: serviceB
app.kubernetes.io/managed-by: Helm
spec:
type: LoadBalancer
type: {{ .Values.service.type }}
selector:
com.docker.compose.project: bridge
com.docker.compose.service: serviceB

@ -1,8 +1,25 @@
#! values.yaml
# Project Name
projectName: bridge
# Namespace
namespace: bridge
# Default deployment settings
deployment:
strategy: Recreate
defaultReplicas: 1
# Default resource limits
resources:
defaultCpuLimit: "100m"
defaultMemoryLimit: "512Mi"
# Service settings
service:
type: LoadBalancer
# Storage settings
storage:
defaultStorageClass: "hostpath"
defaultSize: "100Mi"
defaultAccessMode: "ReadWriteOnce"
# Services variables

serviceA:
image: alpine
imagePullPolicy: IfNotPresent

@ -0,0 +1,9 @@
services:
service1:
image: alpine
command: /bin/true
service2:
image: alpine
command: ping -c 2 localhost
pre_stop:
- command: echo "stop hook running..."

@ -206,3 +206,20 @@ func TestUpImageID(t *testing.T) {
c = NewCLI(t, WithEnv(fmt.Sprintf("ID=%s", id)))
c.RunDockerComposeCmd(t, "-f", "./fixtures/simple-composefile/id.yaml", "--project-name", projectName, "up")
}

func TestUpStopWithLogsMixed(t *testing.T) {
c := NewCLI(t)
const projectName = "compose-e2e-stop-logs"

t.Cleanup(func() {
c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "-v")
})

res := c.RunDockerComposeCmd(t, "-f", "./fixtures/stop/compose.yaml", "--project-name", projectName, "up", "--abort-on-container-exit")
// assert we still get service2 logs after service 1 Stopped event
res.Assert(t, icmd.Expected{
Err: "Container compose-e2e-stop-logs-service1-1 Stopped",
})
// assert we get stop hook logs
res.Assert(t, icmd.Expected{Out: "service2-1 -> | stop hook running...\nservice2-1 | 64 bytes"})
}

@ -5,6 +5,7 @@
//
// mockgen -destination pkg/mocks/mock_docker_api.go -package mocks github.com/docker/docker/client APIClient
//

// Package mocks is a generated GoMock package.
package mocks

@ -16,6 +17,7 @@ import (
reflect "reflect"

types "github.com/docker/docker/api/types"
build "github.com/docker/docker/api/types/build"
checkpoint "github.com/docker/docker/api/types/checkpoint"
common "github.com/docker/docker/api/types/common"
container "github.com/docker/docker/api/types/container"
@ -56,10 +58,10 @@ func (m *MockAPIClient) EXPECT() *MockAPIClientMockRecorder {
}

// BuildCachePrune mocks base method.
func (m *MockAPIClient) BuildCachePrune(arg0 context.Context, arg1 types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
func (m *MockAPIClient) BuildCachePrune(arg0 context.Context, arg1 build.CachePruneOptions) (*build.CachePruneReport, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BuildCachePrune", arg0, arg1)
ret0, _ := ret[0].(*types.BuildCachePruneReport)
ret0, _ := ret[0].(*build.CachePruneReport)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -156,10 +158,10 @@ func (mr *MockAPIClientMockRecorder) Close() *gomock.Call {
}

// ConfigCreate mocks base method.
func (m *MockAPIClient) ConfigCreate(arg0 context.Context, arg1 swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
func (m *MockAPIClient) ConfigCreate(arg0 context.Context, arg1 swarm.ConfigSpec) (swarm.ConfigCreateResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ConfigCreate", arg0, arg1)
ret0, _ := ret[0].(types.ConfigCreateResponse)
ret0, _ := ret[0].(swarm.ConfigCreateResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -187,7 +189,7 @@ func (mr *MockAPIClientMockRecorder) ConfigInspectWithRaw(arg0, arg1 any) *gomoc
}

// ConfigList mocks base method.
func (m *MockAPIClient) ConfigList(arg0 context.Context, arg1 types.ConfigListOptions) ([]swarm.Config, error) {
func (m *MockAPIClient) ConfigList(arg0 context.Context, arg1 swarm.ConfigListOptions) ([]swarm.Config, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ConfigList", arg0, arg1)
ret0, _ := ret[0].([]swarm.Config)
@ -802,10 +804,10 @@ func (mr *MockAPIClientMockRecorder) HTTPClient() *gomock.Call {
}

// ImageBuild mocks base method.
func (m *MockAPIClient) ImageBuild(arg0 context.Context, arg1 io.Reader, arg2 types.ImageBuildOptions) (types.ImageBuildResponse, error) {
func (m *MockAPIClient) ImageBuild(arg0 context.Context, arg1 io.Reader, arg2 build.ImageBuildOptions) (build.ImageBuildResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImageBuild", arg0, arg1, arg2)
ret0, _ := ret[0].(types.ImageBuildResponse)
ret0, _ := ret[0].(build.ImageBuildResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -1220,7 +1222,7 @@ func (mr *MockAPIClientMockRecorder) NodeInspectWithRaw(arg0, arg1 any) *gomock.
}

// NodeList mocks base method.
func (m *MockAPIClient) NodeList(arg0 context.Context, arg1 types.NodeListOptions) ([]swarm.Node, error) {
func (m *MockAPIClient) NodeList(arg0 context.Context, arg1 swarm.NodeListOptions) ([]swarm.Node, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NodeList", arg0, arg1)
ret0, _ := ret[0].([]swarm.Node)
@ -1235,7 +1237,7 @@ func (mr *MockAPIClientMockRecorder) NodeList(arg0, arg1 any) *gomock.Call {
}

// NodeRemove mocks base method.
func (m *MockAPIClient) NodeRemove(arg0 context.Context, arg1 string, arg2 types.NodeRemoveOptions) error {
func (m *MockAPIClient) NodeRemove(arg0 context.Context, arg1 string, arg2 swarm.NodeRemoveOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NodeRemove", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
@ -1439,10 +1441,10 @@ func (mr *MockAPIClientMockRecorder) RegistryLogin(arg0, arg1 any) *gomock.Call
}

// SecretCreate mocks base method.
func (m *MockAPIClient) SecretCreate(arg0 context.Context, arg1 swarm.SecretSpec) (types.SecretCreateResponse, error) {
func (m *MockAPIClient) SecretCreate(arg0 context.Context, arg1 swarm.SecretSpec) (swarm.SecretCreateResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SecretCreate", arg0, arg1)
ret0, _ := ret[0].(types.SecretCreateResponse)
ret0, _ := ret[0].(swarm.SecretCreateResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -1470,7 +1472,7 @@ func (mr *MockAPIClientMockRecorder) SecretInspectWithRaw(arg0, arg1 any) *gomoc
}

// SecretList mocks base method.
func (m *MockAPIClient) SecretList(arg0 context.Context, arg1 types.SecretListOptions) ([]swarm.Secret, error) {
func (m *MockAPIClient) SecretList(arg0 context.Context, arg1 swarm.SecretListOptions) ([]swarm.Secret, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SecretList", arg0, arg1)
ret0, _ := ret[0].([]swarm.Secret)
@ -1528,7 +1530,7 @@ func (mr *MockAPIClientMockRecorder) ServerVersion(arg0 any) *gomock.Call {
}

// ServiceCreate mocks base method.
func (m *MockAPIClient) ServiceCreate(arg0 context.Context, arg1 swarm.ServiceSpec, arg2 types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) {
func (m *MockAPIClient) ServiceCreate(arg0 context.Context, arg1 swarm.ServiceSpec, arg2 swarm.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ServiceCreate", arg0, arg1, arg2)
ret0, _ := ret[0].(swarm.ServiceCreateResponse)
@ -1543,7 +1545,7 @@ func (mr *MockAPIClientMockRecorder) ServiceCreate(arg0, arg1, arg2 any) *gomock
}

// ServiceInspectWithRaw mocks base method.
func (m *MockAPIClient) ServiceInspectWithRaw(arg0 context.Context, arg1 string, arg2 types.ServiceInspectOptions) (swarm.Service, []byte, error) {
func (m *MockAPIClient) ServiceInspectWithRaw(arg0 context.Context, arg1 string, arg2 swarm.ServiceInspectOptions) (swarm.Service, []byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ServiceInspectWithRaw", arg0, arg1, arg2)
ret0, _ := ret[0].(swarm.Service)
@ -1559,7 +1561,7 @@ func (mr *MockAPIClientMockRecorder) ServiceInspectWithRaw(arg0, arg1, arg2 any)
}

// ServiceList mocks base method.
func (m *MockAPIClient) ServiceList(arg0 context.Context, arg1 types.ServiceListOptions) ([]swarm.Service, error) {
func (m *MockAPIClient) ServiceList(arg0 context.Context, arg1 swarm.ServiceListOptions) ([]swarm.Service, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ServiceList", arg0, arg1)
ret0, _ := ret[0].([]swarm.Service)
@ -1603,7 +1605,7 @@ func (mr *MockAPIClientMockRecorder) ServiceRemove(arg0, arg1 any) *gomock.Call
}

// ServiceUpdate mocks base method.
func (m *MockAPIClient) ServiceUpdate(arg0 context.Context, arg1 string, arg2 swarm.Version, arg3 swarm.ServiceSpec, arg4 types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) {
func (m *MockAPIClient) ServiceUpdate(arg0 context.Context, arg1 string, arg2 swarm.Version, arg3 swarm.ServiceSpec, arg4 swarm.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ServiceUpdate", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(swarm.ServiceUpdateResponse)
@ -1618,10 +1620,10 @@ func (mr *MockAPIClientMockRecorder) ServiceUpdate(arg0, arg1, arg2, arg3, arg4
}

// SwarmGetUnlockKey mocks base method.
func (m *MockAPIClient) SwarmGetUnlockKey(arg0 context.Context) (types.SwarmUnlockKeyResponse, error) {
func (m *MockAPIClient) SwarmGetUnlockKey(arg0 context.Context) (swarm.UnlockKeyResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SwarmGetUnlockKey", arg0)
ret0, _ := ret[0].(types.SwarmUnlockKeyResponse)
ret0, _ := ret[0].(swarm.UnlockKeyResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -1735,7 +1737,7 @@ func (mr *MockAPIClientMockRecorder) TaskInspectWithRaw(arg0, arg1 any) *gomock.
}

// TaskList mocks base method.
func (m *MockAPIClient) TaskList(arg0 context.Context, arg1 types.TaskListOptions) ([]swarm.Task, error) {
func (m *MockAPIClient) TaskList(arg0 context.Context, arg1 swarm.TaskListOptions) ([]swarm.Task, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TaskList", arg0, arg1)
ret0, _ := ret[0].([]swarm.Task)

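
The churn above comes from regenerating the GoMock doubles after the Docker API client moved these option and response types out of the types package into swarm and build. A hedged sketch of exercising the regenerated mock follows; the NewMockAPIClient constructor name follows mockgen's standard naming seen in this diff, and the gomock import path is an assumption. The regeneration command is the one recorded in the file header.

	// mockgen -destination pkg/mocks/mock_docker_api.go -package mocks github.com/docker/docker/client APIClient
	package mocks_test

	import (
		"testing"

		"go.uber.org/mock/gomock" // assumed import path for gomock

		"github.com/docker/compose/v2/pkg/mocks"
	)

	func TestMockAPIClient(t *testing.T) {
		ctrl := gomock.NewController(t)
		apiClient := mocks.NewMockAPIClient(ctrl)
		_ = apiClient // expectations would be set with apiClient.EXPECT()
	}
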
@ -5,6 +5,7 @@
//
// mockgen -destination pkg/mocks/mock_docker_cli.go -package mocks github.com/docker/cli/cli/command Cli
//

// Package mocks is a generated GoMock package.
package mocks

@ -5,6 +5,7 @@
//
// mockgen -destination pkg/mocks/mock_docker_compose_api.go -package mocks -source=./pkg/api/api.go Service
//

// Package mocks is a generated GoMock package.
package mocks

@ -60,7 +60,7 @@ type Event struct {
Total int64
startTime time.Time
endTime time.Time
spinner *spinner
spinner *Spinner
}

// ErrorMessageEvent creates a new Error Event with message

@ -0,0 +1,89 @@
/*
Copyright 2024 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package progress

import (
"bytes"
"context"
"encoding/json"
"testing"

"gotest.tools/v3/assert"
)

func TestJsonWriter_Event(t *testing.T) {
var out bytes.Buffer
w := &jsonWriter{
out: &out,
done: make(chan bool),
dryRun: true,
}

event := Event{
ID: "service1",
ParentID: "project",
Text: "Creating",
StatusText: "Working",
Current: 50,
Total: 100,
Percent: 50,
}
w.Event(event)

var actual jsonMessage
err := json.Unmarshal(out.Bytes(), &actual)
assert.NilError(t, err)

expected := jsonMessage{
DryRun: true,
ID: event.ID,
ParentID: event.ParentID,
Text: event.Text,
Status: event.StatusText,
Current: event.Current,
Total: event.Total,
Percent: event.Percent,
}
assert.DeepEqual(t, expected, actual)
}

func TestJsonWriter_TailMsgf(t *testing.T) {
var out bytes.Buffer
w := &jsonWriter{
out: &out,
done: make(chan bool),
dryRun: false,
}

go func() {
_ = w.Start(context.Background())
}()

w.TailMsgf("hello %s", "world")

w.Stop()

var actual jsonMessage
err := json.Unmarshal(out.Bytes(), &actual)
assert.NilError(t, err)

expected := jsonMessage{
Tail: true,
Text: "hello world",
}
assert.DeepEqual(t, expected, actual)
}

@ -0,0 +1,76 @@
/*
Copyright 2020 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package progress

import (
"context"
"fmt"

"github.com/docker/cli/cli/streams"
"github.com/docker/compose/v2/pkg/api"
)

// NewMixedWriter creates a Writer which allows mixing output from a progress.Writer with an api.LogConsumer
func NewMixedWriter(out *streams.Out, consumer api.LogConsumer, dryRun bool) Writer {
isTerminal := out.IsTerminal()
if Mode != ModeAuto || !isTerminal {
return &plainWriter{
out: out,
done: make(chan bool),
dryRun: dryRun,
}
}
return &mixedWriter{
out: consumer,
done: make(chan bool),
dryRun: dryRun,
}
}

type mixedWriter struct {
done chan bool
dryRun bool
out api.LogConsumer
}

func (p *mixedWriter) Start(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
case <-p.done:
return nil
}
}

func (p *mixedWriter) Event(e Event) {
p.out.Status("", fmt.Sprintf("%s %s %s", e.ID, e.Text, SuccessColor(e.StatusText)))
}

func (p *mixedWriter) Events(events []Event) {
for _, e := range events {
p.Event(e)
}
}

func (p *mixedWriter) TailMsgf(msg string, args ...interface{}) {
msg = fmt.Sprintf(msg, args...)
p.out.Status("", WarningColor(msg))
}

func (p *mixedWriter) Stop() {
p.done <- true
}

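
NewMixedWriter falls back to the plain progress writer whenever progress mode is not "auto" or the output stream is not a terminal; otherwise progress events are rendered through the log consumer so they interleave cleanly with container logs. An isolated sketch of that selection rule follows, with illustrative stand-in types rather than the real streams.Out and api.LogConsumer.

	package main

	import "fmt"

	const modeAuto = "auto"

	// pickWriter mirrors the decision made by NewMixedWriter above.
	func pickWriter(mode string, isTerminal bool) string {
		if mode != modeAuto || !isTerminal {
			return "plainWriter"
		}
		return "mixedWriter" // progress events rendered through the LogConsumer
	}

	func main() {
		fmt.Println(pickWriter("auto", true))  // mixedWriter
		fmt.Println(pickWriter("auto", false)) // plainWriter (e.g. piped output)
		fmt.Println(pickWriter("plain", true)) // plainWriter
	}
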
@ -21,7 +21,7 @@ import (
"time"
)

type spinner struct {
type Spinner struct {
time time.Time
index int
chars []string
@ -29,7 +29,7 @@ type spinner struct {
done string
}

func newSpinner() *spinner {
func NewSpinner() *Spinner {
chars := []string{
"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏",
}
@ -40,7 +40,7 @@ func newSpinner() *spinner {
done = "-"
}

return &spinner{
return &Spinner{
index: 0,
time: time.Now(),
chars: chars,
@ -48,7 +48,7 @@ func newSpinner() *spinner {
}
}

func (s *spinner) String() string {
func (s *Spinner) String() string {
if s.stop {
return s.done
}
@ -61,10 +61,10 @@ func (s *spinner) String() string {
return s.chars[s.index]
}

func (s *spinner) Stop() {
func (s *Spinner) Stop() {
s.stop = true
}

func (s *spinner) Restart() {
func (s *Spinner) Restart() {
s.stop = false
}

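
The spinner type and its constructor are exported here (spinner becomes Spinner, newSpinner becomes NewSpinner) so other packages can drive it. A short usage sketch follows, based only on the methods visible in this diff; the package path is the pkg/progress import shown elsewhere in the changeset.

	package main

	import (
		"fmt"

		"github.com/docker/compose/v2/pkg/progress"
	)

	func main() {
		s := progress.NewSpinner()
		fmt.Println(s.String()) // one of the braille frames while running
		s.Stop()
		fmt.Println(s.String()) // the "done" glyph once stopped
		s.Restart()
	}
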
@ -110,7 +110,7 @@ func (w *ttyWriter) event(e Event) {
w.events[e.ID] = last
} else {
e.startTime = time.Now()
e.spinner = newSpinner()
e.spinner = NewSpinner()
if e.Status == Done || e.Status == Error {
e.stop()
}

@ -34,7 +34,7 @@ func TestLineText(t *testing.T) {
StatusText: "Status",
endTime: now,
startTime: now,
spinner: &spinner{
spinner: &Spinner{
chars: []string{"."},
},
}
@ -65,7 +65,7 @@ func TestLineTextSingleEvent(t *testing.T) {
Status: Done,
StatusText: "Status",
startTime: now,
spinner: &spinner{
spinner: &Spinner{
chars: []string{"."},
},
}
@ -87,7 +87,7 @@ func TestErrorEvent(t *testing.T) {
Status: Working,
StatusText: "Working",
startTime: time.Now(),
spinner: &spinner{
spinner: &Spinner{
chars: []string{"."},
},
}
@ -116,7 +116,7 @@ func TestWarningEvent(t *testing.T) {
Status: Working,
StatusText: "Working",
startTime: time.Now(),
spinner: &spinner{
spinner: &Spinner{
chars: []string{"."},
},
}

@ -65,6 +65,25 @@ func Run(ctx context.Context, pf progressFunc, out *streams.Out) error {
return err
}

func RunWithLog(ctx context.Context, pf progressFunc, out *streams.Out, logConsumer api.LogConsumer) error {
dryRun, ok := ctx.Value(api.DryRunKey{}).(bool)
if !ok {
dryRun = false
}
w := NewMixedWriter(out, logConsumer, dryRun)
eg, _ := errgroup.WithContext(ctx)
eg.Go(func() error {
return w.Start(context.Background())
})
eg.Go(func() error {
defer w.Stop()
ctx = WithContextWriter(ctx, w)
err := pf(ctx)
return err
})
return eg.Wait()
}

func RunWithTitle(ctx context.Context, pf progressFunc, out *streams.Out, progressTitle string) error {
_, err := RunWithStatus(ctx, func(ctx context.Context) (string, error) {
return "", pf(ctx)

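
RunWithLog wraps a progress function with the mixed writer so that its events are forwarded to a LogConsumer. A hedged sketch of calling it follows; the signature is the one added above, while the surrounding wiring (dockerCli, consumer, the function body) is assumed for illustration.

	package example

	import (
		"context"

		"github.com/docker/cli/cli/command"
		"github.com/docker/compose/v2/pkg/api"
		"github.com/docker/compose/v2/pkg/progress"
	)

	// stopWithMixedOutput runs a long operation while forwarding its progress
	// events to consumer, using the RunWithLog helper added in this diff.
	func stopWithMixedOutput(ctx context.Context, dockerCli command.Cli, consumer api.LogConsumer) error {
		return progress.RunWithLog(ctx, func(ctx context.Context) error {
			// long-running work; progress events are written via the context writer
			return nil
		}, dockerCli.Out(), consumer)
	}
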
@ -1,5 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Container: github.com/docker/compose-cli/pkg/prompt (interfaces: UI)
// Source: github.com/docker/compose-cli/pkg/prompt (interfaces: UI)

// Package prompt is a generated GoMock package.
package prompt

@ -1,4 +1,4 @@
//go:build linux || openbsd
//go:build linux || openbsd || freebsd

/*
Copyright 2020 Docker Compose CLI authors

@ -133,12 +133,12 @@ func (g gitRemoteLoader) resolveGitRef(ctx context.Context, path string, ref *gi
if !commitSHA.MatchString(ref.Commit) {
cmd := exec.CommandContext(ctx, "git", "ls-remote", "--exit-code", ref.Remote, ref.Commit)
cmd.Env = g.gitCommandEnv()
out, err := cmd.Output()
out, err := cmd.CombinedOutput()
if err != nil {
if cmd.ProcessState.ExitCode() == 2 {
return fmt.Errorf("repository does not contain ref %s, output: %q: %w", path, string(out), err)
}
return err
return fmt.Errorf("failed to access repository at %s:\n %s", ref.Remote, out)
}
if len(out) < 40 {
return fmt.Errorf("unexpected git command output: %q", string(out))
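
The change above switches from cmd.Output to cmd.CombinedOutput so that stderr from `git ls-remote` ends up in the returned error. A small self-contained sketch of that pattern follows; the git arguments mirror the ones in the diff, and the remote URL is a placeholder.

	package main

	import (
		"context"
		"fmt"
		"os/exec"
	)

	// resolveRemoteRef runs git ls-remote and, on failure, surfaces git's own
	// diagnostics: CombinedOutput captures stdout and stderr, unlike Output.
	func resolveRemoteRef(ctx context.Context, remote, commit string) ([]byte, error) {
		cmd := exec.CommandContext(ctx, "git", "ls-remote", "--exit-code", remote, commit)
		out, err := cmd.CombinedOutput()
		if err != nil {
			return nil, fmt.Errorf("failed to access repository at %s:\n %s", remote, out)
		}
		return out, nil
	}

	func main() {
		if _, err := resolveRemoteRef(context.Background(), "https://example.com/repo.git", "main"); err != nil {
			fmt.Println(err)
		}
	}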