Mirror of https://github.com/docker/compose.git

commit 8ab8df86e0
parent de178267df

    introduce --watch

    Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
--- a/cmd/compose/up.go
+++ b/cmd/compose/up.go
@@ -24,11 +24,10 @@ import (
 	"strings"
 	"time"
 
-	xprogress "github.com/moby/buildkit/util/progress/progressui"
-
 	"github.com/compose-spec/compose-go/v2/types"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/compose/v2/cmd/formatter"
+	xprogress "github.com/moby/buildkit/util/progress/progressui"
 	"github.com/spf13/cobra"
 
 	"github.com/docker/compose/v2/pkg/api"
@@ -55,6 +54,7 @@ type upOptions struct {
 	timestamp   bool
 	wait        bool
 	waitTimeout int
+	watch       bool
 }
 
 func (opts upOptions) apply(project *types.Project, services []string) (*types.Project, error) {
@@ -126,6 +126,7 @@ func upCommand(p *ProjectOptions, dockerCli command.Cli, backend api.Service) *c
 	flags.BoolVar(&up.attachDependencies, "attach-dependencies", false, "Automatically attach to log output of dependent services")
 	flags.BoolVar(&up.wait, "wait", false, "Wait for services to be running|healthy. Implies detached mode.")
 	flags.IntVar(&up.waitTimeout, "wait-timeout", 0, "Maximum duration to wait for the project to be running|healthy")
+	flags.BoolVarP(&up.watch, "watch", "w", false, "Watch source code and rebuild/refresh containers when files are updated.")
 
 	return upCmd
 }
@@ -257,6 +258,7 @@ func runUp(
 			CascadeStop: upOptions.cascadeStop,
 			Wait:        upOptions.wait,
 			WaitTimeout: timeout,
+			Watch:       upOptions.watch,
 			Services:    services,
 		},
 	})
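The new flag is plumbed through standard cobra/pflag wiring: `BoolVarP` binds the long flag and the `-w` shorthand to `upOptions.watch`, and `runUp` copies that value into `api.StartOptions.Watch`. Below is a minimal standalone sketch of the same pattern; `upOpts` and the command body are illustrative stand-ins, not the Compose code itself:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// upOpts is an illustrative stand-in for Compose's upOptions struct.
type upOpts struct {
	watch bool
}

func main() {
	var opts upOpts
	upCmd := &cobra.Command{
		Use: "up",
		RunE: func(cmd *cobra.Command, args []string) error {
			// In the diff above, opts.watch is copied into
			// api.StartOptions.Watch inside runUp; here we just print it.
			fmt.Println("watch enabled:", opts.watch)
			return nil
		},
	}
	// Same call shape as in the diff: long flag --watch, shorthand -w, default false.
	upCmd.Flags().BoolVarP(&opts.watch, "watch", "w", false, "Watch source code and rebuild/refresh containers when files are updated.")
	_ = upCmd.Execute()
}
```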
--- a/cmd/compose/watch.go
+++ b/cmd/compose/watch.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 
 	"github.com/compose-spec/compose-go/v2/types"
+	"github.com/docker/compose/v2/cmd/formatter"
 
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/compose/v2/internal/locker"
@@ -31,8 +32,7 @@ import (
 
 type watchOptions struct {
 	*ProjectOptions
-	quiet bool
-	noUp  bool
+	noUp bool
 }
 
 func watchCommand(p *ProjectOptions, dockerCli command.Cli, backend api.Service) *cobra.Command {
@@ -57,7 +57,7 @@ func watchCommand(p *ProjectOptions, dockerCli command.Cli, backend api.Service)
 		ValidArgsFunction: completeServiceNames(dockerCli, p),
 	}
 
-	cmd.Flags().BoolVar(&watchOpts.quiet, "quiet", false, "hide build output")
+	cmd.Flags().BoolVar(&buildOpts.quiet, "quiet", false, "hide build output")
 	cmd.Flags().BoolVar(&watchOpts.noUp, "no-up", false, "Do not build & start services before watching")
 	return cmd
 }
@@ -101,7 +101,7 @@ func runWatch(ctx context.Context, dockerCli command.Cli, backend api.Service, w
 				Recreate:             api.RecreateDiverged,
 				RecreateDependencies: api.RecreateNever,
 				Inherit:              true,
-				QuietPull:            watchOpts.quiet,
+				QuietPull:            buildOpts.quiet,
 			},
 			Start: api.StartOptions{
 				Project: project,
@@ -114,7 +114,10 @@ func runWatch(ctx context.Context, dockerCli command.Cli, backend api.Service, w
 			return err
 		}
 	}
 
+	consumer := formatter.NewLogConsumer(ctx, dockerCli.Out(), dockerCli.Err(), false, false, false)
 	return backend.Watch(ctx, project, services, api.WatchOptions{
-		Build: build,
+		Build: &build,
+		LogTo: consumer,
 	})
}
--- a/cmd/formatter/logs.go
+++ b/cmd/formatter/logs.go
@@ -62,7 +62,11 @@ func (l *logConsumer) Register(name string) {
 func (l *logConsumer) register(name string) *presenter {
 	cf := monochrome
 	if l.color {
-		cf = nextColor()
+		if name == api.WatchLogger {
+			cf = makeColorFunc("92")
+		} else {
+			cf = nextColor()
+		}
 	}
 	p := &presenter{
 		colors: cf,
@@ -138,5 +142,9 @@ type presenter struct {
 }
 
 func (p *presenter) setPrefix(width int) {
+	if p.name == api.WatchLogger {
+		p.prefix = p.colors(strings.Repeat(" ", width) + " ⦿ ")
+		return
+	}
 	p.prefix = p.colors(fmt.Sprintf("%-"+strconv.Itoa(width)+"s | ", p.name))
 }
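`makeColorFunc("92")` pins the `#watch` logger to one fixed color instead of the rotating per-container palette; SGR code 92 is bright green. A minimal sketch of what such a helper plausibly looks like, assuming the conventional ANSI escape form (the real helper lives elsewhere in `cmd/formatter` and may differ):

```go
package main

import "fmt"

// colorFunc mirrors the role of the formatter's color functions: wrap a
// string in an ANSI SGR escape sequence and reset afterwards.
type colorFunc func(s string) string

// makeColorFunc builds a colorFunc for a given SGR code; "92" is bright green.
func makeColorFunc(code string) colorFunc {
	return func(s string) string {
		return "\033[" + code + "m" + s + "\033[0m"
	}
}

func main() {
	green := makeColorFunc("92")
	// Approximates the watch-logger prefix rendered by setPrefix above.
	fmt.Println(green(" ⦿ ") + "watch enabled")
}
```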
--- a/docs/reference/compose_up.md
+++ b/docs/reference/compose_up.md
@@ -32,6 +32,7 @@ Create and start containers
 | `--timestamps` | | | Show timestamps |
 | `--wait` | | | Wait for services to be running\|healthy. Implies detached mode. |
 | `--wait-timeout` | `int` | `0` | Maximum duration to wait for the project to be running\|healthy |
+| `-w`, `--watch` | | | Watch source code and rebuild/refresh containers when files are updated. |
 
 
 <!---MARKER_GEN_END-->
--- a/docs/reference/docker_compose_up.yaml
+++ b/docs/reference/docker_compose_up.yaml
@@ -274,6 +274,18 @@ options:
       experimentalcli: false
       kubernetes: false
       swarm: false
+    - option: watch
+      shorthand: w
+      value_type: bool
+      default_value: "false"
+      description: |
+        Watch source code and rebuild/refresh containers when files are updated.
+      deprecated: false
+      hidden: false
+      experimental: false
+      experimentalcli: false
+      kubernetes: false
+      swarm: false
 inherited_options:
     - option: dry-run
       value_type: bool
--- a/pkg/api/api.go
+++ b/pkg/api/api.go
@@ -114,9 +114,13 @@ type VizOptions struct {
 	Indentation string
 }
 
+// WatchLogger is a reserved name to log watch events
+const WatchLogger = "#watch"
+
 // WatchOptions group options of the Watch API
 type WatchOptions struct {
-	Build BuildOptions
+	Build *BuildOptions
+	LogTo LogConsumer
 }
 
 // BuildOptions group options of the Build API
@@ -214,6 +218,7 @@ type StartOptions struct {
 	WaitTimeout time.Duration
 	// Services passed in the command line to be started
 	Services []string
+	Watch    bool
 }
 
 // RestartOptions group options of the Restart API
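The new `LogTo LogConsumer` field is what routes watch events through the same pipeline as container logs. The interface itself is not shown in this diff; inferring from the `stdLogger` test double added in `watch_test.go` below, it plausibly looks like this (an inference, not a verbatim copy of `pkg/api`):

```go
// Inferred shape of api.LogConsumer, based on the methods stdLogger
// implements in watch_test.go; the upstream definition may differ.
type LogConsumer interface {
	Log(containerName, message string) // a log line from a container (or #watch)
	Err(containerName, message string) // a line from the container's stderr
	Status(container, msg string)      // lifecycle/status messages
	Register(container string)         // declare a source before logging
}
```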
--- a/pkg/compose/build.go
+++ b/pkg/compose/build.go
@@ -127,6 +127,9 @@ func (s *composeService) build(ctx context.Context, project *types.Project, opti
 	progressCtx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
+	if options.Quiet {
+		options.Progress = progress.ModeQuiet
+	}
 	w, err = xprogress.NewPrinter(progressCtx, os.Stdout, progressui.DisplayMode(options.Progress),
 		xprogress.WithDesc(
 			fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver),
--- a/pkg/compose/up.go
+++ b/pkg/compose/up.go
@@ -125,6 +125,15 @@ func (s *composeService) Up(ctx context.Context, project *types.Project, options
 		return err
 	})
 
+	if options.Start.Watch {
+		eg.Go(func() error {
+			return s.Watch(ctx, project, options.Start.Services, api.WatchOptions{
+				Build: options.Create.Build,
+				LogTo: options.Start.Attach,
+			})
+		})
+	}
+
 	// We don't use parent (cancelable) context as we manage sigterm to stop the stack
 	err = s.start(context.Background(), project.Name, options.Start, printer.HandleEvent)
 	if err != nil && !isTerminated { // Ignore error if the process is terminated
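Running the watcher through the existing errgroup keeps `up --watch` a single foreground command: the watch loop runs concurrently with the stack, and `Up` returns only after both finish. A tiny runnable sketch of that coordination pattern, using stand-ins for `s.Watch` and `s.start`:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())

	// Stand-in for s.Watch: runs until the group's context is canceled.
	eg.Go(func() error {
		<-ctx.Done()
		return ctx.Err()
	})

	// Stand-in for s.start: returning (here with an error) cancels ctx,
	// which unblocks the watcher goroutine above.
	eg.Go(func() error {
		return fmt.Errorf("stack stopped")
	})

	fmt.Println(eg.Wait()) // prints "stack stopped"
}
```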
--- a/pkg/compose/watch.go
+++ b/pkg/compose/watch.go
@@ -76,6 +76,7 @@ func (s *composeService) Watch(ctx context.Context, project *types.Project, serv
 	}
 	eg, ctx := errgroup.WithContext(ctx)
 	watching := false
+	options.LogTo.Register(api.WatchLogger)
 	for i := range project.Services {
 		service := project.Services[i]
 		config, err := loadDevelopmentConfig(service, project)
@@ -91,9 +92,15 @@ func (s *composeService) Watch(ctx context.Context, project *types.Project, serv
 			continue
 		}
 
-		if len(config.Watch) > 0 && service.Build == nil {
-			// service configured with watchers but no build section
-			return fmt.Errorf("can't watch service %q without a build context", service.Name)
+		for _, trigger := range config.Watch {
+			if trigger.Action == types.WatchActionRebuild {
+				if service.Build == nil {
+					return fmt.Errorf("can't watch service %q with action %s without a build context", service.Name, types.WatchActionRebuild)
+				}
+				if options.Build == nil {
+					return fmt.Errorf("--no-build is incompatible with watch action %s in service %s", types.WatchActionRebuild, service.Name)
+				}
+			}
 		}
 
 		if len(services) > 0 && service.Build == nil {
@@ -142,9 +149,7 @@ func (s *composeService) Watch(ctx context.Context, project *types.Project, serv
 			return err
 		}
 
-		fmt.Fprintf(
-			s.stdinfo(),
-			"Watch configuration for service %q:%s\n",
+		logrus.Debugf("Watch configuration for service %q:%s\n",
 			service.Name,
 			strings.Join(append([]string{""}, pathLogs...), "\n - "),
 		)
@@ -163,6 +168,7 @@ func (s *composeService) Watch(ctx context.Context, project *types.Project, serv
 	if !watching {
 		return fmt.Errorf("none of the selected services is configured for watch, consider setting an 'develop' section")
 	}
+	options.LogTo.Log(api.WatchLogger, "watch enabled")
 
 	return eg.Wait()
 }
@@ -190,7 +196,7 @@ func (s *composeService) watch(ctx context.Context, project *types.Project, name
 		case batch := <-batchEvents:
 			start := time.Now()
 			logrus.Debugf("batch start: service[%s] count[%d]", name, len(batch))
-			if err := s.handleWatchBatch(ctx, project, name, options.Build, batch, syncer); err != nil {
+			if err := s.handleWatchBatch(ctx, project, name, options, batch, syncer); err != nil {
 				logrus.Warnf("Error handling changed files for service %s: %v", name, err)
 			}
 			logrus.Debugf("batch complete: service[%s] duration[%s] count[%d]",
@@ -431,32 +437,38 @@ func (t tarDockerClient) Untar(ctx context.Context, id string, archive io.ReadCl
 	})
 }
 
-func (s *composeService) handleWatchBatch(ctx context.Context, project *types.Project, serviceName string, build api.BuildOptions, batch []fileEvent, syncer sync.Syncer) error {
+func (s *composeService) handleWatchBatch(ctx context.Context, project *types.Project, serviceName string, options api.WatchOptions, batch []fileEvent, syncer sync.Syncer) error {
 	pathMappings := make([]sync.PathMapping, len(batch))
 	restartService := false
 	for i := range batch {
 		if batch[i].Action == types.WatchActionRebuild {
-			fmt.Fprintf(
-				s.stdinfo(),
-				"Rebuilding service %q after changes were detected:%s\n",
-				serviceName,
-				strings.Join(append([]string{""}, batch[i].HostPath), "\n - "),
-			)
+			options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Rebuilding service %q after changes were detected...", serviceName))
 			// restrict the build to ONLY this service, not any of its dependencies
-			build.Services = []string{serviceName}
-			err := s.Up(ctx, project, api.UpOptions{
-				Create: api.CreateOptions{
-					Build:    &build,
-					Services: []string{serviceName},
-					Inherit:  true,
-				},
-				Start: api.StartOptions{
-					Services: []string{serviceName},
-					Project:  project,
-				},
+			options.Build.Services = []string{serviceName}
+			options.Build.Quiet = true
+			_, err := s.build(ctx, project, *options.Build, nil)
+			if err != nil {
+				options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Build failed. Error: %v", err))
+				return err
+			}
+			options.LogTo.Log(api.WatchLogger, fmt.Sprintf("service %q successfully built", serviceName))
+
+			err = s.create(ctx, project, api.CreateOptions{
+				Services: []string{serviceName},
+				Inherit:  true,
+				Recreate: api.RecreateForce,
 			})
 			if err != nil {
-				fmt.Fprintf(s.stderr(), "Application failed to start after update. Error: %v\n", err)
+				options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Failed to recreate service after update. Error: %v", err))
+				return err
+			}
+
+			err = s.start(ctx, project.Name, api.StartOptions{
+				Project:  project,
+				Services: []string{serviceName},
+			}, nil)
+			if err != nil {
+				options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Application failed to start after update. Error: %v", err))
 			}
 			return nil
 		}
@@ -466,7 +478,7 @@ func (s *composeService) handleWatchBatch(ctx context.Context, project *types.Pr
 		pathMappings[i] = batch[i].PathMapping
 	}
 
-	writeWatchSyncMessage(s.stdinfo(), serviceName, pathMappings)
+	writeWatchSyncMessage(options.LogTo, serviceName, pathMappings)
 
 	service, err := project.GetService(serviceName)
 	if err != nil {
@@ -486,29 +498,19 @@ func (s *composeService) handleWatchBatch(ctx context.Context, project *types.Pr
 }
 
 // writeWatchSyncMessage prints out a message about the sync for the changed paths.
-func writeWatchSyncMessage(w io.Writer, serviceName string, pathMappings []sync.PathMapping) {
+func writeWatchSyncMessage(log api.LogConsumer, serviceName string, pathMappings []sync.PathMapping) {
 	const maxPathsToShow = 10
 	if len(pathMappings) <= maxPathsToShow || logrus.IsLevelEnabled(logrus.DebugLevel) {
 		hostPathsToSync := make([]string, len(pathMappings))
 		for i := range pathMappings {
 			hostPathsToSync[i] = pathMappings[i].HostPath
 		}
-		fmt.Fprintf(
-			w,
-			"Syncing %q after changes were detected:%s\n",
-			serviceName,
-			strings.Join(append([]string{""}, hostPathsToSync...), "\n - "),
-		)
+		log.Log(api.WatchLogger, fmt.Sprintf("Syncing %q after changes were detected", serviceName))
 	} else {
 		hostPathsToSync := make([]string, len(pathMappings))
 		for i := range pathMappings {
 			hostPathsToSync[i] = pathMappings[i].HostPath
 		}
-		fmt.Fprintf(
-			w,
-			"Syncing service %q after %d changes were detected\n",
-			serviceName,
-			len(pathMappings),
-		)
+		log.Log(api.WatchLogger, fmt.Sprintf("Syncing service %q after %d changes were detected", serviceName, len(pathMappings)))
 	}
 }
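The `handleWatchBatch` rewrite replaces the recursive call into `Up` with three explicit steps (build only the changed service, force-recreate its container, start it) so each step can report through the watch logger. A runnable sketch of that control flow; `rebuild` and the `build`/`create`/`start` parameters here are illustrative stand-ins, not the Compose methods:

```go
package main

import "fmt"

// watchLogger mirrors the reserved api.WatchLogger name from the diff.
const watchLogger = "#watch"

type logConsumer interface {
	Log(name, message string)
}

type stdoutLogger struct{}

func (stdoutLogger) Log(name, message string) { fmt.Printf("%s %s\n", name, message) }

// rebuild sketches the sequence handleWatchBatch now runs for a rebuild
// trigger: build the one service, force-recreate it, then start it, with
// each step reported through the shared logger.
func rebuild(log logConsumer, service string, build, create, start func(service string) error) error {
	log.Log(watchLogger, fmt.Sprintf("Rebuilding service %q after changes were detected...", service))
	if err := build(service); err != nil {
		log.Log(watchLogger, fmt.Sprintf("Build failed. Error: %v", err))
		return err
	}
	log.Log(watchLogger, fmt.Sprintf("service %q successfully built", service))
	if err := create(service); err != nil {
		log.Log(watchLogger, fmt.Sprintf("Failed to recreate service after update. Error: %v", err))
		return err
	}
	if err := start(service); err != nil {
		log.Log(watchLogger, fmt.Sprintf("Application failed to start after update. Error: %v", err))
	}
	return nil
}

func main() {
	noop := func(string) error { return nil }
	_ = rebuild(stdoutLogger{}, "web", noop, noop, noop)
}
```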
--- a/pkg/compose/watch_test.go
+++ b/pkg/compose/watch_test.go
@@ -16,6 +16,7 @@ package compose
 
 import (
 	"context"
+	"fmt"
 	"os"
 	"testing"
 	"time"
@@ -91,10 +92,29 @@ func (t testWatcher) Errors() chan error {
 	return t.errors
 }
 
+type stdLogger struct{}
+
+func (s stdLogger) Log(containerName, message string) {
+	fmt.Printf("%s: %s\n", containerName, message)
+}
+
+func (s stdLogger) Err(containerName, message string) {
+	fmt.Fprintf(os.Stderr, "%s: %s\n", containerName, message)
+}
+
+func (s stdLogger) Status(container, msg string) {
+	fmt.Printf("%s: %s\n", container, msg)
+}
+
+func (s stdLogger) Register(container string) {
+
+}
+
 func TestWatch_Sync(t *testing.T) {
 	mockCtrl := gomock.NewController(t)
 	cli := mocks.NewMockCli(mockCtrl)
 	cli.EXPECT().Err().Return(os.Stderr).AnyTimes()
+	cli.EXPECT().BuildKitEnabled().Return(true, nil)
 	apiClient := mocks.NewMockAPIClient(mockCtrl)
 	apiClient.EXPECT().ContainerList(gomock.Any(), gomock.Any()).Return([]moby.Container{
 		testContainer("test", "123", false),
@@ -124,7 +144,10 @@ func TestWatch_Sync(t *testing.T) {
 		dockerCli: cli,
 		clock:     clock,
 	}
-	err := service.watch(ctx, &proj, "test", api.WatchOptions{}, watcher, syncer, []types.Trigger{
+	err := service.watch(ctx, &proj, "test", api.WatchOptions{
+		Build: &api.BuildOptions{},
+		LogTo: stdLogger{},
+	}, watcher, syncer, []types.Trigger{
 		{
 			Path:   "/sync",
 			Action: "sync",