pkg/autoupdate: allow updating multiple tasks per unit

Refactor the auto-update backend to allow updating multiple
tasks/containers per unit.  This commit is merely doing the plumbing;
the actual integration comes in a following commit.

[NO NEW TESTS NEEDED] as behavior should not change and existing
tests are expected to continue to pass.

Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
Valentin Rothberg 2022-08-29 13:42:21 +02:00
parent 8882b7664d
commit bdfc4df1f2
1 changed file with 89 additions and 64 deletions
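In short: AutoUpdate already assembles its work into a map from systemd unit to auto-update tasks; this commit moves the per-unit work out of the main loop into a new updateUnit helper so that a unit can later carry more than one task. A minimal sketch of that shape, with abbreviated stand-in types rather than podman's real definitions:

package main

import (
    "context"
    "fmt"
)

// Abbreviated stand-ins; podman's real updater and task types carry more state.
type task struct{ container string }

type updater struct {
    unitToTasks map[string][]*task // systemd unit name -> auto-update tasks in it
}

// updateUnit is the per-unit helper the diff below introduces; stubbed here
// so the sketch is self-contained.
func (u *updater) updateUnit(ctx context.Context, unit string, tasks []*task) []error {
    fmt.Printf("updating unit %s with %d task(s)\n", unit, len(tasks))
    return nil
}

func main() {
    ctx := context.Background()
    u := &updater{unitToTasks: map[string][]*task{
        "container-web.service": {{container: "nginx"}, {container: "redis"}},
    }}
    var allErrors []error
    for unit, tasks := range u.unitToTasks {
        // Per-unit errors are collected instead of aborting the whole run.
        allErrors = append(allErrors, u.updateUnit(ctx, unit, tasks)...)
    }
    fmt.Println(len(allErrors), "error(s)")
}

Grouping by unit matters because a unit is restarted as a whole: once several containers share one unit, their images have to be updated together before the single restart.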

@@ -153,18 +153,19 @@ func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.A
 	}
 
 	// Find auto-update tasks and assemble them by unit.
-	errors := auto.assembleTasks(ctx)
+	allErrors := auto.assembleTasks(ctx)
 
 	// Nothing to do.
 	if len(auto.unitToTasks) == 0 {
-		return nil, errors
+		return nil, allErrors
 	}
 
 	// Connect to DBUS.
 	conn, err := systemd.ConnectToDBUS()
 	if err != nil {
 		logrus.Errorf(err.Error())
-		return nil, []error{err}
+		allErrors = append(allErrors, err)
+		return nil, allErrors
 	}
 	defer conn.Close()
 	auto.conn = conn
@@ -174,18 +175,28 @@ func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.A
 	// Update all images/container according to their auto-update policy.
 	var allReports []*entities.AutoUpdateReport
 	for unit, tasks := range auto.unitToTasks {
+		unitErrors := auto.updateUnit(ctx, unit, tasks)
+		allErrors = append(allErrors, unitErrors...)
+		for _, task := range tasks {
+			allReports = append(allReports, task.report())
+		}
+	}
+
+	return allReports, allErrors
+}
+
+// updateUnit auto updates the tasks in the specified systemd unit.
+func (u *updater) updateUnit(ctx context.Context, unit string, tasks []*task) []error {
+	var errors []error
 	// Sanity check: we'll support that in the future.
 	if len(tasks) != 1 {
 		errors = append(errors, fmt.Errorf("only 1 task per unit supported but unit %s has %d", unit, len(tasks)))
-		return nil, errors
+		return errors
 	}
 
+	tasksUpdated := false
 	for _, task := range tasks {
-		err := func() error {
-			// Transition from state to state.  Will be
-			// split into multiple loops in the future to
-			// support more than one container/task per
-			// unit.
+		err := func() error { // Use an anonymous function to avoid spaghetti continue's
 			updateAvailable, err := task.updateAvailable(ctx)
 			if err != nil {
 				task.status = statusFailed
@@ -197,7 +208,7 @@ func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.A
 				return nil
 			}
 
-			if options.DryRun {
+			if u.options.DryRun {
 				task.status = statusPending
 				return nil
 			}
@@ -207,39 +218,53 @@ func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.A
 				return fmt.Errorf("updating image for container %s: %w", task.container.ID(), err)
 			}
 
-			updateError := auto.restartSystemdUnit(ctx, unit)
-			if updateError == nil {
-				task.status = statusUpdated
-				return nil
-			}
-
-			if !options.Rollback {
-				task.status = statusFailed
-				return fmt.Errorf("restarting unit %s for container %s: %w", task.unit, task.container.ID(), err)
-			}
-
-			if err := task.rollbackImage(); err != nil {
-				task.status = statusFailed
-				return fmt.Errorf("rolling back image for container %s: %w", task.container.ID(), err)
-			}
-
-			if err := auto.restartSystemdUnit(ctx, unit); err != nil {
-				task.status = statusFailed
-				return fmt.Errorf("restarting unit %s for container %s during rollback: %w", task.unit, task.container.ID(), err)
-			}
-
-			task.status = statusRolledBack
+			tasksUpdated = true
 			return nil
 		}()
 		if err != nil {
 			errors = append(errors, err)
 		}
-		allReports = append(allReports, task.report())
 	}
-	}
 
-	return allReports, errors
+	// If no task has been updated, we can jump directly to the next unit.
+	if !tasksUpdated {
+		return errors
+	}
+
+	updateError := u.restartSystemdUnit(ctx, unit)
+	for _, task := range tasks {
+		if updateError == nil {
+			task.status = statusUpdated
+		} else {
+			task.status = statusFailed
+		}
+	}
+
+	// Jump to the next unit on successful update or if rollbacks are disabled.
+	if updateError == nil || !u.options.Rollback {
+		return errors
+	}
+
+	// The update has failed and rollbacks are enabled.
+	for _, task := range tasks {
+		if err := task.rollbackImage(); err != nil {
+			err = fmt.Errorf("rolling back image for container %s in unit %s: %w", task.container.ID(), unit, err)
+			errors = append(errors, err)
+		}
+	}
+
+	if err := u.restartSystemdUnit(ctx, unit); err != nil {
+		err = fmt.Errorf("restarting unit %s during rollback: %w", unit, err)
+		errors = append(errors, err)
+		return errors
+	}
+
+	for _, task := range tasks {
+		task.status = statusRolledBack
+	}
+
+	return errors
 }
 
 // report creates an auto-update report for the task.
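To summarize the control flow updateUnit implements above: update every task's image first, restart the unit once, and only then roll all tasks back together if the restart failed. A condensed, self-contained sketch of that three-phase flow, using stand-in types and helpers rather than podman's API:

package main

import (
    "context"
    "errors"
    "fmt"
)

// Stand-in task type and helpers; podman's real ones carry far more state.
type task struct{ name, status string }

func (t *task) updateAvailable(ctx context.Context) (bool, error) { return true, nil }
func (t *task) update(ctx context.Context) error                  { return nil }
func (t *task) rollbackImage() error                              { return nil }

// Simulate a failing unit restart so the rollback path below is exercised.
func restartSystemdUnit(ctx context.Context, unit string) error {
    return errors.New("restart failed")
}

func updateUnit(ctx context.Context, unit string, tasks []*task, rollback bool) []error {
    var errs []error

    // Phase 1: update the image of every task in the unit.
    tasksUpdated := false
    for _, t := range tasks {
        avail, err := t.updateAvailable(ctx)
        if err != nil || !avail {
            continue
        }
        if err := t.update(ctx); err != nil {
            errs = append(errs, err)
            continue
        }
        tasksUpdated = true
    }
    if !tasksUpdated {
        return errs // nothing pulled, so the unit keeps running as-is
    }

    // Phase 2: restart the systemd unit once for all updated tasks.
    updateError := restartSystemdUnit(ctx, unit)
    for _, t := range tasks {
        if updateError == nil {
            t.status = "updated"
        } else {
            t.status = "failed"
        }
    }
    if updateError == nil || !rollback {
        return errs
    }

    // Phase 3: the restart failed and rollbacks are enabled; roll back
    // every task, then restart the unit with the previous images.
    for _, t := range tasks {
        if err := t.rollbackImage(); err != nil {
            errs = append(errs, fmt.Errorf("rolling back %s: %w", t.name, err))
        } else {
            t.status = "rolled back"
        }
    }
    if err := restartSystemdUnit(ctx, unit); err != nil {
        errs = append(errs, fmt.Errorf("restarting %s during rollback: %w", unit, err))
    }
    return errs
}

func main() {
    tasks := []*task{{name: "nginx"}, {name: "redis"}}
    errs := updateUnit(context.Background(), "container-web.service", tasks, true)
    fmt.Println("errors:", errs)
}

The single restart per unit is the point of the plumbing: the old code restarted the unit from inside the per-task loop, which cannot work once a unit owns several containers.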