mirror of https://github.com/containers/podman.git

Merge pull request #15175 from vrothberg/RUN-1606: refactor pkg/autoupdate

Commit 773149d569
@@ -44,14 +44,40 @@ const (
 // Map for easy lookups of supported policies.
 var supportedPolicies = map[string]Policy{
     "": PolicyDefault,
-    "disabled": PolicyDefault,
-    "image":    PolicyRegistryImage,
-    "registry": PolicyRegistryImage,
-    "local":    PolicyLocalImage,
+    string(PolicyDefault):       PolicyDefault,
+    "image":                     PolicyRegistryImage, // Deprecated in favor of PolicyRegistryImage
+    string(PolicyRegistryImage): PolicyRegistryImage,
+    string(PolicyLocalImage):    PolicyLocalImage,
 }
 
-// policyMapper is used for tying a container to it's autoupdate policy
-type policyMapper map[Policy][]*libpod.Container
+// updater includes shared state for auto-updating one or more containers.
+type updater struct {
+    conn             *dbus.Conn                  // DBUS connection
+    options          *entities.AutoUpdateOptions // User-specified options
+    unitToTasks      map[string][]*task          // Keeps track of tasks per unit
+    updatedRawImages map[string]bool             // Keeps track of updated images
+    runtime          *libpod.Runtime             // The libpod runtime
+}
+
+const (
+    statusFailed     = "failed"      // The update has failed
+    statusUpdated    = "true"        // The update succeeded
+    statusNotUpdated = "false"       // No update was needed
+    statusPending    = "pending"     // The update is pending (see options.DryRun)
+    statusRolledBack = "rolled back" // Rollback after a failed update
+)
+
+// task includes data and state for updating a container
+type task struct {
+    authfile     string            // Container-specific authfile
+    auto         *updater          // Reverse pointer to the updater
+    container    *libpod.Container // Container to update
+    policy       Policy            // Update policy
+    image        *libimage.Image   // Original image before the update
+    rawImageName string            // The container's raw image name
+    status       string            // Auto-update status
+    unit         string            // Name of the systemd unit
+}
 
 // LookupPolicy looks up the corresponding Policy for the specified
 // string. If none is found, an errors is returned including the list of
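For orientation, a minimal, self-contained sketch of the relationship the new types express. These are simplified stand-ins, not the podman code: the real updater and task carry libpod, libimage, and D-Bus handles as shown in the hunk above.

package main

import "fmt"

// Simplified stand-ins for the updater/task types introduced above.
type task struct {
    container string // stand-in for *libpod.Container
    policy    string // "registry" or "local"
    status    string // one of the status* constants
}

type updater struct {
    unitToTasks map[string][]*task // tasks grouped by systemd unit
}

func main() {
    u := updater{unitToTasks: map[string][]*task{
        "container-foo.service": {{container: "foo", policy: "registry", status: "pending"}},
    }}
    // The updater walks units, not containers: one restart per unit.
    for unit, tasks := range u.unitToTasks {
        fmt.Printf("%s: %d task(s)\n", unit, len(tasks))
    }
}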
@@ -116,23 +142,22 @@ func ValidateImageReference(imageName string) error {
 // It returns a slice of successfully restarted systemd units and a slice of
 // errors encountered during auto update.
 func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.AutoUpdateOptions) ([]*entities.AutoUpdateReport, []error) {
-    // Create a map from `image ID -> []*Container`.
-    containerMap, errs := imageContainersMap(runtime)
-    if len(containerMap) == 0 {
-        return nil, errs
+    // Note that (most) errors are non-fatal such that a single
+    // misconfigured container does not prevent others from being updated
+    // (which could be a security threat).
+
+    auto := updater{
+        options:          &options,
+        runtime:          runtime,
+        updatedRawImages: make(map[string]bool),
     }
 
-    // Create a map from `image ID -> *libimage.Image` for image lookups.
-    listOptions := &libimage.ListImagesOptions{
-        Filters: []string{"readonly=false"},
-    }
-    imagesSlice, err := runtime.LibimageRuntime().ListImages(ctx, nil, listOptions)
-    if err != nil {
-        return nil, []error{err}
-    }
-    imageMap := make(map[string]*libimage.Image)
-    for i := range imagesSlice {
-        imageMap[imagesSlice[i].ID()] = imagesSlice[i]
+    // Find auto-update tasks and assemble them by unit.
+    errors := auto.assembleTasks(ctx)
+
+    // Nothing to do.
+    if len(auto.unitToTasks) == 0 {
+        return nil, errors
     }
 
     // Connect to DBUS.
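The comment added above stresses that most errors are collected rather than treated as fatal. A hedged illustration of that accumulate-and-continue pattern, using hypothetical container names rather than the podman API:

package main

import "fmt"

func main() {
    containers := []string{"ctr-a", "misconfigured", "ctr-c"}
    var errs []error
    for _, c := range containers {
        if c == "misconfigured" {
            // Record the failure and keep going so one bad container
            // does not block updates for the others.
            errs = append(errs, fmt.Errorf("skipping %s: invalid auto-update label", c))
            continue
        }
        fmt.Println("updated", c)
    }
    for _, err := range errs {
        fmt.Println("error:", err)
    }
}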
@@ -142,185 +167,176 @@ func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.A
         return nil, []error{err}
     }
     defer conn.Close()
+    auto.conn = conn
 
     runtime.NewSystemEvent(events.AutoUpdate)
 
     // Update all images/container according to their auto-update policy.
     var allReports []*entities.AutoUpdateReport
-    updatedRawImages := make(map[string]bool)
-    for imageID, policyMapper := range containerMap {
-        image, exists := imageMap[imageID]
-        if !exists {
-            errs = append(errs, fmt.Errorf("container image ID %q not found in local storage", imageID))
-            return nil, errs
-        }
-
-        for _, ctr := range policyMapper[PolicyRegistryImage] {
-            report, err := autoUpdateRegistry(ctx, image, ctr, updatedRawImages, &options, conn, runtime)
-            if err != nil {
-                errs = append(errs, err)
-            }
-            if report != nil {
-                allReports = append(allReports, report)
-            }
-        }
-
-        for _, ctr := range policyMapper[PolicyLocalImage] {
-            report, err := autoUpdateLocally(ctx, image, ctr, &options, conn, runtime)
-            if err != nil {
-                errs = append(errs, err)
-            }
-            if report != nil {
-                allReports = append(allReports, report)
-            }
-        }
-    }
-
-    return allReports, errs
-}
-
-// autoUpdateRegistry updates the image/container according to the "registry" policy.
-func autoUpdateRegistry(ctx context.Context, image *libimage.Image, ctr *libpod.Container, updatedRawImages map[string]bool, options *entities.AutoUpdateOptions, conn *dbus.Conn, runtime *libpod.Runtime) (*entities.AutoUpdateReport, error) {
-    cid := ctr.ID()
-    rawImageName := ctr.RawImageName()
-    if rawImageName == "" {
-        return nil, fmt.Errorf("registry auto-updating container %q: raw-image name is empty", cid)
-    }
-
-    labels := ctr.Labels()
-    unit, exists := labels[systemdDefine.EnvVariable]
-    if !exists {
-        return nil, fmt.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
-    }
-
-    report := &entities.AutoUpdateReport{
-        ContainerID:   cid,
-        ContainerName: ctr.Name(),
-        ImageName:     rawImageName,
-        Policy:        PolicyRegistryImage,
-        SystemdUnit:   unit,
-        Updated:       "failed",
-    }
-
-    if _, updated := updatedRawImages[rawImageName]; updated {
-        logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName)
-        if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil {
-            return report, err
-        }
-        report.Updated = "true"
-        return report, nil
-    }
-
-    authfile := getAuthfilePath(ctr, options)
-    needsUpdate, err := newerRemoteImageAvailable(ctx, image, rawImageName, authfile)
-    if err != nil {
-        return report, fmt.Errorf("registry auto-updating container %q: image check for %q failed: %w", cid, rawImageName, err)
-    }
-
-    if !needsUpdate {
-        report.Updated = "false"
-        return report, nil
-    }
-
-    if options.DryRun {
-        report.Updated = "pending"
-        return report, nil
-    }
-
-    if _, err := updateImage(ctx, runtime, rawImageName, authfile); err != nil {
-        return report, fmt.Errorf("registry auto-updating container %q: image update for %q failed: %w", cid, rawImageName, err)
-    }
-    updatedRawImages[rawImageName] = true
-
-    logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName)
-    updateErr := restartSystemdUnit(ctx, ctr, unit, conn)
-    if updateErr == nil {
-        report.Updated = "true"
-        return report, nil
-    }
-
-    if !options.Rollback {
-        return report, updateErr
-    }
-
-    // To fallback, simply retag the old image and restart the service.
-    if err := image.Tag(rawImageName); err != nil {
-        return report, fmt.Errorf("falling back to previous image: %w", err)
-    }
-    if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil {
-        return report, fmt.Errorf("restarting unit with old image during fallback: %w", err)
-    }
-
-    report.Updated = "rolled back"
-    return report, nil
-}
-
-// autoUpdateRegistry updates the image/container according to the "local" policy.
-func autoUpdateLocally(ctx context.Context, image *libimage.Image, ctr *libpod.Container, options *entities.AutoUpdateOptions, conn *dbus.Conn, runtime *libpod.Runtime) (*entities.AutoUpdateReport, error) {
-    cid := ctr.ID()
-    rawImageName := ctr.RawImageName()
-    if rawImageName == "" {
-        return nil, fmt.Errorf("locally auto-updating container %q: raw-image name is empty", cid)
-    }
-
-    labels := ctr.Labels()
-    unit, exists := labels[systemdDefine.EnvVariable]
-    if !exists {
-        return nil, fmt.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
-    }
-
-    report := &entities.AutoUpdateReport{
-        ContainerID:   cid,
-        ContainerName: ctr.Name(),
-        ImageName:     rawImageName,
-        Policy:        PolicyLocalImage,
-        SystemdUnit:   unit,
-        Updated:       "failed",
-    }
-
-    needsUpdate, err := newerLocalImageAvailable(runtime, image, rawImageName)
-    if err != nil {
-        return report, fmt.Errorf("locally auto-updating container %q: image check for %q failed: %w", cid, rawImageName, err)
-    }
-
-    if !needsUpdate {
-        report.Updated = "false"
-        return report, nil
-    }
-
-    if options.DryRun {
-        report.Updated = "pending"
-        return report, nil
-    }
-
-    logrus.Infof("Auto-updating container %q using local image %q", cid, rawImageName)
-    updateErr := restartSystemdUnit(ctx, ctr, unit, conn)
-    if updateErr == nil {
-        report.Updated = "true"
-        return report, nil
-    }
-
-    if !options.Rollback {
-        return report, updateErr
-    }
-
-    // To fallback, simply retag the old image and restart the service.
-    if err := image.Tag(rawImageName); err != nil {
-        return report, fmt.Errorf("falling back to previous image: %w", err)
-    }
-    if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil {
-        return report, fmt.Errorf("restarting unit with old image during fallback: %w", err)
-    }
-
-    report.Updated = "rolled back"
-    return report, nil
-}
+    for unit, tasks := range auto.unitToTasks {
+        // Sanity check: we'll support that in the future.
+        if len(tasks) != 1 {
+            errors = append(errors, fmt.Errorf("only 1 task per unit supported but unit %s has %d", unit, len(tasks)))
+            return nil, errors
+        }
+
+        for _, task := range tasks {
+            err := func() error {
+                // Transition from state to state. Will be
+                // split into multiple loops in the future to
+                // support more than one container/task per
+                // unit.
+                updateAvailable, err := task.updateAvailable(ctx)
+                if err != nil {
+                    task.status = statusFailed
+                    return fmt.Errorf("checking image updates for container %s: %w", task.container.ID(), err)
+                }
+
+                if !updateAvailable {
+                    task.status = statusNotUpdated
+                    return nil
+                }
+
+                if options.DryRun {
+                    task.status = statusPending
+                    return nil
+                }
+
+                if err := task.update(ctx); err != nil {
+                    task.status = statusFailed
+                    return fmt.Errorf("updating image for container %s: %w", task.container.ID(), err)
+                }
+
+                updateError := auto.restartSystemdUnit(ctx, unit)
+                if updateError == nil {
+                    task.status = statusUpdated
+                    return nil
+                }
+
+                if !options.Rollback {
+                    task.status = statusFailed
+                    return fmt.Errorf("restarting unit %s for container %s: %w", task.unit, task.container.ID(), err)
+                }
+
+                if err := task.rollbackImage(); err != nil {
+                    task.status = statusFailed
+                    return fmt.Errorf("rolling back image for container %s: %w", task.container.ID(), err)
+                }
+
+                if err := auto.restartSystemdUnit(ctx, unit); err != nil {
+                    task.status = statusFailed
+                    return fmt.Errorf("restarting unit %s for container %s during rollback: %w", task.unit, task.container.ID(), err)
+                }
+
+                task.status = statusRolledBack
+                return nil
+            }()
+
+            if err != nil {
+                errors = append(errors, err)
+            }
+            allReports = append(allReports, task.report())
+        }
+    }
+
+    return allReports, errors
+}
+
+// report creates an auto-update report for the task.
+func (t *task) report() *entities.AutoUpdateReport {
+    return &entities.AutoUpdateReport{
+        ContainerID:   t.container.ID(),
+        ContainerName: t.container.Name(),
+        ImageName:     t.container.RawImageName(),
+        Policy:        string(t.policy),
+        SystemdUnit:   t.unit,
+        Updated:       t.status,
+    }
+}
+
+// updateAvailable returns whether an update for the task is available.
+func (t *task) updateAvailable(ctx context.Context) (bool, error) {
+    switch t.policy {
+    case PolicyRegistryImage:
+        return t.registryUpdateAvailable(ctx)
+    case PolicyLocalImage:
+        return t.localUpdateAvailable()
+    default:
+        return false, fmt.Errorf("unexpected auto-update policy %s for container %s", t.policy, t.container.ID())
+    }
+}
+
+// update the task according to its auto-update policy.
+func (t *task) update(ctx context.Context) error {
+    switch t.policy {
+    case PolicyRegistryImage:
+        return t.registryUpdate(ctx)
+    case PolicyLocalImage:
+        // Nothing to do as the image is already available in the local storage.
+        return nil
+    default:
+        return fmt.Errorf("unexpected auto-update policy %s for container %s", t.policy, t.container.ID())
+    }
+}
+
+// registryUpdateAvailable returns whether a new image on the registry is available.
+func (t *task) registryUpdateAvailable(ctx context.Context) (bool, error) {
+    // The newer image has already been pulled for another task, so we know
+    // there's a newer one available.
+    if _, exists := t.auto.updatedRawImages[t.rawImageName]; exists {
+        return true, nil
+    }
+
+    remoteRef, err := docker.ParseReference("//" + t.rawImageName)
+    if err != nil {
+        return false, err
+    }
+    options := &libimage.HasDifferentDigestOptions{AuthFilePath: t.authfile}
+    return t.image.HasDifferentDigest(ctx, remoteRef, options)
+}
+
+// registryUpdate pulls down the image from the registry.
+func (t *task) registryUpdate(ctx context.Context) error {
+    // The newer image has already been pulled for another task.
+    if _, exists := t.auto.updatedRawImages[t.rawImageName]; exists {
+        return nil
+    }
+
+    pullOptions := &libimage.PullOptions{}
+    pullOptions.AuthFilePath = t.authfile
+    pullOptions.Writer = os.Stderr
+    if _, err := t.auto.runtime.LibimageRuntime().Pull(ctx, t.rawImageName, config.PullPolicyAlways, pullOptions); err != nil {
+        return err
+    }
+
+    t.auto.updatedRawImages[t.rawImageName] = true
+    return nil
+}
+
+// localUpdateAvailable returns whether a new image in the local storage is available.
+func (t *task) localUpdateAvailable() (bool, error) {
+    localImg, _, err := t.auto.runtime.LibimageRuntime().LookupImage(t.rawImageName, nil)
+    if err != nil {
+        return false, err
+    }
+    return localImg.Digest().String() != t.image.Digest().String(), nil
+}
+
+// rollbackImage rolls back the task's image to the previous version before the update.
+func (t *task) rollbackImage() error {
+    // To fallback, simply retag the old image and restart the service.
+    if err := t.image.Tag(t.rawImageName); err != nil {
+        return err
+    }
+    t.auto.updatedRawImages[t.rawImageName] = false
+    return nil
+}
 
 // restartSystemdUnit restarts the systemd unit the container is running in.
-func restartSystemdUnit(ctx context.Context, ctr *libpod.Container, unit string, conn *dbus.Conn) error {
+func (u *updater) restartSystemdUnit(ctx context.Context, unit string) error {
     restartChan := make(chan string)
-    if _, err := conn.RestartUnitContext(ctx, unit, "replace", restartChan); err != nil {
-        return fmt.Errorf("auto-updating container %q: restarting systemd unit %q failed: %w", ctr.ID(), unit, err)
+    if _, err := u.conn.RestartUnitContext(ctx, unit, "replace", restartChan); err != nil {
+        return err
     }
 
     // Wait for the restart to finish and actually check if it was
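The closure above walks each task through a fixed sequence of states. A hedged, reduced sketch of those transitions follows; plain booleans stand in for the real image and systemd calls, and the status strings mirror the constants from the first hunk.

package main

import "fmt"

// transition reduces the per-task state machine to plain booleans.
func transition(updateAvailable, dryRun, updateAndRestartOK, rollback, rollbackOK bool) string {
    switch {
    case !updateAvailable:
        return "false" // statusNotUpdated
    case dryRun:
        return "pending" // statusPending
    case updateAndRestartOK:
        return "true" // statusUpdated
    case !rollback:
        return "failed" // statusFailed
    case rollbackOK:
        return "rolled back" // statusRolledBack
    default:
        return "failed" // statusFailed
    }
}

func main() {
    fmt.Println(transition(true, false, false, true, true)) // "rolled back"
}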
@@ -329,25 +345,34 @@ func restartSystemdUnit(ctx context.Context, ctr *libpod.Container, unit string,
     switch result {
     case "done":
-        logrus.Infof("Successfully restarted systemd unit %q of container %q", unit, ctr.ID())
+        logrus.Infof("Successfully restarted systemd unit %q", unit)
         return nil
 
     default:
-        return fmt.Errorf("auto-updating container %q: restarting systemd unit %q failed: expected %q but received %q", ctr.ID(), unit, "done", result)
+        return fmt.Errorf("expected %q but received %q", "done", result)
     }
 }
 
-// imageContainersMap generates a map[image ID] -> [containers using the image]
-// of all containers with a valid auto-update policy.
-func imageContainersMap(runtime *libpod.Runtime) (map[string]policyMapper, []error) {
-    allContainers, err := runtime.GetAllContainers()
+// assembleTasks assembles update tasks per unit and populates a mapping from
+// `unit -> []*task` such that multiple containers _can_ run in a single unit.
+func (u *updater) assembleTasks(ctx context.Context) []error {
+    // Assemble a map `image ID -> *libimage.Image` that we can consult
+    // later on for lookups.
+    imageMap, err := u.assembleImageMap(ctx)
     if err != nil {
-        return nil, []error{err}
+        return []error{err}
     }
 
+    allContainers, err := u.runtime.GetAllContainers()
+    if err != nil {
+        return []error{err}
+    }
+
+    u.unitToTasks = make(map[string][]*task)
+
     errors := []error{}
-    containerMap := make(map[string]policyMapper)
-    for _, ctr := range allContainers {
+    for _, c := range allContainers {
+        ctr := c
         state, err := ctr.State()
         if err != nil {
             errors = append(errors, err)
@@ -358,77 +383,75 @@ func imageContainersMap(runtime *libpod.Runtime) (map[string]policyMapper, []err
             continue
         }
 
-        // Only update containers with the specific label/policy set.
+        // Check the container's auto-update policy which is configured
+        // as a label.
         labels := ctr.Labels()
         value, exists := labels[Label]
         if !exists {
             continue
         }
 
         policy, err := LookupPolicy(value)
         if err != nil {
             errors = append(errors, err)
             continue
         }
 
-        // Skip labels not related to autoupdate
         if policy == PolicyDefault {
             continue
-        } else {
-            id, _ := ctr.Image()
-            policyMap, exists := containerMap[id]
+        }
+
+        // Make sure the container runs in a systemd unit which is
+        // stored as a label at container creation.
+        unit, exists := labels[systemdDefine.EnvVariable]
         if !exists {
-                policyMap = make(map[Policy][]*libpod.Container)
-            }
-            policyMap[policy] = append(policyMap[policy], ctr)
-            containerMap[id] = policyMap
-            // Now we know that `ctr` is configured for auto updates.
-        }
+            errors = append(errors, fmt.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable))
+            continue
         }
 
-    return containerMap, errors
+        id, _ := ctr.Image()
+        image, exists := imageMap[id]
+        if !exists {
+            err := fmt.Errorf("internal error: no image found for ID %s", id)
+            errors = append(errors, err)
+            continue
+        }
+
+        rawImageName := ctr.RawImageName()
+        if rawImageName == "" {
+            errors = append(errors, fmt.Errorf("locally auto-updating container %q: raw-image name is empty", ctr.ID()))
+            continue
+        }
+
+        t := task{
+            authfile:     labels[AuthfileLabel],
+            auto:         u,
+            container:    ctr,
+            policy:       policy,
+            image:        image,
+            unit:         unit,
+            rawImageName: rawImageName,
+            status:       statusFailed, // must be updated later on
+        }
+
+        // Add the task to the unit.
+        u.unitToTasks[unit] = append(u.unitToTasks[unit], &t)
     }
+
+    return errors
 }
 
-// getAuthfilePath returns an authfile path, if set. The authfile label in the
-// container, if set, as precedence over the one set in the options.
-func getAuthfilePath(ctr *libpod.Container, options *entities.AutoUpdateOptions) string {
-    labels := ctr.Labels()
-    authFilePath, exists := labels[AuthfileLabel]
-    if exists {
-        return authFilePath
-    }
-    return options.Authfile
-}
-
-// newerRemoteImageAvailable returns true if there corresponding image on the remote
-// registry is newer.
-func newerRemoteImageAvailable(ctx context.Context, img *libimage.Image, origName string, authfile string) (bool, error) {
-    remoteRef, err := docker.ParseReference("//" + origName)
-    if err != nil {
-        return false, err
-    }
-    options := &libimage.HasDifferentDigestOptions{AuthFilePath: authfile}
-    return img.HasDifferentDigest(ctx, remoteRef, options)
-}
-
-// newerLocalImageAvailable returns true if the container and local image have different digests
-func newerLocalImageAvailable(runtime *libpod.Runtime, img *libimage.Image, rawImageName string) (bool, error) {
-    localImg, _, err := runtime.LibimageRuntime().LookupImage(rawImageName, nil)
-    if err != nil {
-        return false, err
-    }
-    return localImg.Digest().String() != img.Digest().String(), nil
-}
-
-// updateImage pulls the specified image.
-func updateImage(ctx context.Context, runtime *libpod.Runtime, name, authfile string) (*libimage.Image, error) {
-    pullOptions := &libimage.PullOptions{}
-    pullOptions.AuthFilePath = authfile
-    pullOptions.Writer = os.Stderr
-
-    pulledImages, err := runtime.LibimageRuntime().Pull(ctx, name, config.PullPolicyAlways, pullOptions)
-    if err != nil {
-        return nil, err
-    }
-    return pulledImages[0], nil
-}
+// assembleImageMap creates a map from `image ID -> *libimage.Image` for image lookups.
+func (u *updater) assembleImageMap(ctx context.Context) (map[string]*libimage.Image, error) {
+    listOptions := &libimage.ListImagesOptions{
+        Filters: []string{"readonly=false"},
+    }
+    imagesSlice, err := u.runtime.LibimageRuntime().ListImages(ctx, nil, listOptions)
+    if err != nil {
+        return nil, err
+    }
+    imageMap := make(map[string]*libimage.Image)
+    for i := range imagesSlice {
+        imageMap[imagesSlice[i].ID()] = imagesSlice[i]
+    }
+
+    return imageMap, nil
+}
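assembleTasks above is driven entirely by container labels. A hedged sketch of that selection logic follows; the literal keys "io.containers.autoupdate" and "PODMAN_SYSTEMD_UNIT" are assumptions on my part, since the diff reads them through the Label, AuthfileLabel, and systemdDefine.EnvVariable constants.

package main

import "fmt"

func main() {
    labels := map[string]string{
        "io.containers.autoupdate": "registry",              // assumed key for the policy label
        "PODMAN_SYSTEMD_UNIT":      "container-foo.service", // assumed key for the unit label
    }

    policy, ok := labels["io.containers.autoupdate"]
    if !ok || policy == "" || policy == "disabled" {
        return // container does not opt in to auto updates
    }
    unit, ok := labels["PODMAN_SYSTEMD_UNIT"]
    if !ok {
        fmt.Println("error: container is not tied to a systemd unit")
        return
    }
    fmt.Printf("assemble task: policy=%s unit=%s\n", policy, unit)
}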
@@ -234,6 +234,8 @@ function _confirm_update() {
     _confirm_update $cname $ori_image
 }
 
+# This test can fail in dev. environment because of SELinux.
+# quick fix: chcon -t container_runtime_exec_t ./bin/podman
 @test "podman auto-update - label io.containers.autoupdate=local with rollback" {
     # sdnotify fails with runc 1.0.0-3-dev2 on Ubuntu. Let's just
     # assume that we work only with crun, nothing else.