farm build: push built images to registry

Update farm build to push the built images directly to a registry
once the builds are complete on all the nodes.
A manifest list is then created locally and pushed to
the registry as well.
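
For example (the farm, registry, and repository names below are
illustrative):

    podman farm build --farm myfarm \
        --tag registry.example.com/myrepo/myimage:latest .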

Signed-off-by: Urvashi Mohnani <umohnani@redhat.com>
Urvashi Mohnani 2023-12-12 11:29:47 -05:00
parent e63513542b
commit d95710a89c
18 changed files with 152 additions and 391 deletions

View File

@ -49,7 +49,7 @@ type BuildFlagsWrapper struct {
// FarmBuildHiddenFlags are the flags hidden from the farm build command because they are either not
// supported or don't make sense in the farm build use case
var FarmBuildHiddenFlags = []string{"arch", "all-platforms", "compress", "cw", "disable-content-trust",
"logsplit", "manifest", "os", "output", "platform", "sign-by", "signature-policy", "stdin", "tls-verify",
"logsplit", "manifest", "os", "output", "platform", "sign-by", "signature-policy", "stdin",
"variant"}
func DefineBuildFlags(cmd *cobra.Command, buildOpts *BuildFlagsWrapper, isFarmBuild bool) {
@ -252,6 +252,7 @@ func ParseBuildOpts(cmd *cobra.Command, args []string, buildOpts *BuildFlagsWrap
}
apiBuildOpts.BuildOptions = *buildahDefineOpts
apiBuildOpts.ContainerFiles = containerFiles
apiBuildOpts.Authfile = buildOpts.Authfile
return &apiBuildOpts, err
}

View File

@ -4,12 +4,13 @@ import (
"errors"
"fmt"
"os"
"strings"
"github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/farm"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@ -25,12 +26,13 @@ type buildOptions struct {
var (
farmBuildDescription = `Build images on farm nodes, then bundle them into a manifest list`
buildCommand = &cobra.Command{
Use: "build [options] [CONTEXT]",
Short: "Build a container image for multiple architectures",
Long: farmBuildDescription,
RunE: build,
Example: "podman farm build [flags] buildContextDirectory",
Args: cobra.ExactArgs(1),
Use: "build [options] [CONTEXT]",
Short: "Build a container image for multiple architectures",
Long: farmBuildDescription,
RunE: build,
Example: "podman farm build [flags] buildContextDirectory",
ValidArgsFunction: common.AutocompleteDefaultOneArg,
Args: cobra.MaximumNArgs(1),
}
buildOpts = buildOptions{
buildOptions: common.BuildFlagsWrapper{},
@ -45,20 +47,10 @@ func init() {
flags := buildCommand.Flags()
flags.SetNormalizeFunc(utils.AliasFlags)
localFlagName := "local"
// Default for local is true and hide this flag for the remote use case
if !registry.IsRemote() {
flags.BoolVarP(&buildOpts.local, localFlagName, "l", true, "Build image on local machine as well as on farm nodes")
}
cleanupFlag := "cleanup"
flags.BoolVar(&buildOpts.buildOptions.Cleanup, cleanupFlag, false, "Remove built images from farm nodes on success")
platformsFlag := "platforms"
buildCommand.PersistentFlags().StringSliceVar(&buildOpts.platforms, platformsFlag, nil, "Build only on farm nodes that match the given platforms")
common.DefineBuildFlags(buildCommand, &buildOpts.buildOptions, true)
podmanConfig := registry.PodmanConfig()
farmFlagName := "farm"
// If remote, don't read the client's containers.conf file
defaultFarm := ""
@ -66,6 +58,17 @@ func init() {
defaultFarm = podmanConfig.ContainersConfDefaultsRO.Farms.Default
}
flags.StringVar(&buildOpts.farm, farmFlagName, defaultFarm, "Farm to use for builds")
_ = buildCommand.RegisterFlagCompletionFunc(farmFlagName, common.AutoCompleteFarms)
localFlagName := "local"
// Default for local is true
flags.BoolVarP(&buildOpts.local, localFlagName, "l", true, "Build image on local machine as well as on farm nodes")
platformsFlag := "platforms"
buildCommand.PersistentFlags().StringSliceVar(&buildOpts.platforms, platformsFlag, nil, "Build only on farm nodes that match the given platforms")
_ = buildCommand.RegisterFlagCompletionFunc(platformsFlag, completion.AutocompletePlatform)
common.DefineBuildFlags(buildCommand, &buildOpts.buildOptions, true)
}
func build(cmd *cobra.Command, args []string) error {
@ -79,7 +82,18 @@ func build(cmd *cobra.Command, args []string) error {
if !cmd.Flags().Changed("tag") {
return errors.New("cannot create manifest list without a name, value for --tag is required")
}
opts, err := common.ParseBuildOpts(cmd, args, &buildOpts.buildOptions)
// Ensure that the user gives a full name so we can push the built images from
// the node to the given registry and repository
// Should be of the format registry/repository/imageName
tag, err := cmd.Flags().GetStringArray("tag")
if err != nil {
return err
}
if !strings.Contains(tag[0], "/") {
return fmt.Errorf("%q is not a full image reference name", tag[0])
}
bopts := buildOpts.buildOptions
opts, err := common.ParseBuildOpts(cmd, args, &bopts)
if err != nil {
return err
}
@ -102,6 +116,11 @@ func build(cmd *cobra.Command, args []string) error {
return err
}
opts.IIDFile = iidFile
tlsVerify, err := cmd.Flags().GetBool("tls-verify")
if err != nil {
return err
}
opts.SkipTLSVerify = !tlsVerify
cfg, err := config.ReadCustomConfig()
if err != nil {
@ -117,13 +136,9 @@ func build(cmd *cobra.Command, args []string) error {
defaultFarm = f
}
var localEngine entities.ImageEngine
if buildOpts.local {
localEngine = registry.ImageEngine()
}
localEngine := registry.ImageEngine()
ctx := registry.Context()
farm, err := farm.NewFarm(ctx, defaultFarm, localEngine)
farm, err := farm.NewFarm(ctx, defaultFarm, localEngine, buildOpts.local)
if err != nil {
return fmt.Errorf("initializing: %w", err)
}
@ -137,7 +152,7 @@ func build(cmd *cobra.Command, args []string) error {
manifestName := opts.Output
// Set Output to "" so that the images built on the farm nodes have no name
opts.Output = ""
if err = farm.Build(ctx, schedule, *opts, manifestName); err != nil {
if err = farm.Build(ctx, schedule, *opts, manifestName, localEngine); err != nil {
return fmt.Errorf("build: %w", err)
}
logrus.Infof("build: ok")

View File

@ -16,7 +16,7 @@ var (
The "podman system connection add --farm" command can be used to add a new connection to a new or existing farm.`
createCommand = &cobra.Command{
Use: "create [options] NAME [CONNECTIONS...]",
Use: "create NAME [CONNECTIONS...]",
Args: cobra.MinimumNArgs(1),
Short: "Create a new farm",
Long: farmCreateDescription,

View File

@ -20,5 +20,4 @@ func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
Command: farmCmd,
})
farmCmd.Hidden = true
}

View File

@ -1,5 +1,5 @@
####> This option file is used in:
####> podman auto update, build, container runlabel, create, kube play, login, manifest add, manifest create, manifest inspect, manifest push, pull, push, run, search
####> podman auto update, build, container runlabel, create, farm build, kube play, login, manifest add, manifest create, manifest inspect, manifest push, pull, push, run, search
####> If file is edited, make sure the changes
####> are applicable to all of those.
#### **--tls-verify**

View File

@ -8,7 +8,10 @@ podman\-farm\-build - Build images on farm nodes, then bundle them into a manife
## DESCRIPTION
**podman farm build** builds an image on all nodes in a farm and bundles them into a manifest list.
It executes the `podman build` command on the nodes in the farm with the given Containerfile.
It executes the `podman build` command on the nodes in the farm with the given Containerfile. Once the
images are built on all the farm nodes, they are pushed to the registry specified via the **--tag**
flag. After all the images have been pushed, a manifest list is created locally and pushed to the registry
as well.
The manifest list will contain an image per native architecture type that is present in the farm.
@ -17,6 +20,9 @@ via emulation using `podman build --arch --platform`.
If no farm is specified, the build will be sent out to all the nodes that `podman system connection` knows of.
Note: Since the built images are pushed directly to a registry, the user must pass in a full image name using the
**--tag** option in the format _registry_**/**_repository_**/**_imageName_[**:**_tag_].
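
For example, assuming a registry reachable at `registry.example.com` and credentials stored in an
auth file (both names are illustrative), a build could be invoked as:

```
$ podman farm build --authfile ~/auth.json --tag registry.example.com/myrepo/myimage:latest .
```

With this change, each node's image lands in the repository by digest, and the manifest list is then
pushed under the given tag.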
## OPTIONS
@@option add-host
@ -193,6 +199,8 @@ Build only on farm nodes that match the given platforms.
@@option timestamp
@@option tls-verify
@@option ulimit.image
@@option unsetenv.image

View File

@ -4,7 +4,7 @@
podman\-farm\-create - Create a new farm
## SYNOPSIS
**podman farm create** [*options*] *name* [*connections*]
**podman farm create** *name* [*connections*]
## DESCRIPTION
Create a new farm with connections that Podman knows about which were added via the
@ -13,8 +13,6 @@ Create a new farm with connections that Podman knows about which were added via
An empty farm can be created without adding any connections to it. Add or remove
connections from a farm via the *podman farm update* command.
## OPTIONS
## EXAMPLE
```

View File

@ -45,6 +45,4 @@ type ImageEngine interface { //nolint:interfacebloat
FarmNodeName(ctx context.Context) string
FarmNodeDriver(ctx context.Context) string
FarmNodeInspect(ctx context.Context) (*FarmInspectReport, error)
PullToFile(ctx context.Context, options PullToFileOptions) (string, error)
PullToLocal(ctx context.Context, options PullToLocalOptions) (string, error)
}

View File

@ -492,19 +492,3 @@ type FarmInspectReport struct {
Arch string
Variant string
}
// PullToFileOptions are the options for pulling the images from farm
// nodes into a dir
type PullToFileOptions struct {
ImageID string
SaveFormat string
SaveFile string
}
// PullToLocalOptions are the options for pulling the images from farm
// nodes into containers-storage
type PullToLocalOptions struct {
ImageID string
SaveFormat string
Destination ImageEngine
}

View File

@ -131,6 +131,10 @@ type BuildReport struct {
type FarmBuildOptions struct {
// Cleanup removes built images from farm nodes on success
Cleanup bool
// Authfile is the path to the file holding registry credentials
Authfile string
// SkipTLSVerify skips tls verification when set to true
SkipTLSVerify bool
}
type IDOrNameResponse struct {

View File

@ -5,12 +5,10 @@ package abi
import (
"context"
"fmt"
"os"
"strings"
"github.com/containers/buildah/pkg/parse"
lplatform "github.com/containers/common/libimage/platform"
istorage "github.com/containers/image/v5/storage"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/emulation"
)
@ -56,64 +54,3 @@ func (ir *ImageEngine) FarmNodeInspect(ctx context.Context) (*entities.FarmInspe
Arch: ir.arch,
Variant: ir.variant}, ir.platformsErr
}
// PullToFile pulls the image from the remote engine and saves it to a file,
// returning a string-format reference which can be parsed by containers/image.
func (ir *ImageEngine) PullToFile(ctx context.Context, options entities.PullToFileOptions) (reference string, err error) {
saveOptions := entities.ImageSaveOptions{
Format: options.SaveFormat,
Output: options.SaveFile,
}
if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
return "", fmt.Errorf("saving image %q: %w", options.ImageID, err)
}
return options.SaveFormat + ":" + options.SaveFile, nil
}
// PullToLocal pulls the image from the remote engine and saves it to the local
// engine passed in via options, returning a string-format reference which can
// be parsed by containers/image.
func (ir *ImageEngine) PullToLocal(ctx context.Context, options entities.PullToLocalOptions) (reference string, err error) {
destination := options.Destination
if destination == nil {
return "", fmt.Errorf("destination not given, cannot pull image %q", options.ImageID)
}
// Check if the image is already present at destination
var br *entities.BoolReport
br, err = destination.Exists(ctx, options.ImageID)
if err != nil {
return "", err
}
if br.Value {
return istorage.Transport.Name() + ":" + options.ImageID, nil
}
tempFile, err := os.CreateTemp("", "")
if err != nil {
return "", err
}
defer os.Remove(tempFile.Name())
defer tempFile.Close()
saveOptions := entities.ImageSaveOptions{
Format: options.SaveFormat,
Output: tempFile.Name(),
}
// Save image built on builder in a temp file
if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
return "", fmt.Errorf("saving image %q: %w", options.ImageID, err)
}
// Load the image saved in tempFile into the local engine
loadOptions := entities.ImageLoadOptions{
Input: tempFile.Name(),
}
_, err = destination.Load(ctx, loadOptions)
if err != nil {
return "", err
}
return istorage.Transport.Name() + ":" + options.ImageID, nil
}

View File

@ -39,6 +39,8 @@ import (
"github.com/sirupsen/logrus"
)
const UnknownDigestSuffix = docker.UnknownDigestSuffix
func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.BoolReport, error) {
exists, err := ir.Libpod.LibimageRuntime().Exists(nameOrID)
if err != nil {

View File

@ -2,11 +2,8 @@ package tunnel
import (
"context"
"errors"
"fmt"
"os"
istorage "github.com/containers/image/v5/storage"
"github.com/containers/podman/v4/pkg/bindings/system"
"github.com/containers/podman/v4/pkg/domain/entities"
)
@ -47,47 +44,3 @@ func (ir *ImageEngine) FarmNodeInspect(ctx context.Context) (*entities.FarmInspe
Arch: ir.arch,
Variant: ir.variant}, ir.platformsErr
}
// PullToFile pulls the image from the remote engine and saves it to a file,
// returning a string-format reference which can be parsed by containers/image.
func (ir *ImageEngine) PullToFile(ctx context.Context, options entities.PullToFileOptions) (reference string, err error) {
saveOptions := entities.ImageSaveOptions{
Format: options.SaveFormat,
Output: options.SaveFile,
}
if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
return "", fmt.Errorf("saving image %q: %w", options.ImageID, err)
}
return options.SaveFormat + ":" + options.SaveFile, nil
}
// PullToLocal pulls the image from the remote engine and saves it to the local
// engine passed in via options, returning a string-format reference which can
// be parsed by containers/image.
func (ir *ImageEngine) PullToLocal(ctx context.Context, options entities.PullToLocalOptions) (reference string, err error) {
tempFile, err := os.CreateTemp("", "")
if err != nil {
return "", err
}
defer os.Remove(tempFile.Name())
defer tempFile.Close()
saveOptions := entities.ImageSaveOptions{
Format: options.SaveFormat,
Output: tempFile.Name(),
}
if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
return "", fmt.Errorf("saving image %q to temporary file: %w", options.ImageID, err)
}
loadOptions := entities.ImageLoadOptions{
Input: tempFile.Name(),
}
if options.Destination == nil {
return "", errors.New("internal error: options.Destination not set")
} else {
if _, err = options.Destination.Load(ctx, loadOptions); err != nil {
return "", fmt.Errorf("loading image %q: %w", options.ImageID, err)
}
}
name := fmt.Sprintf("%s:%s", istorage.Transport.Name(), options.ImageID)
return name, err
}

View File

@ -32,7 +32,7 @@ type Schedule struct {
platformBuilders map[string]string // target->connection
}
func newFarmWithBuilders(_ context.Context, name string, destinations *map[string]config.Destination, localEngine entities.ImageEngine) (*Farm, error) {
func newFarmWithBuilders(_ context.Context, name string, destinations *map[string]config.Destination, localEngine entities.ImageEngine, buildLocal bool) (*Farm, error) {
farm := &Farm{
builders: make(map[string]entities.ImageEngine),
localEngine: localEngine,
@ -66,7 +66,7 @@ func newFarmWithBuilders(_ context.Context, name string, destinations *map[strin
})
}
// If local=true then use the local machine for builds as well
if localEngine != nil {
if buildLocal {
builderGroup.Go(func() error {
fmt.Println("Setting up local builder")
defer fmt.Println("Local builder ready")
@ -88,14 +88,14 @@ func newFarmWithBuilders(_ context.Context, name string, destinations *map[strin
return nil, errors.New("no builders configured")
}
func NewFarm(ctx context.Context, name string, localEngine entities.ImageEngine) (*Farm, error) {
func NewFarm(ctx context.Context, name string, localEngine entities.ImageEngine, buildLocal bool) (*Farm, error) {
// Get the destinations of the connections specified in the farm
destinations, err := getFarmDestinations(name)
if err != nil {
return nil, err
}
return newFarmWithBuilders(ctx, name, &destinations, localEngine)
return newFarmWithBuilders(ctx, name, &destinations, localEngine, buildLocal)
}
// Done performs any necessary end-of-process cleanup for the farm's members.
@ -315,7 +315,7 @@ func (f *Farm) Schedule(ctx context.Context, platforms []string) (Schedule, erro
// Build runs a build using the specified targetplatform:service map. If all
// builds succeed, it copies the resulting images from the remote hosts to the
// local service and builds a manifest list with the specified reference name.
func (f *Farm) Build(ctx context.Context, schedule Schedule, options entities.BuildOptions, reference string) error {
func (f *Farm) Build(ctx context.Context, schedule Schedule, options entities.BuildOptions, reference string, localEngine entities.ImageEngine) error {
switch options.OutputFormat {
default:
return fmt.Errorf("unknown output format %q requested", options.OutputFormat)
@ -359,24 +359,13 @@ func (f *Farm) Build(ctx context.Context, schedule Schedule, options entities.Bu
})
}
// Decide where the final result will be stored.
var (
manifestListBuilder listBuilder
err error
)
listBuilderOptions := listBuilderOptions{
cleanup: options.Cleanup,
iidFile: options.IIDFile,
}
if strings.HasPrefix(reference, "dir:") || f.localEngine == nil {
location := strings.TrimPrefix(reference, "dir:")
manifestListBuilder, err = newFileManifestListBuilder(location, listBuilderOptions)
if err != nil {
return fmt.Errorf("preparing to build list: %w", err)
}
} else {
manifestListBuilder = newLocalManifestListBuilder(reference, f.localEngine, listBuilderOptions)
cleanup: options.Cleanup,
iidFile: options.IIDFile,
authfile: options.Authfile,
skipTLSVerify: options.SkipTLSVerify,
}
manifestListBuilder := newManifestListBuilder(reference, f.localEngine, listBuilderOptions)
// Start builds in parallel and wait for them all to finish.
var (

View File

@ -3,31 +3,21 @@ package farm
import (
"context"
"fmt"
"io/fs"
"os"
"path/filepath"
"sync"
lmanifests "github.com/containers/common/libimage/manifests"
"github.com/containers/common/pkg/supplemented"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/hashicorp/go-multierror"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
type listBuilder interface {
build(ctx context.Context, images map[entities.BuildReport]entities.ImageEngine) (string, error)
}
type listBuilderOptions struct {
cleanup bool
iidFile string
cleanup bool
iidFile string
authfile string
skipTLSVerify bool
}
type listLocal struct {
@ -38,7 +28,7 @@ type listLocal struct {
// newLocalManifestListBuilder returns a manifest list builder which saves a
// manifest list and images to local storage.
func newLocalManifestListBuilder(listName string, localEngine entities.ImageEngine, options listBuilderOptions) listBuilder {
func newManifestListBuilder(listName string, localEngine entities.ImageEngine, options listBuilderOptions) *listLocal {
return &listLocal{
listName: listName,
options: options,
@ -49,47 +39,42 @@ func newLocalManifestListBuilder(listName string, localEngine entities.ImageEngi
// Build retrieves images from the build reports and assembles them into a
// manifest list in local container storage.
func (l *listLocal) build(ctx context.Context, images map[entities.BuildReport]entities.ImageEngine) (string, error) {
manifest := l.listName
exists, err := l.localEngine.ManifestExists(ctx, l.listName)
if err != nil {
return "", err
}
// Create list if it doesn't exist
if !exists.Value {
manifest, err = l.localEngine.ManifestCreate(ctx, l.listName, []string{}, entities.ManifestCreateOptions{})
_, err = l.localEngine.ManifestCreate(ctx, l.listName, []string{}, entities.ManifestCreateOptions{SkipTLSVerify: types.NewOptionalBool(l.options.skipTLSVerify)})
if err != nil {
return "", fmt.Errorf("creating manifest list %q: %w", l.listName, err)
}
}
// Pull the images into local storage
// Push the images to the registry given by the user
var (
pullGroup multierror.Group
pushGroup multierror.Group
refsMutex sync.Mutex
)
refs := []string{}
for image, engine := range images {
image, engine := image, engine
pullOptions := entities.PullToLocalOptions{
ImageID: image.ID,
SaveFormat: image.SaveFormat,
Destination: l.localEngine,
}
pullGroup.Go(func() error {
logrus.Infof("copying image %s", image.ID)
defer logrus.Infof("copied image %s", image.ID)
ref, err := engine.PullToLocal(ctx, pullOptions)
pushGroup.Go(func() error {
logrus.Infof("pushing image %s", image.ID)
defer logrus.Infof("pushed image %s", image.ID)
// Push the image to the registry
report, err := engine.Push(ctx, image.ID, l.listName+docker.UnknownDigestSuffix, entities.ImagePushOptions{Authfile: l.options.authfile, Quiet: false, SkipTLSVerify: types.NewOptionalBool(l.options.skipTLSVerify)})
if err != nil {
return fmt.Errorf("pulling image %q to local storage: %w", image, err)
return fmt.Errorf("pushing image %q to registry: %w", image, err)
}
refsMutex.Lock()
defer refsMutex.Unlock()
refs = append(refs, ref)
refs = append(refs, "docker://"+l.listName+"@"+report.ManifestDigest)
return nil
})
}
pullErrors := pullGroup.Wait()
err = pullErrors.ErrorOrNil()
pushErrors := pushGroup.Wait()
err = pushErrors.ErrorOrNil()
if err != nil {
return "", fmt.Errorf("building: %w", err)
}
@ -119,17 +104,21 @@ func (l *listLocal) build(ctx context.Context, images map[entities.BuildReport]e
// Clear the list in the event it already existed
if exists.Value {
_, err = l.localEngine.ManifestListClear(ctx, manifest)
_, err = l.localEngine.ManifestListClear(ctx, l.listName)
if err != nil {
return "", fmt.Errorf("error clearing list %q", manifest)
return "", fmt.Errorf("error clearing list %q", l.listName)
}
}
// Add the images to the list
listID, err := l.localEngine.ManifestAdd(ctx, manifest, refs, entities.ManifestAddOptions{})
listID, err := l.localEngine.ManifestAdd(ctx, l.listName, refs, entities.ManifestAddOptions{Authfile: l.options.authfile, SkipTLSVerify: types.NewOptionalBool(l.options.skipTLSVerify)})
if err != nil {
return "", fmt.Errorf("adding images %q to list: %w", refs, err)
}
_, err = l.localEngine.ManifestPush(ctx, l.listName, l.listName, entities.ImagePushOptions{Authfile: l.options.authfile, SkipTLSVerify: types.NewOptionalBool(l.options.skipTLSVerify)})
if err != nil {
return "", err
}
// Write the manifest list's ID file if we're expected to
if l.options.iidFile != "" {
@ -140,158 +129,3 @@ func (l *listLocal) build(ctx context.Context, images map[entities.BuildReport]e
return l.listName, nil
}
type listFiles struct {
directory string
options listBuilderOptions
}
// newFileManifestListBuilder returns a manifest list builder which saves a manifest
// list and images to a specified directory in the non-standard dir: format.
func newFileManifestListBuilder(directory string, options listBuilderOptions) (listBuilder, error) {
if options.iidFile != "" {
return nil, fmt.Errorf("saving to dir: format doesn't use image IDs, --iidfile not supported")
}
return &listFiles{directory: directory, options: options}, nil
}
// Build retrieves images from the build reports and assembles them into a
// manifest list in the configured directory.
func (m *listFiles) build(ctx context.Context, images map[entities.BuildReport]entities.ImageEngine) (string, error) {
listFormat := v1.MediaTypeImageIndex
imageFormat := v1.MediaTypeImageManifest
tempDir, err := os.MkdirTemp("", "")
if err != nil {
return "", err
}
defer os.RemoveAll(tempDir)
name := fmt.Sprintf("dir:%s", tempDir)
tempRef, err := alltransports.ParseImageName(name)
if err != nil {
return "", fmt.Errorf("parsing temporary image ref %q: %w", name, err)
}
if err := os.MkdirAll(m.directory, 0o755); err != nil {
return "", err
}
output, err := alltransports.ParseImageName("dir:" + m.directory)
if err != nil {
return "", fmt.Errorf("parsing output directory ref %q: %w", "dir:"+m.directory, err)
}
// Pull the images into the temporary directory
var (
pullGroup multierror.Group
pullErrors *multierror.Error
refsMutex sync.Mutex
)
refs := make(map[entities.BuildReport]types.ImageReference)
for image, engine := range images {
image, engine := image, engine
tempFile, err := os.CreateTemp(tempDir, "archive-*.tar")
if err != nil {
defer func() {
pullErrors = pullGroup.Wait()
}()
perr := pullErrors.ErrorOrNil()
if perr != nil {
return "", perr
}
return "", err
}
defer tempFile.Close()
pullGroup.Go(func() error {
logrus.Infof("copying image %s", image.ID)
defer logrus.Infof("copied image %s", image.ID)
pullOptions := entities.PullToFileOptions{
ImageID: image.ID,
SaveFormat: image.SaveFormat,
SaveFile: tempFile.Name(),
}
if image.SaveFormat == manifest.DockerV2Schema2MediaType {
listFormat = manifest.DockerV2ListMediaType
imageFormat = manifest.DockerV2Schema2MediaType
}
reference, err := engine.PullToFile(ctx, pullOptions)
if err != nil {
return fmt.Errorf("pulling image %q to temporary directory: %w", image, err)
}
ref, err := alltransports.ParseImageName(reference)
if err != nil {
return fmt.Errorf("pulling image %q to temporary directory: %w", image, err)
}
refsMutex.Lock()
defer refsMutex.Unlock()
refs[image] = ref
return nil
})
}
pullErrors = pullGroup.Wait()
err = pullErrors.ErrorOrNil()
if err != nil {
return "", fmt.Errorf("building: %w", err)
}
if m.options.cleanup {
var rmGroup multierror.Group
for image, engine := range images {
image, engine := image, engine
rmGroup.Go(func() error {
_, err := engine.Remove(ctx, []string{image.ID}, entities.ImageRemoveOptions{})
if len(err) > 0 {
return err[0]
}
return nil
})
}
rmErrors := rmGroup.Wait()
if rmErrors != nil {
if err = rmErrors.ErrorOrNil(); err != nil {
return "", fmt.Errorf("removing intermediate images: %w", err)
}
}
}
supplemental := []types.ImageReference{}
var sys types.SystemContext
// Create a manifest list
list := lmanifests.Create()
// Add the images to the list
for image, ref := range refs {
if _, err = list.Add(ctx, &sys, ref, true); err != nil {
return "", fmt.Errorf("adding image %q to list: %w", image.ID, err)
}
supplemental = append(supplemental, ref)
}
// Save the list to the temporary directory to be the main manifest
listBytes, err := list.Serialize(listFormat)
if err != nil {
return "", fmt.Errorf("serializing manifest list: %w", err)
}
if err = os.WriteFile(filepath.Join(tempDir, "manifest.json"), listBytes, fs.FileMode(0o600)); err != nil {
return "", fmt.Errorf("writing temporary manifest list: %w", err)
}
// Now copy everything to the final dir: location
defaultPolicy, err := signature.DefaultPolicy(&sys)
if err != nil {
return "", err
}
policyContext, err := signature.NewPolicyContext(defaultPolicy)
if err != nil {
return "", err
}
input := supplemented.Reference(tempRef, supplemental, cp.CopyAllImages, nil)
copyOptions := cp.Options{
ForceManifestMIMEType: imageFormat,
ImageListSelection: cp.CopyAllImages,
}
_, err = cp.Image(ctx, policyContext, output, input, &copyOptions)
if err != nil {
return "", fmt.Errorf("copying images to dir:%q: %w", m.directory, err)
}
return "dir:" + m.directory, nil
}

View File

@ -5,7 +5,6 @@
load helpers.bash
@test "farm - check farm has been created" {
run_podman farm ls
assert "$output" =~ $FARMNAME
@ -17,18 +16,20 @@ load helpers.bash
empty_farm="empty-farm"
# create an empty farm
run_podman farm create $empty_farm
run_podman farm build --farm $empty_farm -t $iname $PODMAN_TMPDIR
run_podman farm build --farm $empty_farm --authfile $AUTHFILE --tls-verify=false -t $REGISTRY/$iname $FARM_TMPDIR
assert "$output" =~ "Local builder ready"
# get the system architecture
run_podman info --format '{{.Host.Arch}}'
ARCH=$output
# inspect manifest list built and saved in local containers-storage
# FIXME: use --format?
run_podman manifest inspect $iname
assert "$output" =~ $ARCH
run_podman images -a
echo "# skopeo inspect ..."
run skopeo inspect "$@" --tls-verify=false --authfile $AUTHFILE docker://$REGISTRY/$iname
echo "$output"
is "$status" "0" "skopeo inspect - exit status"
# FIXME-someday: why do we need the prune?
run_podman manifest rm $iname
@ -37,18 +38,19 @@ load helpers.bash
@test "farm - build on farm node only with --cleanup" {
iname="test-image-2"
run_podman farm build --cleanup --local=false -t $iname $PODMAN_TMPDIR
run_podman farm build --cleanup --local=false --authfile $AUTHFILE --tls-verify=false -t $REGISTRY/$iname $FARM_TMPDIR
assert "$output" =~ "Farm \"$FARMNAME\" ready"
# get the system architecture
run_podman info --format '{{.Host.Arch}}'
ARCH=$output
# inspect manifest list built and saved in dir
# FIXME FIXME FIXME! #20505: do not write anything under cwd
ls -l $iname
# inspect manifest list built and saved in local containers-storage
run_podman manifest inspect $iname
assert "$output" =~ $ARCH
# FIXME FIXME FIXME FIXME! NEVER WRITE INTO PWD!
manifestarch=$(jq -r '.manifests[].platform.architecture' <$iname/manifest.json)
assert "$manifestarch" = "$ARCH" "arch from $iname/manifest.json"
echo "# skopeo inspect ..."
run skopeo inspect "$@" --tls-verify=false --authfile $AUTHFILE docker://$REGISTRY/$iname
echo "$output"
is "$status" "0" "skopeo inspect - exit status"
# see if we can ssh into node to check the image was cleaned up
run ssh $ROOTLESS_USER@localhost podman images --filter dangling=true --noheading
@ -58,21 +60,27 @@ load helpers.bash
run_podman images --filter dangling=true --noheading
assert "$output" = "" "podman images on local host"
run_podman manifest rm $iname
run_podman image prune -f
}
@test "farm - build on farm node and local" {
iname="test-image-3"
run_podman farm build -t $iname $PODMAN_TMPDIR
run_podman farm build --authfile $AUTHFILE --tls-verify=false -t $REGISTRY/$iname $FARM_TMPDIR
assert "$output" =~ "Farm \"$FARMNAME\" ready"
# get the system architecture
run_podman info --format '{{.Host.Arch}}'
ARCH=$output
# inspect manifest list built and saved in dir
# inspect manifest list built and saved
run_podman manifest inspect $iname
assert "$output" =~ $ARCH
echo "# skopeo inspect ..."
run skopeo inspect "$@" --tls-verify=false --authfile $AUTHFILE docker://$REGISTRY/$iname
echo "$output"
is "$status" "0" "skopeo inspect - exit status"
run_podman manifest rm $iname
run_podman image prune -f
}
@ -81,15 +89,21 @@ load helpers.bash
@test "farm - build on farm node only (podman-remote)" {
iname="test-image-4"
run_podman --remote farm build -t $iname $PODMAN_TMPDIR
run_podman --remote farm build --authfile $AUTHFILE --tls-verify=false -t $REGISTRY/$iname $FARM_TMPDIR
assert "$output" =~ "Farm \"$FARMNAME\" ready"
# get the system architecture
run_podman --remote info --format '{{.Host.Arch}}'
ARCH=$output
# inspect manifest list built and saved in dir
manifestarch=$(jq -r '.manifests[].platform.architecture' <$iname/manifest.json)
assert "$manifestarch" = "$ARCH" "arch from $iname/manifest.json"
# inspect manifest list built and saved
run_podman manifest inspect $iname
assert "$output" =~ $ARCH
echo "# skopeo inspect ..."
run skopeo inspect "$@" --tls-verify=false --authfile $AUTHFILE docker://$REGISTRY/$iname
echo "$output"
is "$status" "0" "skopeo inspect - exit status"
run_podman manifest rm $iname
run_podman image prune -f
}

View File

@ -2,11 +2,13 @@
load ../system/helpers.bash
export FARM_TMPDIR=$(mktemp -d --tmpdir=${BATS_TMPDIR:-/tmp} podman_bats.XXXXXX)
function setup(){
basic_setup
# Always create the same containerfile
cat >$PODMAN_TMPDIR/Containerfile <<EOF
cat >$FARM_TMPDIR/Containerfile <<EOF
FROM $IMAGE
RUN arch | tee /arch.txt
RUN date | tee /built.txt

View File

@ -3,6 +3,9 @@
bats_require_minimum_version 1.8.0
load helpers
load ../system/helpers
load ../system/helpers.registry
load ../system/helpers.network
function setup_suite(){
if [[ -z "$ROOTLESS_USER" ]]; then
@ -32,9 +35,29 @@ function setup_suite(){
# only set up the podman farm before the first test
run_podman system connection add --identity $sshkey test-node $ROOTLESS_USER@localhost
run_podman farm create $FARMNAME test-node
export PODMAN_LOGIN_WORKDIR=$(mktemp -d --tmpdir=${BATS_TMPDIR:-${TMPDIR:-/tmp}} podman-bats-registry.XXXXXX)
export PODMAN_LOGIN_USER="user$(random_string 4)"
export PODMAN_LOGIN_PASS="pw$(random_string 15)"
# FIXME: racy! It could be many minutes between now and when we start it.
# To mitigate, we use a range not used anywhere else in system tests.
export PODMAN_LOGIN_REGISTRY_PORT=$(random_free_port 42000-42999)
# create a local registry to push images to
export REGISTRY=localhost:${PODMAN_LOGIN_REGISTRY_PORT}
export AUTHFILE=$FARM_TMPDIR/authfile.json
start_registry
run_podman login --authfile=$AUTHFILE \
--tls-verify=false \
--username ${PODMAN_LOGIN_USER} \
--password ${PODMAN_LOGIN_PASS} \
$REGISTRY
}
function teardown_suite(){
# clear out the farms after the last farm test
run_podman farm rm --all
stop_registry
}