diff --git a/builder/dispatchers.go b/builder/dispatchers.go
new file mode 100644
index 0000000000..8abdb51d8a
--- /dev/null
+++ b/builder/dispatchers.go
@@ -0,0 +1,324 @@
+package builder
+
+// This file contains the dispatchers for each command. Note that
+// `nullDispatch` is not actually a command, but a stand-in for commands we
+// parse but do nothing with.
+//
+// See evaluator.go for a higher-level discussion of the whole evaluator
+// package.
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/runconfig"
+)
+
+// dispatch with no layer / parsing. This is effectively not a command.
+func nullDispatch(b *Builder, args []string, attributes map[string]bool) error {
+	return nil
+}
+
+// ENV foo bar
+//
+// Sets the environment variable foo to bar, and also makes interpolation
+// in the Dockerfile available from the next statement on via ${foo}.
+//
+func env(b *Builder, args []string, attributes map[string]bool) error {
+	if len(args) != 2 {
+		return fmt.Errorf("ENV accepts two arguments")
+	}
+
+	fullEnv := fmt.Sprintf("%s=%s", args[0], args[1])
+
+	for i, envVar := range b.Config.Env {
+		envParts := strings.SplitN(envVar, "=", 2)
+		if args[0] == envParts[0] {
+			b.Config.Env[i] = fullEnv
+			return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv))
+		}
+	}
+	b.Config.Env = append(b.Config.Env, fullEnv)
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv))
+}
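Review note: the replace-or-append merge in env() above is what keeps Config.Env free of duplicate keys. A minimal standalone sketch of that merge, runnable outside the builder (setEnv is a hypothetical helper invented for illustration, not part of this patch):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // setEnv mirrors env(): rewrite the entry in place when the key already
    // exists, append otherwise, so the slice never accumulates duplicates.
    func setEnv(env []string, key, value string) []string {
    	full := fmt.Sprintf("%s=%s", key, value)
    	for i, kv := range env {
    		if parts := strings.SplitN(kv, "=", 2); parts[0] == key {
    			env[i] = full
    			return env
    		}
    	}
    	return append(env, full)
    }

    func main() {
    	env := []string{"PATH=/usr/bin", "GOPATH=/go"}
    	env = setEnv(env, "GOPATH", "/srv/go") // replaced in place
    	env = setEnv(env, "HOME", "/root")     // appended
    	fmt.Println(env)
    	// [PATH=/usr/bin GOPATH=/srv/go HOME=/root]
    }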
+// MAINTAINER some text
+//
+// Sets the maintainer metadata.
+func maintainer(b *Builder, args []string, attributes map[string]bool) error {
+	if len(args) != 1 {
+		return fmt.Errorf("MAINTAINER requires exactly one argument")
+	}
+
+	b.maintainer = args[0]
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
+}
+
+// ADD foo /path
+//
+// Add the file 'foo' to '/path'. Tarball and remote URL (git, http) handling
+// exists here. If you do not wish to have this automatic handling, use COPY.
+//
+func add(b *Builder, args []string, attributes map[string]bool) error {
+	if len(args) != 2 {
+		return fmt.Errorf("ADD requires two arguments")
+	}
+
+	return b.runContextCommand(args, true, true, "ADD")
+}
+
+// COPY foo /path
+//
+// Same as 'ADD' but without the tar and remote url handling.
+//
+func dispatchCopy(b *Builder, args []string, attributes map[string]bool) error {
+	if len(args) != 2 {
+		return fmt.Errorf("COPY requires two arguments")
+	}
+
+	return b.runContextCommand(args, false, false, "COPY")
+}
+
+// FROM imagename
+//
+// This sets the image the dockerfile will build on top of.
+//
+func from(b *Builder, args []string, attributes map[string]bool) error {
+	if len(args) != 1 {
+		return fmt.Errorf("FROM requires one argument")
+	}
+
+	name := args[0]
+
+	image, err := b.Daemon.Repositories().LookupImage(name)
+	if err != nil {
+		if b.Daemon.Graph().IsNotExist(err) {
+			image, err = b.pullImage(name)
+		}
+
+		// Note that the top-level err will still be non-nil here if IsNotExist
+		// was not the error. This approach just simplifies the logic a bit.
+		if err != nil {
+			return err
+		}
+	}
+
+	return b.processImageFrom(image)
+}
+
+// ONBUILD RUN echo yo
+//
+// ONBUILD triggers run when the image is used in a FROM statement.
+//
+// ONBUILD handling has a lot of special-case functionality; the heading in
+// evaluator.go and the comments around dispatch() in the same file explain the
+// special cases. Search for 'OnBuild' in internals.go for additional special
+// cases.
+//
+func onbuild(b *Builder, args []string, attributes map[string]bool) error {
+	triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
+	switch triggerInstruction {
+	case "ONBUILD":
+		return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
+	case "MAINTAINER", "FROM":
+		return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
+	}
+
+	trigger := strings.Join(args, " ")
+
+	b.Config.OnBuild = append(b.Config.OnBuild, trigger)
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
+}
+
+// WORKDIR /tmp
+//
+// Set the working directory for future RUN/CMD/etc statements.
+//
+func workdir(b *Builder, args []string, attributes map[string]bool) error {
+	if len(args) != 1 {
+		return fmt.Errorf("WORKDIR requires exactly one argument")
+	}
+
+	workdir := args[0]
+
+	if workdir[0] == '/' {
+		b.Config.WorkingDir = workdir
+	} else {
+		if b.Config.WorkingDir == "" {
+			b.Config.WorkingDir = "/"
+		}
+		b.Config.WorkingDir = filepath.Join(b.Config.WorkingDir, workdir)
+	}
+
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
+}
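Review note: a relative WORKDIR accumulates onto the previous working directory through filepath.Join, while an absolute one resets it, as the branch in workdir() shows. A quick standalone check of that accumulation (joinWorkdir is a hypothetical helper, for illustration only):

    package main

    import (
    	"fmt"
    	"path/filepath"
    )

    // joinWorkdir mimics workdir() above: absolute paths replace the working
    // directory outright, relative ones are joined onto the previous value.
    func joinWorkdir(current, next string) string {
    	if next[0] == '/' {
    		return next
    	}
    	if current == "" {
    		current = "/"
    	}
    	return filepath.Join(current, next)
    }

    func main() {
    	wd := ""
    	for _, step := range []string{"a", "b", "/c", "d"} {
    		wd = joinWorkdir(wd, step)
    	}
    	fmt.Println(wd) // /c/d
    }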
+// RUN some command yo
+//
+// Run a command and commit the image. Args are automatically prepended with
+// 'sh -c' in the event there is only one argument. The difference in
+// processing:
+//
+// RUN echo hi          # sh -c echo hi
+// RUN [ "echo", "hi" ] # echo hi
+//
+func run(b *Builder, args []string, attributes map[string]bool) error {
+	args = handleJsonArgs(args, attributes)
+
+	if b.image == "" {
+		return fmt.Errorf("Please provide a source image with `from` prior to run")
+	}
+
+	config, _, _, err := runconfig.Parse(append([]string{b.image}, args...), nil)
+	if err != nil {
+		return err
+	}
+
+	cmd := b.Config.Cmd
+	// set Cmd manually, this is special case only for Dockerfiles
+	b.Config.Cmd = config.Cmd
+	runconfig.Merge(b.Config, config)
+
+	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
+
+	log.Debugf("Command to be executed: %v", b.Config.Cmd)
+
+	hit, err := b.probeCache()
+	if err != nil {
+		return err
+	}
+	if hit {
+		return nil
+	}
+
+	c, err := b.create()
+	if err != nil {
+		return err
+	}
+
+	// Ensure that we keep the container mounted until the commit
+	// to avoid unmounting and then mounting directly again
+	c.Mount()
+	defer c.Unmount()
+
+	err = b.run(c)
+	if err != nil {
+		return err
+	}
+	if err := b.commit(c.ID, cmd, "run"); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// CMD foo
+//
+// Set the default command to run in the container (which may be empty).
+// Argument handling is the same as RUN.
+//
+func cmd(b *Builder, args []string, attributes map[string]bool) error {
+	b.Config.Cmd = handleJsonArgs(args, attributes)
+
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %v", b.Config.Cmd)); err != nil {
+		return err
+	}
+
+	b.cmdSet = true
+	return nil
+}
+
+// ENTRYPOINT /usr/sbin/nginx
+//
+// Set the entrypoint (which defaults to sh -c) to /usr/sbin/nginx. Will
+// accept the CMD as the arguments to /usr/sbin/nginx.
+//
+// Handles command processing similarly to CMD and RUN, except that
+// b.Config.Entrypoint is initialized at NewBuilder time instead of through
+// argument parsing.
+//
+func entrypoint(b *Builder, args []string, attributes map[string]bool) error {
+	b.Config.Entrypoint = handleJsonArgs(args, attributes)
+
+	// if there is no cmd in current Dockerfile - cleanup cmd
+	if !b.cmdSet {
+		b.Config.Cmd = nil
+	}
+
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %v", b.Config.Entrypoint)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// EXPOSE 6667/tcp 7000/tcp
+//
+// Expose ports for links and port mappings. This all ends up in
+// b.Config.ExposedPorts for runconfig.
+//
+func expose(b *Builder, args []string, attributes map[string]bool) error {
+	portsTab := args
+
+	if b.Config.ExposedPorts == nil {
+		b.Config.ExposedPorts = make(nat.PortSet)
+	}
+
+	ports, _, err := nat.ParsePortSpecs(append(portsTab, b.Config.PortSpecs...))
+	if err != nil {
+		return err
+	}
+
+	for port := range ports {
+		if _, exists := b.Config.ExposedPorts[port]; !exists {
+			b.Config.ExposedPorts[port] = struct{}{}
+		}
+	}
+	b.Config.PortSpecs = nil
+
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
+}
+
+// USER foo
+//
+// Set the user to 'foo' for future commands and when running the
+// ENTRYPOINT/CMD at container run time.
+//
+func user(b *Builder, args []string, attributes map[string]bool) error {
+	if len(args) != 1 {
+		return fmt.Errorf("USER requires exactly one argument")
+	}
+
+	b.Config.User = args[0]
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args[0]))
+}
+
+// VOLUME /foo
+//
+// Expose the volume /foo for use. Will also accept the JSON form, but either
+// way requires exactly one argument.
+//
+func volume(b *Builder, args []string, attributes map[string]bool) error {
+	if len(args) != 1 {
+		return fmt.Errorf("VOLUME requires exactly one argument")
+	}
+
+	volume := args
+
+	if b.Config.Volumes == nil {
+		b.Config.Volumes = map[string]struct{}{}
+	}
+	for _, v := range volume {
+		b.Config.Volumes[v] = struct{}{}
+	}
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// INSERT is no longer accepted, but we still parse it.
+func insert(b *Builder, args []string, attributes map[string]bool) error {
+	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
+}
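Review note: EXPOSE arguments become "port/proto" keys in Config.ExposedPorts. The normalization itself happens in nat.ParsePortSpecs, which is outside this patch; the sketch below only mimics the simple "port[/proto]" defaulting-to-tcp case (assumed behavior) to show the shape of the resulting set:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // normalizePortSpec mimics the tcp default that nat.ParsePortSpecs applies
    // to a bare "port" spec (an assumption for illustration; the real parser
    // also handles host mappings).
    func normalizePortSpec(spec string) string {
    	if !strings.Contains(spec, "/") {
    		return spec + "/tcp"
    	}
    	return spec
    }

    func main() {
    	exposed := map[string]struct{}{}
    	for _, s := range []string{"6667/tcp", "7000", "53/udp"} {
    		exposed[normalizePortSpec(s)] = struct{}{}
    	}
    	fmt.Println(len(exposed), "ports exposed")
    	// keys: 6667/tcp, 7000/tcp, 53/udp (map iteration order varies)
    }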
diff --git a/builder/evaluator.go b/builder/evaluator.go
new file mode 100644
index 0000000000..33d8f080e0
--- /dev/null
+++ b/builder/evaluator.go
@@ -0,0 +1,213 @@
+// builder is the evaluation step in the Dockerfile parse/evaluate pipeline.
+//
+// It incorporates a dispatch table based on the parser.Node values (see the
+// parser package for more information) that are yielded from the parser itself.
+// Calling NewBuilder with the BuildOpts struct can be used to customize the
+// experience for execution purposes only. Parsing is controlled in the parser
+// package, and this division of responsibility should be respected.
+//
+// Please see the jump table targets for the actual invocations, most of which
+// will call out to the functions in internals.go to deal with their tasks.
+//
+// ONBUILD is a special case, which is covered in the onbuild() func in
+// dispatchers.go.
+//
+// The evaluator uses the concept of "steps", which are usually each processable
+// line in the Dockerfile. Each step is numbered and certain actions are taken
+// before and after each step, such as creating an image ID and removing temporary
+// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which
+// includes its own set of steps (usually only one of them).
+package builder
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"strings"
+
+	"github.com/docker/docker/builder/parser"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+)
+
+var (
+	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
+)
+
+var evaluateTable map[string]func(*Builder, []string, map[string]bool) error
+
+func init() {
+	evaluateTable = map[string]func(*Builder, []string, map[string]bool) error{
+		"env":            env,
+		"maintainer":     maintainer,
+		"add":            add,
+		"copy":           dispatchCopy, // copy() is a go builtin
+		"from":           from,
+		"onbuild":        onbuild,
+		"workdir":        workdir,
+		"docker-version": nullDispatch, // we don't care about docker-version
+		"run":            run,
+		"cmd":            cmd,
+		"entrypoint":     entrypoint,
+		"expose":         expose,
+		"volume":         volume,
+		"user":           user,
+		"insert":         insert,
+	}
+}
+
+// internal struct, used to maintain configuration of the Dockerfile's
+// processing as it evaluates the parsing result.
+type Builder struct {
+	Daemon *daemon.Daemon
+	Engine *engine.Engine
+
+	// effectively stdio for the run. Because it is not stdio, I said
+	// "effectively". Do not use stdio anywhere in this package for any reason.
+	OutStream io.Writer
+	ErrStream io.Writer
+
+	Verbose      bool
+	UtilizeCache bool
+
+	// controls how images and containers are handled between steps.
+	Remove      bool
+	ForceRemove bool
+
+	AuthConfig     *registry.AuthConfig
+	AuthConfigFile *registry.ConfigFile
+
+	// Deprecated, original writer used for ImagePull. To be removed.
+	OutOld          io.Writer
+	StreamFormatter *utils.StreamFormatter
+
+	Config *runconfig.Config // runconfig for cmd, run, entrypoint etc.
+
+	// both of these are controlled by the Remove and ForceRemove options in BuildOpts
+	TmpContainers map[string]struct{} // a map of containers used for removes
+
+	dockerfile  *parser.Node   // the syntax tree of the dockerfile
+	image       string         // image name for commit processing
+	maintainer  string         // maintainer name. could probably be removed.
+	cmdSet      bool           // indicates whether CMD was set in the current Dockerfile
+	context     *tarsum.TarSum // the context is a tarball that is uploaded by the client
+	contextPath string         // the path of the temporary directory the local context is unpacked to (server side)
+}
+// Run the builder with the context. This is the lynchpin of this package. This
+// will (barring errors):
+//
+// * call readContext() which will set up the temporary directory and unpack
+//   the context into it.
+// * read the dockerfile
+// * parse the dockerfile
+// * walk the parse tree and execute it by dispatching to handlers. If Remove
+//   or ForceRemove is set, additional cleanup around containers happens after
+//   processing.
+// * Print a happy message and return the image ID.
+//
+func (b *Builder) Run(context io.Reader) (string, error) {
+	if err := b.readContext(context); err != nil {
+		return "", err
+	}
+
+	filename := path.Join(b.contextPath, "Dockerfile")
+
+	fi, err := os.Stat(filename)
+	if os.IsNotExist(err) {
+		return "", fmt.Errorf("Cannot build a directory without a Dockerfile")
+	}
+	if err != nil {
+		return "", err
+	}
+	if fi.Size() == 0 {
+		return "", ErrDockerfileEmpty
+	}
+
+	f, err := os.Open(filename)
+	if err != nil {
+		return "", err
+	}
+
+	defer f.Close()
+
+	ast, err := parser.Parse(f)
+	if err != nil {
+		return "", err
+	}
+
+	b.dockerfile = ast
+
+	// some initializations that would not have been supplied by the caller.
+	b.Config = &runconfig.Config{}
+	b.TmpContainers = map[string]struct{}{}
+
+	for i, n := range b.dockerfile.Children {
+		if err := b.dispatch(i, n); err != nil {
+			if b.ForceRemove {
+				b.clearTmp()
+			}
+			return "", err
+		}
+		fmt.Fprintf(b.OutStream, " ---> %s\n", utils.TruncateID(b.image))
+		if b.Remove {
+			b.clearTmp()
+		}
+	}
+
+	if b.image == "" {
+		return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
+	}
+
+	fmt.Fprintf(b.OutStream, "Successfully built %s\n", utils.TruncateID(b.image))
+	return b.image, nil
+}
+
+// This method is the entrypoint to all statement handling routines.
+//
+// Almost all nodes will have this structure:
+// Child[Node, Node, Node] where Child is from parser.Node.Children and each
+// node comes from parser.Node.Next. This forms a "line" with a statement and
+// arguments and we process them in this normalized form by hitting
+// evaluateTable with the leaf nodes of the command and the Builder object.
+//
+// ONBUILD is a special case; in this case the parser will emit:
+// Child[Node, Child[Node, Node...]] where the first node is the literal
+// "onbuild" and the child entrypoint is the command of the ONBUILD statement,
+// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
+// deal with that, at least until it becomes more of a general concern with new
+// features.
+func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
+	cmd := ast.Value
+	attrs := ast.Attributes
+	strs := []string{}
+	msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd))
+
+	if cmd == "onbuild" {
+		ast = ast.Next.Children[0]
+		strs = append(strs, b.replaceEnv(ast.Value))
+		msg += " " + ast.Value
+	}
+
+	for ast.Next != nil {
+		ast = ast.Next
+		strs = append(strs, b.replaceEnv(ast.Value))
+		msg += " " + ast.Value
+	}
+
+	fmt.Fprintln(b.OutStream, msg)
+
+	// XXX yes, we skip any cmds that are not valid; the parser should have
+	// picked these out already.
+	if f, ok := evaluateTable[cmd]; ok {
+		return f(b, strs, attrs)
+	}
+
+	fmt.Fprintf(b.ErrStream, "# Skipping unknown instruction %s\n", strings.ToUpper(cmd))
+
+	return nil
+}
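Review note: the whole evaluator hangs off evaluateTable, so a handler is just a function value keyed by the lower-cased instruction name. This stripped-down, self-contained model shows the dispatch flow, including the skip-with-warning path for unknown instructions (toy types only, not the builder's real signatures):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // A minimal model of evaluateTable: instruction name -> handler over the
    // argument words. Unknown instructions are skipped with a warning, as in
    // dispatch() above.
    var table = map[string]func(args []string) error{
    	"env":        func(args []string) error { fmt.Println("set env:", args); return nil },
    	"maintainer": func(args []string) error { fmt.Println("maintainer:", args[0]); return nil },
    }

    func dispatchLine(line string) error {
    	fields := strings.Fields(line)
    	cmd := strings.ToLower(fields[0])
    	if f, ok := table[cmd]; ok {
    		return f(fields[1:])
    	}
    	fmt.Printf("# Skipping unknown instruction %s\n", strings.ToUpper(cmd))
    	return nil
    }

    func main() {
    	dispatchLine("ENV foo bar")
    	dispatchLine("MAINTAINER someone@example.com")
    	dispatchLine("FROBNICATE now") // unknown -> warning
    }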
diff --git a/builder/internals.go b/builder/internals.go
new file mode 100644
index 0000000000..1767d7d9dd
--- /dev/null
+++ b/builder/internals.go
@@ -0,0 +1,563 @@
+package builder
+
+// internals for handling commands. Covers many areas and a lot of
+// non-contiguous functionality. Please read the comments.
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon"
+	imagepkg "github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
+)
+
+func (b *Builder) readContext(context io.Reader) error {
+	tmpdirPath, err := ioutil.TempDir("", "docker-build")
+	if err != nil {
+		return err
+	}
+
+	decompressedStream, err := archive.DecompressStream(context)
+	if err != nil {
+		return err
+	}
+
+	b.context = &tarsum.TarSum{Reader: decompressedStream, DisableCompression: true}
+	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
+		return err
+	}
+
+	b.contextPath = tmpdirPath
+	return nil
+}
+
+func (b *Builder) commit(id string, autoCmd []string, comment string) error {
+	if b.image == "" {
+		return fmt.Errorf("Please provide a source image with `from` prior to commit")
+	}
+	b.Config.Image = b.image
+	if id == "" {
+		cmd := b.Config.Cmd
+		b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
+		defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
+
+		hit, err := b.probeCache()
+		if err != nil {
+			return err
+		}
+		if hit {
+			return nil
+		}
+
+		container, warnings, err := b.Daemon.Create(b.Config, "")
+		if err != nil {
+			return err
+		}
+		for _, warning := range warnings {
+			fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
+		}
+		b.TmpContainers[container.ID] = struct{}{}
+		fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
+		id = container.ID
+
+		if err := container.Mount(); err != nil {
+			return err
+		}
+		defer container.Unmount()
+	}
+	container := b.Daemon.Get(id)
+	if container == nil {
+		return fmt.Errorf("An error occurred while creating the container")
+	}
+
+	// Note: Actually copy the struct
+	autoConfig := *b.Config
+	autoConfig.Cmd = autoCmd
+	// Commit the container
+	image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
+	if err != nil {
+		return err
+	}
+	b.image = image.ID
+	return nil
+}
+func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
+	if b.context == nil {
+		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
+	}
+
+	if len(args) != 2 {
+		return fmt.Errorf("Invalid %s format", cmdName)
+	}
+
+	orig := args[0]
+	dest := args[1]
+
+	cmd := b.Config.Cmd
+	b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
+	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
+	b.Config.Image = b.image
+
+	var (
+		origPath   = orig
+		destPath   = dest
+		remoteHash string
+		isRemote   bool
+		decompress = true
+	)
+
+	isRemote = utils.IsURL(orig)
+	if isRemote && !allowRemote {
+		return fmt.Errorf("Source can't be a URL for %s", cmdName)
+	} else if utils.IsURL(orig) {
+		// Initiate the download
+		resp, err := utils.Download(orig)
+		if err != nil {
+			return err
+		}
+
+		// Create a tmp dir
+		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
+		if err != nil {
+			return err
+		}
+
+		// Create a tmp file within our tmp dir
+		tmpFileName := path.Join(tmpDirName, "tmp")
+		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+		if err != nil {
+			return err
+		}
+		defer os.RemoveAll(tmpDirName)
+
+		// Download and dump result to tmp file
+		if _, err := io.Copy(tmpFile, resp.Body); err != nil {
+			tmpFile.Close()
+			return err
+		}
+		tmpFile.Close()
+
+		// Zero out the mtime of the newly created tmp file
+		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
+			return err
+		}
+
+		origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
+
+		// Process the checksum
+		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
+		if err != nil {
+			return err
+		}
+		tarSum := &tarsum.TarSum{Reader: r, DisableCompression: true}
+		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
+			return err
+		}
+		remoteHash = tarSum.Sum(nil)
+		r.Close()
+
+		// If the destination is a directory, figure out the filename.
+		if strings.HasSuffix(dest, "/") {
+			u, err := url.Parse(orig)
+			if err != nil {
+				return err
+			}
+			path := u.Path
+			if strings.HasSuffix(path, "/") {
+				path = path[:len(path)-1]
+			}
+			parts := strings.Split(path, "/")
+			filename := parts[len(parts)-1]
+			if filename == "" {
+				return fmt.Errorf("cannot determine filename from url: %s", u)
+			}
+			destPath = dest + filename
+		}
+	}
+
+	if err := b.checkPathForAddition(origPath); err != nil {
+		return err
+	}
+	// Hash path and check the cache
+	if b.UtilizeCache {
+		var (
+			hash string
+			sums = b.context.GetSums()
+		)
+
+		if remoteHash != "" {
+			hash = remoteHash
+		} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
+			return err
+		} else if fi.IsDir() {
+			var subfiles []string
+			for file, sum := range sums {
+				absFile := path.Join(b.contextPath, file)
+				absOrigPath := path.Join(b.contextPath, origPath)
+				if strings.HasPrefix(absFile, absOrigPath) {
+					subfiles = append(subfiles, sum)
+				}
+			}
+			sort.Strings(subfiles)
+			hasher := sha256.New()
+			hasher.Write([]byte(strings.Join(subfiles, ",")))
+			hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
+		} else {
+			if origPath[0] == '/' && len(origPath) > 1 {
+				origPath = origPath[1:]
+			}
+			origPath = strings.TrimPrefix(origPath, "./")
+			if h, ok := sums[origPath]; ok {
+				hash = "file:" + h
+			}
+		}
+		b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)}
+		hit, err := b.probeCache()
+		if err != nil {
+			return err
+		}
+		// If we do not have a hash, never use the cache
+		if hit && hash != "" {
+			return nil
+		}
+	}
+
+	// Create the container
+	container, _, err := b.Daemon.Create(b.Config, "")
+	if err != nil {
+		return err
+	}
+	b.TmpContainers[container.ID] = struct{}{}
+
+	if err := container.Mount(); err != nil {
+		return err
+	}
+	defer container.Unmount()
+
+	if !allowDecompression || isRemote {
+		decompress = false
+	}
+	if err := b.addContext(container, origPath, destPath, decompress); err != nil {
+		return err
+	}
+
+	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
+	remote, tag := parsers.ParseRepositoryTag(name)
+	pullRegistryAuth := b.AuthConfig
+	if len(b.AuthConfigFile.Configs) > 0 {
+		// The request came with a full auth config file, we prefer to use that
+		endpoint, _, err := registry.ResolveRepositoryName(remote)
+		if err != nil {
+			return nil, err
+		}
+		resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(endpoint)
+		pullRegistryAuth = &resolvedAuth
+	}
+	job := b.Engine.Job("pull", remote, tag)
+	job.SetenvBool("json", b.StreamFormatter.Json())
+	job.SetenvBool("parallel", true)
+	job.SetenvJson("authConfig", pullRegistryAuth)
+	job.Stdout.Add(b.OutOld)
+	if err := job.Run(); err != nil {
+		return nil, err
+	}
+	image, err := b.Daemon.Repositories().LookupImage(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return image, nil
+}
+func (b *Builder) processImageFrom(img *imagepkg.Image) error {
+	b.image = img.ID
+
+	if img.Config != nil {
+		b.Config = img.Config
+	}
+
+	if len(b.Config.Env) == 0 {
+		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
+	}
+
+	// Process ONBUILD triggers if they exist
+	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
+		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
+	}
+
+	// Copy the ONBUILD triggers, and remove them from the config, since the
+	// config will be committed.
+	onBuildTriggers := b.Config.OnBuild
+	b.Config.OnBuild = []string{}
+
+	// FIXME rewrite this so that builder/parser is used; right now steps in
+	// onbuild are muted because we have no good way to represent the step
+	// number
+	for _, step := range onBuildTriggers {
+		splitStep := strings.Split(step, " ")
+		stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " "))
+		switch stepInstruction {
+		case "ONBUILD":
+			return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step)
+		case "MAINTAINER", "FROM":
+			return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step)
+		}
+
+		// FIXME we have to run the evaluator manually here. This does not belong
+		// in this function. Once removed, the init() in evaluator.go should no
+		// longer be necessary.
+		if f, ok := evaluateTable[strings.ToLower(stepInstruction)]; ok {
+			if err := f(b, splitStep[1:], nil); err != nil {
+				return err
+			}
+		} else {
+			return fmt.Errorf("%s doesn't appear to be a valid Dockerfile instruction", splitStep[0])
+		}
+	}
+
+	return nil
+}
+
+// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
+// and if so attempts to look up the current `b.image` and `b.Config` pair
+// in the current server `b.Daemon`. If an image is found, probeCache returns
+// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
+// is any error, it returns `(false, err)`.
+func (b *Builder) probeCache() (bool, error) {
+	if b.UtilizeCache {
+		if cache, err := b.Daemon.ImageGetCached(b.image, b.Config); err != nil {
+			return false, err
+		} else if cache != nil {
+			fmt.Fprintf(b.OutStream, " ---> Using cache\n")
+			log.Debugf("[BUILDER] Use cached version")
+			b.image = cache.ID
+			return true, nil
+		} else {
+			log.Debugf("[BUILDER] Cache miss")
+		}
+	}
+	return false, nil
+}
+
+func (b *Builder) create() (*daemon.Container, error) {
+	if b.image == "" {
+		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
+	}
+	b.Config.Image = b.image
+
+	// Create the container
+	c, _, err := b.Daemon.Create(b.Config, "")
+	if err != nil {
+		return nil, err
+	}
+
+	b.TmpContainers[c.ID] = struct{}{}
+	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
+
+	// override the entry point that may have been picked up from the base image
+	c.Path = b.Config.Cmd[0]
+	c.Args = b.Config.Cmd[1:]
+
+	return c, nil
+}
+func (b *Builder) run(c *daemon.Container) error {
+	var errCh chan error
+	if b.Verbose {
+		errCh = utils.Go(func() error {
+			// FIXME: call the 'attach' job so that daemon.Attach can be made private
+			//
+			// FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach
+			// but without hijacking for stdin. Also, with attach there can be a race
+			// condition because some output may already have been printed before it.
+			return <-b.Daemon.Attach(c, nil, nil, b.OutStream, b.ErrStream)
+		})
+	}
+
+	// Start the container
+	if err := c.Start(); err != nil {
+		return err
+	}
+
+	if errCh != nil {
+		if err := <-errCh; err != nil {
+			return err
+		}
+	}
+
+	// Wait for it to finish
+	if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 {
+		err := &utils.JSONError{
+			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
+			Code:    ret,
+		}
+		return err
+	}
+
+	return nil
+}
+
+func (b *Builder) checkPathForAddition(orig string) error {
+	origPath := path.Join(b.contextPath, orig)
+	origPath, err := filepath.EvalSymlinks(origPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return fmt.Errorf("%s: no such file or directory", orig)
+		}
+		return err
+	}
+	if !strings.HasPrefix(origPath, b.contextPath) {
+		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
+	}
+	if _, err := os.Stat(origPath); err != nil {
+		if os.IsNotExist(err) {
+			return fmt.Errorf("%s: no such file or directory", orig)
+		}
+		return err
+	}
+	return nil
+}
+
+func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
+	var (
+		err        error
+		destExists = true
+		origPath   = path.Join(b.contextPath, orig)
+		destPath   = path.Join(container.RootfsPath(), dest)
+	)
+
+	if destPath != container.RootfsPath() {
+		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
+		if err != nil {
+			return err
+		}
+	}
+
+	// Preserve the trailing '/'
+	if strings.HasSuffix(dest, "/") || dest == "." {
+		destPath = destPath + "/"
+	}
+
+	destStat, err := os.Stat(destPath)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+		destExists = false
+	}
+
+	fi, err := os.Stat(origPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return fmt.Errorf("%s: no such file or directory", orig)
+		}
+		return err
+	}
+
+	if fi.IsDir() {
+		return copyAsDirectory(origPath, destPath, destExists)
+	}
+
+	// If we are adding a remote file (or we've been told not to decompress),
+	// do not try to untar it
+	if decompress {
+		// First try to unpack the source as an archive. To support the untar
+		// feature we need to clean up the path a little bit, because tar is
+		// very forgiving. First we strip the archive's filename off the
+		// destination path, but only when it does not end in / .
+		tarDest := destPath
+		if strings.HasSuffix(tarDest, "/") {
+			tarDest = filepath.Dir(destPath)
+		}
+
+		// try to successfully untar the orig
+		if err := archive.UntarPath(origPath, tarDest); err == nil {
+			return nil
+		} else if err != io.EOF {
+			log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
+		}
+	}
+
+	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
+		return err
+	}
+	if err := archive.CopyWithTar(origPath, destPath); err != nil {
+		return err
+	}
+
+	resPath := destPath
+	if destExists && destStat.IsDir() {
+		resPath = path.Join(destPath, path.Base(origPath))
+	}
+
+	return fixPermissions(resPath, 0, 0)
+}
+
+func copyAsDirectory(source, destination string, destinationExists bool) error {
+	if err := archive.CopyWithTar(source, destination); err != nil {
+		return err
+	}
+
+	if destinationExists {
+		files, err := ioutil.ReadDir(source)
+		if err != nil {
+			return err
+		}
+
+		for _, file := range files {
+			if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	return fixPermissions(destination, 0, 0)
+}
+
+func fixPermissions(destination string, uid, gid int) error {
+	return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
+		if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
+			return err
+		}
+		return nil
+	})
+}
+
+func (b *Builder) clearTmp() {
+	for c := range b.TmpContainers {
+		tmp := b.Daemon.Get(c)
+		if err := b.Daemon.Destroy(tmp); err != nil {
+			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
+		} else {
+			delete(b.TmpContainers, c)
+			fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
+		}
+	}
+}
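Review note: checkPathForAddition is the guard that keeps ADD/COPY sources inside the unpacked context: it resolves symlinks first, then requires the result to remain under the context root. A minimal standalone sketch of that containment test (insideContext is a hypothetical name used only here):

    package main

    import (
    	"fmt"
    	"path/filepath"
    	"strings"
    )

    // insideContext resolves symlinks before the prefix check, in the same
    // order checkPathForAddition does, so a symlink pointing out of the
    // context cannot smuggle files in.
    func insideContext(contextPath, orig string) (bool, error) {
    	p, err := filepath.EvalSymlinks(filepath.Join(contextPath, orig))
    	if err != nil {
    		return false, err // nonexistent paths are also rejected
    	}
    	return strings.HasPrefix(p, contextPath), nil
    }

    func main() {
    	ok, err := insideContext("/tmp/build-ctx", "../etc/passwd")
    	fmt.Println(ok, err) // false (or an error if the path does not exist)
    }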
diff --git a/builder/job.go b/builder/job.go
new file mode 100644
index 0000000000..1aa2c3b6b9
--- /dev/null
+++ b/builder/job.go
@@ -0,0 +1,118 @@
+package builder
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"strings"
+
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
+)
+
+type BuilderJob struct {
+	Engine *engine.Engine
+	Daemon *daemon.Daemon
+}
+
+func (b *BuilderJob) Install() {
+	b.Engine.Register("build", b.CmdBuild)
+}
+
+func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
+	if len(job.Args) != 0 {
+		return job.Errorf("Usage: %s\n", job.Name)
+	}
+	var (
+		remoteURL      = job.Getenv("remote")
+		repoName       = job.Getenv("t")
+		suppressOutput = job.GetenvBool("q")
+		noCache        = job.GetenvBool("nocache")
+		rm             = job.GetenvBool("rm")
+		forceRm        = job.GetenvBool("forcerm")
+		authConfig     = &registry.AuthConfig{}
+		configFile     = &registry.ConfigFile{}
+		tag            string
+		context        io.ReadCloser
+	)
+	job.GetenvJson("authConfig", authConfig)
+	job.GetenvJson("configFile", configFile)
+	repoName, tag = parsers.ParseRepositoryTag(repoName)
+
+	if remoteURL == "" {
+		context = ioutil.NopCloser(job.Stdin)
+	} else if utils.IsGIT(remoteURL) {
+		if !strings.HasPrefix(remoteURL, "git://") {
+			remoteURL = "https://" + remoteURL
+		}
+		root, err := ioutil.TempDir("", "docker-build-git")
+		if err != nil {
+			return job.Error(err)
+		}
+		defer os.RemoveAll(root)
+
+		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
+			return job.Errorf("Error trying to use git: %s (%s)", err, output)
+		}
+
+		c, err := archive.Tar(root, archive.Uncompressed)
+		if err != nil {
+			return job.Error(err)
+		}
+		context = c
+	} else if utils.IsURL(remoteURL) {
+		f, err := utils.Download(remoteURL)
+		if err != nil {
+			return job.Error(err)
+		}
+		defer f.Body.Close()
+		dockerFile, err := ioutil.ReadAll(f.Body)
+		if err != nil {
+			return job.Error(err)
+		}
+		c, err := archive.Generate("Dockerfile", string(dockerFile))
+		if err != nil {
+			return job.Error(err)
+		}
+		context = c
+	}
+	defer context.Close()
+
+	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
+
+	builder := &Builder{
+		Daemon: b.Daemon,
+		Engine: b.Engine,
+		OutStream: &utils.StdoutFormater{
+			Writer:          job.Stdout,
+			StreamFormatter: sf,
+		},
+		ErrStream: &utils.StderrFormater{
+			Writer:          job.Stdout,
+			StreamFormatter: sf,
+		},
+		Verbose:         !suppressOutput,
+		UtilizeCache:    !noCache,
+		Remove:          rm,
+		ForceRemove:     forceRm,
+		OutOld:          job.Stdout,
+		StreamFormatter: sf,
+		AuthConfig:      authConfig,
+		AuthConfigFile:  configFile,
+	}
+
+	id, err := builder.Run(context)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	if repoName != "" {
+		b.Daemon.Repositories().Set(repoName, tag, id, false)
+	}
+	return engine.StatusOK
+}
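Review note: when the remote URL in CmdBuild points at a bare Dockerfile rather than a tarball, archive.Generate wraps it into a one-entry tar so it can flow through the same context path as any other build. A stdlib-only sketch of what that wrapping plausibly produces (assumed behavior; the real helper lives in docker/archive and is not part of this sketch):

    package main

    import (
    	"archive/tar"
    	"bytes"
    	"fmt"
    )

    // singleFileContext builds an in-memory tar holding one file, which is
    // the shape of context a bare remote Dockerfile is converted into.
    func singleFileContext(name, content string) (*bytes.Buffer, error) {
    	buf := new(bytes.Buffer)
    	tw := tar.NewWriter(buf)
    	hdr := &tar.Header{Name: name, Mode: 0600, Size: int64(len(content))}
    	if err := tw.WriteHeader(hdr); err != nil {
    		return nil, err
    	}
    	if _, err := tw.Write([]byte(content)); err != nil {
    		return nil, err
    	}
    	return buf, tw.Close()
    }

    func main() {
    	ctx, err := singleFileContext("Dockerfile", "FROM busybox\nCMD [\"true\"]\n")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(ctx.Len(), "bytes of tar context")
    }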
diff --git a/builder/parser/dumper/main.go b/builder/parser/dumper/main.go
new file mode 100644
index 0000000000..aea7ee74cb
--- /dev/null
+++ b/builder/parser/dumper/main.go
@@ -0,0 +1,32 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/docker/docker/builder/parser"
+)
+
+func main() {
+	var f *os.File
+	var err error
+
+	if len(os.Args) < 2 {
+		fmt.Println("please supply filename(s)")
+		os.Exit(1)
+	}
+
+	for _, fn := range os.Args[1:] {
+		f, err = os.Open(fn)
+		if err != nil {
+			panic(err)
+		}
+
+		ast, err := parser.Parse(f)
+		f.Close()
+		if err != nil {
+			panic(err)
+		}
+
+		fmt.Print(ast.Dump())
+	}
+}
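Review note: the dumper is handy for regenerating the test fixtures below; it prints one s-expression per instruction, in exactly the format the testfiles/*/result files use. For example, a Dockerfile containing:

    FROM ubuntu:14.04
    ENV GOPATH /go
    ENTRYPOINT ["/usr/local/bin/consuldock"]

dumps as:

    (from "ubuntu:14.04")
    (env "GOPATH" "/go")
    (entrypoint "/usr/local/bin/consuldock")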
diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go
new file mode 100644
index 0000000000..93fa23ee85
--- /dev/null
+++ b/builder/parser/line_parsers.go
@@ -0,0 +1,131 @@
+package parser
+
+// line parsers are dispatch calls that parse a single unit of text into a
+// Node object which contains the whole statement. Dockerfiles have varied
+// (but not usually unique, see ONBUILD for a unique example) parsing rules
+// per-command, and these unify the processing in a way that makes it
+// manageable.
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+var (
+	errDockerfileJSONNesting = errors.New("You may not nest arrays in Dockerfile statements.")
+)
+
+// ignore the current argument. This will still leave a command parsed, but
+// will not incorporate the arguments into the ast.
+func parseIgnore(rest string) (*Node, map[string]bool, error) {
+	return &Node{}, nil, nil
+}
+
+// used for onbuild. Could potentially be used for anything that represents a
+// statement with sub-statements.
+//
+// ONBUILD RUN foo bar -> (onbuild (run foo bar))
+//
+func parseSubCommand(rest string) (*Node, map[string]bool, error) {
+	_, child, err := parseLine(rest)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return &Node{Children: []*Node{child}}, nil, nil
+}
+
+// parse environment-like statements. Note that this does *not* handle
+// variable interpolation, which will be handled in the evaluator.
+func parseEnv(rest string) (*Node, map[string]bool, error) {
+	node := &Node{}
+	rootnode := node
+	strs := TOKEN_WHITESPACE.Split(rest, 2)
+	if len(strs) < 2 {
+		return nil, nil, fmt.Errorf("ENV requires two arguments")
+	}
+	node.Value = strs[0]
+	node.Next = &Node{}
+	node.Next.Value = strs[1]
+
+	return rootnode, nil, nil
+}
+
+// parses a whitespace-delimited set of arguments. The result is effectively a
+// linked list of string arguments.
+func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) {
+	node := &Node{}
+	rootnode := node
+	prevnode := node
+	for _, str := range TOKEN_WHITESPACE.Split(rest, -1) { // use regexp
+		prevnode = node
+		node.Value = str
+		node.Next = &Node{}
+		node = node.Next
+	}
+
+	// XXX the loop above always allocates one empty trailing node; nil out
+	// the last node in the chain to drop it.
+	prevnode.Next = nil
+
+	return rootnode, nil, nil
+}
+
+// parseString returns the rest of the line, unmodified, as the value of a
+// single node.
+func parseString(rest string) (*Node, map[string]bool, error) {
+	n := &Node{}
+	n.Value = rest
+	return n, nil, nil
+}
+
+// parseJSON converts JSON arrays to an AST.
+func parseJSON(rest string) (*Node, map[string]bool, error) {
+	var (
+		myJson   []interface{}
+		next     = &Node{}
+		orignext = next
+		prevnode = next
+	)
+
+	if err := json.Unmarshal([]byte(rest), &myJson); err != nil {
+		return nil, nil, err
+	}
+
+	for _, str := range myJson {
+		switch str.(type) {
+		case string:
+		case float64:
+			str = strconv.FormatFloat(str.(float64), 'G', -1, 64)
+		default:
+			return nil, nil, errDockerfileJSONNesting
+		}
+		next.Value = str.(string)
+		next.Next = &Node{}
+		prevnode = next
+		next = next.Next
+	}
+
+	prevnode.Next = nil
+
+	return orignext, map[string]bool{"json": true}, nil
+}
+
+// parseMaybeJSON determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, returns the raw argument as the value of
+// a single node.
+func parseMaybeJSON(rest string) (*Node, map[string]bool, error) {
+	rest = strings.TrimSpace(rest)
+
+	node, attrs, err := parseJSON(rest)
+
+	if err == nil {
+		return node, attrs, nil
+	}
+	if err == errDockerfileJSONNesting {
+		return nil, nil, err
+	}
+
+	node = &Node{}
+	node.Value = rest
+	return node, nil, nil
+}
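Review note: JSON has no integer type, so json.Unmarshal into []interface{} yields float64 for every number; parseJSON re-renders those with strconv.FormatFloat(..., 'G', -1, 64) so a numeric element like 8080 round-trips as the string "8080". A self-contained demonstration of that conversion:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"strconv"
    )

    func main() {
    	var args []interface{}
    	if err := json.Unmarshal([]byte(`["echo", 8080, 1.5]`), &args); err != nil {
    		panic(err)
    	}
    	for _, a := range args {
    		if f, ok := a.(float64); ok {
    			// same formatting parseJSON uses for numeric elements
    			a = strconv.FormatFloat(f, 'G', -1, 64)
    		}
    		fmt.Printf("%q ", a)
    	}
    	fmt.Println() // "echo" "8080" "1.5"
    }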
diff --git a/builder/parser/parser.go b/builder/parser/parser.go
new file mode 100644
index 0000000000..8315412bd7
--- /dev/null
+++ b/builder/parser/parser.go
@@ -0,0 +1,127 @@
+// This package implements a parser and parse tree dumper for Dockerfiles.
+package parser
+
+import (
+	"bufio"
+	"io"
+	"regexp"
+	"strings"
+)
+
+// Node is a structure used to represent a parse tree.
+//
+// In the node there are three fields, Value, Next, and Children. Value is the
+// current token's string value. Next is always the next non-child token, and
+// children contains all the children. Here's an example:
+//
+// (value next (child child-next child-next-next) next-next)
+//
+// This data structure is frankly pretty lousy for handling complex languages,
+// but luckily for us the Dockerfile isn't very complicated. This structure
+// works a little more effectively than a "proper" parse tree for our needs.
+//
+type Node struct {
+	Value      string          // actual content
+	Next       *Node           // the next item in the current sexp
+	Children   []*Node         // the children of this sexp
+	Attributes map[string]bool // special attributes for this node
+}
+
+var (
+	dispatch                map[string]func(string) (*Node, map[string]bool, error)
+	TOKEN_WHITESPACE        = regexp.MustCompile(`[\t\v\f\r ]+`)
+	TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\$`)
+	TOKEN_COMMENT           = regexp.MustCompile(`^#.*$`)
+)
+
+func init() {
+	// Dispatch table; see line_parsers.go for the parse functions.
+	// The command is parsed and mapped to the line parser. The line parser
+	// receives the arguments but not the command, and returns an AST after
+	// reformulating the arguments according to the rules in the parser
+	// functions. Errors are propagated up by Parse() and the resulting AST can
+	// be incorporated directly into the existing AST as a next.
+	dispatch = map[string]func(string) (*Node, map[string]bool, error){
+		"user":           parseString,
+		"onbuild":        parseSubCommand,
+		"workdir":        parseString,
+		"env":            parseEnv,
+		"maintainer":     parseString,
+		"docker-version": parseString,
+		"from":           parseString,
+		"add":            parseStringsWhitespaceDelimited,
+		"copy":           parseStringsWhitespaceDelimited,
+		"run":            parseMaybeJSON,
+		"cmd":            parseMaybeJSON,
+		"entrypoint":     parseMaybeJSON,
+		"expose":         parseStringsWhitespaceDelimited,
+		"volume":         parseMaybeJSON,
+		"insert":         parseIgnore,
+	}
+}
+
+// parse a line; a continued line returns the remainder, while a complete
+// line yields a node.
+func parseLine(line string) (string, *Node, error) {
+	if line = stripComments(line); line == "" {
+		return "", nil, nil
+	}
+
+	if TOKEN_LINE_CONTINUATION.MatchString(line) {
+		line = TOKEN_LINE_CONTINUATION.ReplaceAllString(line, "")
+		return line, nil, nil
+	}
+
+	cmd, args := splitCommand(line)
+
+	node := &Node{}
+	node.Value = cmd
+
+	sexp, attrs, err := fullDispatch(cmd, args)
+	if err != nil {
+		return "", nil, err
+	}
+
+	node.Next = sexp
+	node.Attributes = attrs
+
+	return "", node, nil
+}
+
+// The main parse routine. Reads the Dockerfile from an io.Reader and returns
+// the root of the AST.
+func Parse(rwc io.Reader) (*Node, error) {
+	root := &Node{}
+	scanner := bufio.NewScanner(rwc)
+
+	for scanner.Scan() {
+		line, child, err := parseLine(strings.TrimSpace(scanner.Text()))
+		if err != nil {
+			return nil, err
+		}
+
+		if line != "" && child == nil {
+			for scanner.Scan() {
+				newline := strings.TrimSpace(scanner.Text())
+
+				if newline == "" {
+					continue
+				}
+
+				line, child, err = parseLine(line + newline)
+				if err != nil {
+					return nil, err
+				}
+
+				if child != nil {
+					break
+				}
+			}
+		}
+
+		if child != nil {
+			root.Children = append(root.Children, child)
+		}
+	}
+
+	return root, nil
+}
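Review note: line continuations are folded before dispatch: parseLine hands back the partial line with the trailing backslash stripped, and Parse keeps concatenating scanner lines (skipping blank ones) until a complete statement emerges. A self-contained sketch of that folding loop, using the same `\\$` regexp:

    package main

    import (
    	"fmt"
    	"regexp"
    	"strings"
    )

    var lineContinuation = regexp.MustCompile(`\\$`)

    // foldLines models the continuation handling in Parse(): accumulate
    // backslash-terminated lines, skip blanks inside a continuation, and emit
    // one logical statement per completed line.
    func foldLines(lines []string) []string {
    	var out []string
    	acc := ""
    	for _, l := range lines {
    		l = strings.TrimSpace(l)
    		if acc != "" && l == "" {
    			continue // Parse() skips blank lines inside a continuation
    		}
    		if lineContinuation.MatchString(l) {
    			acc += lineContinuation.ReplaceAllString(l, "")
    			continue
    		}
    		out = append(out, acc+l)
    		acc = ""
    	}
    	return out
    }

    func main() {
    	fmt.Println(foldLines([]string{`RUN apt-get update \`, "", "  && apt-get install -y git"}))
    	// [RUN apt-get update && apt-get install -y git]
    }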
diff --git a/builder/parser/parser_test.go b/builder/parser/parser_test.go
new file mode 100644
index 0000000000..871da477c1
--- /dev/null
+++ b/builder/parser/parser_test.go
@@ -0,0 +1,79 @@
+package parser
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+const testDir = "testfiles"
+const negativeTestDir = "testfiles-negative"
+
+func getDirs(t *testing.T, dir string) []os.FileInfo {
+	f, err := os.Open(dir)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer f.Close()
+
+	dirs, err := f.Readdir(0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return dirs
+}
+
+func TestTestNegative(t *testing.T) {
+	for _, dir := range getDirs(t, negativeTestDir) {
+		dockerfile := filepath.Join(negativeTestDir, dir.Name(), "Dockerfile")
+
+		df, err := os.Open(dockerfile)
+		if err != nil {
+			t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error())
+		}
+
+		_, err = Parse(df)
+		if err == nil {
+			t.Fatalf("No error parsing broken dockerfile for %s", dir.Name())
+		}
+
+		df.Close()
+	}
+}
+
+func TestTestData(t *testing.T) {
+	for _, dir := range getDirs(t, testDir) {
+		dockerfile := filepath.Join(testDir, dir.Name(), "Dockerfile")
+		resultfile := filepath.Join(testDir, dir.Name(), "result")
+
+		df, err := os.Open(dockerfile)
+		if err != nil {
+			t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error())
+		}
+
+		rf, err := os.Open(resultfile)
+		if err != nil {
+			t.Fatalf("Result file missing for %s: %s", dir.Name(), err.Error())
+		}
+
+		ast, err := Parse(df)
+		if err != nil {
+			t.Fatalf("Error parsing %s's dockerfile: %s", dir.Name(), err.Error())
+		}
+
+		content, err := ioutil.ReadAll(rf)
+		if err != nil {
+			t.Fatalf("Error reading %s's result file: %s", dir.Name(), err.Error())
+		}
+
+		if ast.Dump() != string(content) {
+			t.Fatalf("%s: AST dump of dockerfile does not match result", dir.Name())
+		}
+
+		df.Close()
+		rf.Close()
+	}
+}
diff --git a/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile b/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile
new file mode 100644
index 0000000000..d1be4596c7
--- /dev/null
+++ b/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile
@@ -0,0 +1 @@
+CMD [ "echo", [ "nested json" ] ]
diff --git a/builder/parser/testfiles/brimstone-consuldock/Dockerfile b/builder/parser/testfiles/brimstone-consuldock/Dockerfile
new file mode 100644
index 0000000000..5c75a2e0ca
--- /dev/null
+++ b/builder/parser/testfiles/brimstone-consuldock/Dockerfile
@@ -0,0 +1,25 @@
+FROM brimstone/ubuntu:14.04
+
+MAINTAINER brimstone@the.narro.ws
+
+# TORUN -v /var/run/docker.sock:/var/run/docker.sock
+
+ENV GOPATH /go
+
+# Set our command
+ENTRYPOINT ["/usr/local/bin/consuldock"]
+
+# Install the packages we need, clean up after them and us
+RUN apt-get update \
+	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
+	&& apt-get install -y --no-install-recommends git golang ca-certificates \
+	&& apt-get clean \
+	&& rm -rf /var/lib/apt/lists \
+
+	&& go get -v github.com/brimstone/consuldock \
+	&& mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \
+
+	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
+	&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
+	&& rm /tmp/dpkg.* \
+	&& rm -rf $GOPATH
diff --git a/builder/parser/testfiles/brimstone-consuldock/result b/builder/parser/testfiles/brimstone-consuldock/result
new file mode 100644
index 0000000000..b6ef4f817b
--- /dev/null
+++ b/builder/parser/testfiles/brimstone-consuldock/result
@@ -0,0 +1,5 @@
+(from "brimstone/ubuntu:14.04")
+(maintainer "brimstone@the.narro.ws")
+(env "GOPATH" "/go")
+(entrypoint "/usr/local/bin/consuldock")
+(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists && go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.* && rm -rf $GOPATH")
\ No newline at end of file
diff --git a/builder/parser/testfiles/brimstone-docker-consul/Dockerfile b/builder/parser/testfiles/brimstone-docker-consul/Dockerfile
new file mode 100644
index 0000000000..25ae352166
--- /dev/null
+++ b/builder/parser/testfiles/brimstone-docker-consul/Dockerfile
@@ -0,0 +1,52 @@
+FROM brimstone/ubuntu:14.04
+
+CMD []
+
+ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"]
+
+EXPOSE 8500 8600 8400 8301 8302
+
+RUN apt-get update \
+	&& apt-get install -y unzip wget \
+	&& apt-get clean \
+	&& rm -rf /var/lib/apt/lists
+
+RUN cd /tmp \
+	&& wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
+		-O web_ui.zip \
+	&& unzip web_ui.zip \
+	&& mv dist /webui \
+	&& rm web_ui.zip
+
+RUN apt-get update \
+	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
+	&& apt-get install -y --no-install-recommends unzip wget \
+	&& apt-get clean \
+	&& rm -rf /var/lib/apt/lists \
+
+	&& cd /tmp \
+	&& wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
+		-O web_ui.zip \
+	&& unzip web_ui.zip \
+	&& mv dist /webui \
+	&& rm web_ui.zip \
+
+	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
+	&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
+	&& rm /tmp/dpkg.*
+
+ENV GOPATH /go
+
+RUN apt-get update \
+	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
+	&& apt-get install -y --no-install-recommends git golang ca-certificates build-essential \
+	&& apt-get clean \
+	&& rm -rf /var/lib/apt/lists \
+
+	&& go get -v github.com/hashicorp/consul \
+	&& mv $GOPATH/bin/consul /usr/bin/consul \
+
+	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
+	&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
+	&& rm /tmp/dpkg.* \
+	&& rm -rf $GOPATH
diff --git a/builder/parser/testfiles/brimstone-docker-consul/result b/builder/parser/testfiles/brimstone-docker-consul/result
new file mode 100644
index 0000000000..e7fee03985
--- /dev/null
+++ b/builder/parser/testfiles/brimstone-docker-consul/result
@@ -0,0 +1,9 @@
+(from "brimstone/ubuntu:14.04")
+(cmd "")
+(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui")
+(expose "8500" "8600" "8400" "8301" "8302")
+(run "apt-get update && apt-get install -y unzip wget && apt-get clean && rm -rf /var/lib/apt/lists")
+(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip")
+(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.*")
+(env "GOPATH" "/go")
+(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists && go get -v github.com/hashicorp/consul && mv $GOPATH/bin/consul /usr/bin/consul && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.* && rm -rf $GOPATH")
\ No newline at end of file
+# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: Apparmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +docker-version 0.6.1 +FROM ubuntu:14.04 +MAINTAINER Tianon Gravi (@tianon) + +# Packaged dependencies +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ + apt-utils \ + aufs-tools \ + automake \ + btrfs-tools \ + build-essential \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + libsqlite3-dev \ + lxc=1.0* \ + mercurial \ + pandoc \ + parallel \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags +# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 +RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' + +# Grab Go's cover tool for dead-simple code coverage testing +RUN go get code.google.com/p/go.tools/cmd/cover + +# TODO replace FPM with some very minimal debhelper stuff +RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 + +# Get the "busybox" image source so we can build locally instead of pulling +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox + +# Setup s3cmd config +RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . 
/go/src/github.com/docker/docker diff --git a/builder/parser/testfiles/docker/result b/builder/parser/testfiles/docker/result new file mode 100644 index 0000000000..a7960244b3 --- /dev/null +++ b/builder/parser/testfiles/docker/result @@ -0,0 +1,25 @@ +(docker-version "0.6.1") +(from "ubuntu:14.04") +(maintainer "Tianon Gravi (@tianon)") +(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq apt-utils aufs-tools automake btrfs-tools build-essential curl dpkg-sig git iptables libapparmor-dev libcap-dev libsqlite3-dev lxc=1.0* mercurial pandoc parallel reprepro ruby1.9.1 ruby1.9.1-dev s3cmd=1.1.0* --no-install-recommends") +(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") +(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") +(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") +(env "PATH" "/usr/local/go/bin:$PATH") +(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") +(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") +(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm darwin/amd64 darwin/386 freebsd/amd64 freebsd/386 freebsd/arm") +(env "GOARM" "5") +(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") +(run "go get code.google.com/p/go.tools/cmd/cover") +(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") +(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") +(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") +(run "git config --global user.email 'docker-dummy@example.com'") +(run "groupadd -r docker") +(run "useradd --create-home --gid docker unprivilegeduser") +(volume "/var/lib/docker") +(workdir "/go/src/github.com/docker/docker") +(env "DOCKER_BUILDTAGS" "apparmor selinux") +(entrypoint "hack/dind") +(copy "." 
"/go/src/github.com/docker/docker") \ No newline at end of file diff --git a/builder/parser/testfiles/escapes/Dockerfile b/builder/parser/testfiles/escapes/Dockerfile new file mode 100644 index 0000000000..87a8e028a2 --- /dev/null +++ b/builder/parser/testfiles/escapes/Dockerfile @@ -0,0 +1,8 @@ +FROM ubuntu:14.04 +MAINTAINER Erik \\Hollensbe \" + +RUN apt-get \update && \ + apt-get \"install znc -y +ADD \conf\\" /.znc + +CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git a/builder/parser/testfiles/escapes/result b/builder/parser/testfiles/escapes/result new file mode 100644 index 0000000000..724c399c21 --- /dev/null +++ b/builder/parser/testfiles/escapes/result @@ -0,0 +1,5 @@ +(from "ubuntu:14.04") +(maintainer "Erik \\\\Hollensbe \\\"") +(run "apt-get \\update && apt-get \\\"install znc -y") +(add "\\conf\\\\\"" "/.znc") +(cmd "/usr\\\"/bin/znc" "-f" "-r") \ No newline at end of file diff --git a/builder/parser/testfiles/influxdb/Dockerfile b/builder/parser/testfiles/influxdb/Dockerfile new file mode 100644 index 0000000000..587fb9b54b --- /dev/null +++ b/builder/parser/testfiles/influxdb/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install wget -y +RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb +RUN dpkg -i influxdb_latest_amd64.deb +RUN rm -r /opt/influxdb/shared + +VOLUME /opt/influxdb/shared + +CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml + +EXPOSE 8083 +EXPOSE 8086 +EXPOSE 8090 +EXPOSE 8099 diff --git a/builder/parser/testfiles/influxdb/result b/builder/parser/testfiles/influxdb/result new file mode 100644 index 0000000000..f0d45a4e27 --- /dev/null +++ b/builder/parser/testfiles/influxdb/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install wget -y") +(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") +(run "dpkg -i influxdb_latest_amd64.deb") +(run "rm -r /opt/influxdb/shared") +(volume "/opt/influxdb/shared") +(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") +(expose "8083") +(expose "8086") +(expose "8090") +(expose "8099") \ No newline at end of file diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile new file mode 100644 index 0000000000..39fe27d99c --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile @@ -0,0 +1 @@ +CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result new file mode 100644 index 0000000000..bfd84ae489 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result @@ -0,0 +1 @@ +(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") \ No newline at end of file diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile new file mode 100644 index 0000000000..eaae081a06 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile @@ -0,0 +1 @@ +CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result 
b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result new file mode 100644 index 0000000000..f8f7b5017b --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result @@ -0,0 +1 @@ +(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") \ No newline at end of file diff --git a/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile new file mode 100644 index 0000000000..c3ac63c07a --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile @@ -0,0 +1 @@ +CMD ['echo','single quotes are invalid JSON'] diff --git a/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result new file mode 100644 index 0000000000..0623f8bf45 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result @@ -0,0 +1 @@ +(cmd "['echo','single quotes are invalid JSON']") \ No newline at end of file diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile new file mode 100644 index 0000000000..5fd4afa522 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "Please, close the brackets when you're done" diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result new file mode 100644 index 0000000000..d621ddcff3 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"Please, close the brackets when you're done\"") \ No newline at end of file diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile new file mode 100644 index 0000000000..30cc4bb48f --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "look ma, no quote!] 
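The jeztah-invalid-json-* fixtures around this point pin down the parser's fallback for CMD/ENTRYPOINT payloads that are not valid JSON: the raw text is preserved as a single string, and handleJsonArgs (builder/support.go, later in this patch) wraps such strings in "/bin/sh -c". A minimal, self-contained sketch of that two-step behavior; parseMaybeJSON is an illustrative name, not a function from this patch:

package main

import (
	"encoding/json"
	"fmt"
)

// parseMaybeJSON mirrors the fallback these fixtures exercise: a payload
// that unmarshals as a JSON string array becomes an exec-form command;
// anything else (single quotes, unterminated brackets or strings) falls
// back to the shell form, equivalent to what handleJsonArgs produces.
func parseMaybeJSON(payload string) ([]string, bool) {
	var cmd []string
	if err := json.Unmarshal([]byte(payload), &cmd); err != nil {
		return []string{"/bin/sh", "-c", payload}, false
	}
	return cmd, true
}

func main() {
	fmt.Println(parseMaybeJSON(`["echo", "hi"]`))                            // exec form
	fmt.Println(parseMaybeJSON(`['echo','single quotes are invalid JSON']`)) // shell-form fallback
}

Running it prints the exec form for the valid array and the /bin/sh -c wrapper for the single-quoted payload, which is exactly the pairing the Dockerfile/result fixtures encode.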
diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result new file mode 100644 index 0000000000..acedd80c45 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"look ma, no quote!]") \ No newline at end of file diff --git a/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile new file mode 100644 index 0000000000..35f9c24aa6 --- /dev/null +++ b/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER James Turnbull "james@example.com" +ENV REFRESHED_AT 2014-06-01 +RUN apt-get update +RUN apt-get -y install redis-server redis-tools +EXPOSE 6379 +ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/builder/parser/testfiles/kartar-entrypoint-oddities/result b/builder/parser/testfiles/kartar-entrypoint-oddities/result new file mode 100644 index 0000000000..375257a49e --- /dev/null +++ b/builder/parser/testfiles/kartar-entrypoint-oddities/result @@ -0,0 +1,7 @@ +(from "ubuntu:14.04") +(maintainer "James Turnbull \"james@example.com\"") +(env "REFRESHED_AT" "2014-06-01") +(run "apt-get update") +(run "apt-get -y install redis-server redis-tools") +(expose "6379") +(entrypoint "/usr/bin/redis-server") \ No newline at end of file diff --git a/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile new file mode 100644 index 0000000000..188395fe83 --- /dev/null +++ b/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile @@ -0,0 +1,48 @@ +FROM busybox:buildroot-2014.02 + +MAINTAINER docker + +ONBUILD RUN ["echo", "test"] +ONBUILD RUN echo test +ONBUILD COPY . / + + +# RUN Commands \ +# linebreak in comment \ +RUN ["ls", "-la"] +RUN ["echo", "'1234'"] +RUN echo "1234" +RUN echo 1234 +RUN echo '1234' && \ + echo "456" && \ + echo 789 +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /test /test2 /test3/test + +# ENV \ +ENV SCUBA 1 DUBA 3 +ENV SCUBA "1 DUBA 3" + +# CMD \ +CMD ["echo", "test"] +CMD echo test +CMD echo "test" +CMD echo 'test' +CMD echo 'test' | wc - + +#EXPOSE\ +EXPOSE 3000 +EXPOSE 9000 5000 6000 + +USER docker +USER docker:root + +VOLUME ["/test"] +VOLUME ["/test", "/test2"] +VOLUME /test3 + +WORKDIR /test + +ADD . / +COPY . copy diff --git a/builder/parser/testfiles/lk4d4-the-edge-case-generator/result b/builder/parser/testfiles/lk4d4-the-edge-case-generator/result new file mode 100644 index 0000000000..920ed544b5 --- /dev/null +++ b/builder/parser/testfiles/lk4d4-the-edge-case-generator/result @@ -0,0 +1,29 @@ +(from "busybox:buildroot-2014.02") +(maintainer "docker ") +(onbuild (run "echo" "test")) +(onbuild (run "echo test")) +(onbuild (copy "." "/")) +(run "ls" "-la") +(run "echo" "'1234'") +(run "echo \"1234\"") +(run "echo 1234") +(run "echo '1234' && echo \"456\" && echo 789") +(run "sh -c 'echo root:testpass > /tmp/passwd'") +(run "mkdir -p /test /test2 /test3/test") +(env "SCUBA" "1 DUBA 3") +(env "SCUBA" "\"1 DUBA 3\"") +(cmd "echo" "test") +(cmd "echo test") +(cmd "echo \"test\"") +(cmd "echo 'test'") +(cmd "echo 'test' | wc -") +(expose "3000") +(expose "9000" "5000" "6000") +(user "docker") +(user "docker:root") +(volume "/test") +(volume "/test" "/test2") +(volume "/test3") +(workdir "/test") +(add "." "/") +(copy "." 
"copy") \ No newline at end of file diff --git a/builder/parser/testfiles/mail/Dockerfile b/builder/parser/testfiles/mail/Dockerfile new file mode 100644 index 0000000000..f64c1168c1 --- /dev/null +++ b/builder/parser/testfiles/mail/Dockerfile @@ -0,0 +1,16 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y +ADD .muttrc / +ADD .offlineimaprc / +ADD .tmux.conf / +ADD mutt /.mutt +ADD vim /.vim +ADD vimrc /.vimrc +ADD crontab /etc/crontab +RUN chmod 644 /etc/crontab +RUN mkdir /Mail +RUN mkdir /.offlineimap +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD setsid cron; tmux -2 diff --git a/builder/parser/testfiles/mail/result b/builder/parser/testfiles/mail/result new file mode 100644 index 0000000000..2d9c30db9c --- /dev/null +++ b/builder/parser/testfiles/mail/result @@ -0,0 +1,14 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") +(add ".muttrc" "/") +(add ".offlineimaprc" "/") +(add ".tmux.conf" "/") +(add "mutt" "/.mutt") +(add "vim" "/.vim") +(add "vimrc" "/.vimrc") +(add "crontab" "/etc/crontab") +(run "chmod 644 /etc/crontab") +(run "mkdir /Mail") +(run "mkdir /.offlineimap") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "setsid cron; tmux -2") \ No newline at end of file diff --git a/builder/parser/testfiles/mumble/Dockerfile b/builder/parser/testfiles/mumble/Dockerfile new file mode 100644 index 0000000000..5b9ec06a6c --- /dev/null +++ b/builder/parser/testfiles/mumble/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install libcap2-bin mumble-server -y + +ADD ./mumble-server.ini /etc/mumble-server.ini + +CMD /usr/sbin/murmurd diff --git a/builder/parser/testfiles/mumble/result b/builder/parser/testfiles/mumble/result new file mode 100644 index 0000000000..123e893dc5 --- /dev/null +++ b/builder/parser/testfiles/mumble/result @@ -0,0 +1,4 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install libcap2-bin mumble-server -y") +(add "./mumble-server.ini" "/etc/mumble-server.ini") +(cmd "/usr/sbin/murmurd") \ No newline at end of file diff --git a/builder/parser/testfiles/nginx/Dockerfile b/builder/parser/testfiles/nginx/Dockerfile new file mode 100644 index 0000000000..bf8368e1ca --- /dev/null +++ b/builder/parser/testfiles/nginx/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install nginx-full -y +RUN rm -rf /etc/nginx +ADD etc /etc/nginx +RUN chown -R root:root /etc/nginx +RUN /usr/sbin/nginx -qt +RUN mkdir /www + +CMD ["/usr/sbin/nginx"] + +VOLUME /www +EXPOSE 80 diff --git a/builder/parser/testfiles/nginx/result b/builder/parser/testfiles/nginx/result new file mode 100644 index 0000000000..5ac8c77c2f --- /dev/null +++ b/builder/parser/testfiles/nginx/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install nginx-full -y") +(run "rm -rf /etc/nginx") +(add "etc" "/etc/nginx") +(run "chown -R root:root /etc/nginx") +(run "/usr/sbin/nginx -qt") +(run "mkdir /www") +(cmd "/usr/sbin/nginx") +(volume "/www") +(expose "80") \ No newline at end of file diff --git a/builder/parser/testfiles/tf2/Dockerfile b/builder/parser/testfiles/tf2/Dockerfile new file mode 100644 index 0000000000..72b79bdd7d --- /dev/null +++ b/builder/parser/testfiles/tf2/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu:12.04 + +EXPOSE 27015 +EXPOSE 27005 +EXPOSE 26901 +EXPOSE 27020 + +RUN 
apt-get update && apt-get install libc6-dev-i386 curl unzip -y +RUN mkdir -p /steam +RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam +ADD ./script /steam/script +RUN /steam/steamcmd.sh +runscript /steam/script +RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf +RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf +ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg +ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg +ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg +RUN rm -r /steam/tf2/tf/addons/sourcemod/configs +ADD ./configs /steam/tf2/tf/addons/sourcemod/configs +RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en +RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en + +CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/builder/parser/testfiles/tf2/result b/builder/parser/testfiles/tf2/result new file mode 100644 index 0000000000..5ec173f67f --- /dev/null +++ b/builder/parser/testfiles/tf2/result @@ -0,0 +1,20 @@ +(from "ubuntu:12.04") +(expose "27015") +(expose "27005") +(expose "26901") +(expose "27020") +(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") +(run "mkdir -p /steam") +(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") +(add "./script" "/steam/script") +(run "/steam/steamcmd.sh +runscript /steam/script") +(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") +(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") +(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") +(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") +(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") +(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") +(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") +(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") \ No newline at end of file diff --git a/builder/parser/testfiles/weechat/Dockerfile b/builder/parser/testfiles/weechat/Dockerfile new file mode 100644 index 0000000000..4842088166 --- /dev/null +++ b/builder/parser/testfiles/weechat/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y + +ADD .weechat /.weechat +ADD .tmux.conf / +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD zsh -c weechat diff --git a/builder/parser/testfiles/weechat/result b/builder/parser/testfiles/weechat/result new file mode 100644 index 0000000000..b358645cde --- /dev/null +++ b/builder/parser/testfiles/weechat/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") +(add ".weechat" "/.weechat") +(add ".tmux.conf" "/") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "zsh -c weechat") \ No newline at end of file diff --git a/builder/parser/testfiles/znc/Dockerfile 
b/builder/parser/testfiles/znc/Dockerfile
new file mode 100644
index 0000000000..3a4da6e916
--- /dev/null
+++ b/builder/parser/testfiles/znc/Dockerfile
@@ -0,0 +1,7 @@
+FROM ubuntu:14.04
+MAINTAINER Erik Hollensbe
+
+RUN apt-get update && apt-get install znc -y
+ADD conf /.znc
+
+CMD [ "/usr/bin/znc", "-f", "-r" ]
diff --git a/builder/parser/testfiles/znc/result b/builder/parser/testfiles/znc/result
new file mode 100644
index 0000000000..b4ddf3e653
--- /dev/null
+++ b/builder/parser/testfiles/znc/result
@@ -0,0 +1,5 @@
+(from "ubuntu:14.04")
+(maintainer "Erik Hollensbe ")
+(run "apt-get update && apt-get install znc -y")
+(add "conf" "/.znc")
+(cmd "/usr/bin/znc" "-f" "-r")
\ No newline at end of file
diff --git a/builder/parser/utils.go b/builder/parser/utils.go
new file mode 100644
index 0000000000..53cda5808b
--- /dev/null
+++ b/builder/parser/utils.go
@@ -0,0 +1,91 @@
+package parser
+
+import (
+	"fmt"
+	"strings"
+)
+
+// QuoteString walks the string's characters (after trimming), escapes any
+// double quotes and backslashes, then wraps the whole thing in double quotes.
+// Very useful for generating argument output in nodes.
+func QuoteString(str string) string {
+	result := ""
+	chars := strings.Split(strings.TrimSpace(str), "")
+
+	for _, char := range chars {
+		switch char {
+		case `"`:
+			result += `\"`
+		case `\`:
+			result += `\\`
+		default:
+			result += char
+		}
+	}
+
+	return `"` + result + `"`
+}
+
+// Dump dumps the AST defined by `node` as a list of sexps. Returns a string
+// suitable for printing.
+func (node *Node) Dump() string {
+	str := ""
+	str += node.Value
+
+	for _, n := range node.Children {
+		str += "(" + n.Dump() + ")\n"
+	}
+
+	if node.Next != nil {
+		for n := node.Next; n != nil; n = n.Next {
+			if len(n.Children) > 0 {
+				str += " " + n.Dump()
+			} else {
+				str += " " + QuoteString(n.Value)
+			}
+		}
+	}
+
+	return strings.TrimSpace(str)
+}
+
+// fullDispatch performs the dispatch based on the two primary strings, cmd
+// and args. Please look at the dispatch table in parser.go to see how these
+// dispatchers work.
+func fullDispatch(cmd, args string) (*Node, map[string]bool, error) {
+	if _, ok := dispatch[cmd]; !ok {
+		return nil, nil, fmt.Errorf("'%s' is not a valid dockerfile command", cmd)
+	}
+
+	sexp, attrs, err := dispatch[cmd](args)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return sexp, attrs, nil
+}
+
+// splitCommand takes a single line of text and parses out the cmd and args,
+// which are used for dispatching to more exact parsing functions.
+func splitCommand(line string) (string, string) {
+	cmdline := TOKEN_WHITESPACE.Split(line, 2)
+	cmd := strings.ToLower(cmdline[0])
+	if len(cmdline) < 2 {
+		// no arguments at all; pass an empty string through and let the
+		// dispatcher validate (avoids indexing past the end of cmdline)
+		return cmd, ""
+	}
+	// the cmd should never have whitespace, but it's possible for the args to
+	// have trailing whitespace.
+	return cmd, strings.TrimSpace(cmdline[1])
+}
+
+// stripComments covers comments and empty lines. Lines should be trimmed
+// before passing to this function.
+func stripComments(line string) string {
+	// string is already trimmed at this point
+	if TOKEN_COMMENT.MatchString(line) {
+		return TOKEN_COMMENT.ReplaceAllString(line, "")
+	}
+
+	return line
+}
diff --git a/builder/support.go b/builder/support.go
new file mode 100644
index 0000000000..de5d57b501
--- /dev/null
+++ b/builder/support.go
@@ -0,0 +1,37 @@
+package builder
+
+import (
+	"regexp"
+	"strings"
+)
+
+var (
+	TOKEN_ENV_INTERPOLATION = regexp.MustCompile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)")
+)
+
+// replaceEnv handles environment variable replacement. Used in the dispatchers.
+func (b *Builder) replaceEnv(str string) string { + for _, match := range TOKEN_ENV_INTERPOLATION.FindAllString(str, -1) { + match = match[strings.Index(match, "$"):] + matchKey := strings.Trim(match, "${}") + + for _, keyval := range b.Config.Env { + tmp := strings.SplitN(keyval, "=", 2) + if tmp[0] == matchKey { + str = strings.Replace(str, match, tmp[1], -1) + break + } + } + } + + return str +} + +func handleJsonArgs(args []string, attributes map[string]bool) []string { + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return append([]string{"/bin/sh", "-c", strings.Join(args, " ")}) +} diff --git a/daemon/build.go b/daemon/build.go deleted file mode 100644 index 6812139663..0000000000 --- a/daemon/build.go +++ /dev/null @@ -1,1006 +0,0 @@ -package daemon - -import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/url" - "os" - "os/exec" - "path" - "path/filepath" - "reflect" - "regexp" - "sort" - "strings" - "syscall" - "time" - - "github.com/docker/docker/archive" - "github.com/docker/docker/engine" - "github.com/docker/docker/nat" - "github.com/docker/docker/pkg/log" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/registry" - "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" -) - -func (daemon *Daemon) CmdBuild(job *engine.Job) engine.Status { - if len(job.Args) != 0 { - return job.Errorf("Usage: %s\n", job.Name) - } - var ( - remoteURL = job.Getenv("remote") - repoName = job.Getenv("t") - suppressOutput = job.GetenvBool("q") - noCache = job.GetenvBool("nocache") - rm = job.GetenvBool("rm") - forceRm = job.GetenvBool("forcerm") - authConfig = ®istry.AuthConfig{} - configFile = ®istry.ConfigFile{} - tag string - context io.ReadCloser - ) - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("configFile", configFile) - repoName, tag = parsers.ParseRepositoryTag(repoName) - - if remoteURL == "" { - context = ioutil.NopCloser(job.Stdin) - } else if utils.IsGIT(remoteURL) { - if !strings.HasPrefix(remoteURL, "git://") { - remoteURL = "https://" + remoteURL - } - root, err := ioutil.TempDir("", "docker-build-git") - if err != nil { - return job.Error(err) - } - defer os.RemoveAll(root) - - if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { - return job.Errorf("Error trying to use git: %s (%s)", err, output) - } - - c, err := archive.Tar(root, archive.Uncompressed) - if err != nil { - return job.Error(err) - } - context = c - } else if utils.IsURL(remoteURL) { - f, err := utils.Download(remoteURL) - if err != nil { - return job.Error(err) - } - defer f.Body.Close() - dockerFile, err := ioutil.ReadAll(f.Body) - if err != nil { - return job.Error(err) - } - c, err := archive.Generate("Dockerfile", string(dockerFile)) - if err != nil { - return job.Error(err) - } - context = c - } - defer context.Close() - - sf := utils.NewStreamFormatter(job.GetenvBool("json")) - b := NewBuildFile(daemon, daemon.eng, - &utils.StdoutFormater{ - Writer: job.Stdout, - StreamFormatter: sf, - }, - &utils.StderrFormater{ - Writer: job.Stdout, - StreamFormatter: sf, - }, - !suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile) - id, err := b.Build(context) - if err != nil { - return job.Error(err) - } - if repoName != "" { - 
daemon.Repositories().Set(repoName, tag, id, false) - } - return engine.StatusOK -} - -var ( - ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") -) - -type BuildFile interface { - Build(io.Reader) (string, error) - CmdFrom(string) error - CmdRun(string) error -} - -type buildFile struct { - daemon *Daemon - eng *engine.Engine - - image string - maintainer string - config *runconfig.Config - - contextPath string - context *tarsum.TarSum - - verbose bool - utilizeCache bool - rm bool - forceRm bool - - authConfig *registry.AuthConfig - configFile *registry.ConfigFile - - tmpContainers map[string]struct{} - tmpImages map[string]struct{} - - outStream io.Writer - errStream io.Writer - - // Deprecated, original writer used for ImagePull. To be removed. - outOld io.Writer - sf *utils.StreamFormatter - - // cmdSet indicates is CMD was set in current Dockerfile - cmdSet bool -} - -func (b *buildFile) clearTmp(containers map[string]struct{}) { - for c := range containers { - tmp := b.daemon.Get(c) - if err := b.daemon.Destroy(tmp); err != nil { - fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) - } else { - delete(containers, c) - fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c)) - } - } -} - -func (b *buildFile) CmdFrom(name string) error { - image, err := b.daemon.Repositories().LookupImage(name) - if err != nil { - if b.daemon.Graph().IsNotExist(err) { - remote, tag := parsers.ParseRepositoryTag(name) - pullRegistryAuth := b.authConfig - if len(b.configFile.Configs) > 0 { - // The request came with a full auth config file, we prefer to use that - endpoint, _, err := registry.ResolveRepositoryName(remote) - if err != nil { - return err - } - resolvedAuth := b.configFile.ResolveAuthConfig(endpoint) - pullRegistryAuth = &resolvedAuth - } - job := b.eng.Job("pull", remote, tag) - job.SetenvBool("json", b.sf.Json()) - job.SetenvBool("parallel", true) - job.SetenvJson("authConfig", pullRegistryAuth) - job.Stdout.Add(b.outOld) - if err := job.Run(); err != nil { - return err - } - image, err = b.daemon.Repositories().LookupImage(name) - if err != nil { - return err - } - } else { - return err - } - } - b.image = image.ID - b.config = &runconfig.Config{} - if image.Config != nil { - b.config = image.Config - } - if b.config.Env == nil || len(b.config.Env) == 0 { - b.config.Env = append(b.config.Env, "PATH="+DefaultPathEnv) - } - // Process ONBUILD triggers if they exist - if nTriggers := len(b.config.OnBuild); nTriggers != 0 { - fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers) - } - - // Copy the ONBUILD triggers, and remove them from the config, since the config will be commited. - onBuildTriggers := b.config.OnBuild - b.config.OnBuild = []string{} - - for n, step := range onBuildTriggers { - splitStep := strings.Split(step, " ") - stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " ")) - switch stepInstruction { - case "ONBUILD": - return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step) - case "MAINTAINER", "FROM": - return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step) - } - if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil { - return err - } - } - return nil -} - -// The ONBUILD command declares a build instruction to be executed in any future build -// using the current image as a base. 
-func (b *buildFile) CmdOnbuild(trigger string) error { - splitTrigger := strings.Split(trigger, " ") - triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " ")) - switch triggerInstruction { - case "ONBUILD": - return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) - } - b.config.OnBuild = append(b.config.OnBuild, trigger) - return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger)) -} - -func (b *buildFile) CmdMaintainer(name string) error { - b.maintainer = name - return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name)) -} - -// probeCache checks to see if image-caching is enabled (`b.utilizeCache`) -// and if so attempts to look up the current `b.image` and `b.config` pair -// in the current server `b.daemon`. If an image is found, probeCache returns -// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there -// is any error, it returns `(false, err)`. -func (b *buildFile) probeCache() (bool, error) { - if b.utilizeCache { - if cache, err := b.daemon.ImageGetCached(b.image, b.config); err != nil { - return false, err - } else if cache != nil { - fmt.Fprintf(b.outStream, " ---> Using cache\n") - log.Debugf("[BUILDER] Use cached version") - b.image = cache.ID - return true, nil - } else { - log.Debugf("[BUILDER] Cache miss") - } - } - return false, nil -} - -func (b *buildFile) CmdRun(args string) error { - if b.image == "" { - return fmt.Errorf("Please provide a source image with `from` prior to run") - } - config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil) - if err != nil { - return err - } - - cmd := b.config.Cmd - // set Cmd manually, this is special case only for Dockerfiles - b.config.Cmd = config.Cmd - runconfig.Merge(b.config, config) - - defer func(cmd []string) { b.config.Cmd = cmd }(cmd) - - log.Debugf("Command to be executed: %v", b.config.Cmd) - - hit, err := b.probeCache() - if err != nil { - return err - } - if hit { - return nil - } - - c, err := b.create() - if err != nil { - return err - } - // Ensure that we keep the container mounted until the commit - // to avoid unmounting and then mounting directly again - c.Mount() - defer c.Unmount() - - err = b.run(c) - if err != nil { - return err - } - if err := b.commit(c.ID, cmd, "run"); err != nil { - return err - } - - return nil -} - -func (b *buildFile) FindEnvKey(key string) int { - for k, envVar := range b.config.Env { - envParts := strings.SplitN(envVar, "=", 2) - if key == envParts[0] { - return k - } - } - return -1 -} - -func (b *buildFile) ReplaceEnvMatches(value string) (string, error) { - exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)") - if err != nil { - return value, err - } - matches := exp.FindAllString(value, -1) - for _, match := range matches { - match = match[strings.Index(match, "$"):] - matchKey := strings.Trim(match, "${}") - - for _, envVar := range b.config.Env { - envParts := strings.SplitN(envVar, "=", 2) - envKey := envParts[0] - envValue := envParts[1] - - if envKey == matchKey { - value = strings.Replace(value, match, envValue, -1) - break - } - } - } - return value, nil -} - -func (b *buildFile) CmdEnv(args string) error { - tmp := strings.SplitN(args, " ", 2) - if len(tmp) != 2 { - return fmt.Errorf("Invalid ENV format") - } - key := strings.Trim(tmp[0], " \t") - value := strings.Trim(tmp[1], " \t") - - envKey := 
b.FindEnvKey(key) - replacedValue, err := b.ReplaceEnvMatches(value) - if err != nil { - return err - } - replacedVar := fmt.Sprintf("%s=%s", key, replacedValue) - - if envKey >= 0 { - b.config.Env[envKey] = replacedVar - } else { - b.config.Env = append(b.config.Env, replacedVar) - } - return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar)) -} - -func (b *buildFile) buildCmdFromJson(args string) []string { - var cmd []string - if err := json.Unmarshal([]byte(args), &cmd); err != nil { - log.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err) - cmd = []string{"/bin/sh", "-c", args} - } - return cmd -} - -func (b *buildFile) CmdCmd(args string) error { - cmd := b.buildCmdFromJson(args) - b.config.Cmd = cmd - if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil { - return err - } - b.cmdSet = true - return nil -} - -func (b *buildFile) CmdEntrypoint(args string) error { - entrypoint := b.buildCmdFromJson(args) - b.config.Entrypoint = entrypoint - // if there is no cmd in current Dockerfile - cleanup cmd - if !b.cmdSet { - b.config.Cmd = nil - } - if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil { - return err - } - return nil -} - -func (b *buildFile) CmdExpose(args string) error { - portsTab := strings.Split(args, " ") - - if b.config.ExposedPorts == nil { - b.config.ExposedPorts = make(nat.PortSet) - } - ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...)) - if err != nil { - return err - } - for port := range ports { - if _, exists := b.config.ExposedPorts[port]; !exists { - b.config.ExposedPorts[port] = struct{}{} - } - } - b.config.PortSpecs = nil - - return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports)) -} - -func (b *buildFile) CmdUser(args string) error { - b.config.User = args - return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args)) -} - -func (b *buildFile) CmdInsert(args string) error { - return fmt.Errorf("INSERT has been deprecated. 
Please use ADD instead") -} - -func (b *buildFile) CmdCopy(args string) error { - return b.runContextCommand(args, false, false, "COPY") -} - -func (b *buildFile) CmdWorkdir(workdir string) error { - if workdir[0] == '/' { - b.config.WorkingDir = workdir - } else { - if b.config.WorkingDir == "" { - b.config.WorkingDir = "/" - } - b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir) - } - return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) -} - -func (b *buildFile) CmdVolume(args string) error { - if args == "" { - return fmt.Errorf("Volume cannot be empty") - } - - var volume []string - if err := json.Unmarshal([]byte(args), &volume); err != nil { - volume = []string{args} - } - if b.config.Volumes == nil { - b.config.Volumes = map[string]struct{}{} - } - for _, v := range volume { - b.config.Volumes[v] = struct{}{} - } - if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil { - return err - } - return nil -} - -func (b *buildFile) checkPathForAddition(orig string) error { - origPath := path.Join(b.contextPath, orig) - origPath, err := filepath.EvalSymlinks(origPath) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("%s: no such file or directory", orig) - } - return err - } - if !strings.HasPrefix(origPath, b.contextPath) { - return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath) - } - if _, err := os.Stat(origPath); err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("%s: no such file or directory", orig) - } - return err - } - return nil -} - -func (b *buildFile) addContext(container *Container, orig, dest string, decompress bool) error { - var ( - err error - destExists = true - origPath = path.Join(b.contextPath, orig) - destPath = path.Join(container.RootfsPath(), dest) - ) - - if destPath != container.RootfsPath() { - destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath()) - if err != nil { - return err - } - } - - // Preserve the trailing '/' - if strings.HasSuffix(dest, "/") || dest == "." { - destPath = destPath + "/" - } - - destStat, err := os.Stat(destPath) - if err != nil { - if !os.IsNotExist(err) { - return err - } - destExists = false - } - - fi, err := os.Stat(origPath) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("%s: no such file or directory", orig) - } - return err - } - - if fi.IsDir() { - return copyAsDirectory(origPath, destPath, destExists) - } - - // If we are adding a remote file (or we've been told not to decompress), do not try to untar it - if decompress { - // First try to unpack the source as an archive - // to support the untar feature we need to clean up the path a little bit - // because tar is very forgiving. First we need to strip off the archive's - // filename from the path but this is only added if it does not end in / . 
- tarDest := destPath - if strings.HasSuffix(tarDest, "/") { - tarDest = filepath.Dir(destPath) - } - - // try to successfully untar the orig - if err := archive.UntarPath(origPath, tarDest); err == nil { - return nil - } else if err != io.EOF { - log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) - } - } - - if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { - return err - } - if err := archive.CopyWithTar(origPath, destPath); err != nil { - return err - } - - resPath := destPath - if destExists && destStat.IsDir() { - resPath = path.Join(destPath, path.Base(origPath)) - } - - return fixPermissions(resPath, 0, 0) -} - -func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error { - if b.context == nil { - return fmt.Errorf("No context given. Impossible to use %s", cmdName) - } - tmp := strings.SplitN(args, " ", 2) - if len(tmp) != 2 { - return fmt.Errorf("Invalid %s format", cmdName) - } - - orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t")) - if err != nil { - return err - } - - dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t")) - if err != nil { - return err - } - - cmd := b.config.Cmd - b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)} - defer func(cmd []string) { b.config.Cmd = cmd }(cmd) - b.config.Image = b.image - - var ( - origPath = orig - destPath = dest - remoteHash string - isRemote bool - decompress = true - ) - - isRemote = utils.IsURL(orig) - if isRemote && !allowRemote { - return fmt.Errorf("Source can't be an URL for %s", cmdName) - } else if utils.IsURL(orig) { - // Initiate the download - resp, err := utils.Download(orig) - if err != nil { - return err - } - - // Create a tmp dir - tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote") - if err != nil { - return err - } - - // Create a tmp file within our tmp dir - tmpFileName := path.Join(tmpDirName, "tmp") - tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if err != nil { - return err - } - defer os.RemoveAll(tmpDirName) - - // Download and dump result to tmp file - if _, err := io.Copy(tmpFile, resp.Body); err != nil { - tmpFile.Close() - return err - } - tmpFile.Close() - - // Remove the mtime of the newly created tmp file - if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil { - return err - } - - origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) - - // Process the checksum - r, err := archive.Tar(tmpFileName, archive.Uncompressed) - if err != nil { - return err - } - tarSum := &tarsum.TarSum{Reader: r, DisableCompression: true} - if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { - return err - } - remoteHash = tarSum.Sum(nil) - r.Close() - - // If the destination is a directory, figure out the filename. 
- if strings.HasSuffix(dest, "/") { - u, err := url.Parse(orig) - if err != nil { - return err - } - path := u.Path - if strings.HasSuffix(path, "/") { - path = path[:len(path)-1] - } - parts := strings.Split(path, "/") - filename := parts[len(parts)-1] - if filename == "" { - return fmt.Errorf("cannot determine filename from url: %s", u) - } - destPath = dest + filename - } - } - - if err := b.checkPathForAddition(origPath); err != nil { - return err - } - - // Hash path and check the cache - if b.utilizeCache { - var ( - hash string - sums = b.context.GetSums() - ) - - if remoteHash != "" { - hash = remoteHash - } else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil { - return err - } else if fi.IsDir() { - var subfiles []string - for file, sum := range sums { - absFile := path.Join(b.contextPath, file) - absOrigPath := path.Join(b.contextPath, origPath) - if strings.HasPrefix(absFile, absOrigPath) { - subfiles = append(subfiles, sum) - } - } - sort.Strings(subfiles) - hasher := sha256.New() - hasher.Write([]byte(strings.Join(subfiles, ","))) - hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) - } else { - if origPath[0] == '/' && len(origPath) > 1 { - origPath = origPath[1:] - } - origPath = strings.TrimPrefix(origPath, "./") - if h, ok := sums[origPath]; ok { - hash = "file:" + h - } - } - b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)} - hit, err := b.probeCache() - if err != nil { - return err - } - // If we do not have a hash, never use the cache - if hit && hash != "" { - return nil - } - } - - // Create the container - container, _, err := b.daemon.Create(b.config, "") - if err != nil { - return err - } - b.tmpContainers[container.ID] = struct{}{} - - if err := container.Mount(); err != nil { - return err - } - defer container.Unmount() - - if !allowDecompression || isRemote { - decompress = false - } - if err := b.addContext(container, origPath, destPath, decompress); err != nil { - return err - } - - if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil { - return err - } - return nil -} - -func (b *buildFile) CmdAdd(args string) error { - return b.runContextCommand(args, true, true, "ADD") -} - -func (b *buildFile) create() (*Container, error) { - if b.image == "" { - return nil, fmt.Errorf("Please provide a source image with `from` prior to run") - } - b.config.Image = b.image - - // Create the container - c, _, err := b.daemon.Create(b.config, "") - if err != nil { - return nil, err - } - b.tmpContainers[c.ID] = struct{}{} - fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) - - // override the entry point that may have been picked up from the base image - c.Path = b.config.Cmd[0] - c.Args = b.config.Cmd[1:] - - return c, nil -} - -func (b *buildFile) run(c *Container) error { - var errCh chan error - if b.verbose { - errCh = utils.Go(func() error { - // FIXME: call the 'attach' job so that daemon.Attach can be made private - // - // FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach - // but without hijacking for stdin. Also, with attach there can be race - // condition because of some output already was printed before it. 
- return <-b.daemon.Attach(c, nil, nil, b.outStream, b.errStream) - }) - } - - //start the container - if err := c.Start(); err != nil { - return err - } - - if errCh != nil { - if err := <-errCh; err != nil { - return err - } - } - - // Wait for it to finish - if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 { - err := &utils.JSONError{ - Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret), - Code: ret, - } - return err - } - - return nil -} - -// Commit the container with the autorun command -func (b *buildFile) commit(id string, autoCmd []string, comment string) error { - if b.image == "" { - return fmt.Errorf("Please provide a source image with `from` prior to commit") - } - b.config.Image = b.image - if id == "" { - cmd := b.config.Cmd - b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment} - defer func(cmd []string) { b.config.Cmd = cmd }(cmd) - - hit, err := b.probeCache() - if err != nil { - return err - } - if hit { - return nil - } - - container, warnings, err := b.daemon.Create(b.config, "") - if err != nil { - return err - } - for _, warning := range warnings { - fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning) - } - b.tmpContainers[container.ID] = struct{}{} - fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) - id = container.ID - - if err := container.Mount(); err != nil { - return err - } - defer container.Unmount() - } - container := b.daemon.Get(id) - if container == nil { - return fmt.Errorf("An error occured while creating the container") - } - - // Note: Actually copy the struct - autoConfig := *b.config - autoConfig.Cmd = autoCmd - // Commit the container - image, err := b.daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig) - if err != nil { - return err - } - b.tmpImages[image.ID] = struct{}{} - b.image = image.ID - return nil -} - -// Long lines can be split with a backslash -var lineContinuation = regexp.MustCompile(`\\\s*\n`) - -func (b *buildFile) Build(context io.Reader) (string, error) { - tmpdirPath, err := ioutil.TempDir("", "docker-build") - if err != nil { - return "", err - } - - decompressedStream, err := archive.DecompressStream(context) - if err != nil { - return "", err - } - - b.context = &tarsum.TarSum{Reader: decompressedStream, DisableCompression: true} - if err := archive.Untar(b.context, tmpdirPath, nil); err != nil { - return "", err - } - defer os.RemoveAll(tmpdirPath) - - b.contextPath = tmpdirPath - filename := path.Join(tmpdirPath, "Dockerfile") - if _, err := os.Stat(filename); os.IsNotExist(err) { - return "", fmt.Errorf("Can't build a directory with no Dockerfile") - } - fileBytes, err := ioutil.ReadFile(filename) - if err != nil { - return "", err - } - if len(fileBytes) == 0 { - return "", ErrDockerfileEmpty - } - var ( - dockerfile = lineContinuation.ReplaceAllString(stripComments(fileBytes), "") - stepN = 0 - ) - for _, line := range strings.Split(dockerfile, "\n") { - line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n") - if len(line) == 0 { - continue - } - if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil { - if b.forceRm { - b.clearTmp(b.tmpContainers) - } - return "", err - } else if b.rm { - b.clearTmp(b.tmpContainers) - } - stepN++ - } - if b.image != "" { - fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image)) - return b.image, nil - } - return "", fmt.Errorf("No image was generated. 
This may be because the Dockerfile does not, like, do anything.\n") -} - -// BuildStep parses a single build step from `instruction` and executes it in the current context. -func (b *buildFile) BuildStep(name, expression string) error { - fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression) - tmp := strings.SplitN(expression, " ", 2) - if len(tmp) != 2 { - return fmt.Errorf("Invalid Dockerfile format") - } - instruction := strings.ToLower(strings.Trim(tmp[0], " ")) - arguments := strings.Trim(tmp[1], " ") - - method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:])) - if !exists { - fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction)) - return nil - } - - ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface() - if ret != nil { - return ret.(error) - } - - fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image)) - return nil -} - -func stripComments(raw []byte) string { - var ( - out []string - lines = strings.Split(string(raw), "\n") - ) - for _, l := range lines { - if len(l) == 0 || l[0] == '#' { - continue - } - out = append(out, l) - } - return strings.Join(out, "\n") -} - -func copyAsDirectory(source, destination string, destinationExists bool) error { - if err := archive.CopyWithTar(source, destination); err != nil { - return err - } - - if destinationExists { - files, err := ioutil.ReadDir(source) - if err != nil { - return err - } - - for _, file := range files { - if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil { - return err - } - } - return nil - } - - return fixPermissions(destination, 0, 0) -} - -func fixPermissions(destination string, uid, gid int) error { - return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error { - if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) { - return err - } - return nil - }) -} - -func NewBuildFile(d *Daemon, eng *engine.Engine, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile { - return &buildFile{ - daemon: d, - eng: eng, - config: &runconfig.Config{}, - outStream: outStream, - errStream: errStream, - tmpContainers: make(map[string]struct{}), - tmpImages: make(map[string]struct{}), - verbose: verbose, - utilizeCache: utilizeCache, - rm: rm, - forceRm: forceRm, - sf: sf, - authConfig: auth, - configFile: authConfigFile, - outOld: outOld, - } -} diff --git a/daemon/daemon.go b/daemon/daemon.go index 8ff79801c8..0a4d6e0bc5 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -101,7 +101,6 @@ func (daemon *Daemon) Install(eng *engine.Engine) error { // FIXME: remove ImageDelete's dependency on Daemon, then move to graph/ for name, method := range map[string]engine.Handler{ "attach": daemon.ContainerAttach, - "build": daemon.CmdBuild, "commit": daemon.ContainerCommit, "container_changes": daemon.ContainerChanges, "container_copy": daemon.ContainerCopy, diff --git a/docker/daemon.go b/docker/daemon.go index dc9d56d1d9..eef17efdc4 100644 --- a/docker/daemon.go +++ b/docker/daemon.go @@ -5,6 +5,7 @@ package main import ( "log" + "github.com/docker/docker/builder" "github.com/docker/docker/builtins" "github.com/docker/docker/daemon" _ "github.com/docker/docker/daemon/execdriver/lxc" @@ -48,6 +49,10 @@ func mainDaemon() { if err := d.Install(eng); 
err != nil { log.Fatal(err) } + + b := &builder.BuilderJob{eng, d} + b.Install() + // after the daemon is done setting up we can tell the api to start // accepting connections if err := eng.Job("acceptconnections").Run(); err != nil { diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index bcff199c85..e6572a1bf4 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -685,10 +685,11 @@ func TestBuildRelativeWorkdir(t *testing.T) { func TestBuildEnv(t *testing.T) { name := "testbuildenv" - expected := "[PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" + expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" defer deleteImages(name) _, err := buildImage(name, `FROM busybox + ENV PATH /test:$PATH ENV PORT 2375 RUN [ $(env | grep PORT) = 'PORT=2375' ]`, true) @@ -1708,6 +1709,9 @@ func TestBuildEnvUsage(t *testing.T) { name := "testbuildenvusage" defer deleteImages(name) dockerfile := `FROM busybox +ENV PATH $HOME/bin:$PATH +ENV PATH /tmp:$PATH +RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] ENV FOO /foo/baz ENV BAR /bar ENV BAZ $BAR @@ -1717,7 +1721,8 @@ RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] ENV FROM hello/docker/world ENV TO /docker/world/hello ADD $FROM $TO -RUN [ "$(cat $TO)" = "hello" ]` +RUN [ "$(cat $TO)" = "hello" ] +` ctx, err := fakeContext(dockerfile, map[string]string{ "hello/docker/world": "hello", })
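The env-interpolation assertions added to TestBuildEnv and TestBuildEnvUsage above exercise replaceEnv from builder/support.go. A minimal, runnable sketch of that mechanism using the same regular expression written as a raw string; the lowercase names are illustrative, only the pattern and the replacement loop mirror the patch:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern as TOKEN_ENV_INTERPOLATION in builder/support.go: the leading
// group rejects a single escaping backslash, so \$FOO survives untouched
// while $FOO and ${FOO} are matched.
var tokenEnvInterpolation = regexp.MustCompile(`(\\\\+|[^\\]|\b|\A)\$({?)([[:alnum:]_]+)(}?)`)

// replaceEnv substitutes matches against a KEY=VALUE environment list, the
// way the builder rewrites instruction arguments before dispatch.
func replaceEnv(str string, env []string) string {
	for _, match := range tokenEnvInterpolation.FindAllString(str, -1) {
		match = match[strings.Index(match, "$"):]
		key := strings.Trim(match, "${}")
		for _, kv := range env {
			parts := strings.SplitN(kv, "=", 2)
			if parts[0] == key {
				str = strings.Replace(str, match, parts[1], -1)
				break
			}
		}
	}
	return str
}

func main() {
	env := []string{"PATH=/usr/local/bin:/usr/bin", "HOME=/root"}
	fmt.Println(replaceEnv("/test:$PATH", env)) // /test:/usr/local/bin:/usr/bin
	fmt.Println(replaceEnv("${HOME}/bin", env)) // /root/bin
}

The first call reproduces the "ENV PATH /test:$PATH" expansion that the updated TestBuildEnv expectation checks for.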