From 22c46af4b358cf3ec4f7af47c701064c00cf7de4 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Tue, 5 Aug 2014 13:17:40 -0700 Subject: [PATCH 01/14] builder: parser and beginnings of the evaluator Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/.gitignore | 2 + builder/evaluator/builder/.gitignore | 2 + builder/evaluator/builder/main.go | 31 +++ builder/evaluator/dispatchers.go | 45 ++++ builder/evaluator/evaluator.go | 118 +++++++++ builder/evaluator/internals.go | 247 ++++++++++++++++++ builder/evaluator/support.go | 33 +++ builder/parser/dumper/.gitignore | 2 + builder/parser/dumper/main.go | 32 +++ builder/parser/line_parsers.go | 99 +++++++ builder/parser/parser.go | 147 +++++++++++ builder/parser/parser_test.go | 56 ++++ .../testfiles/brimstone-consuldock/Dockerfile | 25 ++ .../testfiles/brimstone-consuldock/result | 5 + .../brimstone-docker-consul/Dockerfile | 52 ++++ .../testfiles/brimstone-docker-consul/result | 9 + .../testfiles/cpuguy83-nagios/Dockerfile | 54 ++++ .../parser/testfiles/cpuguy83-nagios/result | 40 +++ builder/parser/testfiles/docker/Dockerfile | 105 ++++++++ builder/parser/testfiles/docker/result | 25 ++ builder/parser/testfiles/escapes/Dockerfile | 8 + builder/parser/testfiles/escapes/result | 5 + builder/parser/testfiles/influxdb/Dockerfile | 15 ++ builder/parser/testfiles/influxdb/result | 11 + .../kartar-entrypoint-oddities/Dockerfile | 7 + .../kartar-entrypoint-oddities/result | 7 + .../lk4d4-the-edge-case-generator/Dockerfile | 48 ++++ .../lk4d4-the-edge-case-generator/result | 29 ++ builder/parser/testfiles/mail/Dockerfile | 16 ++ builder/parser/testfiles/mail/result | 14 + builder/parser/testfiles/mumble/Dockerfile | 7 + builder/parser/testfiles/mumble/result | 4 + builder/parser/testfiles/nginx/Dockerfile | 14 + builder/parser/testfiles/nginx/result | 11 + builder/parser/testfiles/tf2/Dockerfile | 23 ++ builder/parser/testfiles/tf2/result | 20 ++ builder/parser/testfiles/weechat/Dockerfile | 9 + 
builder/parser/testfiles/weechat/result | 6 + builder/parser/testfiles/znc/Dockerfile | 7 + builder/parser/testfiles/znc/result | 5 + builder/parser/utils.go | 86 ++++++ 41 files changed, 1481 insertions(+) create mode 100644 builder/.gitignore create mode 100644 builder/evaluator/builder/.gitignore create mode 100644 builder/evaluator/builder/main.go create mode 100644 builder/evaluator/dispatchers.go create mode 100644 builder/evaluator/evaluator.go create mode 100644 builder/evaluator/internals.go create mode 100644 builder/evaluator/support.go create mode 100644 builder/parser/dumper/.gitignore create mode 100644 builder/parser/dumper/main.go create mode 100644 builder/parser/line_parsers.go create mode 100644 builder/parser/parser.go create mode 100644 builder/parser/parser_test.go create mode 100644 builder/parser/testfiles/brimstone-consuldock/Dockerfile create mode 100644 builder/parser/testfiles/brimstone-consuldock/result create mode 100644 builder/parser/testfiles/brimstone-docker-consul/Dockerfile create mode 100644 builder/parser/testfiles/brimstone-docker-consul/result create mode 100644 builder/parser/testfiles/cpuguy83-nagios/Dockerfile create mode 100644 builder/parser/testfiles/cpuguy83-nagios/result create mode 100644 builder/parser/testfiles/docker/Dockerfile create mode 100644 builder/parser/testfiles/docker/result create mode 100644 builder/parser/testfiles/escapes/Dockerfile create mode 100644 builder/parser/testfiles/escapes/result create mode 100644 builder/parser/testfiles/influxdb/Dockerfile create mode 100644 builder/parser/testfiles/influxdb/result create mode 100644 builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile create mode 100644 builder/parser/testfiles/kartar-entrypoint-oddities/result create mode 100644 builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile create mode 100644 builder/parser/testfiles/lk4d4-the-edge-case-generator/result create mode 100644 builder/parser/testfiles/mail/Dockerfile create 
mode 100644 builder/parser/testfiles/mail/result create mode 100644 builder/parser/testfiles/mumble/Dockerfile create mode 100644 builder/parser/testfiles/mumble/result create mode 100644 builder/parser/testfiles/nginx/Dockerfile create mode 100644 builder/parser/testfiles/nginx/result create mode 100644 builder/parser/testfiles/tf2/Dockerfile create mode 100644 builder/parser/testfiles/tf2/result create mode 100644 builder/parser/testfiles/weechat/Dockerfile create mode 100644 builder/parser/testfiles/weechat/result create mode 100644 builder/parser/testfiles/znc/Dockerfile create mode 100644 builder/parser/testfiles/znc/result create mode 100644 builder/parser/utils.go diff --git a/builder/.gitignore b/builder/.gitignore new file mode 100644 index 0000000000..34b1c07205 --- /dev/null +++ b/builder/.gitignore @@ -0,0 +1,2 @@ +main +gopath diff --git a/builder/evaluator/builder/.gitignore b/builder/evaluator/builder/.gitignore new file mode 100644 index 0000000000..85354a2b0e --- /dev/null +++ b/builder/evaluator/builder/.gitignore @@ -0,0 +1,2 @@ +builder +Dockerfile diff --git a/builder/evaluator/builder/main.go b/builder/evaluator/builder/main.go new file mode 100644 index 0000000000..8c0b6d1f0e --- /dev/null +++ b/builder/evaluator/builder/main.go @@ -0,0 +1,31 @@ +package main + +import ( + "os" + + "github.com/erikh/buildfile/evaluator" +) + +func main() { + if len(os.Args) < 2 { + os.Stderr.WriteString("Please supply filename(s) to evaluate") + os.Exit(1) + } + + for _, fn := range os.Args[1:] { + f, err := os.Open(fn) + if err != nil { + panic(err) + } + + opts := &evaluator.BuildOpts{} + + bf, err := opts.NewBuildFile(f) + if err != nil { + panic(err) + } + if err := bf.Run(); err != nil { + panic(err) + } + } +} diff --git a/builder/evaluator/dispatchers.go b/builder/evaluator/dispatchers.go new file mode 100644 index 0000000000..f6c99a4db1 --- /dev/null +++ b/builder/evaluator/dispatchers.go @@ -0,0 +1,45 @@ +package evaluator + +import ( + "fmt" + 
"strings" +) + +func env(b *buildFile, args ...string) error { + if len(args) != 2 { + return fmt.Errorf("ENV accepts two arguments") + } + + // the duplication here is intended to ease the replaceEnv() call's env + // handling. This routine gets much shorter with the denormalization here. + key := args[0] + b.env[key] = args[1] + b.config.Env = append(b.config.Env, strings.Join("=", key, b.env[key])) + + return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", value)) +} + +func maintainer(b *buildFile, args ...string) error { + if len(args) != 1 { + return fmt.Errorf("MAINTAINER requires only one argument") + } + + b.maintainer = args[0] + return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) +} + +func add(b *buildFile, args ...string) error { + if len(args) != 2 { + return fmt.Errorf("ADD requires two arguments") + } + + return b.runContextCommand(args, true, true, "ADD") +} + +func dispatchCopy(b *buildFile, args ...string) error { + if len(args) != 2 { + return fmt.Errorf("COPY requires two arguments") + } + + return b.runContextCommand(args, false, false, "COPY") +} diff --git a/builder/evaluator/evaluator.go b/builder/evaluator/evaluator.go new file mode 100644 index 0000000000..9b35bc2e15 --- /dev/null +++ b/builder/evaluator/evaluator.go @@ -0,0 +1,118 @@ +package evaluator + +import ( + "fmt" + "io" + "regexp" + "strings" + + "github.com/erikh/buildfile/parser" + + "github.com/docker/docker/daemon" + "github.com/docker/docker/engine" + "github.com/docker/docker/nat" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +var ( + evaluateTable = map[string]func(*buildFile, ...string) error{ + "env": env, + "maintainer": maintainer, + "add": add, + "copy": dispatchCopy, // copy() is a go builtin + //"onbuild": parseMaybeJSON, + //"workdir": parseString, + //"docker-version": parseString, + //"run": parseMaybeJSON, + //"cmd": parseMaybeJSON, + //"entrypoint": 
parseMaybeJSON, + //"expose": parseMaybeJSON, + //"volume": parseMaybeJSON, + } +) + +type buildFile struct { + dockerfile *parser.Node + env envMap + image string + config *runconfig.Config + options *BuildOpts + maintainer string +} + +type BuildOpts struct { + Daemon *daemon.Daemon + Engine *engine.Engine + OutStream io.Writer + ErrStream io.Writer + Verbose bool + UtilizeCache bool + Remove bool + ForceRm bool + OutOld io.Writer + StreamFormatter *utils.StreamFormatter + Auth *registry.AuthConfig + AuthConfigFile *registry.ConfigFile +} + +func (opts *BuildOpts) NewBuildFile(file io.ReadWriteCloser) (*buildFile, error) { + ast, err := parser.Parse(file) + if err != nil { + return nil, err + } + + return &buildFile{ + dockerfile: ast, + env: envMap{}, + config: initRunConfig(), + options: opts, + }, nil +} + +func (b *buildFile) Run() error { + node := b.dockerfile + + for i, n := range node.Children { + if err := b.dispatch(i, n); err != nil { + return err + } + } + + return nil +} + +func initRunConfig() *runconfig.Config { + return &runconfig.Config{ + PortSpecs: []string{}, + // FIXME(erikh) this should be a type that lives in runconfig + ExposedPorts: map[nat.Port]struct{}{}, + Env: []string{}, + Cmd: []string{}, + + // FIXME(erikh) this should also be a type in runconfig + Volumes: map[string]struct{}{}, + Entrypoint: []string{}, + OnBuild: []string{}, + } +} + +func (b *buildFile) dispatch(stepN int, ast *parser.Node) error { + cmd := ast.Value + strs := []string{} + for ast.Next != nil { + ast = ast.Next + strs = append(strs, replaceEnv(b, stripQuotes(ast.Value))) + } + + fmt.Fprintf(b.outStream, "Step %d : %s\n", i, cmd, expression) + + // XXX yes, we skip any cmds that are not valid; the parser should have + // picked these out already. + if f, ok := evaluateTable[cmd]; ok { + return f(b, strs...) 
+ } + + return nil +} diff --git a/builder/evaluator/internals.go b/builder/evaluator/internals.go new file mode 100644 index 0000000000..8f81624381 --- /dev/null +++ b/builder/evaluator/internals.go @@ -0,0 +1,247 @@ +package evaluator + +func (b *buildFile) addContext(context io.Reader) (string, error) { + tmpdirPath, err := ioutil.TempDir("", "docker-build") + if err != nil { + return err + } + + decompressedStream, err := archive.DecompressStream(context) + if err != nil { + return err + } + + b.context = &tarsum.TarSum{Reader: decompressedStream, DisableCompression: true} + if err := archive.Untar(b.context, tmpdirPath, nil); err != nil { + return err + } + + b.contextPath = tmpdirPath + return tmpdirPath +} + +func (b *buildFile) commit(id string, autoCmd []string, comment string) error { + if b.image == "" { + return fmt.Errorf("Please provide a source image with `from` prior to commit") + } + b.config.Image = b.image + if id == "" { + cmd := b.config.Cmd + b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment} + defer func(cmd []string) { b.config.Cmd = cmd }(cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + container, warnings, err := b.daemon.Create(b.config, "") + if err != nil { + return err + } + for _, warning := range warnings { + fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning) + } + b.tmpContainers[container.ID] = struct{}{} + fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) + id = container.ID + + if err := container.Mount(); err != nil { + return err + } + defer container.Unmount() + } + container := b.daemon.Get(id) + if container == nil { + return fmt.Errorf("An error occured while creating the container") + } + + // Note: Actually copy the struct + autoConfig := *b.config + autoConfig.Cmd = autoCmd + // Commit the container + image, err := b.daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig) + if err != nil { + return err + 
} + b.tmpImages[image.ID] = struct{}{} + b.image = image.ID + return nil +} + +func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error { + if b.context == nil { + return fmt.Errorf("No context given. Impossible to use %s", cmdName) + } + tmp := strings.SplitN(args, " ", 2) + if len(tmp) != 2 { + return fmt.Errorf("Invalid %s format", cmdName) + } + + orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t")) + if err != nil { + return err + } + + dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t")) + if err != nil { + return err + } + + cmd := b.config.Cmd + b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)} + defer func(cmd []string) { b.config.Cmd = cmd }(cmd) + b.config.Image = b.image + + var ( + origPath = orig + destPath = dest + remoteHash string + isRemote bool + decompress = true + ) + + isRemote = utils.IsURL(orig) + if isRemote && !allowRemote { + return fmt.Errorf("Source can't be an URL for %s", cmdName) + } else if utils.IsURL(orig) { + // Initiate the download + resp, err := utils.Download(orig) + if err != nil { + return err + } + + // Create a tmp dir + tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote") + if err != nil { + return err + } + + // Create a tmp file within our tmp dir + tmpFileName := path.Join(tmpDirName, "tmp") + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + defer os.RemoveAll(tmpDirName) + + // Download and dump result to tmp file + if _, err := io.Copy(tmpFile, resp.Body); err != nil { + tmpFile.Close() + return err + } + tmpFile.Close() + + // Remove the mtime of the newly created tmp file + if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil { + return err + } + + origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) + + // Process the checksum + r, err := 
archive.Tar(tmpFileName, archive.Uncompressed) + if err != nil { + return err + } + tarSum := &tarsum.TarSum{Reader: r, DisableCompression: true} + if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { + return err + } + remoteHash = tarSum.Sum(nil) + r.Close() + + // If the destination is a directory, figure out the filename. + if strings.HasSuffix(dest, "/") { + u, err := url.Parse(orig) + if err != nil { + return err + } + path := u.Path + if strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } + parts := strings.Split(path, "/") + filename := parts[len(parts)-1] + if filename == "" { + return fmt.Errorf("cannot determine filename from url: %s", u) + } + destPath = dest + filename + } + } + + if err := b.checkPathForAddition(origPath); err != nil { + return err + } + + // Hash path and check the cache + if b.utilizeCache { + var ( + hash string + sums = b.context.GetSums() + ) + + if remoteHash != "" { + hash = remoteHash + } else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil { + return err + } else if fi.IsDir() { + var subfiles []string + for file, sum := range sums { + absFile := path.Join(b.contextPath, file) + absOrigPath := path.Join(b.contextPath, origPath) + if strings.HasPrefix(absFile, absOrigPath) { + subfiles = append(subfiles, sum) + } + } + sort.Strings(subfiles) + hasher := sha256.New() + hasher.Write([]byte(strings.Join(subfiles, ","))) + hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) + } else { + if origPath[0] == '/' && len(origPath) > 1 { + origPath = origPath[1:] + } + origPath = strings.TrimPrefix(origPath, "./") + if h, ok := sums[origPath]; ok { + hash = "file:" + h + } + } + b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)} + hit, err := b.probeCache() + if err != nil { + return err + } + // If we do not have a hash, never use the cache + if hit && hash != "" { + return nil + } + } + + // Create the container + container, _, err := 
b.daemon.Create(b.config, "") + if err != nil { + return err + } + b.tmpContainers[container.ID] = struct{}{} + + if err := container.Mount(); err != nil { + return err + } + defer container.Unmount() + + if !allowDecompression || isRemote { + decompress = false + } + if err := b.addContext(container, origPath, destPath, decompress); err != nil { + return err + } + + if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil { + return err + } + return nil +} diff --git a/builder/evaluator/support.go b/builder/evaluator/support.go new file mode 100644 index 0000000000..41f9c6a5ba --- /dev/null +++ b/builder/evaluator/support.go @@ -0,0 +1,33 @@ +package evaluator + +import ( + "regexp" + "strings" +) + +var ( + TOKEN_ESCAPED_QUOTE = regexp.MustCompile(`\\"`) + TOKEN_ESCAPED_ESCAPE = regexp.MustCompile(`\\\\`) + TOKEN_ENV_INTERPOLATION = regexp.MustCompile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)") +) + +func stripQuotes(str string) string { + str = str[1 : len(str)-1] + str = TOKEN_ESCAPED_QUOTE.ReplaceAllString(str, `"`) + return TOKEN_ESCAPED_ESCAPE.ReplaceAllString(str, `\`) +} + +func replaceEnv(b *buildFile, str string) string { + for _, match := range TOKEN_ENV_INTERPOLATION.FindAllString(str, -1) { + match = match[strings.Index(match, "$"):] + matchKey := strings.Trim(match, "${}") + + for envKey, envValue := range b.env { + if matchKey == envKey { + str = strings.Replace(str, match, envValue, -1) + } + } + } + + return str +} diff --git a/builder/parser/dumper/.gitignore b/builder/parser/dumper/.gitignore new file mode 100644 index 0000000000..d80aecaec6 --- /dev/null +++ b/builder/parser/dumper/.gitignore @@ -0,0 +1,2 @@ +dumper +Dockerfile diff --git a/builder/parser/dumper/main.go b/builder/parser/dumper/main.go new file mode 100644 index 0000000000..96c0db2d05 --- /dev/null +++ b/builder/parser/dumper/main.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "os" + + 
"github.com/erikh/buildfile/parser" +) + +func main() { + var f *os.File + var err error + + if len(os.Args) < 2 { + fmt.Println("please supply filename(s)") + os.Exit(1) + } + + for _, fn := range os.Args[1:] { + f, err = os.Open(fn) + if err != nil { + panic(err) + } + + ast, err := parser.Parse(f) + if err != nil { + panic(err) + } else { + fmt.Print(ast.Dump()) + } + } +} diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go new file mode 100644 index 0000000000..d9716698ec --- /dev/null +++ b/builder/parser/line_parsers.go @@ -0,0 +1,99 @@ +package parser + +import ( + "encoding/json" + "strconv" + "strings" +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. +func parseIgnore(rest string) (*Node, error) { + return blankNode(), nil +} + +func parseSubCommand(rest string) (*Node, error) { + _, child, err := parseLine(rest) + if err != nil { + return nil, err + } + + return &Node{Children: []*Node{child}}, nil +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. +func parseEnv(rest string) (*Node, error) { + node := blankNode() + rootnode := node + strs := TOKEN_WHITESPACE.Split(rest, 2) + node.Value = QuoteString(strs[0]) + node.Next = blankNode() + node.Next.Value = QuoteString(strs[1]) + + return rootnode, nil + + return node, nil +} + +// parses a whitespace-delimited set of arguments. The result is effectively a +// linked list of string arguments. +func parseStringsWhitespaceDelimited(rest string) (*Node, error) { + node := blankNode() + rootnode := node + for _, str := range TOKEN_WHITESPACE.Split(rest, -1) { // use regexp + node.Value = QuoteString(str) + node.Next = blankNode() + node = node.Next + } + + return rootnode, nil +} + +// parsestring just wraps the string in quotes and returns a working node. 
+func parseString(rest string) (*Node, error) { + return &Node{QuoteString(rest), nil, nil}, nil +} + +// parseJSON converts JSON arrays to an AST. +func parseJSON(rest string) (*Node, error) { + var ( + myJson []interface{} + next = blankNode() + orignext = next + ) + + if err := json.Unmarshal([]byte(rest), &myJson); err != nil { + return nil, err + } + + for _, str := range myJson { + switch str.(type) { + case float64: + str = strconv.FormatFloat(str.(float64), 'G', -1, 64) + } + next.Value = QuoteString(str.(string)) + next.Next = blankNode() + next = next.Next + } + + return orignext, nil +} + +// parseMaybeJSON determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, quotes the result and returns a single +// node. +func parseMaybeJSON(rest string) (*Node, error) { + rest = strings.TrimSpace(rest) + + if strings.HasPrefix(rest, "[") { + node, err := parseJSON(rest) + if err == nil { + return node, nil + } + } + + node := blankNode() + node.Value = QuoteString(rest) + return node, nil +} diff --git a/builder/parser/parser.go b/builder/parser/parser.go new file mode 100644 index 0000000000..c2715d43c7 --- /dev/null +++ b/builder/parser/parser.go @@ -0,0 +1,147 @@ +// This package implements a parser and parse tree dumper for Dockerfiles. +package parser + +import ( + "bufio" + "io" + "regexp" + "strings" +) + +// Node is the building block of the AST this package will create. +// +// Nodes are structured to have a value, next, and child, the latter two of +// which are Nodes themselves. +// +// This terminology is unfortunately rather confusing, so here's a diagram. +// Anything after the ; is a comment. +// +// ( +// (run "foo") ; value run, and next is a value foo. 
+// (run "1" "2" "3") ; +// (something (really cool)) +// ) +// +// Will give you something like this: +// +// &Node{ +// Value:"", +// Child: &Node{Value: "run", Next: &Node{Value: "foo"}, Child: nil}, +// Next: &Node{Value:"", Child: &Node{Value:"run", Next: &Node{Value:`"1"`.... +// +// ... and so on. +// +// The short and fast rule is that anything that starts with ( is a child of +// something. Anything which follows a previous statement is a next of +// something. +// +type Node struct { + Value string // actual content + Next *Node // the next item in the current sexp + Children []*Node // the children of this sexp +} + +var ( + dispatch map[string]func(string) (*Node, error) + TOKEN_WHITESPACE = regexp.MustCompile(`\s+`) + TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\$`) + TOKEN_COMMENT = regexp.MustCompile(`^#.*$`) +) + +func init() { + // Dispatch Table. see line_parsers.go for the parse functions. + // The command is parsed and mapped to the line parser. The line parser + // recieves the arguments but not the command, and returns an AST after + // reformulating the arguments according to the rules in the parser + // functions. Errors are propogated up by Parse() and the resulting AST can + // be incorporated directly into the existing AST as a next. + dispatch = map[string]func(string) (*Node, error){ + "user": parseString, + "onbuild": parseSubCommand, + "workdir": parseString, + "env": parseEnv, + "maintainer": parseString, + "docker-version": parseString, + "from": parseString, + "add": parseStringsWhitespaceDelimited, + "copy": parseStringsWhitespaceDelimited, + "run": parseMaybeJSON, + "cmd": parseMaybeJSON, + "entrypoint": parseMaybeJSON, + "expose": parseStringsWhitespaceDelimited, + "volume": parseMaybeJSON, + } +} + +// empty node. Useful for managing structure. 
+func blankNode() *Node { + return &Node{"", nil, []*Node{}} +} + +func parseLine(line string) (string, *Node, error) { + if line = stripComments(line); line == "" { + return "", nil, nil + } + + if TOKEN_LINE_CONTINUATION.MatchString(line) { + line = TOKEN_LINE_CONTINUATION.ReplaceAllString(line, "") + return line, nil, nil + } + + cmd, args := splitCommand(line) + + node := blankNode() + node.Value = cmd + + sexp, err := fullDispatch(cmd, args) + if err != nil { + return "", nil, err + } + + node.Next = sexp + + return "", node, nil +} + +// The main parse routine. Handles an io.ReadWriteCloser and returns the root +// of the AST. +func Parse(rwc io.Reader) (*Node, error) { + var child *Node + var line string + var err error + root := blankNode() + scanner := bufio.NewScanner(rwc) + + for scanner.Scan() { + line, child, err = parseLine(strings.TrimSpace(scanner.Text())) + if err != nil { + return nil, err + } + + if line != "" && child == nil { + for { + scanner.Scan() + newline := strings.TrimSpace(scanner.Text()) + + if newline == "" { + continue + } + + line, child, err = parseLine(line + newline) + if err != nil { + return nil, err + } + + if child != nil { + break + } + } + } + + if child != nil { + root.Children = append(root.Children, child) + } + } + + return root, nil +} diff --git a/builder/parser/parser_test.go b/builder/parser/parser_test.go new file mode 100644 index 0000000000..1482a011fe --- /dev/null +++ b/builder/parser/parser_test.go @@ -0,0 +1,56 @@ +package parser + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +const testDir = "testfiles" + +func TestTestData(t *testing.T) { + f, err := os.Open(testDir) + if err != nil { + t.Fatal(err) + } + + defer f.Close() + + dirs, err := f.Readdir(0) + if err != nil { + t.Fatal(err) + } + + for _, dir := range dirs { + dockerfile := filepath.Join(testDir, dir.Name(), "Dockerfile") + resultfile := filepath.Join(testDir, dir.Name(), "result") + + df, err := os.Open(dockerfile) + if err 
!= nil { + t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error()) + } + + rf, err := os.Open(resultfile) + if err != nil { + t.Fatalf("Result file missing for %s: %s", dir.Name(), err.Error()) + } + + ast, err := Parse(df) + if err != nil { + t.Fatalf("Error parsing %s's dockerfile: %s", dir.Name(), err.Error()) + } + + content, err := ioutil.ReadAll(rf) + if err != nil { + t.Fatalf("Error reading %s's result file: %s", dir.Name(), err.Error()) + } + + if ast.Dump() != string(content) { + t.Fatalf("%s: AST dump of dockerfile does not match result", dir.Name()) + } + + df.Close() + rf.Close() + } +} diff --git a/builder/parser/testfiles/brimstone-consuldock/Dockerfile b/builder/parser/testfiles/brimstone-consuldock/Dockerfile new file mode 100644 index 0000000000..5c75a2e0ca --- /dev/null +++ b/builder/parser/testfiles/brimstone-consuldock/Dockerfile @@ -0,0 +1,25 @@ +FROM brimstone/ubuntu:14.04 + +MAINTAINER brimstone@the.narro.ws + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + +ENV GOPATH /go + +# Set our command +ENTRYPOINT ["/usr/local/bin/consuldock"] + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/builder/parser/testfiles/brimstone-consuldock/result b/builder/parser/testfiles/brimstone-consuldock/result new file mode 100644 index 0000000000..b6ef4f817b --- /dev/null +++ b/builder/parser/testfiles/brimstone-consuldock/result @@ -0,0 +1,5 @@ +(from "brimstone/ubuntu:14.04") +(maintainer 
"brimstone@the.narro.ws") +(env "GOPATH" "/go") +(entrypoint "/usr/local/bin/consuldock") +(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists && go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.* && rm -rf $GOPATH") \ No newline at end of file diff --git a/builder/parser/testfiles/brimstone-docker-consul/Dockerfile b/builder/parser/testfiles/brimstone-docker-consul/Dockerfile new file mode 100644 index 0000000000..25ae352166 --- /dev/null +++ b/builder/parser/testfiles/brimstone-docker-consul/Dockerfile @@ -0,0 +1,52 @@ +FROM brimstone/ubuntu:14.04 + +CMD [] + +ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] + +EXPOSE 8500 8600 8400 8301 8302 + +RUN apt-get update \ + && apt-get install -y unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists + +RUN cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* + +ENV GOPATH /go + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && 
apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/hashicorp/consul \ + && mv $GOPATH/bin/consul /usr/bin/consul \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/builder/parser/testfiles/brimstone-docker-consul/result b/builder/parser/testfiles/brimstone-docker-consul/result new file mode 100644 index 0000000000..739dc6423f --- /dev/null +++ b/builder/parser/testfiles/brimstone-docker-consul/result @@ -0,0 +1,9 @@ +(from "brimstone/ubuntu:14.04") +(cmd) +(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") +(expose "8500" "8600" "8400" "8301" "8302") +(run "apt-get update && apt-get install -y unzip wget && apt-get clean && rm -rf /var/lib/apt/lists") +(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") +(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.*") +(env "GOPATH" "/go") +(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists && go get -v github.com/hashicorp/consul && mv $GOPATH/bin/consul /usr/bin/consul && dpkg -l | awk '/^ii/ {print $2}' > 
/tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.* && rm -rf $GOPATH") \ No newline at end of file diff --git a/builder/parser/testfiles/cpuguy83-nagios/Dockerfile b/builder/parser/testfiles/cpuguy83-nagios/Dockerfile new file mode 100644 index 0000000000..8ccb71a578 --- /dev/null +++ b/builder/parser/testfiles/cpuguy83-nagios/Dockerfile @@ -0,0 +1,54 @@ +FROM cpuguy83/ubuntu +ENV NAGIOS_HOME /opt/nagios +ENV NAGIOS_USER nagios +ENV NAGIOS_GROUP nagios +ENV NAGIOS_CMDUSER nagios +ENV NAGIOS_CMDGROUP nagios +ENV NAGIOSADMIN_USER nagiosadmin +ENV NAGIOSADMIN_PASS nagios +ENV APACHE_RUN_USER nagios +ENV APACHE_RUN_GROUP nagios +ENV NAGIOS_TIMEZONE UTC + +RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list +RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx +RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) +RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) + +ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz +RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf 
+ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ +RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install + +RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars +RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default + +RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo + +RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf + +RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf + +RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ + sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg +RUN cp /etc/services /var/spool/postfix/etc/ + +RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix +ADD nagios.init /etc/sv/nagios/run +ADD apache.init /etc/sv/apache/run +ADD postfix.init /etc/sv/postfix/run +ADD postfix.stop /etc/sv/postfix/finish + +ADD start.sh /usr/local/bin/start_nagios + +ENV APACHE_LOCK_DIR /var/run +ENV APACHE_LOG_DIR /var/log/apache2 + +EXPOSE 80 + +VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] + +CMD ["/usr/local/bin/start_nagios"] diff --git a/builder/parser/testfiles/cpuguy83-nagios/result b/builder/parser/testfiles/cpuguy83-nagios/result new file mode 100644 index 0000000000..b95e96b153 --- 
/dev/null +++ b/builder/parser/testfiles/cpuguy83-nagios/result @@ -0,0 +1,40 @@ +(from "cpuguy83/ubuntu") +(env "NAGIOS_HOME" "/opt/nagios") +(env "NAGIOS_USER" "nagios") +(env "NAGIOS_GROUP" "nagios") +(env "NAGIOS_CMDUSER" "nagios") +(env "NAGIOS_CMDGROUP" "nagios") +(env "NAGIOSADMIN_USER" "nagiosadmin") +(env "NAGIOSADMIN_PASS" "nagios") +(env "APACHE_RUN_USER" "nagios") +(env "APACHE_RUN_GROUP" "nagios") +(env "NAGIOS_TIMEZONE" "UTC") +(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") +(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") +(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") +(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") +(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") +(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") +(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") +(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") +(run "sed -i.bak 
's/.*\\=www\\-data//g' /etc/apache2/envvars") +(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") +(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") +(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") +(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") +(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") +(run "cp /etc/services /var/spool/postfix/etc/") +(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") +(add "nagios.init" "/etc/sv/nagios/run") +(add "apache.init" "/etc/sv/apache/run") +(add "postfix.init" "/etc/sv/postfix/run") +(add "postfix.stop" "/etc/sv/postfix/finish") +(add "start.sh" "/usr/local/bin/start_nagios") +(env "APACHE_LOCK_DIR" "/var/run") +(env "APACHE_LOG_DIR" "/var/log/apache2") +(expose "80") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") +(cmd "/usr/local/bin/start_nagios") \ No newline at end of file diff --git a/builder/parser/testfiles/docker/Dockerfile b/builder/parser/testfiles/docker/Dockerfile new file mode 100644 index 0000000000..68f8f0b78b --- /dev/null +++ b/builder/parser/testfiles/docker/Dockerfile @@ -0,0 +1,105 @@ +# This file describes the standard way to build Docker, 
using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: Apparmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +docker-version 0.6.1 +FROM ubuntu:14.04 +MAINTAINER Tianon Gravi (@tianon) + +# Packaged dependencies +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ + apt-utils \ + aufs-tools \ + automake \ + btrfs-tools \ + build-essential \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + libsqlite3-dev \ + lxc=1.0* \ + mercurial \ + pandoc \ + parallel \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags +# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz +ENV PATH /usr/local/go/bin:$PATH 
+ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 +RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' + +# Grab Go's cover tool for dead-simple code coverage testing +RUN go get code.google.com/p/go.tools/cmd/cover + +# TODO replace FPM with some very minimal debhelper stuff +RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 + +# Get the "busybox" image source so we can build locally instead of pulling +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox + +# Setup s3cmd config +RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . 
/go/src/github.com/docker/docker diff --git a/builder/parser/testfiles/docker/result b/builder/parser/testfiles/docker/result new file mode 100644 index 0000000000..a7960244b3 --- /dev/null +++ b/builder/parser/testfiles/docker/result @@ -0,0 +1,25 @@ +(docker-version "0.6.1") +(from "ubuntu:14.04") +(maintainer "Tianon Gravi (@tianon)") +(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq apt-utils aufs-tools automake btrfs-tools build-essential curl dpkg-sig git iptables libapparmor-dev libcap-dev libsqlite3-dev lxc=1.0* mercurial pandoc parallel reprepro ruby1.9.1 ruby1.9.1-dev s3cmd=1.1.0* --no-install-recommends") +(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") +(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") +(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") +(env "PATH" "/usr/local/go/bin:$PATH") +(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") +(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") +(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm darwin/amd64 darwin/386 freebsd/amd64 freebsd/386 freebsd/arm") +(env "GOARM" "5") +(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") +(run "go get code.google.com/p/go.tools/cmd/cover") +(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") +(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") +(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") +(run "git config --global user.email 'docker-dummy@example.com'") +(run "groupadd -r docker") +(run "useradd --create-home --gid docker unprivilegeduser") +(volume "/var/lib/docker") +(workdir 
"/go/src/github.com/docker/docker") +(env "DOCKER_BUILDTAGS" "apparmor selinux") +(entrypoint "hack/dind") +(copy "." "/go/src/github.com/docker/docker") \ No newline at end of file diff --git a/builder/parser/testfiles/escapes/Dockerfile b/builder/parser/testfiles/escapes/Dockerfile new file mode 100644 index 0000000000..87a8e028a2 --- /dev/null +++ b/builder/parser/testfiles/escapes/Dockerfile @@ -0,0 +1,8 @@ +FROM ubuntu:14.04 +MAINTAINER Erik \\Hollensbe \" + +RUN apt-get \update && \ + apt-get \"install znc -y +ADD \conf\\" /.znc + +CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git a/builder/parser/testfiles/escapes/result b/builder/parser/testfiles/escapes/result new file mode 100644 index 0000000000..724c399c21 --- /dev/null +++ b/builder/parser/testfiles/escapes/result @@ -0,0 +1,5 @@ +(from "ubuntu:14.04") +(maintainer "Erik \\\\Hollensbe \\\"") +(run "apt-get \\update && apt-get \\\"install znc -y") +(add "\\conf\\\\\"" "/.znc") +(cmd "/usr\\\"/bin/znc" "-f" "-r") \ No newline at end of file diff --git a/builder/parser/testfiles/influxdb/Dockerfile b/builder/parser/testfiles/influxdb/Dockerfile new file mode 100644 index 0000000000..587fb9b54b --- /dev/null +++ b/builder/parser/testfiles/influxdb/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install wget -y +RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb +RUN dpkg -i influxdb_latest_amd64.deb +RUN rm -r /opt/influxdb/shared + +VOLUME /opt/influxdb/shared + +CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml + +EXPOSE 8083 +EXPOSE 8086 +EXPOSE 8090 +EXPOSE 8099 diff --git a/builder/parser/testfiles/influxdb/result b/builder/parser/testfiles/influxdb/result new file mode 100644 index 0000000000..f0d45a4e27 --- /dev/null +++ b/builder/parser/testfiles/influxdb/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install wget -y") +(run "wget 
http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") +(run "dpkg -i influxdb_latest_amd64.deb") +(run "rm -r /opt/influxdb/shared") +(volume "/opt/influxdb/shared") +(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") +(expose "8083") +(expose "8086") +(expose "8090") +(expose "8099") \ No newline at end of file diff --git a/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile new file mode 100644 index 0000000000..35f9c24aa6 --- /dev/null +++ b/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER James Turnbull "james@example.com" +ENV REFRESHED_AT 2014-06-01 +RUN apt-get update +RUN apt-get -y install redis-server redis-tools +EXPOSE 6379 +ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/builder/parser/testfiles/kartar-entrypoint-oddities/result b/builder/parser/testfiles/kartar-entrypoint-oddities/result new file mode 100644 index 0000000000..375257a49e --- /dev/null +++ b/builder/parser/testfiles/kartar-entrypoint-oddities/result @@ -0,0 +1,7 @@ +(from "ubuntu:14.04") +(maintainer "James Turnbull \"james@example.com\"") +(env "REFRESHED_AT" "2014-06-01") +(run "apt-get update") +(run "apt-get -y install redis-server redis-tools") +(expose "6379") +(entrypoint "/usr/bin/redis-server") \ No newline at end of file diff --git a/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile new file mode 100644 index 0000000000..188395fe83 --- /dev/null +++ b/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile @@ -0,0 +1,48 @@ +FROM busybox:buildroot-2014.02 + +MAINTAINER docker + +ONBUILD RUN ["echo", "test"] +ONBUILD RUN echo test +ONBUILD COPY . 
/ + + +# RUN Commands \ +# linebreak in comment \ +RUN ["ls", "-la"] +RUN ["echo", "'1234'"] +RUN echo "1234" +RUN echo 1234 +RUN echo '1234' && \ + echo "456" && \ + echo 789 +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /test /test2 /test3/test + +# ENV \ +ENV SCUBA 1 DUBA 3 +ENV SCUBA "1 DUBA 3" + +# CMD \ +CMD ["echo", "test"] +CMD echo test +CMD echo "test" +CMD echo 'test' +CMD echo 'test' | wc - + +#EXPOSE\ +EXPOSE 3000 +EXPOSE 9000 5000 6000 + +USER docker +USER docker:root + +VOLUME ["/test"] +VOLUME ["/test", "/test2"] +VOLUME /test3 + +WORKDIR /test + +ADD . / +COPY . copy diff --git a/builder/parser/testfiles/lk4d4-the-edge-case-generator/result b/builder/parser/testfiles/lk4d4-the-edge-case-generator/result new file mode 100644 index 0000000000..920ed544b5 --- /dev/null +++ b/builder/parser/testfiles/lk4d4-the-edge-case-generator/result @@ -0,0 +1,29 @@ +(from "busybox:buildroot-2014.02") +(maintainer "docker ") +(onbuild (run "echo" "test")) +(onbuild (run "echo test")) +(onbuild (copy "." "/")) +(run "ls" "-la") +(run "echo" "'1234'") +(run "echo \"1234\"") +(run "echo 1234") +(run "echo '1234' && echo \"456\" && echo 789") +(run "sh -c 'echo root:testpass > /tmp/passwd'") +(run "mkdir -p /test /test2 /test3/test") +(env "SCUBA" "1 DUBA 3") +(env "SCUBA" "\"1 DUBA 3\"") +(cmd "echo" "test") +(cmd "echo test") +(cmd "echo \"test\"") +(cmd "echo 'test'") +(cmd "echo 'test' | wc -") +(expose "3000") +(expose "9000" "5000" "6000") +(user "docker") +(user "docker:root") +(volume "/test") +(volume "/test" "/test2") +(volume "/test3") +(workdir "/test") +(add "." "/") +(copy "." 
"copy") \ No newline at end of file diff --git a/builder/parser/testfiles/mail/Dockerfile b/builder/parser/testfiles/mail/Dockerfile new file mode 100644 index 0000000000..f64c1168c1 --- /dev/null +++ b/builder/parser/testfiles/mail/Dockerfile @@ -0,0 +1,16 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y +ADD .muttrc / +ADD .offlineimaprc / +ADD .tmux.conf / +ADD mutt /.mutt +ADD vim /.vim +ADD vimrc /.vimrc +ADD crontab /etc/crontab +RUN chmod 644 /etc/crontab +RUN mkdir /Mail +RUN mkdir /.offlineimap +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD setsid cron; tmux -2 diff --git a/builder/parser/testfiles/mail/result b/builder/parser/testfiles/mail/result new file mode 100644 index 0000000000..2d9c30db9c --- /dev/null +++ b/builder/parser/testfiles/mail/result @@ -0,0 +1,14 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") +(add ".muttrc" "/") +(add ".offlineimaprc" "/") +(add ".tmux.conf" "/") +(add "mutt" "/.mutt") +(add "vim" "/.vim") +(add "vimrc" "/.vimrc") +(add "crontab" "/etc/crontab") +(run "chmod 644 /etc/crontab") +(run "mkdir /Mail") +(run "mkdir /.offlineimap") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "setsid cron; tmux -2") \ No newline at end of file diff --git a/builder/parser/testfiles/mumble/Dockerfile b/builder/parser/testfiles/mumble/Dockerfile new file mode 100644 index 0000000000..5b9ec06a6c --- /dev/null +++ b/builder/parser/testfiles/mumble/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install libcap2-bin mumble-server -y + +ADD ./mumble-server.ini /etc/mumble-server.ini + +CMD /usr/sbin/murmurd diff --git a/builder/parser/testfiles/mumble/result b/builder/parser/testfiles/mumble/result new file mode 100644 index 0000000000..123e893dc5 --- /dev/null +++ b/builder/parser/testfiles/mumble/result @@ -0,0 +1,4 @@ +(from 
"ubuntu:14.04") +(run "apt-get update && apt-get install libcap2-bin mumble-server -y") +(add "./mumble-server.ini" "/etc/mumble-server.ini") +(cmd "/usr/sbin/murmurd") \ No newline at end of file diff --git a/builder/parser/testfiles/nginx/Dockerfile b/builder/parser/testfiles/nginx/Dockerfile new file mode 100644 index 0000000000..bf8368e1ca --- /dev/null +++ b/builder/parser/testfiles/nginx/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install nginx-full -y +RUN rm -rf /etc/nginx +ADD etc /etc/nginx +RUN chown -R root:root /etc/nginx +RUN /usr/sbin/nginx -qt +RUN mkdir /www + +CMD ["/usr/sbin/nginx"] + +VOLUME /www +EXPOSE 80 diff --git a/builder/parser/testfiles/nginx/result b/builder/parser/testfiles/nginx/result new file mode 100644 index 0000000000..5ac8c77c2f --- /dev/null +++ b/builder/parser/testfiles/nginx/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install nginx-full -y") +(run "rm -rf /etc/nginx") +(add "etc" "/etc/nginx") +(run "chown -R root:root /etc/nginx") +(run "/usr/sbin/nginx -qt") +(run "mkdir /www") +(cmd "/usr/sbin/nginx") +(volume "/www") +(expose "80") \ No newline at end of file diff --git a/builder/parser/testfiles/tf2/Dockerfile b/builder/parser/testfiles/tf2/Dockerfile new file mode 100644 index 0000000000..72b79bdd7d --- /dev/null +++ b/builder/parser/testfiles/tf2/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu:12.04 + +EXPOSE 27015 +EXPOSE 27005 +EXPOSE 26901 +EXPOSE 27020 + +RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y +RUN mkdir -p /steam +RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam +ADD ./script /steam/script +RUN /steam/steamcmd.sh +runscript /steam/script +RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf +RUN curl 
http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf +ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg +ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg +ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg +RUN rm -r /steam/tf2/tf/addons/sourcemod/configs +ADD ./configs /steam/tf2/tf/addons/sourcemod/configs +RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en +RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en + +CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/builder/parser/testfiles/tf2/result b/builder/parser/testfiles/tf2/result new file mode 100644 index 0000000000..5ec173f67f --- /dev/null +++ b/builder/parser/testfiles/tf2/result @@ -0,0 +1,20 @@ +(from "ubuntu:12.04") +(expose "27015") +(expose "27005") +(expose "26901") +(expose "27020") +(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") +(run "mkdir -p /steam") +(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") +(add "./script" "/steam/script") +(run "/steam/steamcmd.sh +runscript /steam/script") +(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") +(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") +(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") +(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") +(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") +(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") +(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") 
+(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") \ No newline at end of file diff --git a/builder/parser/testfiles/weechat/Dockerfile b/builder/parser/testfiles/weechat/Dockerfile new file mode 100644 index 0000000000..4842088166 --- /dev/null +++ b/builder/parser/testfiles/weechat/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y + +ADD .weechat /.weechat +ADD .tmux.conf / +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD zsh -c weechat diff --git a/builder/parser/testfiles/weechat/result b/builder/parser/testfiles/weechat/result new file mode 100644 index 0000000000..b358645cde --- /dev/null +++ b/builder/parser/testfiles/weechat/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") +(add ".weechat" "/.weechat") +(add ".tmux.conf" "/") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "zsh -c weechat") \ No newline at end of file diff --git a/builder/parser/testfiles/znc/Dockerfile b/builder/parser/testfiles/znc/Dockerfile new file mode 100644 index 0000000000..3a4da6e916 --- /dev/null +++ b/builder/parser/testfiles/znc/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install znc -y +ADD conf /.znc + +CMD [ "/usr/bin/znc", "-f", "-r" ] diff --git a/builder/parser/testfiles/znc/result b/builder/parser/testfiles/znc/result new file mode 100644 index 0000000000..b4ddf3e653 --- /dev/null +++ b/builder/parser/testfiles/znc/result @@ -0,0 +1,5 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install znc -y") +(add "conf" "/.znc") +(cmd "/usr/bin/znc" "-f" "-r") \ No newline at end of file diff --git a/builder/parser/utils.go b/builder/parser/utils.go new file mode 100644 index 
0000000000..08357b42db --- /dev/null +++ b/builder/parser/utils.go @@ -0,0 +1,86 @@ +package parser + +import ( + "fmt" + "strings" +) + +// QuoteString walks characters (after trimming), escapes any quotes and +// escapes, then wraps the whole thing in quotes. Very useful for generating +// argument output in nodes. +func QuoteString(str string) string { + result := "" + chars := strings.Split(strings.TrimSpace(str), "") + + for _, char := range chars { + switch char { + case `"`: + result += `\"` + case `\`: + result += `\\` + default: + result += char + } + } + + return `"` + result + `"` +} + +// dumps the AST defined by `node` as a list of sexps. Returns a string +// suitable for printing. +func (node *Node) Dump() string { + str := "" + str += node.Value + + for _, n := range node.Children { + str += "(" + n.Dump() + ")\n" + } + + if node.Next != nil { + for n := node.Next; n != nil; n = n.Next { + if len(n.Children) > 0 { + str += " " + n.Dump() + } else { + str += " " + n.Value + } + } + } + + return strings.TrimSpace(str) +} + +// performs the dispatch based on the two primal strings, cmd and args. Please +// look at the dispatch table in parser.go to see how these dispatchers work. +func fullDispatch(cmd, args string) (*Node, error) { + if _, ok := dispatch[cmd]; !ok { + return nil, fmt.Errorf("'%s' is not a valid dockerfile command", cmd) + } + + sexp, err := dispatch[cmd](args) + if err != nil { + return nil, err + } + + return sexp, nil +} + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. +func splitCommand(line string) (string, string) { + cmdline := TOKEN_WHITESPACE.Split(line, 2) + cmd := strings.ToLower(cmdline[0]) + // the cmd should never have whitespace, but it's possible for the args to + // have trailing whitespace. + return cmd, strings.TrimSpace(cmdline[1]) +} + +// covers comments and empty lines. 
Lines should be trimmed before passing to +// this function. +func stripComments(line string) string { + // string is already trimmed at this point + if TOKEN_COMMENT.MatchString(line) { + return TOKEN_COMMENT.ReplaceAllString(line, "") + } + + return line +} From bb36902db60d5d4e3b18bc018212e40df2d412e8 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Tue, 5 Aug 2014 14:03:49 -0700 Subject: [PATCH 02/14] builder: rewrote NewBuildFile according to tibor's comments Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/evaluator/evaluator.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/builder/evaluator/evaluator.go b/builder/evaluator/evaluator.go index 9b35bc2e15..4669386b26 100644 --- a/builder/evaluator/evaluator.go +++ b/builder/evaluator/evaluator.go @@ -57,7 +57,8 @@ type BuildOpts struct { AuthConfigFile *registry.ConfigFile } -func (opts *BuildOpts) NewBuildFile(file io.ReadWriteCloser) (*buildFile, error) { +func NewBuildFile(file io.ReadWriteCloser, opts *BuildOpts) (*buildFile, error) { + defer file.Close() ast, err := parser.Parse(file) if err != nil { return nil, err From d6c0bbc3cbf9204c506fedf4d0c5a477c7559a1d Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Tue, 5 Aug 2014 15:41:09 -0700 Subject: [PATCH 03/14] builder: add command handling to evaluator. 
Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/evaluator/builder/.gitignore | 2 - builder/evaluator/builder/main.go | 31 -- builder/evaluator/dispatchers.go | 211 ++++++++++- builder/evaluator/evaluator.go | 159 +++++--- builder/evaluator/internals.go | 354 ++++++++++++++++-- builder/evaluator/support.go | 8 - builder/parser/dumper/main.go | 2 +- builder/parser/line_parsers.go | 25 +- builder/parser/parser.go | 3 +- .../testfiles/brimstone-docker-consul/result | 2 +- builder/parser/utils.go | 2 +- 11 files changed, 670 insertions(+), 129 deletions(-) delete mode 100644 builder/evaluator/builder/.gitignore delete mode 100644 builder/evaluator/builder/main.go diff --git a/builder/evaluator/builder/.gitignore b/builder/evaluator/builder/.gitignore deleted file mode 100644 index 85354a2b0e..0000000000 --- a/builder/evaluator/builder/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -builder -Dockerfile diff --git a/builder/evaluator/builder/main.go b/builder/evaluator/builder/main.go deleted file mode 100644 index 8c0b6d1f0e..0000000000 --- a/builder/evaluator/builder/main.go +++ /dev/null @@ -1,31 +0,0 @@ -package main - -import ( - "os" - - "github.com/erikh/buildfile/evaluator" -) - -func main() { - if len(os.Args) < 2 { - os.Stderr.WriteString("Please supply filename(s) to evaluate") - os.Exit(1) - } - - for _, fn := range os.Args[1:] { - f, err := os.Open(fn) - if err != nil { - panic(err) - } - - opts := &evaluator.BuildOpts{} - - bf, err := opts.NewBuildFile(f) - if err != nil { - panic(err) - } - if err := bf.Run(); err != nil { - panic(err) - } - } -} diff --git a/builder/evaluator/dispatchers.go b/builder/evaluator/dispatchers.go index f6c99a4db1..eefcb629be 100644 --- a/builder/evaluator/dispatchers.go +++ b/builder/evaluator/dispatchers.go @@ -2,10 +2,20 @@ package evaluator import ( "fmt" + "path/filepath" "strings" + + "github.com/docker/docker/nat" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" ) -func env(b 
*buildFile, args ...string) error { +// dispatch with no layer / parsing. +func nullDispatch(b *buildFile, args []string) error { + return nil +} + +func env(b *buildFile, args []string) error { if len(args) != 2 { return fmt.Errorf("ENV accepts two arguments") } @@ -14,12 +24,12 @@ func env(b *buildFile, args ...string) error { // handling. This routine gets much shorter with the denormalization here. key := args[0] b.env[key] = args[1] - b.config.Env = append(b.config.Env, strings.Join("=", key, b.env[key])) + b.config.Env = append(b.config.Env, strings.Join([]string{key, b.env[key]}, "=")) - return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", value)) + return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s=%s", key, b.env[key])) } -func maintainer(b *buildFile, args ...string) error { +func maintainer(b *buildFile, args []string) error { if len(args) != 1 { return fmt.Errorf("MAINTAINER requires only one argument") } @@ -28,7 +38,7 @@ func maintainer(b *buildFile, args ...string) error { return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) } -func add(b *buildFile, args ...string) error { +func add(b *buildFile, args []string) error { if len(args) != 2 { return fmt.Errorf("ADD requires two arguments") } @@ -36,10 +46,199 @@ func add(b *buildFile, args ...string) error { return b.runContextCommand(args, true, true, "ADD") } -func dispatchCopy(b *buildFile, args ...string) error { +func dispatchCopy(b *buildFile, args []string) error { if len(args) != 2 { return fmt.Errorf("COPY requires two arguments") } return b.runContextCommand(args, false, false, "COPY") } + +func from(b *buildFile, args []string) error { + if len(args) != 1 { + return fmt.Errorf("FROM requires one argument") + } + + name := args[0] + + image, err := b.options.Daemon.Repositories().LookupImage(name) + if err != nil { + if b.options.Daemon.Graph().IsNotExist(err) { + image, err = b.pullImage(name) + } + + // note that the top level err will still be !nil here if 
IsNotExist is + // not the error. This approach just simplifies hte logic a bit. + if err != nil { + return err + } + } + + return b.processImageFrom(image) +} + +func onbuild(b *buildFile, args []string) error { + triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) + switch triggerInstruction { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + trigger := strings.Join(args, " ") + + b.config.OnBuild = append(b.config.OnBuild, trigger) + return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger)) +} + +func workdir(b *buildFile, args []string) error { + if len(args) != 1 { + return fmt.Errorf("WORKDIR requires exactly one argument") + } + + workdir := args[0] + + if workdir[0] == '/' { + b.config.WorkingDir = workdir + } else { + if b.config.WorkingDir == "" { + b.config.WorkingDir = "/" + } + b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir) + } + + return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) +} + +func run(b *buildFile, args []string) error { + if len(args) == 1 { // literal string command, not an exec array + args = append([]string{"/bin/sh", "-c"}, args[0]) + } + + if b.image == "" { + return fmt.Errorf("Please provide a source image with `from` prior to run") + } + + config, _, _, err := runconfig.Parse(append([]string{b.image}, args...), nil) + if err != nil { + return err + } + + cmd := b.config.Cmd + // set Cmd manually, this is special case only for Dockerfiles + b.config.Cmd = config.Cmd + runconfig.Merge(b.config, config) + + defer func(cmd []string) { b.config.Cmd = cmd }(cmd) + + utils.Debugf("Command to be executed: %v", b.config.Cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + c, err := b.create() + if err != nil { + return err + } + // Ensure that we keep the container 
mounted until the commit + // to avoid unmounting and then mounting directly again + c.Mount() + defer c.Unmount() + + err = b.run(c) + if err != nil { + return err + } + if err := b.commit(c.ID, cmd, "run"); err != nil { + return err + } + + return nil +} + +func cmd(b *buildFile, args []string) error { + if len(args) < 2 { + args = append([]string{"/bin/sh", "-c"}, args...) + } + + b.config.Cmd = args + if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil { + return err + } + + b.cmdSet = true + return nil +} + +func entrypoint(b *buildFile, args []string) error { + b.config.Entrypoint = args + + // if there is no cmd in current Dockerfile - cleanup cmd + if !b.cmdSet { + b.config.Cmd = nil + } + if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil { + return err + } + return nil +} + +func expose(b *buildFile, args []string) error { + portsTab := args + + if b.config.ExposedPorts == nil { + b.config.ExposedPorts = make(nat.PortSet) + } + + ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...)) + if err != nil { + return err + } + + for port := range ports { + if _, exists := b.config.ExposedPorts[port]; !exists { + b.config.ExposedPorts[port] = struct{}{} + } + } + b.config.PortSpecs = nil + + return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports)) +} + +func user(b *buildFile, args []string) error { + if len(args) != 1 { + return fmt.Errorf("USER requires exactly one argument") + } + + b.config.User = args[0] + return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args)) +} + +func volume(b *buildFile, args []string) error { + if len(args) != 1 { + return fmt.Errorf("Volume cannot be empty") + } + + volume := args + + if b.config.Volumes == nil { + b.config.Volumes = map[string]struct{}{} + } + for _, v := range volume { + b.config.Volumes[v] = struct{}{} + } + if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil { + return err + } + 
return nil +} + +func insert(b *buildFile, args []string) error { + return fmt.Errorf("INSERT has been deprecated. Please use ADD instead") +} diff --git a/builder/evaluator/evaluator.go b/builder/evaluator/evaluator.go index 4669386b26..7b74db1d86 100644 --- a/builder/evaluator/evaluator.go +++ b/builder/evaluator/evaluator.go @@ -1,38 +1,54 @@ package evaluator import ( + "bytes" + "errors" "fmt" "io" - "regexp" + "io/ioutil" + "os" + "path" "strings" - "github.com/erikh/buildfile/parser" - + "github.com/docker/docker/builder/parser" "github.com/docker/docker/daemon" "github.com/docker/docker/engine" "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) var ( - evaluateTable = map[string]func(*buildFile, ...string) error{ - "env": env, - "maintainer": maintainer, - "add": add, - "copy": dispatchCopy, // copy() is a go builtin - //"onbuild": parseMaybeJSON, - //"workdir": parseString, - //"docker-version": parseString, - //"run": parseMaybeJSON, - //"cmd": parseMaybeJSON, - //"entrypoint": parseMaybeJSON, - //"expose": parseMaybeJSON, - //"volume": parseMaybeJSON, - } + ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") ) +var evaluateTable map[string]func(*buildFile, []string) error + +func init() { + evaluateTable = map[string]func(*buildFile, []string) error{ + "env": env, + "maintainer": maintainer, + "add": add, + "copy": dispatchCopy, // copy() is a go builtin + "from": from, + "onbuild": onbuild, + "workdir": workdir, + "docker-version": nullDispatch, // we don't care about docker-version + "run": run, + "cmd": cmd, + "entrypoint": entrypoint, + "expose": expose, + "volume": volume, + "user": user, + "insert": insert, + } +} + +type envMap map[string]string +type uniqueMap map[string]struct{} + type buildFile struct { dockerfile *parser.Node env envMap @@ -40,48 +56,86 @@ type buildFile struct { config *runconfig.Config 
options *BuildOpts maintainer string + + // cmdSet indicates is CMD was set in current Dockerfile + cmdSet bool + + context *tarsum.TarSum + contextPath string + tmpContainers uniqueMap + tmpImages uniqueMap } type BuildOpts struct { - Daemon *daemon.Daemon - Engine *engine.Engine - OutStream io.Writer - ErrStream io.Writer - Verbose bool - UtilizeCache bool - Remove bool - ForceRm bool + Daemon *daemon.Daemon + Engine *engine.Engine + OutStream io.Writer + ErrStream io.Writer + Verbose bool + UtilizeCache bool + Remove bool + ForceRemove bool + AuthConfig *registry.AuthConfig + AuthConfigFile *registry.ConfigFile + + // Deprecated, original writer used for ImagePull. To be removed. OutOld io.Writer StreamFormatter *utils.StreamFormatter - Auth *registry.AuthConfig - AuthConfigFile *registry.ConfigFile } -func NewBuildFile(file io.ReadWriteCloser, opts *BuildOpts) (*buildFile, error) { - defer file.Close() - ast, err := parser.Parse(file) - if err != nil { - return nil, err - } - +func NewBuilder(opts *BuildOpts) (*buildFile, error) { return &buildFile{ - dockerfile: ast, - env: envMap{}, - config: initRunConfig(), - options: opts, + dockerfile: nil, + env: envMap{}, + config: initRunConfig(), + options: opts, + tmpContainers: make(uniqueMap), + tmpImages: make(uniqueMap), }, nil } -func (b *buildFile) Run() error { - node := b.dockerfile +func (b *buildFile) Run(context io.Reader) (string, error) { + err := b.readContext(context) - for i, n := range node.Children { + if err != nil { + return "", err + } + + filename := path.Join(b.contextPath, "Dockerfile") + if _, err := os.Stat(filename); os.IsNotExist(err) { + return "", fmt.Errorf("Cannot build a directory without a Dockerfile") + } + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return "", err + } + if len(fileBytes) == 0 { + return "", ErrDockerfileEmpty + } + ast, err := parser.Parse(bytes.NewReader(fileBytes)) + if err != nil { + return "", err + } + + b.dockerfile = ast + + for i, n := 
range b.dockerfile.Children { if err := b.dispatch(i, n); err != nil { - return err + if b.options.ForceRemove { + b.clearTmp(b.tmpContainers) + } + return "", err + } else if b.options.Remove { + b.clearTmp(b.tmpContainers) } } - return nil + if b.image == "" { + return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n") + } + + fmt.Fprintf(b.options.OutStream, "Successfully built %s\n", utils.TruncateID(b.image)) + return b.image, nil } func initRunConfig() *runconfig.Config { @@ -94,7 +148,7 @@ func initRunConfig() *runconfig.Config { // FIXME(erikh) this should also be a type in runconfig Volumes: map[string]struct{}{}, - Entrypoint: []string{}, + Entrypoint: []string{"/bin/sh", "-c"}, OnBuild: []string{}, } } @@ -102,17 +156,24 @@ func initRunConfig() *runconfig.Config { func (b *buildFile) dispatch(stepN int, ast *parser.Node) error { cmd := ast.Value strs := []string{} - for ast.Next != nil { - ast = ast.Next - strs = append(strs, replaceEnv(b, stripQuotes(ast.Value))) + + if cmd == "onbuild" { + fmt.Fprintf(b.options.OutStream, "%#v\n", ast.Next.Children[0].Value) + ast = ast.Next.Children[0] + strs = append(strs, ast.Value) } - fmt.Fprintf(b.outStream, "Step %d : %s\n", i, cmd, expression) + for ast.Next != nil { + ast = ast.Next + strs = append(strs, replaceEnv(b, ast.Value)) + } + + fmt.Fprintf(b.options.OutStream, "Step %d : %s %s\n", stepN, strings.ToUpper(cmd), strings.Join(strs, " ")) // XXX yes, we skip any cmds that are not valid; the parser should have // picked these out already. if f, ok := evaluateTable[cmd]; ok { - return f(b, strs...) 
+ return f(b, strs) } return nil diff --git a/builder/evaluator/internals.go b/builder/evaluator/internals.go index 8f81624381..719a6d3639 100644 --- a/builder/evaluator/internals.go +++ b/builder/evaluator/internals.go @@ -1,6 +1,33 @@ package evaluator -func (b *buildFile) addContext(context io.Reader) (string, error) { +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/docker/docker/archive" + "github.com/docker/docker/daemon" + imagepkg "github.com/docker/docker/image" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) + +func (b *buildFile) readContext(context io.Reader) error { tmpdirPath, err := ioutil.TempDir("", "docker-build") if err != nil { return err @@ -17,7 +44,7 @@ func (b *buildFile) addContext(context io.Reader) (string, error) { } b.contextPath = tmpdirPath - return tmpdirPath + return nil } func (b *buildFile) commit(id string, autoCmd []string, comment string) error { @@ -38,15 +65,15 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error { return nil } - container, warnings, err := b.daemon.Create(b.config, "") + container, warnings, err := b.options.Daemon.Create(b.config, "") if err != nil { return err } for _, warning := range warnings { - fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning) + fmt.Fprintf(b.options.OutStream, " ---> [Warning] %s\n", warning) } b.tmpContainers[container.ID] = struct{}{} - fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) + fmt.Fprintf(b.options.OutStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) id = container.ID if err := container.Mount(); err != nil { @@ -54,7 +81,7 @@ func (b 
*buildFile) commit(id string, autoCmd []string, comment string) error { } defer container.Unmount() } - container := b.daemon.Get(id) + container := b.options.Daemon.Get(id) if container == nil { return fmt.Errorf("An error occured while creating the container") } @@ -63,7 +90,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error { autoConfig := *b.config autoConfig.Cmd = autoCmd // Commit the container - image, err := b.daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig) + image, err := b.options.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig) if err != nil { return err } @@ -72,24 +99,17 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error { return nil } -func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error { +func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error { if b.context == nil { return fmt.Errorf("No context given. 
Impossible to use %s", cmdName) } - tmp := strings.SplitN(args, " ", 2) - if len(tmp) != 2 { + + if len(args) != 2 { return fmt.Errorf("Invalid %s format", cmdName) } - orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t")) - if err != nil { - return err - } - - dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t")) - if err != nil { - return err - } + orig := args[0] + dest := args[1] cmd := b.config.Cmd b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)} @@ -178,7 +198,7 @@ func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecomp } // Hash path and check the cache - if b.utilizeCache { + if b.options.UtilizeCache { var ( hash string sums = b.context.GetSums() @@ -222,7 +242,7 @@ func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecomp } // Create the container - container, _, err := b.daemon.Create(b.config, "") + container, _, err := b.options.Daemon.Create(b.config, "") if err != nil { return err } @@ -245,3 +265,295 @@ func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecomp } return nil } + +func (b *buildFile) pullImage(name string) (*imagepkg.Image, error) { + remote, tag := parsers.ParseRepositoryTag(name) + pullRegistryAuth := b.options.AuthConfig + if len(b.options.AuthConfigFile.Configs) > 0 { + // The request came with a full auth config file, we prefer to use that + endpoint, _, err := registry.ResolveRepositoryName(remote) + if err != nil { + return nil, err + } + resolvedAuth := b.options.AuthConfigFile.ResolveAuthConfig(endpoint) + pullRegistryAuth = &resolvedAuth + } + job := b.options.Engine.Job("pull", remote, tag) + job.SetenvBool("json", b.options.StreamFormatter.Json()) + job.SetenvBool("parallel", true) + job.SetenvJson("authConfig", pullRegistryAuth) + job.Stdout.Add(b.options.OutOld) + if err := job.Run(); err != nil { + return nil, err + } + image, err := b.options.Daemon.Repositories().LookupImage(name) + if 
err != nil { + return nil, err + } + + return image, nil +} + +func (b *buildFile) processImageFrom(img *imagepkg.Image) error { + b.image = img.ID + b.config = &runconfig.Config{} + if img.Config != nil { + b.config = img.Config + } + if b.config.Env == nil || len(b.config.Env) == 0 { + b.config.Env = append(b.config.Env, "PATH="+daemon.DefaultPathEnv) + } + // Process ONBUILD triggers if they exist + if nTriggers := len(b.config.OnBuild); nTriggers != 0 { + fmt.Fprintf(b.options.ErrStream, "# Executing %d build triggers\n", nTriggers) + } + + // Copy the ONBUILD triggers, and remove them from the config, since the config will be commited. + onBuildTriggers := b.config.OnBuild + b.config.OnBuild = []string{} + + // FIXME rewrite this so that builder/parser is used; right now steps in + // onbuild are muted because we have no good way to represent the step + // number + for _, step := range onBuildTriggers { + splitStep := strings.Split(step, " ") + stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " ")) + switch stepInstruction { + case "ONBUILD": + return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step) + case "MAINTAINER", "FROM": + return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step) + } + + // FIXME we have to run the evaluator manually here. This does not belong + // in this function. + + if f, ok := evaluateTable[strings.ToLower(stepInstruction)]; ok { + if err := f(b, splitStep[1:]); err != nil { + return err + } + } else { + return fmt.Errorf("%s doesn't appear to be a valid Dockerfile instruction", splitStep[0]) + } + } + + return nil +} + +// probeCache checks to see if image-caching is enabled (`b.options.UtilizeCache`) +// and if so attempts to look up the current `b.image` and `b.config` pair +// in the current server `b.options.Daemon`. If an image is found, probeCache returns +// `(true, nil)`. If no image is found, it returns `(false, nil)`. 
If there +// is any error, it returns `(false, err)`. +func (b *buildFile) probeCache() (bool, error) { + if b.options.UtilizeCache { + if cache, err := b.options.Daemon.ImageGetCached(b.image, b.config); err != nil { + return false, err + } else if cache != nil { + fmt.Fprintf(b.options.OutStream, " ---> Using cache\n") + utils.Debugf("[BUILDER] Use cached version") + b.image = cache.ID + return true, nil + } else { + utils.Debugf("[BUILDER] Cache miss") + } + } + return false, nil +} + +func (b *buildFile) create() (*daemon.Container, error) { + if b.image == "" { + return nil, fmt.Errorf("Please provide a source image with `from` prior to run") + } + b.config.Image = b.image + + // Create the container + c, _, err := b.options.Daemon.Create(b.config, "") + if err != nil { + return nil, err + } + b.tmpContainers[c.ID] = struct{}{} + fmt.Fprintf(b.options.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) + + // override the entry point that may have been picked up from the base image + c.Path = b.config.Cmd[0] + c.Args = b.config.Cmd[1:] + + return c, nil +} + +func (b *buildFile) run(c *daemon.Container) error { + var errCh chan error + if b.options.Verbose { + errCh = utils.Go(func() error { + // FIXME: call the 'attach' job so that daemon.Attach can be made private + // + // FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach + // but without hijacking for stdin. Also, with attach there can be race + // condition because of some output already was printed before it. 
+ return <-b.options.Daemon.Attach(c, nil, nil, b.options.OutStream, b.options.ErrStream) + }) + } + + //start the container + if err := c.Start(); err != nil { + return err + } + + if errCh != nil { + if err := <-errCh; err != nil { + return err + } + } + + // Wait for it to finish + if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 { + err := &utils.JSONError{ + Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret), + Code: ret, + } + return err + } + + return nil +} + +func (b *buildFile) checkPathForAddition(orig string) error { + origPath := path.Join(b.contextPath, orig) + if p, err := filepath.EvalSymlinks(origPath); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } else { + origPath = p + } + if !strings.HasPrefix(origPath, b.contextPath) { + return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath) + } + _, err := os.Stat(origPath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + return nil +} + +func (b *buildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error { + var ( + err error + destExists = true + origPath = path.Join(b.contextPath, orig) + destPath = path.Join(container.RootfsPath(), dest) + ) + + if destPath != container.RootfsPath() { + destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath()) + if err != nil { + return err + } + } + + // Preserve the trailing '/' + if strings.HasSuffix(dest, "/") || dest == "." 
{ + destPath = destPath + "/" + } + + destStat, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + return err + } + destExists = false + } + + fi, err := os.Stat(origPath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + + if fi.IsDir() { + return copyAsDirectory(origPath, destPath, destExists) + } + + // If we are adding a remote file (or we've been told not to decompress), do not try to untar it + if decompress { + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. First we need to strip off the archive's + // filename from the path but this is only added if it does not end in / . + tarDest := destPath + if strings.HasSuffix(tarDest, "/") { + tarDest = filepath.Dir(destPath) + } + + // try to successfully untar the orig + if err := archive.UntarPath(origPath, tarDest); err == nil { + return nil + } else if err != io.EOF { + utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) + } + } + + if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { + return err + } + if err := archive.CopyWithTar(origPath, destPath); err != nil { + return err + } + + resPath := destPath + if destExists && destStat.IsDir() { + resPath = path.Join(destPath, path.Base(origPath)) + } + + return fixPermissions(resPath, 0, 0) +} + +func copyAsDirectory(source, destination string, destinationExists bool) error { + if err := archive.CopyWithTar(source, destination); err != nil { + return err + } + + if destinationExists { + files, err := ioutil.ReadDir(source) + if err != nil { + return err + } + + for _, file := range files { + if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil { + return err + } + } + return nil + } + + return fixPermissions(destination, 0, 0) +} + +func fixPermissions(destination string, uid, gid int) error { + return 
filepath.Walk(destination, func(path string, info os.FileInfo, err error) error { + if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) { + return err + } + return nil + }) +} + +func (b *buildFile) clearTmp(containers map[string]struct{}) { + for c := range containers { + tmp := b.options.Daemon.Get(c) + if err := b.options.Daemon.Destroy(tmp); err != nil { + fmt.Fprintf(b.options.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) + } else { + delete(containers, c) + fmt.Fprintf(b.options.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c)) + } + } +} diff --git a/builder/evaluator/support.go b/builder/evaluator/support.go index 41f9c6a5ba..da9f64695e 100644 --- a/builder/evaluator/support.go +++ b/builder/evaluator/support.go @@ -6,17 +6,9 @@ import ( ) var ( - TOKEN_ESCAPED_QUOTE = regexp.MustCompile(`\\"`) - TOKEN_ESCAPED_ESCAPE = regexp.MustCompile(`\\\\`) TOKEN_ENV_INTERPOLATION = regexp.MustCompile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)") ) -func stripQuotes(str string) string { - str = str[1 : len(str)-1] - str = TOKEN_ESCAPED_QUOTE.ReplaceAllString(str, `"`) - return TOKEN_ESCAPED_ESCAPE.ReplaceAllString(str, `\`) -} - func replaceEnv(b *buildFile, str string) string { for _, match := range TOKEN_ENV_INTERPOLATION.FindAllString(str, -1) { match = match[strings.Index(match, "$"):] diff --git a/builder/parser/dumper/main.go b/builder/parser/dumper/main.go index 96c0db2d05..aea7ee74cb 100644 --- a/builder/parser/dumper/main.go +++ b/builder/parser/dumper/main.go @@ -4,7 +4,7 @@ import ( "fmt" "os" - "github.com/erikh/buildfile/parser" + "github.com/docker/docker/builder/parser" ) func main() { diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go index d9716698ec..9ae2a3191f 100644 --- a/builder/parser/line_parsers.go +++ b/builder/parser/line_parsers.go @@ -27,13 +27,11 @@ func parseEnv(rest string) (*Node, error) { node := blankNode() 
rootnode := node strs := TOKEN_WHITESPACE.Split(rest, 2) - node.Value = QuoteString(strs[0]) + node.Value = strs[0] node.Next = blankNode() - node.Next.Value = QuoteString(strs[1]) + node.Next.Value = strs[1] return rootnode, nil - - return node, nil } // parses a whitespace-delimited set of arguments. The result is effectively a @@ -41,18 +39,25 @@ func parseEnv(rest string) (*Node, error) { func parseStringsWhitespaceDelimited(rest string) (*Node, error) { node := blankNode() rootnode := node + prevnode := node for _, str := range TOKEN_WHITESPACE.Split(rest, -1) { // use regexp - node.Value = QuoteString(str) + prevnode = node + node.Value = str node.Next = blankNode() node = node.Next } + // XXX to get around regexp.Split *always* providing an empty string at the + // end due to how our loop is constructed, nil out the last node in the + // chain. + prevnode.Next = nil + return rootnode, nil } // parsestring just wraps the string in quotes and returns a working node. func parseString(rest string) (*Node, error) { - return &Node{QuoteString(rest), nil, nil}, nil + return &Node{rest, nil, nil}, nil } // parseJSON converts JSON arrays to an AST. 
@@ -61,6 +66,7 @@ func parseJSON(rest string) (*Node, error) { myJson []interface{} next = blankNode() orignext = next + prevnode = next ) if err := json.Unmarshal([]byte(rest), &myJson); err != nil { @@ -72,11 +78,14 @@ func parseJSON(rest string) (*Node, error) { case float64: str = strconv.FormatFloat(str.(float64), 'G', -1, 64) } - next.Value = QuoteString(str.(string)) + next.Value = str.(string) next.Next = blankNode() + prevnode = next next = next.Next } + prevnode.Next = nil + return orignext, nil } @@ -94,6 +103,6 @@ func parseMaybeJSON(rest string) (*Node, error) { } node := blankNode() - node.Value = QuoteString(rest) + node.Value = rest return node, nil } diff --git a/builder/parser/parser.go b/builder/parser/parser.go index c2715d43c7..08f67dbb2c 100644 --- a/builder/parser/parser.go +++ b/builder/parser/parser.go @@ -43,7 +43,7 @@ type Node struct { var ( dispatch map[string]func(string) (*Node, error) - TOKEN_WHITESPACE = regexp.MustCompile(`\s+`) + TOKEN_WHITESPACE = regexp.MustCompile(`[\t\v\f\r ]+`) TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\$`) TOKEN_COMMENT = regexp.MustCompile(`^#.*$`) ) @@ -70,6 +70,7 @@ func init() { "entrypoint": parseMaybeJSON, "expose": parseStringsWhitespaceDelimited, "volume": parseMaybeJSON, + "insert": parseIgnore, } } diff --git a/builder/parser/testfiles/brimstone-docker-consul/result b/builder/parser/testfiles/brimstone-docker-consul/result index 739dc6423f..e7fee03985 100644 --- a/builder/parser/testfiles/brimstone-docker-consul/result +++ b/builder/parser/testfiles/brimstone-docker-consul/result @@ -1,5 +1,5 @@ (from "brimstone/ubuntu:14.04") -(cmd) +(cmd "") (entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") (expose "8500" "8600" "8400" "8301" "8302") (run "apt-get update && apt-get install -y unzip wget && apt-get clean && rm -rf /var/lib/apt/lists") diff --git a/builder/parser/utils.go b/builder/parser/utils.go index 08357b42db..08d3e454dd 100644 --- 
a/builder/parser/utils.go +++ b/builder/parser/utils.go @@ -41,7 +41,7 @@ func (node *Node) Dump() string { if len(n.Children) > 0 { str += " " + n.Dump() } else { - str += " " + n.Value + str += " " + QuoteString(n.Value) } } } From 3f5f6b038f56bc89d39a74fbb9c765daf98ae4d8 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Wed, 6 Aug 2014 22:56:44 -0700 Subject: [PATCH 04/14] builder: comments / documentation Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/evaluator/dispatchers.go | 81 +++++++++++++++++++++++- builder/evaluator/evaluator.go | 102 +++++++++++++++++++++++-------- builder/evaluator/internals.go | 3 + builder/evaluator/support.go | 1 + builder/parser/line_parsers.go | 11 ++++ builder/parser/parser.go | 32 +++------- 6 files changed, 181 insertions(+), 49 deletions(-) diff --git a/builder/evaluator/dispatchers.go b/builder/evaluator/dispatchers.go index eefcb629be..e7db2f008b 100644 --- a/builder/evaluator/dispatchers.go +++ b/builder/evaluator/dispatchers.go @@ -1,5 +1,12 @@ package evaluator +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package. + import ( "fmt" "path/filepath" @@ -10,11 +17,16 @@ import ( "github.com/docker/docker/utils" ) -// dispatch with no layer / parsing. +// dispatch with no layer / parsing. This is effectively not a command. func nullDispatch(b *buildFile, args []string) error { return nil } +// ENV foo bar +// +// Sets the environment variable foo to bar, also makes interpolation +// in the dockerfile available from the next statement on via ${foo}. 
+// func env(b *buildFile, args []string) error { if len(args) != 2 { return fmt.Errorf("ENV accepts two arguments") @@ -29,6 +41,9 @@ func env(b *buildFile, args []string) error { return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s=%s", key, b.env[key])) } +// MAINTAINER some text +// +// Sets the maintainer metadata. func maintainer(b *buildFile, args []string) error { if len(args) != 1 { return fmt.Errorf("MAINTAINER requires only one argument") @@ -38,6 +53,11 @@ func maintainer(b *buildFile, args []string) error { return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) } +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling +// exist here. If you do not wish to have this automatic handling, use COPY. +// func add(b *buildFile, args []string) error { if len(args) != 2 { return fmt.Errorf("ADD requires two arguments") @@ -46,6 +66,10 @@ func add(b *buildFile, args []string) error { return b.runContextCommand(args, true, true, "ADD") } +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. +// func dispatchCopy(b *buildFile, args []string) error { if len(args) != 2 { return fmt.Errorf("COPY requires two arguments") @@ -54,6 +78,10 @@ func dispatchCopy(b *buildFile, args []string) error { return b.runContextCommand(args, false, false, "COPY") } +// FROM imagename +// +// This sets the image the dockerfile will build on top of. +// func from(b *buildFile, args []string) error { if len(args) != 1 { return fmt.Errorf("FROM requires one argument") @@ -77,6 +105,15 @@ func from(b *buildFile, args []string) error { return b.processImageFrom(image) } +// ONBUILD RUN echo yo +// +// ONBUILD triggers run when the image is used in a FROM statement. +// +// ONBUILD handling has a lot of special-case functionality, the heading in +// evaluator.go and comments around dispatch() in the same file explain the +// special cases. 
search for 'OnBuild' in internals.go for additional special +// cases. +// func onbuild(b *buildFile, args []string) error { triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) switch triggerInstruction { @@ -92,6 +129,10 @@ func onbuild(b *buildFile, args []string) error { return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger)) } +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// func workdir(b *buildFile, args []string) error { if len(args) != 1 { return fmt.Errorf("WORKDIR requires exactly one argument") @@ -111,6 +152,15 @@ func workdir(b *buildFile, args []string) error { return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) } +// RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// 'sh -c' in the event there is only one argument. The difference in +// processing: +// +// RUN echo hi # sh -c echo hi +// RUN [ "echo", "hi" ] # echo hi +// func run(b *buildFile, args []string) error { if len(args) == 1 { // literal string command, not an exec array args = append([]string{"/bin/sh", "-c"}, args[0]) @@ -162,6 +212,11 @@ func run(b *buildFile, args []string) error { return nil } +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// func cmd(b *buildFile, args []string) error { if len(args) < 2 { args = append([]string{"/bin/sh", "-c"}, args...) @@ -176,6 +231,14 @@ func cmd(b *buildFile, args []string) error { return nil } +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint (which defaults to sh -c) to /usr/sbin/nginx. Will +// accept the CMD as the arguments to /usr/sbin/nginx. +// +// Handles command processing similar to CMD and RUN, only b.config.Entrypoint +// is initialized at NewBuilder time instead of through argument parsing. 
+// func entrypoint(b *buildFile, args []string) error { b.config.Entrypoint = args @@ -189,6 +252,11 @@ func entrypoint(b *buildFile, args []string) error { return nil } +// EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// b.config.ExposedPorts for runconfig. +// func expose(b *buildFile, args []string) error { portsTab := args @@ -211,6 +279,11 @@ func expose(b *buildFile, args []string) error { return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports)) } +// USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. +// func user(b *buildFile, args []string) error { if len(args) != 1 { return fmt.Errorf("USER requires exactly one argument") @@ -220,6 +293,11 @@ func user(b *buildFile, args []string) error { return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args)) } +// VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON form, but either +// way requires exactly one argument. +// func volume(b *buildFile, args []string) error { if len(args) != 1 { return fmt.Errorf("Volume cannot be empty") @@ -239,6 +317,7 @@ func volume(b *buildFile, args []string) error { return nil } +// INSERT is no longer accepted, but we still parse it. func insert(b *buildFile, args []string) error { return fmt.Errorf("INSERT has been deprecated. Please use ADD instead") } diff --git a/builder/evaluator/evaluator.go b/builder/evaluator/evaluator.go index 7b74db1d86..2b22d47317 100644 --- a/builder/evaluator/evaluator.go +++ b/builder/evaluator/evaluator.go @@ -1,3 +1,22 @@ +// evaluator is the evaluation step in the Dockerfile parse/evaluate pipeline. +// +// It incorporates a dispatch table based on the parser.Node values (see the +// parser package for more information) that are yielded from the parser itself. +// Calling NewBuilder with the BuildOpts struct can be used to customize the +// experience for execution purposes only. 
Parsing is controlled in the parser +// package, and this division of responsibility should be respected. +// +// Please see the jump table targets for the actual invocations, most of which +// will call out to the functions in internals.go to deal with their tasks. +// +// ONBUILD is a special case, which is covered in the onbuild() func in +// dispatchers.go. +// +// The evaluator uses the concept of "steps", which are usually each processable +// line in the Dockerfile. Each step is numbered and certain actions are taken +// before and after each step, such as creating an image ID and removing temporary +// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which +// includes its own set of steps (usually only one of them). package evaluator import ( @@ -49,32 +68,40 @@ func init() { type envMap map[string]string type uniqueMap map[string]struct{} +// internal struct, used to maintain configuration of the Dockerfile's +// processing as it evaluates the parsing result. type buildFile struct { - dockerfile *parser.Node - env envMap - image string - config *runconfig.Config - options *BuildOpts - maintainer string + dockerfile *parser.Node // the syntax tree of the dockerfile + env envMap // map of environment variables + image string // image name for commit processing + config *runconfig.Config // runconfig for cmd, run, entrypoint etc. + options *BuildOpts // see below + maintainer string // maintainer name. could probably be removed.
+ cmdSet bool // indicates if CMD was set in current Dockerfile + context *tarsum.TarSum // the context is a tarball that is uploaded by the client + contextPath string // the path of the temporary directory the local context is unpacked to (server side) - // cmdSet indicates is CMD was set in current Dockerfile - cmdSet bool - - context *tarsum.TarSum - contextPath string - tmpContainers uniqueMap - tmpImages uniqueMap + // both of these are controlled by the Remove and ForceRemove options in BuildOpts + tmpContainers uniqueMap // a map of containers used for removes + tmpImages uniqueMap // a map of images used for removes } type BuildOpts struct { - Daemon *daemon.Daemon - Engine *engine.Engine - OutStream io.Writer - ErrStream io.Writer - Verbose bool - UtilizeCache bool - Remove bool - ForceRemove bool + Daemon *daemon.Daemon + Engine *engine.Engine + + // effectively stdio for the run. Because it is not stdio, I said + // "Effectively". Do not use stdio anywhere in this package for any reason. + OutStream io.Writer + ErrStream io.Writer + + Verbose bool + UtilizeCache bool + + // controls how images and containers are handled between steps. + Remove bool + ForceRemove bool + AuthConfig *registry.AuthConfig AuthConfigFile *registry.ConfigFile @@ -83,6 +110,7 @@ type BuildOpts struct { StreamFormatter *utils.StreamFormatter } +// Create a new builder. func NewBuilder(opts *BuildOpts) (*buildFile, error) { return &buildFile{ dockerfile: nil, @@ -94,10 +122,20 @@ func NewBuilder(opts *BuildOpts) (*buildFile, error) { }, nil } +// Run the builder with the context. This is the lynchpin of this package. This +// will (barring errors): +// +// * call readContext() which will set up the temporary directory and unpack +// the context into it. +// * read the dockerfile +// * parse the dockerfile +// * walk the parse tree and execute it by dispatching to handlers. If Remove +// or ForceRemove is set, additional cleanup around containers happens after +// processing.
+// * Print a happy message and return the image ID. +// func (b *buildFile) Run(context io.Reader) (string, error) { - err := b.readContext(context) - - if err != nil { + if err := b.readContext(context); err != nil { return "", err } @@ -131,7 +169,7 @@ func (b *buildFile) Run(context io.Reader) (string, error) { } if b.image == "" { - return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n") + return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?\n") } fmt.Fprintf(b.options.OutStream, "Successfully built %s\n", utils.TruncateID(b.image)) @@ -153,6 +191,20 @@ func initRunConfig() *runconfig.Config { } } +// This method is the entrypoint to all statement handling routines. +// +// Almost all nodes will have this structure: +// Child[Node, Node, Node] where Child is from parser.Node.Children and each +// node comes from parser.Node.Next. This forms a "line" with a statement and +// arguments and we process them in this normalized form by hitting +// evaluateTable with the leaf nodes of the command and the buildFile object. +// +// ONBUILD is a special case; in this case the parser will emit: +// Child[Node, Child[Node, Node...]] where the first node is the literal +// "onbuild" and the child entrypoint is the command of the ONBUILD statement, +// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to +// deal with that, at least until it becomes more of a general concern with new +// features. func (b *buildFile) dispatch(stepN int, ast *parser.Node) error { cmd := ast.Value strs := []string{} diff --git a/builder/evaluator/internals.go b/builder/evaluator/internals.go index 719a6d3639..b55b0b967c 100644 --- a/builder/evaluator/internals.go +++ b/builder/evaluator/internals.go @@ -1,5 +1,8 @@ package evaluator +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments.
+ import ( "crypto/sha256" "encoding/hex" diff --git a/builder/evaluator/support.go b/builder/evaluator/support.go index da9f64695e..21dd7ccd3c 100644 --- a/builder/evaluator/support.go +++ b/builder/evaluator/support.go @@ -9,6 +9,7 @@ var ( TOKEN_ENV_INTERPOLATION = regexp.MustCompile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)") ) +// handle environment replacement. Used in dispatcher. func replaceEnv(b *buildFile, str string) string { for _, match := range TOKEN_ENV_INTERPOLATION.FindAllString(str, -1) { match = match[strings.Index(match, "$"):] diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go index 9ae2a3191f..71e704791f 100644 --- a/builder/parser/line_parsers.go +++ b/builder/parser/line_parsers.go @@ -1,5 +1,11 @@ package parser +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + import ( "encoding/json" "strconv" @@ -12,6 +18,11 @@ func parseIgnore(rest string) (*Node, error) { return blankNode(), nil } +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// func parseSubCommand(rest string) (*Node, error) { _, child, err := parseLine(rest) if err != nil { diff --git a/builder/parser/parser.go b/builder/parser/parser.go index 08f67dbb2c..03196c7da9 100644 --- a/builder/parser/parser.go +++ b/builder/parser/parser.go @@ -8,32 +8,17 @@ import ( "strings" ) -// Node is the building block of the AST this package will create. +// Node is a structure used to represent a parse tree. // -// Nodes are structured to have a value, next, and child, the latter two of -// which are Nodes themselves. 
+// In the node there are three fields, Value, Next, and Children. Value is the +// current token's string value. Next is always the next non-child token, and +// children contains all the children. Here's an example: // -// This terminology is unfortunately rather confusing, so here's a diagram. -// Anything after the ; is a comment. +// (value next (child child-next child-next-next) next-next) // -// ( -// (run "foo") ; value run, and next is a value foo. -// (run "1" "2" "3") ; -// (something (really cool)) -// ) -// -// Will give you something like this: -// -// &Node{ -// Value:"", -// Child: &Node{Value: "run", Next: &Node{Value: "foo"}, Child: nil}, -// Next: &Node{Value:"", Child: &Node{Value:"run", Next: &Node{Value:`"1"`.... -// -// ... and so on. -// -// The short and fast rule is that anything that starts with ( is a child of -// something. Anything which follows a previous statement is a next of -// something. +// This data structure is frankly pretty lousy for handling complex languages, +// but lucky for us the Dockerfile isn't very complicated. This structure +// works a little more effectively than a "proper" parse tree for our needs. // type Node struct { Value string // actual content @@ -79,6 +64,7 @@ func blankNode() *Node { return &Node{"", nil, []*Node{}} } +// parse a line and return the remainder. 
func parseLine(line string) (string, *Node, error) { if line = stripComments(line); line == "" { return "", nil, nil From 4122a981ae12851f71bbf6ec46e03bba288e4143 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Thu, 7 Aug 2014 00:42:10 -0700 Subject: [PATCH 05/14] builder: negative test support, fix for shykes's broken dockerfile Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/parser/line_parsers.go | 9 ++++++ builder/parser/parser_test.go | 29 +++++++++++++++++-- .../shykes-nested-json/Dockerfile | 1 + 3 files changed, 36 insertions(+), 3 deletions(-) create mode 100644 builder/parser/testfiles-negative/shykes-nested-json/Dockerfile diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go index 71e704791f..ec748106fc 100644 --- a/builder/parser/line_parsers.go +++ b/builder/parser/line_parsers.go @@ -8,10 +8,15 @@ package parser import ( "encoding/json" + "errors" "strconv" "strings" ) +var ( + dockerFileErrJSONNesting = errors.New("You may not nest arrays in Dockerfile statements.") +) + // ignore the current argument. This will still leave a command parsed, but // will not incorporate the arguments into the ast. 
func parseIgnore(rest string) (*Node, error) { @@ -86,6 +91,8 @@ func parseJSON(rest string) (*Node, error) { for _, str := range myJson { switch str.(type) { + case []interface{}: + return nil, dockerFileErrJSONNesting case float64: str = strconv.FormatFloat(str.(float64), 'G', -1, 64) } @@ -110,6 +117,8 @@ func parseMaybeJSON(rest string) (*Node, error) { node, err := parseJSON(rest) if err == nil { return node, nil + } else if err == dockerFileErrJSONNesting { + return nil, err } } diff --git a/builder/parser/parser_test.go b/builder/parser/parser_test.go index 1482a011fe..3969b01fca 100644 --- a/builder/parser/parser_test.go +++ b/builder/parser/parser_test.go @@ -8,9 +8,10 @@ import ( ) const testDir = "testfiles" +const negativeTestDir = "testfiles-negative" -func TestTestData(t *testing.T) { - f, err := os.Open(testDir) +func getDirs(t *testing.T, dir string) []os.FileInfo { + f, err := os.Open(dir) if err != nil { t.Fatal(err) } @@ -22,7 +23,29 @@ func TestTestData(t *testing.T) { t.Fatal(err) } - for _, dir := range dirs { + return dirs +} + +func TestTestNegative(t *testing.T) { + for _, dir := range getDirs(t, negativeTestDir) { + dockerfile := filepath.Join(negativeTestDir, dir.Name(), "Dockerfile") + + df, err := os.Open(dockerfile) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error()) + } + + _, err = Parse(df) + if err == nil { + t.Fatalf("No error parsing broken dockerfile for %s: %s", dir.Name(), err.Error()) + } + + df.Close() + } +} + +func TestTestData(t *testing.T) { + for _, dir := range getDirs(t, testDir) { dockerfile := filepath.Join(testDir, dir.Name(), "Dockerfile") resultfile := filepath.Join(testDir, dir.Name(), "result") diff --git a/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile b/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile new file mode 100644 index 0000000000..d1be4596c7 --- /dev/null +++ b/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile @@ -0,0 +1 
@@ +CMD [ "echo", [ "nested json" ] ] From 21b15ac920d5e51f636d7febf07a4a52f5e61bff Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Fri, 8 Aug 2014 13:44:57 -0700 Subject: [PATCH 06/14] builder: handle certain classes of JSON errors gracefully Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/parser/line_parsers.go | 6 ++++-- builder/parser/parser_test.go | 2 +- .../Dockerfile | 1 + .../jeztah-invalid-json-json-inside-string-double/result | 1 + .../jeztah-invalid-json-json-inside-string/Dockerfile | 1 + .../testfiles/jeztah-invalid-json-json-inside-string/result | 1 + .../testfiles/jeztah-invalid-json-single-quotes/Dockerfile | 1 + .../testfiles/jeztah-invalid-json-single-quotes/result | 1 + .../jeztah-invalid-json-unterminated-bracket/Dockerfile | 1 + .../jeztah-invalid-json-unterminated-bracket/result | 1 + .../jeztah-invalid-json-unterminated-string/Dockerfile | 1 + .../jeztah-invalid-json-unterminated-string/result | 1 + 12 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile create mode 100644 builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result create mode 100644 builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile create mode 100644 builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result create mode 100644 builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile create mode 100644 builder/parser/testfiles/jeztah-invalid-json-single-quotes/result create mode 100644 builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile create mode 100644 builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result create mode 100644 builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile create mode 100644 builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result diff --git a/builder/parser/line_parsers.go 
b/builder/parser/line_parsers.go index ec748106fc..1e460697ca 100644 --- a/builder/parser/line_parsers.go +++ b/builder/parser/line_parsers.go @@ -91,10 +91,11 @@ func parseJSON(rest string) (*Node, error) { for _, str := range myJson { switch str.(type) { - case []interface{}: - return nil, dockerFileErrJSONNesting + case string: case float64: str = strconv.FormatFloat(str.(float64), 'G', -1, 64) + default: + return nil, dockerFileErrJSONNesting } next.Value = str.(string) next.Next = blankNode() @@ -115,6 +116,7 @@ func parseMaybeJSON(rest string) (*Node, error) { if strings.HasPrefix(rest, "[") { node, err := parseJSON(rest) + if err == nil { return node, nil } else if err == dockerFileErrJSONNesting { diff --git a/builder/parser/parser_test.go b/builder/parser/parser_test.go index 3969b01fca..871da477c1 100644 --- a/builder/parser/parser_test.go +++ b/builder/parser/parser_test.go @@ -37,7 +37,7 @@ func TestTestNegative(t *testing.T) { _, err = Parse(df) if err == nil { - t.Fatalf("No error parsing broken dockerfile for %s: %s", dir.Name(), err.Error()) + t.Fatalf("No error parsing broken dockerfile for %s", dir.Name()) } df.Close() diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile new file mode 100644 index 0000000000..39fe27d99c --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile @@ -0,0 +1 @@ +CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result new file mode 100644 index 0000000000..bfd84ae489 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result @@ -0,0 +1 @@ +(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") \ 
No newline at end of file diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile new file mode 100644 index 0000000000..eaae081a06 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile @@ -0,0 +1 @@ +CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result new file mode 100644 index 0000000000..f8f7b5017b --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result @@ -0,0 +1 @@ +(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") \ No newline at end of file diff --git a/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile new file mode 100644 index 0000000000..c3ac63c07a --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile @@ -0,0 +1 @@ +CMD ['echo','single quotes are invalid JSON'] diff --git a/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result new file mode 100644 index 0000000000..0623f8bf45 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result @@ -0,0 +1 @@ +(cmd "['echo','single quotes are invalid JSON']") \ No newline at end of file diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile new file mode 100644 index 0000000000..5fd4afa522 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "Please, close the brackets when you're done" diff --git 
a/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result new file mode 100644 index 0000000000..d621ddcff3 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"Please, close the brackets when you're done\"") \ No newline at end of file diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile new file mode 100644 index 0000000000..30cc4bb48f --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "look ma, no quote!] diff --git a/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result new file mode 100644 index 0000000000..acedd80c45 --- /dev/null +++ b/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"look ma, no quote!]") \ No newline at end of file From 135f54ccbfb6f638b4ab0be67a215adbad246ecb Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Sun, 10 Aug 2014 04:01:10 -0700 Subject: [PATCH 07/14] builder: Remove blankNode(), use &Node{} instead. Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/parser/line_parsers.go | 16 ++++++++-------- builder/parser/parser.go | 9 ++------- 2 files changed, 10 insertions(+), 15 deletions(-) diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go index 1e460697ca..ff1f3483e9 100644 --- a/builder/parser/line_parsers.go +++ b/builder/parser/line_parsers.go @@ -20,7 +20,7 @@ var ( // ignore the current argument. This will still leave a command parsed, but // will not incorporate the arguments into the ast. 
func parseIgnore(rest string) (*Node, error) { - return blankNode(), nil + return &Node{}, nil } // used for onbuild. Could potentially be used for anything that represents a @@ -40,11 +40,11 @@ func parseSubCommand(rest string) (*Node, error) { // parse environment like statements. Note that this does *not* handle // variable interpolation, which will be handled in the evaluator. func parseEnv(rest string) (*Node, error) { - node := blankNode() + node := &Node{} rootnode := node strs := TOKEN_WHITESPACE.Split(rest, 2) node.Value = strs[0] - node.Next = blankNode() + node.Next = &Node{} node.Next.Value = strs[1] return rootnode, nil @@ -53,13 +53,13 @@ func parseEnv(rest string) (*Node, error) { // parses a whitespace-delimited set of arguments. The result is effectively a // linked list of string arguments. func parseStringsWhitespaceDelimited(rest string) (*Node, error) { - node := blankNode() + node := &Node{} rootnode := node prevnode := node for _, str := range TOKEN_WHITESPACE.Split(rest, -1) { // use regexp prevnode = node node.Value = str - node.Next = blankNode() + node.Next = &Node{} node = node.Next } @@ -80,7 +80,7 @@ func parseString(rest string) (*Node, error) { func parseJSON(rest string) (*Node, error) { var ( myJson []interface{} - next = blankNode() + next = &Node{} orignext = next prevnode = next ) @@ -98,7 +98,7 @@ func parseJSON(rest string) (*Node, error) { return nil, dockerFileErrJSONNesting } next.Value = str.(string) - next.Next = blankNode() + next.Next = &Node{} prevnode = next next = next.Next } @@ -124,7 +124,7 @@ func parseMaybeJSON(rest string) (*Node, error) { } } - node := blankNode() + node := &Node{} node.Value = rest return node, nil } diff --git a/builder/parser/parser.go b/builder/parser/parser.go index 03196c7da9..17e0f72aae 100644 --- a/builder/parser/parser.go +++ b/builder/parser/parser.go @@ -59,11 +59,6 @@ func init() { } } -// empty node. Useful for managing structure. 
-func blankNode() *Node { - return &Node{"", nil, []*Node{}} -} - // parse a line and return the remainder. func parseLine(line string) (string, *Node, error) { if line = stripComments(line); line == "" { @@ -77,7 +72,7 @@ func parseLine(line string) (string, *Node, error) { cmd, args := splitCommand(line) - node := blankNode() + node := &Node{} node.Value = cmd sexp, err := fullDispatch(cmd, args) @@ -96,7 +91,7 @@ func Parse(rwc io.Reader) (*Node, error) { var child *Node var line string var err error - root := blankNode() + root := &Node{} scanner := bufio.NewScanner(rwc) for scanner.Scan() { From 248f4c4f751b76f330278b04d2297bfcf97eec48 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Sun, 10 Aug 2014 04:05:34 -0700 Subject: [PATCH 08/14] builder/parser: Rewrite Parse() to use := instead of var Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/parser/parser.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/builder/parser/parser.go b/builder/parser/parser.go index 17e0f72aae..cb9d28206d 100644 --- a/builder/parser/parser.go +++ b/builder/parser/parser.go @@ -88,14 +88,11 @@ func parseLine(line string) (string, *Node, error) { // The main parse routine. Handles an io.ReadWriteCloser and returns the root // of the AST. 
func Parse(rwc io.Reader) (*Node, error) { - var child *Node - var line string - var err error root := &Node{} scanner := bufio.NewScanner(rwc) for scanner.Scan() { - line, child, err = parseLine(strings.TrimSpace(scanner.Text())) + line, child, err := parseLine(strings.TrimSpace(scanner.Text())) if err != nil { return nil, err } From 3dfe5ddfb98a36ddcf2480720255a3a00f5fbc1d Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Sun, 10 Aug 2014 04:07:28 -0700 Subject: [PATCH 09/14] builder: Remove spurious .gitignores Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/.gitignore | 2 -- builder/parser/dumper/.gitignore | 2 -- 2 files changed, 4 deletions(-) delete mode 100644 builder/.gitignore delete mode 100644 builder/parser/dumper/.gitignore diff --git a/builder/.gitignore b/builder/.gitignore deleted file mode 100644 index 34b1c07205..0000000000 --- a/builder/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -main -gopath diff --git a/builder/parser/dumper/.gitignore b/builder/parser/dumper/.gitignore deleted file mode 100644 index d80aecaec6..0000000000 --- a/builder/parser/dumper/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -dumper -Dockerfile From 1ae4c00a19edb7d3b41c050489e56339833d8e01 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Mon, 11 Aug 2014 08:44:31 -0700 Subject: [PATCH 10/14] builder: fix references to jobs in daemon, make builder a first class package referring to evaluator Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/builder.go | 34 + builder/evaluator/dispatchers.go | 106 ++-- builder/evaluator/evaluator.go | 88 +-- builder/evaluator/internals.go | 130 ++-- builder/evaluator/support.go | 4 +- builder/job.go | 119 ++++ daemon/build.go | 1006 ------------------------------ daemon/daemon.go | 1 - docker/daemon.go | 5 + 9 files changed, 309 insertions(+), 1184 deletions(-) create mode 100644 builder/builder.go create mode 100644 builder/job.go delete mode 100644 daemon/build.go diff --git 
a/builder/builder.go b/builder/builder.go new file mode 100644 index 0000000000..1720b7b99f --- /dev/null +++ b/builder/builder.go @@ -0,0 +1,34 @@ +package builder + +import ( + "github.com/docker/docker/builder/evaluator" + "github.com/docker/docker/nat" + "github.com/docker/docker/runconfig" +) + +// Create a new builder. +func NewBuilder(opts *evaluator.BuildOpts) *evaluator.BuildFile { + return &evaluator.BuildFile{ + Dockerfile: nil, + Env: evaluator.EnvMap{}, + Config: initRunConfig(), + Options: opts, + TmpContainers: evaluator.UniqueMap{}, + TmpImages: evaluator.UniqueMap{}, + } +} + +func initRunConfig() *runconfig.Config { + return &runconfig.Config{ + PortSpecs: []string{}, + // FIXME(erikh) this should be a type that lives in runconfig + ExposedPorts: map[nat.Port]struct{}{}, + Env: []string{}, + Cmd: []string{}, + + // FIXME(erikh) this should also be a type in runconfig + Volumes: map[string]struct{}{}, + Entrypoint: []string{"/bin/sh", "-c"}, + OnBuild: []string{}, + } +} diff --git a/builder/evaluator/dispatchers.go b/builder/evaluator/dispatchers.go index e7db2f008b..d05777981e 100644 --- a/builder/evaluator/dispatchers.go +++ b/builder/evaluator/dispatchers.go @@ -18,7 +18,7 @@ import ( ) // dispatch with no layer / parsing. This is effectively not a command. -func nullDispatch(b *buildFile, args []string) error { +func nullDispatch(b *BuildFile, args []string) error { return nil } @@ -27,7 +27,7 @@ func nullDispatch(b *buildFile, args []string) error { // Sets the environment variable foo to bar, also makes interpolation // in the dockerfile available from the next statement on via ${foo}. // -func env(b *buildFile, args []string) error { +func env(b *BuildFile, args []string) error { if len(args) != 2 { return fmt.Errorf("ENV accepts two arguments") } @@ -35,22 +35,22 @@ func env(b *buildFile, args []string) error { // the duplication here is intended to ease the replaceEnv() call's env // handling. 
This routine gets much shorter with the denormalization here. key := args[0] - b.env[key] = args[1] - b.config.Env = append(b.config.Env, strings.Join([]string{key, b.env[key]}, "=")) + b.Env[key] = args[1] + b.Config.Env = append(b.Config.Env, strings.Join([]string{key, b.Env[key]}, "=")) - return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s=%s", key, b.env[key])) + return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s=%s", key, b.Env[key])) } // MAINTAINER some text // // Sets the maintainer metadata. -func maintainer(b *buildFile, args []string) error { +func maintainer(b *BuildFile, args []string) error { if len(args) != 1 { return fmt.Errorf("MAINTAINER requires only one argument") } b.maintainer = args[0] - return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) + return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) } // ADD foo /path @@ -58,7 +58,7 @@ func maintainer(b *buildFile, args []string) error { // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling // exist here. If you do not wish to have this automatic handling, use COPY. // -func add(b *buildFile, args []string) error { +func add(b *BuildFile, args []string) error { if len(args) != 2 { return fmt.Errorf("ADD requires two arguments") } @@ -70,7 +70,7 @@ func add(b *buildFile, args []string) error { // // Same as 'ADD' but without the tar and remote url handling. // -func dispatchCopy(b *buildFile, args []string) error { +func dispatchCopy(b *BuildFile, args []string) error { if len(args) != 2 { return fmt.Errorf("COPY requires two arguments") } @@ -82,16 +82,16 @@ func dispatchCopy(b *buildFile, args []string) error { // // This sets the image the dockerfile will build on top of. 
// -func from(b *buildFile, args []string) error { +func from(b *BuildFile, args []string) error { if len(args) != 1 { return fmt.Errorf("FROM requires one argument") } name := args[0] - image, err := b.options.Daemon.Repositories().LookupImage(name) + image, err := b.Options.Daemon.Repositories().LookupImage(name) if err != nil { - if b.options.Daemon.Graph().IsNotExist(err) { + if b.Options.Daemon.Graph().IsNotExist(err) { image, err = b.pullImage(name) } @@ -114,7 +114,7 @@ func from(b *buildFile, args []string) error { // special cases. search for 'OnBuild' in internals.go for additional special // cases. // -func onbuild(b *buildFile, args []string) error { +func onbuild(b *BuildFile, args []string) error { triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) switch triggerInstruction { case "ONBUILD": @@ -125,15 +125,15 @@ func onbuild(b *buildFile, args []string) error { trigger := strings.Join(args, " ") - b.config.OnBuild = append(b.config.OnBuild, trigger) - return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger)) + b.Config.OnBuild = append(b.Config.OnBuild, trigger) + return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", trigger)) } // WORKDIR /tmp // // Set the working directory for future RUN/CMD/etc statements. 
// -func workdir(b *buildFile, args []string) error { +func workdir(b *BuildFile, args []string) error { if len(args) != 1 { return fmt.Errorf("WORKDIR requires exactly one argument") } @@ -141,15 +141,15 @@ func workdir(b *buildFile, args []string) error { workdir := args[0] if workdir[0] == '/' { - b.config.WorkingDir = workdir + b.Config.WorkingDir = workdir } else { - if b.config.WorkingDir == "" { - b.config.WorkingDir = "/" + if b.Config.WorkingDir == "" { + b.Config.WorkingDir = "/" } - b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir) + b.Config.WorkingDir = filepath.Join(b.Config.WorkingDir, workdir) } - return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) + return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) } // RUN some command yo @@ -161,7 +161,7 @@ func workdir(b *buildFile, args []string) error { // RUN echo hi # sh -c echo hi // RUN [ "echo", "hi" ] # echo hi // -func run(b *buildFile, args []string) error { +func run(b *BuildFile, args []string) error { if len(args) == 1 { // literal string command, not an exec array args = append([]string{"/bin/sh", "-c"}, args[0]) } @@ -175,14 +175,14 @@ func run(b *buildFile, args []string) error { return err } - cmd := b.config.Cmd + cmd := b.Config.Cmd // set Cmd manually, this is special case only for Dockerfiles - b.config.Cmd = config.Cmd - runconfig.Merge(b.config, config) + b.Config.Cmd = config.Cmd + runconfig.Merge(b.Config, config) - defer func(cmd []string) { b.config.Cmd = cmd }(cmd) + defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) - utils.Debugf("Command to be executed: %v", b.config.Cmd) + utils.Debugf("Command to be executed: %v", b.Config.Cmd) hit, err := b.probeCache() if err != nil { @@ -217,13 +217,13 @@ func run(b *buildFile, args []string) error { // Set the default command to run in the container (which may be empty). // Argument handling is the same as RUN. 
// -func cmd(b *buildFile, args []string) error { +func cmd(b *BuildFile, args []string) error { if len(args) < 2 { args = append([]string{"/bin/sh", "-c"}, args...) } - b.config.Cmd = args - if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil { + b.Config.Cmd = args + if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil { return err } @@ -236,17 +236,17 @@ func cmd(b *buildFile, args []string) error { // Set the entrypoint (which defaults to sh -c) to /usr/sbin/nginx. Will // accept the CMD as the arguments to /usr/sbin/nginx. // -// Handles command processing similar to CMD and RUN, only b.config.Entrypoint +// Handles command processing similar to CMD and RUN, only b.Config.Entrypoint // is initialized at NewBuilder time instead of through argument parsing. // -func entrypoint(b *buildFile, args []string) error { - b.config.Entrypoint = args +func entrypoint(b *BuildFile, args []string) error { + b.Config.Entrypoint = args // if there is no cmd in current Dockerfile - cleanup cmd if !b.cmdSet { - b.config.Cmd = nil + b.Config.Cmd = nil } - if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil { + if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil { return err } return nil @@ -255,28 +255,28 @@ func entrypoint(b *buildFile, args []string) error { // EXPOSE 6667/tcp 7000/tcp // // Expose ports for links and port mappings. This all ends up in -// b.config.ExposedPorts for runconfig. +// b.Config.ExposedPorts for runconfig. 
// -func expose(b *buildFile, args []string) error { +func expose(b *BuildFile, args []string) error { portsTab := args - if b.config.ExposedPorts == nil { - b.config.ExposedPorts = make(nat.PortSet) + if b.Config.ExposedPorts == nil { + b.Config.ExposedPorts = make(nat.PortSet) } - ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...)) + ports, _, err := nat.ParsePortSpecs(append(portsTab, b.Config.PortSpecs...)) if err != nil { return err } for port := range ports { - if _, exists := b.config.ExposedPorts[port]; !exists { - b.config.ExposedPorts[port] = struct{}{} + if _, exists := b.Config.ExposedPorts[port]; !exists { + b.Config.ExposedPorts[port] = struct{}{} } } - b.config.PortSpecs = nil + b.Config.PortSpecs = nil - return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports)) + return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %v", ports)) } // USER foo @@ -284,13 +284,13 @@ func expose(b *buildFile, args []string) error { // Set the user to 'foo' for future commands and when running the // ENTRYPOINT/CMD at container run time. // -func user(b *buildFile, args []string) error { +func user(b *BuildFile, args []string) error { if len(args) != 1 { return fmt.Errorf("USER requires exactly one argument") } - b.config.User = args[0] - return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args)) + b.Config.User = args[0] + return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args)) } // VOLUME /foo @@ -298,26 +298,26 @@ func user(b *buildFile, args []string) error { // Expose the volume /foo for use. Will also accept the JSON form, but either // way requires exactly one argument. 
// -func volume(b *buildFile, args []string) error { +func volume(b *BuildFile, args []string) error { if len(args) != 1 { return fmt.Errorf("Volume cannot be empty") } volume := args - if b.config.Volumes == nil { - b.config.Volumes = map[string]struct{}{} + if b.Config.Volumes == nil { + b.Config.Volumes = map[string]struct{}{} } for _, v := range volume { - b.config.Volumes[v] = struct{}{} + b.Config.Volumes[v] = struct{}{} } - if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil { + if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil { return err } return nil } // INSERT is no longer accepted, but we still parse it. -func insert(b *buildFile, args []string) error { +func insert(b *BuildFile, args []string) error { return fmt.Errorf("INSERT has been deprecated. Please use ADD instead") } diff --git a/builder/evaluator/evaluator.go b/builder/evaluator/evaluator.go index 2b22d47317..2eb2ba8b36 100644 --- a/builder/evaluator/evaluator.go +++ b/builder/evaluator/evaluator.go @@ -32,21 +32,23 @@ import ( "github.com/docker/docker/builder/parser" "github.com/docker/docker/daemon" "github.com/docker/docker/engine" - "github.com/docker/docker/nat" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) +type EnvMap map[string]string +type UniqueMap map[string]struct{} + var ( ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") ) -var evaluateTable map[string]func(*buildFile, []string) error +var evaluateTable map[string]func(*BuildFile, []string) error func init() { - evaluateTable = map[string]func(*buildFile, []string) error{ + evaluateTable = map[string]func(*BuildFile, []string) error{ "env": env, "maintainer": maintainer, "add": add, @@ -65,25 +67,24 @@ func init() { } } -type envMap map[string]string -type uniqueMap map[string]struct{} - // internal struct, used to maintain configuration of the Dockerfile's // 
processing as it evaluates the parsing result. -type buildFile struct { - dockerfile *parser.Node // the syntax tree of the dockerfile - env envMap // map of environment variables - image string // image name for commit processing - config *runconfig.Config // runconfig for cmd, run, entrypoint etc. - options *BuildOpts // see below - maintainer string // maintainer name. could probably be removed. - cmdSet bool // indicates is CMD was set in current Dockerfile - context *tarsum.TarSum // the context is a tarball that is uploaded by the client - contextPath string // the path of the temporary directory the local context is unpacked to (server side) +type BuildFile struct { + Dockerfile *parser.Node // the syntax tree of the dockerfile + Env EnvMap // map of environment variables + Config *runconfig.Config // runconfig for cmd, run, entrypoint etc. + Options *BuildOpts // see below // both of these are controlled by the Remove and ForceRemove options in BuildOpts - tmpContainers uniqueMap // a map of containers used for removes - tmpImages uniqueMap // a map of images used for removes + TmpContainers UniqueMap // a map of containers used for removes + TmpImages UniqueMap // a map of images used for removes + + image string // image name for commit processing + maintainer string // maintainer name. could probably be removed. + cmdSet bool // indicates is CMD was set in current Dockerfile + context *tarsum.TarSum // the context is a tarball that is uploaded by the client + contextPath string // the path of the temporary directory the local context is unpacked to (server side) + } type BuildOpts struct { @@ -110,18 +111,6 @@ type BuildOpts struct { StreamFormatter *utils.StreamFormatter } -// Create a new builder. 
-func NewBuilder(opts *BuildOpts) (*buildFile, error) { - return &buildFile{ - dockerfile: nil, - env: envMap{}, - config: initRunConfig(), - options: opts, - tmpContainers: make(uniqueMap), - tmpImages: make(uniqueMap), - }, nil -} - // Run the builder with the context. This is the lynchpin of this package. This // will (barring errors): // @@ -134,7 +123,7 @@ func NewBuilder(opts *BuildOpts) (*buildFile, error) { // processing. // * Print a happy message and return the image ID. // -func (b *buildFile) Run(context io.Reader) (string, error) { +func (b *BuildFile) Run(context io.Reader) (string, error) { if err := b.readContext(context); err != nil { return "", err } @@ -155,16 +144,16 @@ func (b *buildFile) Run(context io.Reader) (string, error) { return "", err } - b.dockerfile = ast + b.Dockerfile = ast - for i, n := range b.dockerfile.Children { + for i, n := range b.Dockerfile.Children { if err := b.dispatch(i, n); err != nil { - if b.options.ForceRemove { - b.clearTmp(b.tmpContainers) + if b.Options.ForceRemove { + b.clearTmp(b.TmpContainers) } return "", err - } else if b.options.Remove { - b.clearTmp(b.tmpContainers) + } else if b.Options.Remove { + b.clearTmp(b.TmpContainers) } } @@ -172,32 +161,17 @@ func (b *buildFile) Run(context io.Reader) (string, error) { return "", fmt.Errorf("No image was generated. 
Is your Dockerfile empty?\n") } - fmt.Fprintf(b.options.OutStream, "Successfully built %s\n", utils.TruncateID(b.image)) + fmt.Fprintf(b.Options.OutStream, "Successfully built %s\n", utils.TruncateID(b.image)) return b.image, nil } -func initRunConfig() *runconfig.Config { - return &runconfig.Config{ - PortSpecs: []string{}, - // FIXME(erikh) this should be a type that lives in runconfig - ExposedPorts: map[nat.Port]struct{}{}, - Env: []string{}, - Cmd: []string{}, - - // FIXME(erikh) this should also be a type in runconfig - Volumes: map[string]struct{}{}, - Entrypoint: []string{"/bin/sh", "-c"}, - OnBuild: []string{}, - } -} - // This method is the entrypoint to all statement handling routines. // // Almost all nodes will have this structure: // Child[Node, Node, Node] where Child is from parser.Node.Children and each // node comes from parser.Node.Next. This forms a "line" with a statement and // arguments and we process them in this normalized form by hitting -// evaluateTable with the leaf nodes of the command and the buildFile object. +// evaluateTable with the leaf nodes of the command and the BuildFile object. // // ONBUILD is a special case; in this case the parser will emit: // Child[Node, Child[Node, Node...]] where the first node is the literal @@ -205,12 +179,12 @@ func initRunConfig() *runconfig.Config { // such as `RUN` in ONBUILD RUN foo. There is special case logic in here to // deal with that, at least until it becomes more of a general concern with new // features. 
-func (b *buildFile) dispatch(stepN int, ast *parser.Node) error { +func (b *BuildFile) dispatch(stepN int, ast *parser.Node) error { cmd := ast.Value strs := []string{} if cmd == "onbuild" { - fmt.Fprintf(b.options.OutStream, "%#v\n", ast.Next.Children[0].Value) + fmt.Fprintf(b.Options.OutStream, "%#v\n", ast.Next.Children[0].Value) ast = ast.Next.Children[0] strs = append(strs, ast.Value) } @@ -220,7 +194,7 @@ func (b *buildFile) dispatch(stepN int, ast *parser.Node) error { strs = append(strs, replaceEnv(b, ast.Value)) } - fmt.Fprintf(b.options.OutStream, "Step %d : %s %s\n", stepN, strings.ToUpper(cmd), strings.Join(strs, " ")) + fmt.Fprintf(b.Options.OutStream, "Step %d : %s %s\n", stepN, strings.ToUpper(cmd), strings.Join(strs, " ")) // XXX yes, we skip any cmds that are not valid; the parser should have // picked these out already. diff --git a/builder/evaluator/internals.go b/builder/evaluator/internals.go index b55b0b967c..5ceb2f88a2 100644 --- a/builder/evaluator/internals.go +++ b/builder/evaluator/internals.go @@ -30,7 +30,7 @@ import ( "github.com/docker/docker/utils" ) -func (b *buildFile) readContext(context io.Reader) error { +func (b *BuildFile) readContext(context io.Reader) error { tmpdirPath, err := ioutil.TempDir("", "docker-build") if err != nil { return err @@ -50,15 +50,15 @@ func (b *buildFile) readContext(context io.Reader) error { return nil } -func (b *buildFile) commit(id string, autoCmd []string, comment string) error { +func (b *BuildFile) commit(id string, autoCmd []string, comment string) error { if b.image == "" { return fmt.Errorf("Please provide a source image with `from` prior to commit") } - b.config.Image = b.image + b.Config.Image = b.image if id == "" { - cmd := b.config.Cmd - b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment} - defer func(cmd []string) { b.config.Cmd = cmd }(cmd) + cmd := b.Config.Cmd + b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment} + defer func(cmd []string) { b.Config.Cmd = cmd 
}(cmd) hit, err := b.probeCache() if err != nil { @@ -68,15 +68,15 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error { return nil } - container, warnings, err := b.options.Daemon.Create(b.config, "") + container, warnings, err := b.Options.Daemon.Create(b.Config, "") if err != nil { return err } for _, warning := range warnings { - fmt.Fprintf(b.options.OutStream, " ---> [Warning] %s\n", warning) + fmt.Fprintf(b.Options.OutStream, " ---> [Warning] %s\n", warning) } - b.tmpContainers[container.ID] = struct{}{} - fmt.Fprintf(b.options.OutStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) + b.TmpContainers[container.ID] = struct{}{} + fmt.Fprintf(b.Options.OutStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) id = container.ID if err := container.Mount(); err != nil { @@ -84,25 +84,25 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error { } defer container.Unmount() } - container := b.options.Daemon.Get(id) + container := b.Options.Daemon.Get(id) if container == nil { return fmt.Errorf("An error occured while creating the container") } // Note: Actually copy the struct - autoConfig := *b.config + autoConfig := *b.Config autoConfig.Cmd = autoCmd // Commit the container - image, err := b.options.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig) + image, err := b.Options.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig) if err != nil { return err } - b.tmpImages[image.ID] = struct{}{} + b.TmpImages[image.ID] = struct{}{} b.image = image.ID return nil } -func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error { +func (b *BuildFile) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error { if b.context == nil { return fmt.Errorf("No context given. 
Impossible to use %s", cmdName) } @@ -114,10 +114,10 @@ func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDeco orig := args[0] dest := args[1] - cmd := b.config.Cmd - b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)} - defer func(cmd []string) { b.config.Cmd = cmd }(cmd) - b.config.Image = b.image + cmd := b.Config.Cmd + b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)} + defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) + b.Config.Image = b.image var ( origPath = orig @@ -201,7 +201,7 @@ func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDeco } // Hash path and check the cache - if b.options.UtilizeCache { + if b.Options.UtilizeCache { var ( hash string sums = b.context.GetSums() @@ -233,7 +233,7 @@ func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDeco hash = "file:" + h } } - b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)} + b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)} hit, err := b.probeCache() if err != nil { return err @@ -245,11 +245,11 @@ func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDeco } // Create the container - container, _, err := b.options.Daemon.Create(b.config, "") + container, _, err := b.Options.Daemon.Create(b.Config, "") if err != nil { return err } - b.tmpContainers[container.ID] = struct{}{} + b.TmpContainers[container.ID] = struct{}{} if err := container.Mount(); err != nil { return err @@ -269,27 +269,27 @@ func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDeco return nil } -func (b *buildFile) pullImage(name string) (*imagepkg.Image, error) { +func (b *BuildFile) pullImage(name string) (*imagepkg.Image, error) { remote, tag := parsers.ParseRepositoryTag(name) - pullRegistryAuth := b.options.AuthConfig - if 
len(b.options.AuthConfigFile.Configs) > 0 { + pullRegistryAuth := b.Options.AuthConfig + if len(b.Options.AuthConfigFile.Configs) > 0 { // The request came with a full auth config file, we prefer to use that endpoint, _, err := registry.ResolveRepositoryName(remote) if err != nil { return nil, err } - resolvedAuth := b.options.AuthConfigFile.ResolveAuthConfig(endpoint) + resolvedAuth := b.Options.AuthConfigFile.ResolveAuthConfig(endpoint) pullRegistryAuth = &resolvedAuth } - job := b.options.Engine.Job("pull", remote, tag) - job.SetenvBool("json", b.options.StreamFormatter.Json()) + job := b.Options.Engine.Job("pull", remote, tag) + job.SetenvBool("json", b.Options.StreamFormatter.Json()) job.SetenvBool("parallel", true) job.SetenvJson("authConfig", pullRegistryAuth) - job.Stdout.Add(b.options.OutOld) + job.Stdout.Add(b.Options.OutOld) if err := job.Run(); err != nil { return nil, err } - image, err := b.options.Daemon.Repositories().LookupImage(name) + image, err := b.Options.Daemon.Repositories().LookupImage(name) if err != nil { return nil, err } @@ -297,23 +297,23 @@ func (b *buildFile) pullImage(name string) (*imagepkg.Image, error) { return image, nil } -func (b *buildFile) processImageFrom(img *imagepkg.Image) error { +func (b *BuildFile) processImageFrom(img *imagepkg.Image) error { b.image = img.ID - b.config = &runconfig.Config{} + b.Config = &runconfig.Config{} if img.Config != nil { - b.config = img.Config + b.Config = img.Config } - if b.config.Env == nil || len(b.config.Env) == 0 { - b.config.Env = append(b.config.Env, "PATH="+daemon.DefaultPathEnv) + if b.Config.Env == nil || len(b.Config.Env) == 0 { + b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv) } // Process ONBUILD triggers if they exist - if nTriggers := len(b.config.OnBuild); nTriggers != 0 { - fmt.Fprintf(b.options.ErrStream, "# Executing %d build triggers\n", nTriggers) + if nTriggers := len(b.Config.OnBuild); nTriggers != 0 { + fmt.Fprintf(b.Options.ErrStream, "# 
Executing %d build triggers\n", nTriggers) } // Copy the ONBUILD triggers, and remove them from the config, since the config will be commited. - onBuildTriggers := b.config.OnBuild - b.config.OnBuild = []string{} + onBuildTriggers := b.Config.OnBuild + b.Config.OnBuild = []string{} // FIXME rewrite this so that builder/parser is used; right now steps in // onbuild are muted because we have no good way to represent the step @@ -343,17 +343,17 @@ func (b *buildFile) processImageFrom(img *imagepkg.Image) error { return nil } -// probeCache checks to see if image-caching is enabled (`b.options.UtilizeCache`) -// and if so attempts to look up the current `b.image` and `b.config` pair -// in the current server `b.options.Daemon`. If an image is found, probeCache returns +// probeCache checks to see if image-caching is enabled (`b.Options.UtilizeCache`) +// and if so attempts to look up the current `b.image` and `b.Config` pair +// in the current server `b.Options.Daemon`. If an image is found, probeCache returns // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there // is any error, it returns `(false, err)`. 
-func (b *buildFile) probeCache() (bool, error) { - if b.options.UtilizeCache { - if cache, err := b.options.Daemon.ImageGetCached(b.image, b.config); err != nil { +func (b *BuildFile) probeCache() (bool, error) { + if b.Options.UtilizeCache { + if cache, err := b.Options.Daemon.ImageGetCached(b.image, b.Config); err != nil { return false, err } else if cache != nil { - fmt.Fprintf(b.options.OutStream, " ---> Using cache\n") + fmt.Fprintf(b.Options.OutStream, " ---> Using cache\n") utils.Debugf("[BUILDER] Use cached version") b.image = cache.ID return true, nil @@ -364,37 +364,37 @@ func (b *buildFile) probeCache() (bool, error) { return false, nil } -func (b *buildFile) create() (*daemon.Container, error) { +func (b *BuildFile) create() (*daemon.Container, error) { if b.image == "" { return nil, fmt.Errorf("Please provide a source image with `from` prior to run") } - b.config.Image = b.image + b.Config.Image = b.image // Create the container - c, _, err := b.options.Daemon.Create(b.config, "") + c, _, err := b.Options.Daemon.Create(b.Config, "") if err != nil { return nil, err } - b.tmpContainers[c.ID] = struct{}{} - fmt.Fprintf(b.options.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) + b.TmpContainers[c.ID] = struct{}{} + fmt.Fprintf(b.Options.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) // override the entry point that may have been picked up from the base image - c.Path = b.config.Cmd[0] - c.Args = b.config.Cmd[1:] + c.Path = b.Config.Cmd[0] + c.Args = b.Config.Cmd[1:] return c, nil } -func (b *buildFile) run(c *daemon.Container) error { +func (b *BuildFile) run(c *daemon.Container) error { var errCh chan error - if b.options.Verbose { + if b.Options.Verbose { errCh = utils.Go(func() error { // FIXME: call the 'attach' job so that daemon.Attach can be made private // // FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach // but without hijacking for stdin. 
Also, with attach there can be race // condition because of some output already was printed before it. - return <-b.options.Daemon.Attach(c, nil, nil, b.options.OutStream, b.options.ErrStream) + return <-b.Options.Daemon.Attach(c, nil, nil, b.Options.OutStream, b.Options.ErrStream) }) } @@ -412,7 +412,7 @@ func (b *buildFile) run(c *daemon.Container) error { // Wait for it to finish if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 { err := &utils.JSONError{ - Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret), + Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret), Code: ret, } return err @@ -421,7 +421,7 @@ func (b *buildFile) run(c *daemon.Container) error { return nil } -func (b *buildFile) checkPathForAddition(orig string) error { +func (b *BuildFile) checkPathForAddition(orig string) error { origPath := path.Join(b.contextPath, orig) if p, err := filepath.EvalSymlinks(origPath); err != nil { if os.IsNotExist(err) { @@ -444,7 +444,7 @@ func (b *buildFile) checkPathForAddition(orig string) error { return nil } -func (b *buildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error { +func (b *BuildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error { var ( err error destExists = true @@ -549,14 +549,14 @@ func fixPermissions(destination string, uid, gid int) error { }) } -func (b *buildFile) clearTmp(containers map[string]struct{}) { +func (b *BuildFile) clearTmp(containers map[string]struct{}) { for c := range containers { - tmp := b.options.Daemon.Get(c) - if err := b.options.Daemon.Destroy(tmp); err != nil { - fmt.Fprintf(b.options.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) + tmp := b.Options.Daemon.Get(c) + if err := b.Options.Daemon.Destroy(tmp); err != nil { + fmt.Fprintf(b.Options.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), 
err.Error()) } else { delete(containers, c) - fmt.Fprintf(b.options.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c)) + fmt.Fprintf(b.Options.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c)) } } } diff --git a/builder/evaluator/support.go b/builder/evaluator/support.go index 21dd7ccd3c..766fd0208a 100644 --- a/builder/evaluator/support.go +++ b/builder/evaluator/support.go @@ -10,12 +10,12 @@ var ( ) // handle environment replacement. Used in dispatcher. -func replaceEnv(b *buildFile, str string) string { +func replaceEnv(b *BuildFile, str string) string { for _, match := range TOKEN_ENV_INTERPOLATION.FindAllString(str, -1) { match = match[strings.Index(match, "$"):] matchKey := strings.Trim(match, "${}") - for envKey, envValue := range b.env { + for envKey, envValue := range b.Env { if matchKey == envKey { str = strings.Replace(str, match, envValue, -1) } diff --git a/builder/job.go b/builder/job.go new file mode 100644 index 0000000000..98a24df898 --- /dev/null +++ b/builder/job.go @@ -0,0 +1,119 @@ +package builder + +import ( + "io" + "io/ioutil" + "os" + "os/exec" + "strings" + + "github.com/docker/docker/archive" + "github.com/docker/docker/builder/evaluator" + "github.com/docker/docker/daemon" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +type BuilderJob struct { + Engine *engine.Engine + Daemon *daemon.Daemon +} + +func (b *BuilderJob) Install() { + b.Engine.Register("build", b.CmdBuild) +} + +func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { + if len(job.Args) != 0 { + return job.Errorf("Usage: %s\n", job.Name) + } + var ( + remoteURL = job.Getenv("remote") + repoName = job.Getenv("t") + suppressOutput = job.GetenvBool("q") + noCache = job.GetenvBool("nocache") + rm = job.GetenvBool("rm") + forceRm = job.GetenvBool("forcerm") + authConfig = ®istry.AuthConfig{} + configFile = 
®istry.ConfigFile{} + tag string + context io.ReadCloser + ) + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("configFile", configFile) + repoName, tag = parsers.ParseRepositoryTag(repoName) + + if remoteURL == "" { + context = ioutil.NopCloser(job.Stdin) + } else if utils.IsGIT(remoteURL) { + if !strings.HasPrefix(remoteURL, "git://") { + remoteURL = "https://" + remoteURL + } + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return job.Error(err) + } + defer os.RemoveAll(root) + + if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { + return job.Errorf("Error trying to use git: %s (%s)", err, output) + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return job.Error(err) + } + context = c + } else if utils.IsURL(remoteURL) { + f, err := utils.Download(remoteURL) + if err != nil { + return job.Error(err) + } + defer f.Body.Close() + dockerFile, err := ioutil.ReadAll(f.Body) + if err != nil { + return job.Error(err) + } + c, err := archive.Generate("Dockerfile", string(dockerFile)) + if err != nil { + return job.Error(err) + } + context = c + } + defer context.Close() + + sf := utils.NewStreamFormatter(job.GetenvBool("json")) + + opts := &evaluator.BuildOpts{ + Daemon: b.Daemon, + Engine: b.Engine, + OutStream: &utils.StdoutFormater{ + Writer: job.Stdout, + StreamFormatter: sf, + }, + ErrStream: &utils.StderrFormater{ + Writer: job.Stdout, + StreamFormatter: sf, + }, + Verbose: !suppressOutput, + UtilizeCache: !noCache, + Remove: rm, + ForceRemove: forceRm, + OutOld: job.Stdout, + StreamFormatter: sf, + AuthConfig: authConfig, + AuthConfigFile: configFile, + } + + id, err := NewBuilder(opts).Run(context) + if err != nil { + return job.Error(err) + } + + if repoName != "" { + b.Daemon.Repositories().Set(repoName, tag, id, false) + } + return engine.StatusOK +} diff --git a/daemon/build.go b/daemon/build.go deleted file mode 100644 index 
6812139663..0000000000 --- a/daemon/build.go +++ /dev/null @@ -1,1006 +0,0 @@ -package daemon - -import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/url" - "os" - "os/exec" - "path" - "path/filepath" - "reflect" - "regexp" - "sort" - "strings" - "syscall" - "time" - - "github.com/docker/docker/archive" - "github.com/docker/docker/engine" - "github.com/docker/docker/nat" - "github.com/docker/docker/pkg/log" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/registry" - "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" -) - -func (daemon *Daemon) CmdBuild(job *engine.Job) engine.Status { - if len(job.Args) != 0 { - return job.Errorf("Usage: %s\n", job.Name) - } - var ( - remoteURL = job.Getenv("remote") - repoName = job.Getenv("t") - suppressOutput = job.GetenvBool("q") - noCache = job.GetenvBool("nocache") - rm = job.GetenvBool("rm") - forceRm = job.GetenvBool("forcerm") - authConfig = ®istry.AuthConfig{} - configFile = ®istry.ConfigFile{} - tag string - context io.ReadCloser - ) - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("configFile", configFile) - repoName, tag = parsers.ParseRepositoryTag(repoName) - - if remoteURL == "" { - context = ioutil.NopCloser(job.Stdin) - } else if utils.IsGIT(remoteURL) { - if !strings.HasPrefix(remoteURL, "git://") { - remoteURL = "https://" + remoteURL - } - root, err := ioutil.TempDir("", "docker-build-git") - if err != nil { - return job.Error(err) - } - defer os.RemoveAll(root) - - if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { - return job.Errorf("Error trying to use git: %s (%s)", err, output) - } - - c, err := archive.Tar(root, archive.Uncompressed) - if err != nil { - return job.Error(err) - } - context = c - } else if 
utils.IsURL(remoteURL) { - f, err := utils.Download(remoteURL) - if err != nil { - return job.Error(err) - } - defer f.Body.Close() - dockerFile, err := ioutil.ReadAll(f.Body) - if err != nil { - return job.Error(err) - } - c, err := archive.Generate("Dockerfile", string(dockerFile)) - if err != nil { - return job.Error(err) - } - context = c - } - defer context.Close() - - sf := utils.NewStreamFormatter(job.GetenvBool("json")) - b := NewBuildFile(daemon, daemon.eng, - &utils.StdoutFormater{ - Writer: job.Stdout, - StreamFormatter: sf, - }, - &utils.StderrFormater{ - Writer: job.Stdout, - StreamFormatter: sf, - }, - !suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile) - id, err := b.Build(context) - if err != nil { - return job.Error(err) - } - if repoName != "" { - daemon.Repositories().Set(repoName, tag, id, false) - } - return engine.StatusOK -} - -var ( - ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") -) - -type BuildFile interface { - Build(io.Reader) (string, error) - CmdFrom(string) error - CmdRun(string) error -} - -type buildFile struct { - daemon *Daemon - eng *engine.Engine - - image string - maintainer string - config *runconfig.Config - - contextPath string - context *tarsum.TarSum - - verbose bool - utilizeCache bool - rm bool - forceRm bool - - authConfig *registry.AuthConfig - configFile *registry.ConfigFile - - tmpContainers map[string]struct{} - tmpImages map[string]struct{} - - outStream io.Writer - errStream io.Writer - - // Deprecated, original writer used for ImagePull. To be removed. 
- outOld io.Writer - sf *utils.StreamFormatter - - // cmdSet indicates is CMD was set in current Dockerfile - cmdSet bool -} - -func (b *buildFile) clearTmp(containers map[string]struct{}) { - for c := range containers { - tmp := b.daemon.Get(c) - if err := b.daemon.Destroy(tmp); err != nil { - fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) - } else { - delete(containers, c) - fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c)) - } - } -} - -func (b *buildFile) CmdFrom(name string) error { - image, err := b.daemon.Repositories().LookupImage(name) - if err != nil { - if b.daemon.Graph().IsNotExist(err) { - remote, tag := parsers.ParseRepositoryTag(name) - pullRegistryAuth := b.authConfig - if len(b.configFile.Configs) > 0 { - // The request came with a full auth config file, we prefer to use that - endpoint, _, err := registry.ResolveRepositoryName(remote) - if err != nil { - return err - } - resolvedAuth := b.configFile.ResolveAuthConfig(endpoint) - pullRegistryAuth = &resolvedAuth - } - job := b.eng.Job("pull", remote, tag) - job.SetenvBool("json", b.sf.Json()) - job.SetenvBool("parallel", true) - job.SetenvJson("authConfig", pullRegistryAuth) - job.Stdout.Add(b.outOld) - if err := job.Run(); err != nil { - return err - } - image, err = b.daemon.Repositories().LookupImage(name) - if err != nil { - return err - } - } else { - return err - } - } - b.image = image.ID - b.config = &runconfig.Config{} - if image.Config != nil { - b.config = image.Config - } - if b.config.Env == nil || len(b.config.Env) == 0 { - b.config.Env = append(b.config.Env, "PATH="+DefaultPathEnv) - } - // Process ONBUILD triggers if they exist - if nTriggers := len(b.config.OnBuild); nTriggers != 0 { - fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers) - } - - // Copy the ONBUILD triggers, and remove them from the config, since the config will be commited. 
- onBuildTriggers := b.config.OnBuild - b.config.OnBuild = []string{} - - for n, step := range onBuildTriggers { - splitStep := strings.Split(step, " ") - stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " ")) - switch stepInstruction { - case "ONBUILD": - return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step) - case "MAINTAINER", "FROM": - return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step) - } - if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil { - return err - } - } - return nil -} - -// The ONBUILD command declares a build instruction to be executed in any future build -// using the current image as a base. -func (b *buildFile) CmdOnbuild(trigger string) error { - splitTrigger := strings.Split(trigger, " ") - triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " ")) - switch triggerInstruction { - case "ONBUILD": - return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) - } - b.config.OnBuild = append(b.config.OnBuild, trigger) - return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger)) -} - -func (b *buildFile) CmdMaintainer(name string) error { - b.maintainer = name - return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name)) -} - -// probeCache checks to see if image-caching is enabled (`b.utilizeCache`) -// and if so attempts to look up the current `b.image` and `b.config` pair -// in the current server `b.daemon`. If an image is found, probeCache returns -// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there -// is any error, it returns `(false, err)`. 
-func (b *buildFile) probeCache() (bool, error) { - if b.utilizeCache { - if cache, err := b.daemon.ImageGetCached(b.image, b.config); err != nil { - return false, err - } else if cache != nil { - fmt.Fprintf(b.outStream, " ---> Using cache\n") - log.Debugf("[BUILDER] Use cached version") - b.image = cache.ID - return true, nil - } else { - log.Debugf("[BUILDER] Cache miss") - } - } - return false, nil -} - -func (b *buildFile) CmdRun(args string) error { - if b.image == "" { - return fmt.Errorf("Please provide a source image with `from` prior to run") - } - config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil) - if err != nil { - return err - } - - cmd := b.config.Cmd - // set Cmd manually, this is special case only for Dockerfiles - b.config.Cmd = config.Cmd - runconfig.Merge(b.config, config) - - defer func(cmd []string) { b.config.Cmd = cmd }(cmd) - - log.Debugf("Command to be executed: %v", b.config.Cmd) - - hit, err := b.probeCache() - if err != nil { - return err - } - if hit { - return nil - } - - c, err := b.create() - if err != nil { - return err - } - // Ensure that we keep the container mounted until the commit - // to avoid unmounting and then mounting directly again - c.Mount() - defer c.Unmount() - - err = b.run(c) - if err != nil { - return err - } - if err := b.commit(c.ID, cmd, "run"); err != nil { - return err - } - - return nil -} - -func (b *buildFile) FindEnvKey(key string) int { - for k, envVar := range b.config.Env { - envParts := strings.SplitN(envVar, "=", 2) - if key == envParts[0] { - return k - } - } - return -1 -} - -func (b *buildFile) ReplaceEnvMatches(value string) (string, error) { - exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)") - if err != nil { - return value, err - } - matches := exp.FindAllString(value, -1) - for _, match := range matches { - match = match[strings.Index(match, "$"):] - matchKey := strings.Trim(match, "${}") - - for _, envVar := 
range b.config.Env { - envParts := strings.SplitN(envVar, "=", 2) - envKey := envParts[0] - envValue := envParts[1] - - if envKey == matchKey { - value = strings.Replace(value, match, envValue, -1) - break - } - } - } - return value, nil -} - -func (b *buildFile) CmdEnv(args string) error { - tmp := strings.SplitN(args, " ", 2) - if len(tmp) != 2 { - return fmt.Errorf("Invalid ENV format") - } - key := strings.Trim(tmp[0], " \t") - value := strings.Trim(tmp[1], " \t") - - envKey := b.FindEnvKey(key) - replacedValue, err := b.ReplaceEnvMatches(value) - if err != nil { - return err - } - replacedVar := fmt.Sprintf("%s=%s", key, replacedValue) - - if envKey >= 0 { - b.config.Env[envKey] = replacedVar - } else { - b.config.Env = append(b.config.Env, replacedVar) - } - return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar)) -} - -func (b *buildFile) buildCmdFromJson(args string) []string { - var cmd []string - if err := json.Unmarshal([]byte(args), &cmd); err != nil { - log.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err) - cmd = []string{"/bin/sh", "-c", args} - } - return cmd -} - -func (b *buildFile) CmdCmd(args string) error { - cmd := b.buildCmdFromJson(args) - b.config.Cmd = cmd - if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil { - return err - } - b.cmdSet = true - return nil -} - -func (b *buildFile) CmdEntrypoint(args string) error { - entrypoint := b.buildCmdFromJson(args) - b.config.Entrypoint = entrypoint - // if there is no cmd in current Dockerfile - cleanup cmd - if !b.cmdSet { - b.config.Cmd = nil - } - if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil { - return err - } - return nil -} - -func (b *buildFile) CmdExpose(args string) error { - portsTab := strings.Split(args, " ") - - if b.config.ExposedPorts == nil { - b.config.ExposedPorts = make(nat.PortSet) - } - ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...)) - if err != nil { - 
return err - } - for port := range ports { - if _, exists := b.config.ExposedPorts[port]; !exists { - b.config.ExposedPorts[port] = struct{}{} - } - } - b.config.PortSpecs = nil - - return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports)) -} - -func (b *buildFile) CmdUser(args string) error { - b.config.User = args - return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args)) -} - -func (b *buildFile) CmdInsert(args string) error { - return fmt.Errorf("INSERT has been deprecated. Please use ADD instead") -} - -func (b *buildFile) CmdCopy(args string) error { - return b.runContextCommand(args, false, false, "COPY") -} - -func (b *buildFile) CmdWorkdir(workdir string) error { - if workdir[0] == '/' { - b.config.WorkingDir = workdir - } else { - if b.config.WorkingDir == "" { - b.config.WorkingDir = "/" - } - b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir) - } - return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) -} - -func (b *buildFile) CmdVolume(args string) error { - if args == "" { - return fmt.Errorf("Volume cannot be empty") - } - - var volume []string - if err := json.Unmarshal([]byte(args), &volume); err != nil { - volume = []string{args} - } - if b.config.Volumes == nil { - b.config.Volumes = map[string]struct{}{} - } - for _, v := range volume { - b.config.Volumes[v] = struct{}{} - } - if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil { - return err - } - return nil -} - -func (b *buildFile) checkPathForAddition(orig string) error { - origPath := path.Join(b.contextPath, orig) - origPath, err := filepath.EvalSymlinks(origPath) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("%s: no such file or directory", orig) - } - return err - } - if !strings.HasPrefix(origPath, b.contextPath) { - return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath) - } - if _, err := os.Stat(origPath); err != nil { - if os.IsNotExist(err) { - return 
fmt.Errorf("%s: no such file or directory", orig) - } - return err - } - return nil -} - -func (b *buildFile) addContext(container *Container, orig, dest string, decompress bool) error { - var ( - err error - destExists = true - origPath = path.Join(b.contextPath, orig) - destPath = path.Join(container.RootfsPath(), dest) - ) - - if destPath != container.RootfsPath() { - destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath()) - if err != nil { - return err - } - } - - // Preserve the trailing '/' - if strings.HasSuffix(dest, "/") || dest == "." { - destPath = destPath + "/" - } - - destStat, err := os.Stat(destPath) - if err != nil { - if !os.IsNotExist(err) { - return err - } - destExists = false - } - - fi, err := os.Stat(origPath) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("%s: no such file or directory", orig) - } - return err - } - - if fi.IsDir() { - return copyAsDirectory(origPath, destPath, destExists) - } - - // If we are adding a remote file (or we've been told not to decompress), do not try to untar it - if decompress { - // First try to unpack the source as an archive - // to support the untar feature we need to clean up the path a little bit - // because tar is very forgiving. First we need to strip off the archive's - // filename from the path but this is only added if it does not end in / . 
- tarDest := destPath - if strings.HasSuffix(tarDest, "/") { - tarDest = filepath.Dir(destPath) - } - - // try to successfully untar the orig - if err := archive.UntarPath(origPath, tarDest); err == nil { - return nil - } else if err != io.EOF { - log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) - } - } - - if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { - return err - } - if err := archive.CopyWithTar(origPath, destPath); err != nil { - return err - } - - resPath := destPath - if destExists && destStat.IsDir() { - resPath = path.Join(destPath, path.Base(origPath)) - } - - return fixPermissions(resPath, 0, 0) -} - -func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error { - if b.context == nil { - return fmt.Errorf("No context given. Impossible to use %s", cmdName) - } - tmp := strings.SplitN(args, " ", 2) - if len(tmp) != 2 { - return fmt.Errorf("Invalid %s format", cmdName) - } - - orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t")) - if err != nil { - return err - } - - dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t")) - if err != nil { - return err - } - - cmd := b.config.Cmd - b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)} - defer func(cmd []string) { b.config.Cmd = cmd }(cmd) - b.config.Image = b.image - - var ( - origPath = orig - destPath = dest - remoteHash string - isRemote bool - decompress = true - ) - - isRemote = utils.IsURL(orig) - if isRemote && !allowRemote { - return fmt.Errorf("Source can't be an URL for %s", cmdName) - } else if utils.IsURL(orig) { - // Initiate the download - resp, err := utils.Download(orig) - if err != nil { - return err - } - - // Create a tmp dir - tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote") - if err != nil { - return err - } - - // Create a tmp file within our tmp dir - tmpFileName := path.Join(tmpDirName, "tmp") - tmpFile, err := 
os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if err != nil { - return err - } - defer os.RemoveAll(tmpDirName) - - // Download and dump result to tmp file - if _, err := io.Copy(tmpFile, resp.Body); err != nil { - tmpFile.Close() - return err - } - tmpFile.Close() - - // Remove the mtime of the newly created tmp file - if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil { - return err - } - - origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) - - // Process the checksum - r, err := archive.Tar(tmpFileName, archive.Uncompressed) - if err != nil { - return err - } - tarSum := &tarsum.TarSum{Reader: r, DisableCompression: true} - if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { - return err - } - remoteHash = tarSum.Sum(nil) - r.Close() - - // If the destination is a directory, figure out the filename. - if strings.HasSuffix(dest, "/") { - u, err := url.Parse(orig) - if err != nil { - return err - } - path := u.Path - if strings.HasSuffix(path, "/") { - path = path[:len(path)-1] - } - parts := strings.Split(path, "/") - filename := parts[len(parts)-1] - if filename == "" { - return fmt.Errorf("cannot determine filename from url: %s", u) - } - destPath = dest + filename - } - } - - if err := b.checkPathForAddition(origPath); err != nil { - return err - } - - // Hash path and check the cache - if b.utilizeCache { - var ( - hash string - sums = b.context.GetSums() - ) - - if remoteHash != "" { - hash = remoteHash - } else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil { - return err - } else if fi.IsDir() { - var subfiles []string - for file, sum := range sums { - absFile := path.Join(b.contextPath, file) - absOrigPath := path.Join(b.contextPath, origPath) - if strings.HasPrefix(absFile, absOrigPath) { - subfiles = append(subfiles, sum) - } - } - sort.Strings(subfiles) - hasher := sha256.New() - hasher.Write([]byte(strings.Join(subfiles, ","))) - hash = "dir:" + 
hex.EncodeToString(hasher.Sum(nil)) - } else { - if origPath[0] == '/' && len(origPath) > 1 { - origPath = origPath[1:] - } - origPath = strings.TrimPrefix(origPath, "./") - if h, ok := sums[origPath]; ok { - hash = "file:" + h - } - } - b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)} - hit, err := b.probeCache() - if err != nil { - return err - } - // If we do not have a hash, never use the cache - if hit && hash != "" { - return nil - } - } - - // Create the container - container, _, err := b.daemon.Create(b.config, "") - if err != nil { - return err - } - b.tmpContainers[container.ID] = struct{}{} - - if err := container.Mount(); err != nil { - return err - } - defer container.Unmount() - - if !allowDecompression || isRemote { - decompress = false - } - if err := b.addContext(container, origPath, destPath, decompress); err != nil { - return err - } - - if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil { - return err - } - return nil -} - -func (b *buildFile) CmdAdd(args string) error { - return b.runContextCommand(args, true, true, "ADD") -} - -func (b *buildFile) create() (*Container, error) { - if b.image == "" { - return nil, fmt.Errorf("Please provide a source image with `from` prior to run") - } - b.config.Image = b.image - - // Create the container - c, _, err := b.daemon.Create(b.config, "") - if err != nil { - return nil, err - } - b.tmpContainers[c.ID] = struct{}{} - fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) - - // override the entry point that may have been picked up from the base image - c.Path = b.config.Cmd[0] - c.Args = b.config.Cmd[1:] - - return c, nil -} - -func (b *buildFile) run(c *Container) error { - var errCh chan error - if b.verbose { - errCh = utils.Go(func() error { - // FIXME: call the 'attach' job so that daemon.Attach can be made private - // - // FIXME (LK4D4): Also, maybe makes sense to call "logs" job, 
it is like attach - // but without hijacking for stdin. Also, with attach there can be race - // condition because of some output already was printed before it. - return <-b.daemon.Attach(c, nil, nil, b.outStream, b.errStream) - }) - } - - //start the container - if err := c.Start(); err != nil { - return err - } - - if errCh != nil { - if err := <-errCh; err != nil { - return err - } - } - - // Wait for it to finish - if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 { - err := &utils.JSONError{ - Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret), - Code: ret, - } - return err - } - - return nil -} - -// Commit the container with the autorun command -func (b *buildFile) commit(id string, autoCmd []string, comment string) error { - if b.image == "" { - return fmt.Errorf("Please provide a source image with `from` prior to commit") - } - b.config.Image = b.image - if id == "" { - cmd := b.config.Cmd - b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment} - defer func(cmd []string) { b.config.Cmd = cmd }(cmd) - - hit, err := b.probeCache() - if err != nil { - return err - } - if hit { - return nil - } - - container, warnings, err := b.daemon.Create(b.config, "") - if err != nil { - return err - } - for _, warning := range warnings { - fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning) - } - b.tmpContainers[container.ID] = struct{}{} - fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) - id = container.ID - - if err := container.Mount(); err != nil { - return err - } - defer container.Unmount() - } - container := b.daemon.Get(id) - if container == nil { - return fmt.Errorf("An error occured while creating the container") - } - - // Note: Actually copy the struct - autoConfig := *b.config - autoConfig.Cmd = autoCmd - // Commit the container - image, err := b.daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig) - if err != nil { - return err - } - 
b.tmpImages[image.ID] = struct{}{} - b.image = image.ID - return nil -} - -// Long lines can be split with a backslash -var lineContinuation = regexp.MustCompile(`\\\s*\n`) - -func (b *buildFile) Build(context io.Reader) (string, error) { - tmpdirPath, err := ioutil.TempDir("", "docker-build") - if err != nil { - return "", err - } - - decompressedStream, err := archive.DecompressStream(context) - if err != nil { - return "", err - } - - b.context = &tarsum.TarSum{Reader: decompressedStream, DisableCompression: true} - if err := archive.Untar(b.context, tmpdirPath, nil); err != nil { - return "", err - } - defer os.RemoveAll(tmpdirPath) - - b.contextPath = tmpdirPath - filename := path.Join(tmpdirPath, "Dockerfile") - if _, err := os.Stat(filename); os.IsNotExist(err) { - return "", fmt.Errorf("Can't build a directory with no Dockerfile") - } - fileBytes, err := ioutil.ReadFile(filename) - if err != nil { - return "", err - } - if len(fileBytes) == 0 { - return "", ErrDockerfileEmpty - } - var ( - dockerfile = lineContinuation.ReplaceAllString(stripComments(fileBytes), "") - stepN = 0 - ) - for _, line := range strings.Split(dockerfile, "\n") { - line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n") - if len(line) == 0 { - continue - } - if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil { - if b.forceRm { - b.clearTmp(b.tmpContainers) - } - return "", err - } else if b.rm { - b.clearTmp(b.tmpContainers) - } - stepN++ - } - if b.image != "" { - fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image)) - return b.image, nil - } - return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n") -} - -// BuildStep parses a single build step from `instruction` and executes it in the current context. 
-func (b *buildFile) BuildStep(name, expression string) error { - fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression) - tmp := strings.SplitN(expression, " ", 2) - if len(tmp) != 2 { - return fmt.Errorf("Invalid Dockerfile format") - } - instruction := strings.ToLower(strings.Trim(tmp[0], " ")) - arguments := strings.Trim(tmp[1], " ") - - method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:])) - if !exists { - fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction)) - return nil - } - - ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface() - if ret != nil { - return ret.(error) - } - - fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image)) - return nil -} - -func stripComments(raw []byte) string { - var ( - out []string - lines = strings.Split(string(raw), "\n") - ) - for _, l := range lines { - if len(l) == 0 || l[0] == '#' { - continue - } - out = append(out, l) - } - return strings.Join(out, "\n") -} - -func copyAsDirectory(source, destination string, destinationExists bool) error { - if err := archive.CopyWithTar(source, destination); err != nil { - return err - } - - if destinationExists { - files, err := ioutil.ReadDir(source) - if err != nil { - return err - } - - for _, file := range files { - if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil { - return err - } - } - return nil - } - - return fixPermissions(destination, 0, 0) -} - -func fixPermissions(destination string, uid, gid int) error { - return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error { - if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) { - return err - } - return nil - }) -} - -func NewBuildFile(d *Daemon, eng *engine.Engine, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf 
*utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile { - return &buildFile{ - daemon: d, - eng: eng, - config: &runconfig.Config{}, - outStream: outStream, - errStream: errStream, - tmpContainers: make(map[string]struct{}), - tmpImages: make(map[string]struct{}), - verbose: verbose, - utilizeCache: utilizeCache, - rm: rm, - forceRm: forceRm, - sf: sf, - authConfig: auth, - configFile: authConfigFile, - outOld: outOld, - } -} diff --git a/daemon/daemon.go b/daemon/daemon.go index 8ff79801c8..0a4d6e0bc5 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -101,7 +101,6 @@ func (daemon *Daemon) Install(eng *engine.Engine) error { // FIXME: remove ImageDelete's dependency on Daemon, then move to graph/ for name, method := range map[string]engine.Handler{ "attach": daemon.ContainerAttach, - "build": daemon.CmdBuild, "commit": daemon.ContainerCommit, "container_changes": daemon.ContainerChanges, "container_copy": daemon.ContainerCopy, diff --git a/docker/daemon.go b/docker/daemon.go index dc9d56d1d9..eef17efdc4 100644 --- a/docker/daemon.go +++ b/docker/daemon.go @@ -5,6 +5,7 @@ package main import ( "log" + "github.com/docker/docker/builder" "github.com/docker/docker/builtins" "github.com/docker/docker/daemon" _ "github.com/docker/docker/daemon/execdriver/lxc" @@ -48,6 +49,10 @@ func mainDaemon() { if err := d.Install(eng); err != nil { log.Fatal(err) } + + b := &builder.BuilderJob{eng, d} + b.Install() + // after the daemon is done setting up we can tell the api to start // accepting connections if err := eng.Job("acceptconnections").Run(); err != nil { From cb51681a6db4b7c62d91998ba3b1d3b98c09c61c Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Wed, 13 Aug 2014 03:07:41 -0700 Subject: [PATCH 11/14] builder: Fix handling of ENV references that reference themselves, plus tests. 
Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/builder.go | 19 +------ builder/evaluator/dispatchers.go | 65 ++++++++++++------------ builder/evaluator/evaluator.go | 22 ++++---- builder/evaluator/internals.go | 22 ++++---- builder/evaluator/support.go | 28 ++++++++-- builder/parser/line_parsers.go | 47 +++++++++-------- builder/parser/parser.go | 14 ++--- builder/parser/utils.go | 10 ++-- integration-cli/docker_cli_build_test.go | 9 +++- 9 files changed, 127 insertions(+), 109 deletions(-) diff --git a/builder/builder.go b/builder/builder.go index 1720b7b99f..d99d1ad9b6 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -2,7 +2,6 @@ package builder import ( "github.com/docker/docker/builder/evaluator" - "github.com/docker/docker/nat" "github.com/docker/docker/runconfig" ) @@ -10,25 +9,9 @@ import ( func NewBuilder(opts *evaluator.BuildOpts) *evaluator.BuildFile { return &evaluator.BuildFile{ Dockerfile: nil, - Env: evaluator.EnvMap{}, - Config: initRunConfig(), + Config: &runconfig.Config{}, Options: opts, TmpContainers: evaluator.UniqueMap{}, TmpImages: evaluator.UniqueMap{}, } } - -func initRunConfig() *runconfig.Config { - return &runconfig.Config{ - PortSpecs: []string{}, - // FIXME(erikh) this should be a type that lives in runconfig - ExposedPorts: map[nat.Port]struct{}{}, - Env: []string{}, - Cmd: []string{}, - - // FIXME(erikh) this should also be a type in runconfig - Volumes: map[string]struct{}{}, - Entrypoint: []string{"/bin/sh", "-c"}, - OnBuild: []string{}, - } -} diff --git a/builder/evaluator/dispatchers.go b/builder/evaluator/dispatchers.go index d05777981e..23e16b000e 100644 --- a/builder/evaluator/dispatchers.go +++ b/builder/evaluator/dispatchers.go @@ -13,12 +13,12 @@ import ( "strings" "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/log" "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" ) // dispatch with no layer / parsing. This is effectively not a command. 
-func nullDispatch(b *BuildFile, args []string) error { +func nullDispatch(b *BuildFile, args []string, attributes map[string]bool) error { return nil } @@ -27,24 +27,28 @@ func nullDispatch(b *BuildFile, args []string) error { // Sets the environment variable foo to bar, also makes interpolation // in the dockerfile available from the next statement on via ${foo}. // -func env(b *BuildFile, args []string) error { +func env(b *BuildFile, args []string, attributes map[string]bool) error { if len(args) != 2 { return fmt.Errorf("ENV accepts two arguments") } - // the duplication here is intended to ease the replaceEnv() call's env - // handling. This routine gets much shorter with the denormalization here. - key := args[0] - b.Env[key] = args[1] - b.Config.Env = append(b.Config.Env, strings.Join([]string{key, b.Env[key]}, "=")) + fullEnv := fmt.Sprintf("%s=%s", args[0], args[1]) - return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s=%s", key, b.Env[key])) + for i, envVar := range b.Config.Env { + envParts := strings.SplitN(envVar, "=", 2) + if args[0] == envParts[0] { + b.Config.Env[i] = fullEnv + return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv)) + } + } + b.Config.Env = append(b.Config.Env, fullEnv) + return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv)) } // MAINTAINER some text // // Sets the maintainer metadata. -func maintainer(b *BuildFile, args []string) error { +func maintainer(b *BuildFile, args []string, attributes map[string]bool) error { if len(args) != 1 { return fmt.Errorf("MAINTAINER requires only one argument") } @@ -58,7 +62,7 @@ func maintainer(b *BuildFile, args []string) error { // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling // exist here. If you do not wish to have this automatic handling, use COPY. 
// -func add(b *BuildFile, args []string) error { +func add(b *BuildFile, args []string, attributes map[string]bool) error { if len(args) != 2 { return fmt.Errorf("ADD requires two arguments") } @@ -70,7 +74,7 @@ func add(b *BuildFile, args []string) error { // // Same as 'ADD' but without the tar and remote url handling. // -func dispatchCopy(b *BuildFile, args []string) error { +func dispatchCopy(b *BuildFile, args []string, attributes map[string]bool) error { if len(args) != 2 { return fmt.Errorf("COPY requires two arguments") } @@ -82,7 +86,7 @@ func dispatchCopy(b *BuildFile, args []string) error { // // This sets the image the dockerfile will build on top of. // -func from(b *BuildFile, args []string) error { +func from(b *BuildFile, args []string, attributes map[string]bool) error { if len(args) != 1 { return fmt.Errorf("FROM requires one argument") } @@ -114,7 +118,7 @@ func from(b *BuildFile, args []string) error { // special cases. search for 'OnBuild' in internals.go for additional special // cases. // -func onbuild(b *BuildFile, args []string) error { +func onbuild(b *BuildFile, args []string, attributes map[string]bool) error { triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) switch triggerInstruction { case "ONBUILD": @@ -133,7 +137,7 @@ func onbuild(b *BuildFile, args []string) error { // // Set the working directory for future RUN/CMD/etc statements. 
// -func workdir(b *BuildFile, args []string) error { +func workdir(b *BuildFile, args []string, attributes map[string]bool) error { if len(args) != 1 { return fmt.Errorf("WORKDIR requires exactly one argument") } @@ -161,10 +165,8 @@ func workdir(b *BuildFile, args []string) error { // RUN echo hi # sh -c echo hi // RUN [ "echo", "hi" ] # echo hi // -func run(b *BuildFile, args []string) error { - if len(args) == 1 { // literal string command, not an exec array - args = append([]string{"/bin/sh", "-c"}, args[0]) - } +func run(b *BuildFile, args []string, attributes map[string]bool) error { + args = handleJsonArgs(args, attributes) if b.image == "" { return fmt.Errorf("Please provide a source image with `from` prior to run") @@ -182,7 +184,7 @@ func run(b *BuildFile, args []string) error { defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) - utils.Debugf("Command to be executed: %v", b.Config.Cmd) + log.Debugf("Command to be executed: %v", b.Config.Cmd) hit, err := b.probeCache() if err != nil { @@ -196,6 +198,7 @@ func run(b *BuildFile, args []string) error { if err != nil { return err } + // Ensure that we keep the container mounted until the commit // to avoid unmounting and then mounting directly again c.Mount() @@ -217,12 +220,9 @@ func run(b *BuildFile, args []string) error { // Set the default command to run in the container (which may be empty). // Argument handling is the same as RUN. // -func cmd(b *BuildFile, args []string) error { - if len(args) < 2 { - args = append([]string{"/bin/sh", "-c"}, args...) 
- } +func cmd(b *BuildFile, args []string, attributes map[string]bool) error { + b.Config.Cmd = handleJsonArgs(args, attributes) - b.Config.Cmd = args if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil { return err } @@ -239,14 +239,15 @@ func cmd(b *BuildFile, args []string) error { // Handles command processing similar to CMD and RUN, only b.Config.Entrypoint // is initialized at NewBuilder time instead of through argument parsing. // -func entrypoint(b *BuildFile, args []string) error { - b.Config.Entrypoint = args +func entrypoint(b *BuildFile, args []string, attributes map[string]bool) error { + b.Config.Entrypoint = handleJsonArgs(args, attributes) // if there is no cmd in current Dockerfile - cleanup cmd if !b.cmdSet { b.Config.Cmd = nil } - if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil { + + if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %v", b.Config.Entrypoint)); err != nil { return err } return nil @@ -257,7 +258,7 @@ func entrypoint(b *BuildFile, args []string) error { // Expose ports for links and port mappings. This all ends up in // b.Config.ExposedPorts for runconfig. // -func expose(b *BuildFile, args []string) error { +func expose(b *BuildFile, args []string, attributes map[string]bool) error { portsTab := args if b.Config.ExposedPorts == nil { @@ -284,7 +285,7 @@ func expose(b *BuildFile, args []string) error { // Set the user to 'foo' for future commands and when running the // ENTRYPOINT/CMD at container run time. // -func user(b *BuildFile, args []string) error { +func user(b *BuildFile, args []string, attributes map[string]bool) error { if len(args) != 1 { return fmt.Errorf("USER requires exactly one argument") } @@ -298,7 +299,7 @@ func user(b *BuildFile, args []string) error { // Expose the volume /foo for use. Will also accept the JSON form, but either // way requires exactly one argument. 
// -func volume(b *BuildFile, args []string) error { +func volume(b *BuildFile, args []string, attributes map[string]bool) error { if len(args) != 1 { return fmt.Errorf("Volume cannot be empty") } @@ -318,6 +319,6 @@ func volume(b *BuildFile, args []string) error { } // INSERT is no longer accepted, but we still parse it. -func insert(b *BuildFile, args []string) error { +func insert(b *BuildFile, args []string, attributes map[string]bool) error { return fmt.Errorf("INSERT has been deprecated. Please use ADD instead") } diff --git a/builder/evaluator/evaluator.go b/builder/evaluator/evaluator.go index 2eb2ba8b36..dbf4e30839 100644 --- a/builder/evaluator/evaluator.go +++ b/builder/evaluator/evaluator.go @@ -38,17 +38,16 @@ import ( "github.com/docker/docker/utils" ) -type EnvMap map[string]string type UniqueMap map[string]struct{} var ( ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") ) -var evaluateTable map[string]func(*BuildFile, []string) error +var evaluateTable map[string]func(*BuildFile, []string, map[string]bool) error func init() { - evaluateTable = map[string]func(*BuildFile, []string) error{ + evaluateTable = map[string]func(*BuildFile, []string, map[string]bool) error{ "env": env, "maintainer": maintainer, "add": add, @@ -71,7 +70,6 @@ func init() { // processing as it evaluates the parsing result. type BuildFile struct { Dockerfile *parser.Node // the syntax tree of the dockerfile - Env EnvMap // map of environment variables Config *runconfig.Config // runconfig for cmd, run, entrypoint etc. Options *BuildOpts // see below @@ -152,7 +150,9 @@ func (b *BuildFile) Run(context io.Reader) (string, error) { b.clearTmp(b.TmpContainers) } return "", err - } else if b.Options.Remove { + } + fmt.Fprintf(b.Options.OutStream, " ---> %s\n", utils.TruncateID(b.image)) + if b.Options.Remove { b.clearTmp(b.TmpContainers) } } @@ -181,25 +181,29 @@ func (b *BuildFile) Run(context io.Reader) (string, error) { // features. 
func (b *BuildFile) dispatch(stepN int, ast *parser.Node) error { cmd := ast.Value + attrs := ast.Attributes strs := []string{} + msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd)) if cmd == "onbuild" { fmt.Fprintf(b.Options.OutStream, "%#v\n", ast.Next.Children[0].Value) ast = ast.Next.Children[0] - strs = append(strs, ast.Value) + strs = append(strs, b.replaceEnv(ast.Value)) + msg += " " + ast.Value } for ast.Next != nil { ast = ast.Next - strs = append(strs, replaceEnv(b, ast.Value)) + strs = append(strs, b.replaceEnv(ast.Value)) + msg += " " + ast.Value } - fmt.Fprintf(b.Options.OutStream, "Step %d : %s %s\n", stepN, strings.ToUpper(cmd), strings.Join(strs, " ")) + fmt.Fprintf(b.Options.OutStream, "%s\n", msg) // XXX yes, we skip any cmds that are not valid; the parser should have // picked these out already. if f, ok := evaluateTable[cmd]; ok { - return f(b, strs) + return f(b, strs, attrs) } return nil diff --git a/builder/evaluator/internals.go b/builder/evaluator/internals.go index 5ceb2f88a2..9519c683b3 100644 --- a/builder/evaluator/internals.go +++ b/builder/evaluator/internals.go @@ -21,12 +21,12 @@ import ( "github.com/docker/docker/archive" "github.com/docker/docker/daemon" imagepkg "github.com/docker/docker/image" + "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/registry" - "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) @@ -299,13 +299,15 @@ func (b *BuildFile) pullImage(name string) (*imagepkg.Image, error) { func (b *BuildFile) processImageFrom(img *imagepkg.Image) error { b.image = img.ID - b.Config = &runconfig.Config{} + if img.Config != nil { b.Config = img.Config } + if b.Config.Env == nil || len(b.Config.Env) == 0 { b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv) } + // Process ONBUILD triggers if they exist if nTriggers 
:= len(b.Config.OnBuild); nTriggers != 0 { fmt.Fprintf(b.Options.ErrStream, "# Executing %d build triggers\n", nTriggers) @@ -332,7 +334,7 @@ func (b *BuildFile) processImageFrom(img *imagepkg.Image) error { // in this function. if f, ok := evaluateTable[strings.ToLower(stepInstruction)]; ok { - if err := f(b, splitStep[1:]); err != nil { + if err := f(b, splitStep[1:], nil); err != nil { return err } } else { @@ -354,11 +356,11 @@ func (b *BuildFile) probeCache() (bool, error) { return false, err } else if cache != nil { fmt.Fprintf(b.Options.OutStream, " ---> Using cache\n") - utils.Debugf("[BUILDER] Use cached version") + log.Debugf("[BUILDER] Use cached version") b.image = cache.ID return true, nil } else { - utils.Debugf("[BUILDER] Cache miss") + log.Debugf("[BUILDER] Cache miss") } } return false, nil @@ -423,19 +425,17 @@ func (b *BuildFile) run(c *daemon.Container) error { func (b *BuildFile) checkPathForAddition(orig string) error { origPath := path.Join(b.contextPath, orig) - if p, err := filepath.EvalSymlinks(origPath); err != nil { + origPath, err := filepath.EvalSymlinks(origPath) + if err != nil { if os.IsNotExist(err) { return fmt.Errorf("%s: no such file or directory", orig) } return err - } else { - origPath = p } if !strings.HasPrefix(origPath, b.contextPath) { return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath) } - _, err := os.Stat(origPath) - if err != nil { + if _, err := os.Stat(origPath); err != nil { if os.IsNotExist(err) { return fmt.Errorf("%s: no such file or directory", orig) } @@ -499,7 +499,7 @@ func (b *BuildFile) addContext(container *daemon.Container, orig, dest string, d if err := archive.UntarPath(origPath, tarDest); err == nil { return nil } else if err != io.EOF { - utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) + log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) } } diff --git a/builder/evaluator/support.go b/builder/evaluator/support.go index 
766fd0208a..b543676ecf 100644 --- a/builder/evaluator/support.go +++ b/builder/evaluator/support.go @@ -10,17 +10,37 @@ var ( ) // handle environment replacement. Used in dispatcher. -func replaceEnv(b *BuildFile, str string) string { +func (b *BuildFile) replaceEnv(str string) string { for _, match := range TOKEN_ENV_INTERPOLATION.FindAllString(str, -1) { match = match[strings.Index(match, "$"):] matchKey := strings.Trim(match, "${}") - for envKey, envValue := range b.Env { - if matchKey == envKey { - str = strings.Replace(str, match, envValue, -1) + for _, keyval := range b.Config.Env { + tmp := strings.SplitN(keyval, "=", 2) + if tmp[0] == matchKey { + str = strings.Replace(str, match, tmp[1], -1) } } } return str } + +func (b *BuildFile) FindEnvKey(key string) int { + for k, envVar := range b.Config.Env { + envParts := strings.SplitN(envVar, "=", 2) + if key == envParts[0] { + return k + } + } + return -1 +} + +func handleJsonArgs(args []string, attributes map[string]bool) []string { + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return append([]string{"/bin/sh", "-c", strings.Join(args, " ")}) +} diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go index ff1f3483e9..999d97603d 100644 --- a/builder/parser/line_parsers.go +++ b/builder/parser/line_parsers.go @@ -14,13 +14,13 @@ import ( ) var ( - dockerFileErrJSONNesting = errors.New("You may not nest arrays in Dockerfile statements.") + errDockerfileJSONNesting = errors.New("You may not nest arrays in Dockerfile statements.") ) // ignore the current argument. This will still leave a command parsed, but // will not incorporate the arguments into the ast. -func parseIgnore(rest string) (*Node, error) { - return &Node{}, nil +func parseIgnore(rest string) (*Node, map[string]bool, error) { + return &Node{}, nil, nil } // used for onbuild. 
Could potentially be used for anything that represents a @@ -28,18 +28,18 @@ func parseIgnore(rest string) (*Node, error) { // // ONBUILD RUN foo bar -> (onbuild (run foo bar)) // -func parseSubCommand(rest string) (*Node, error) { +func parseSubCommand(rest string) (*Node, map[string]bool, error) { _, child, err := parseLine(rest) if err != nil { - return nil, err + return nil, nil, err } - return &Node{Children: []*Node{child}}, nil + return &Node{Children: []*Node{child}}, nil, nil } // parse environment like statements. Note that this does *not* handle // variable interpolation, which will be handled in the evaluator. -func parseEnv(rest string) (*Node, error) { +func parseEnv(rest string) (*Node, map[string]bool, error) { node := &Node{} rootnode := node strs := TOKEN_WHITESPACE.Split(rest, 2) @@ -47,12 +47,12 @@ func parseEnv(rest string) (*Node, error) { node.Next = &Node{} node.Next.Value = strs[1] - return rootnode, nil + return rootnode, nil, nil } // parses a whitespace-delimited set of arguments. The result is effectively a // linked list of string arguments. -func parseStringsWhitespaceDelimited(rest string) (*Node, error) { +func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) { node := &Node{} rootnode := node prevnode := node @@ -68,16 +68,18 @@ func parseStringsWhitespaceDelimited(rest string) (*Node, error) { // chain. prevnode.Next = nil - return rootnode, nil + return rootnode, nil, nil } // parsestring just wraps the string in quotes and returns a working node. -func parseString(rest string) (*Node, error) { - return &Node{rest, nil, nil}, nil +func parseString(rest string) (*Node, map[string]bool, error) { + n := &Node{} + n.Value = rest + return n, nil, nil } // parseJSON converts JSON arrays to an AST. 
-func parseJSON(rest string) (*Node, error) { +func parseJSON(rest string) (*Node, map[string]bool, error) { var ( myJson []interface{} next = &Node{} @@ -86,7 +88,7 @@ func parseJSON(rest string) (*Node, error) { ) if err := json.Unmarshal([]byte(rest), &myJson); err != nil { - return nil, err + return nil, nil, err } for _, str := range myJson { @@ -95,7 +97,7 @@ func parseJSON(rest string) (*Node, error) { case float64: str = strconv.FormatFloat(str.(float64), 'G', -1, 64) default: - return nil, dockerFileErrJSONNesting + return nil, nil, errDockerfileJSONNesting } next.Value = str.(string) next.Next = &Node{} @@ -105,26 +107,27 @@ func parseJSON(rest string) (*Node, error) { prevnode.Next = nil - return orignext, nil + return orignext, map[string]bool{"json": true}, nil } // parseMaybeJSON determines if the argument appears to be a JSON array. If // so, passes to parseJSON; if not, quotes the result and returns a single // node. -func parseMaybeJSON(rest string) (*Node, error) { +func parseMaybeJSON(rest string) (*Node, map[string]bool, error) { rest = strings.TrimSpace(rest) if strings.HasPrefix(rest, "[") { - node, err := parseJSON(rest) + node, attrs, err := parseJSON(rest) if err == nil { - return node, nil - } else if err == dockerFileErrJSONNesting { - return nil, err + return node, attrs, nil + } + if err == errDockerfileJSONNesting { + return nil, nil, err } } node := &Node{} node.Value = rest - return node, nil + return node, nil, nil } diff --git a/builder/parser/parser.go b/builder/parser/parser.go index cb9d28206d..47ffc9a678 100644 --- a/builder/parser/parser.go +++ b/builder/parser/parser.go @@ -21,13 +21,14 @@ import ( // works a little more effectively than a "proper" parse tree for our needs. 
// type Node struct { - Value string // actual content - Next *Node // the next item in the current sexp - Children []*Node // the children of this sexp + Value string // actual content + Next *Node // the next item in the current sexp + Children []*Node // the children of this sexp + Attributes map[string]bool // special attributes for this node } var ( - dispatch map[string]func(string) (*Node, error) + dispatch map[string]func(string) (*Node, map[string]bool, error) TOKEN_WHITESPACE = regexp.MustCompile(`[\t\v\f\r ]+`) TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\$`) TOKEN_COMMENT = regexp.MustCompile(`^#.*$`) @@ -40,7 +41,7 @@ func init() { // reformulating the arguments according to the rules in the parser // functions. Errors are propogated up by Parse() and the resulting AST can // be incorporated directly into the existing AST as a next. - dispatch = map[string]func(string) (*Node, error){ + dispatch = map[string]func(string) (*Node, map[string]bool, error){ "user": parseString, "onbuild": parseSubCommand, "workdir": parseString, @@ -75,12 +76,13 @@ func parseLine(line string) (string, *Node, error) { node := &Node{} node.Value = cmd - sexp, err := fullDispatch(cmd, args) + sexp, attrs, err := fullDispatch(cmd, args) if err != nil { return "", nil, err } node.Next = sexp + node.Attributes = attrs return "", node, nil } diff --git a/builder/parser/utils.go b/builder/parser/utils.go index 08d3e454dd..53cda5808b 100644 --- a/builder/parser/utils.go +++ b/builder/parser/utils.go @@ -51,17 +51,17 @@ func (node *Node) Dump() string { // performs the dispatch based on the two primal strings, cmd and args. Please // look at the dispatch table in parser.go to see how these dispatchers work. 
-func fullDispatch(cmd, args string) (*Node, error) { +func fullDispatch(cmd, args string) (*Node, map[string]bool, error) { if _, ok := dispatch[cmd]; !ok { - return nil, fmt.Errorf("'%s' is not a valid dockerfile command", cmd) + return nil, nil, fmt.Errorf("'%s' is not a valid dockerfile command", cmd) } - sexp, err := dispatch[cmd](args) + sexp, attrs, err := dispatch[cmd](args) if err != nil { - return nil, err + return nil, nil, err } - return sexp, nil + return sexp, attrs, nil } // splitCommand takes a single line of text and parses out the cmd and args, diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index bcff199c85..e6572a1bf4 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -685,10 +685,11 @@ func TestBuildRelativeWorkdir(t *testing.T) { func TestBuildEnv(t *testing.T) { name := "testbuildenv" - expected := "[PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" + expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" defer deleteImages(name) _, err := buildImage(name, `FROM busybox + ENV PATH /test:$PATH ENV PORT 2375 RUN [ $(env | grep PORT) = 'PORT=2375' ]`, true) @@ -1708,6 +1709,9 @@ func TestBuildEnvUsage(t *testing.T) { name := "testbuildenvusage" defer deleteImages(name) dockerfile := `FROM busybox +ENV PATH $HOME/bin:$PATH +ENV PATH /tmp:$PATH +RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] ENV FOO /foo/baz ENV BAR /bar ENV BAZ $BAR @@ -1717,7 +1721,8 @@ RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] ENV FROM hello/docker/world ENV TO /docker/world/hello ADD $FROM $TO -RUN [ "$(cat $TO)" = "hello" ]` +RUN [ "$(cat $TO)" = "hello" ] +` ctx, err := fakeContext(dockerfile, map[string]string{ "hello/docker/world": "hello", }) From a1522ec01c21807e657e840760a8186f482f2271 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Fri, 15 Aug 2014 
09:29:35 -0700 Subject: [PATCH 12/14] builder: move the evaluator package code directly into builder Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/builder.go | 11 +++++------ builder/{evaluator => }/dispatchers.go | 2 +- builder/{evaluator => }/evaluator.go | 4 ++-- builder/{evaluator => }/internals.go | 2 +- builder/job.go | 3 +-- builder/{evaluator => }/support.go | 2 +- 6 files changed, 11 insertions(+), 13 deletions(-) rename builder/{evaluator => }/dispatchers.go (99%) rename builder/{evaluator => }/evaluator.go (98%) rename builder/{evaluator => }/internals.go (99%) rename builder/{evaluator => }/support.go (98%) diff --git a/builder/builder.go b/builder/builder.go index d99d1ad9b6..e655f9da60 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -1,17 +1,16 @@ package builder import ( - "github.com/docker/docker/builder/evaluator" "github.com/docker/docker/runconfig" ) -// Create a new builder. -func NewBuilder(opts *evaluator.BuildOpts) *evaluator.BuildFile { - return &evaluator.BuildFile{ +// Create a new builder. See +func NewBuilder(opts *BuildOpts) *BuildFile { + return &BuildFile{ Dockerfile: nil, Config: &runconfig.Config{}, Options: opts, - TmpContainers: evaluator.UniqueMap{}, - TmpImages: evaluator.UniqueMap{}, + TmpContainers: UniqueMap{}, + TmpImages: UniqueMap{}, } } diff --git a/builder/evaluator/dispatchers.go b/builder/dispatchers.go similarity index 99% rename from builder/evaluator/dispatchers.go rename to builder/dispatchers.go index 23e16b000e..fe0b5be579 100644 --- a/builder/evaluator/dispatchers.go +++ b/builder/dispatchers.go @@ -1,4 +1,4 @@ -package evaluator +package builder // This file contains the dispatchers for each command. 
Note that // `nullDispatch` is not actually a command, but support for commands we parse diff --git a/builder/evaluator/evaluator.go b/builder/evaluator.go similarity index 98% rename from builder/evaluator/evaluator.go rename to builder/evaluator.go index dbf4e30839..01805f892d 100644 --- a/builder/evaluator/evaluator.go +++ b/builder/evaluator.go @@ -1,4 +1,4 @@ -// evaluator is the evaluation step in the Dockerfile parse/evaluate pipeline. +// builder is the evaluation step in the Dockerfile parse/evaluate pipeline. // // It incorporates a dispatch table based on the parser.Node values (see the // parser package for more information) that are yielded from the parser itself. @@ -17,7 +17,7 @@ // before and after each step, such as creating an image ID and removing temporary // containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which // includes its own set of steps (usually only one of them). -package evaluator +package builder import ( "bytes" diff --git a/builder/evaluator/internals.go b/builder/internals.go similarity index 99% rename from builder/evaluator/internals.go rename to builder/internals.go index 9519c683b3..b1d5b21f03 100644 --- a/builder/evaluator/internals.go +++ b/builder/internals.go @@ -1,4 +1,4 @@ -package evaluator +package builder // internals for handling commands. Covers many areas and a lot of // non-contiguous functionality. Please read the comments. 
diff --git a/builder/job.go b/builder/job.go index 98a24df898..e1233e0a08 100644 --- a/builder/job.go +++ b/builder/job.go @@ -8,7 +8,6 @@ import ( "strings" "github.com/docker/docker/archive" - "github.com/docker/docker/builder/evaluator" "github.com/docker/docker/daemon" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/parsers" @@ -86,7 +85,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { sf := utils.NewStreamFormatter(job.GetenvBool("json")) - opts := &evaluator.BuildOpts{ + opts := &BuildOpts{ Daemon: b.Daemon, Engine: b.Engine, OutStream: &utils.StdoutFormater{ diff --git a/builder/evaluator/support.go b/builder/support.go similarity index 98% rename from builder/evaluator/support.go rename to builder/support.go index b543676ecf..08cfa8defa 100644 --- a/builder/evaluator/support.go +++ b/builder/support.go @@ -1,4 +1,4 @@ -package evaluator +package builder import ( "regexp" From 305f73508022d0a3af65faa327aa798610969875 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Tue, 19 Aug 2014 11:14:21 +0000 Subject: [PATCH 13/14] builder: several fixups from comments Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/builder.go | 3 +-- builder/evaluator.go | 11 +++++------ builder/internals.go | 9 ++++----- builder/parser/line_parsers.go | 16 +++++++--------- builder/parser/parser.go | 3 +-- builder/support.go | 11 +---------- 6 files changed, 19 insertions(+), 34 deletions(-) diff --git a/builder/builder.go b/builder/builder.go index e655f9da60..685f27b061 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -10,7 +10,6 @@ func NewBuilder(opts *BuildOpts) *BuildFile { Dockerfile: nil, Config: &runconfig.Config{}, Options: opts, - TmpContainers: UniqueMap{}, - TmpImages: UniqueMap{}, + TmpContainers: map[string]struct{}{}, } } diff --git a/builder/evaluator.go b/builder/evaluator.go index 01805f892d..ec8edc98c8 100644 --- a/builder/evaluator.go +++ b/builder/evaluator.go @@ -38,8 +38,6 @@ import ( 
"github.com/docker/docker/utils" ) -type UniqueMap map[string]struct{} - var ( ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") ) @@ -74,8 +72,7 @@ type BuildFile struct { Options *BuildOpts // see below // both of these are controlled by the Remove and ForceRemove options in BuildOpts - TmpContainers UniqueMap // a map of containers used for removes - TmpImages UniqueMap // a map of images used for removes + TmpContainers map[string]struct{} // a map of containers used for removes image string // image name for commit processing maintainer string // maintainer name. could probably be removed. @@ -147,13 +144,13 @@ func (b *BuildFile) Run(context io.Reader) (string, error) { for i, n := range b.Dockerfile.Children { if err := b.dispatch(i, n); err != nil { if b.Options.ForceRemove { - b.clearTmp(b.TmpContainers) + b.clearTmp() } return "", err } fmt.Fprintf(b.Options.OutStream, " ---> %s\n", utils.TruncateID(b.image)) if b.Options.Remove { - b.clearTmp(b.TmpContainers) + b.clearTmp() } } @@ -206,5 +203,7 @@ func (b *BuildFile) dispatch(stepN int, ast *parser.Node) error { return f(b, strs, attrs) } + fmt.Fprintf(b.Options.ErrStream, "# Skipping unknown instruction %s\n", strings.ToUpper(cmd)) + return nil } diff --git a/builder/internals.go b/builder/internals.go index b1d5b21f03..51b2b63ab9 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -97,7 +97,6 @@ func (b *BuildFile) commit(id string, autoCmd []string, comment string) error { if err != nil { return err } - b.TmpImages[image.ID] = struct{}{} b.image = image.ID return nil } @@ -304,7 +303,7 @@ func (b *BuildFile) processImageFrom(img *imagepkg.Image) error { b.Config = img.Config } - if b.Config.Env == nil || len(b.Config.Env) == 0 { + if len(b.Config.Env) == 0 { b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv) } @@ -549,13 +548,13 @@ func fixPermissions(destination string, uid, gid int) error { }) } -func (b *BuildFile) clearTmp(containers map[string]struct{}) { - 
for c := range containers { +func (b *BuildFile) clearTmp() { + for c := range b.TmpContainers { tmp := b.Options.Daemon.Get(c) if err := b.Options.Daemon.Destroy(tmp); err != nil { fmt.Fprintf(b.Options.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) } else { - delete(containers, c) + delete(b.TmpContainers, c) fmt.Fprintf(b.Options.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c)) } } diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go index 999d97603d..93fa23ee85 100644 --- a/builder/parser/line_parsers.go +++ b/builder/parser/line_parsers.go @@ -116,18 +116,16 @@ func parseJSON(rest string) (*Node, map[string]bool, error) { func parseMaybeJSON(rest string) (*Node, map[string]bool, error) { rest = strings.TrimSpace(rest) - if strings.HasPrefix(rest, "[") { - node, attrs, err := parseJSON(rest) + node, attrs, err := parseJSON(rest) - if err == nil { - return node, attrs, nil - } - if err == errDockerfileJSONNesting { - return nil, nil, err - } + if err == nil { + return node, attrs, nil + } + if err == errDockerfileJSONNesting { + return nil, nil, err } - node := &Node{} + node = &Node{} node.Value = rest return node, nil, nil } diff --git a/builder/parser/parser.go b/builder/parser/parser.go index 47ffc9a678..8315412bd7 100644 --- a/builder/parser/parser.go +++ b/builder/parser/parser.go @@ -100,8 +100,7 @@ func Parse(rwc io.Reader) (*Node, error) { } if line != "" && child == nil { - for { - scanner.Scan() + for scanner.Scan() { newline := strings.TrimSpace(scanner.Text()) if newline == "" { diff --git a/builder/support.go b/builder/support.go index 08cfa8defa..bae97e370f 100644 --- a/builder/support.go +++ b/builder/support.go @@ -19,6 +19,7 @@ func (b *BuildFile) replaceEnv(str string) string { tmp := strings.SplitN(keyval, "=", 2) if tmp[0] == matchKey { str = strings.Replace(str, match, tmp[1], -1) + break } } } @@ -26,16 +27,6 @@ func (b *BuildFile) replaceEnv(str 
string) string { return str } -func (b *BuildFile) FindEnvKey(key string) int { - for k, envVar := range b.Config.Env { - envParts := strings.SplitN(envVar, "=", 2) - if key == envParts[0] { - return k - } - } - return -1 -} - func handleJsonArgs(args []string, attributes map[string]bool) []string { if attributes != nil && attributes["json"] { return args From 2ef1dec7e8d405f237b68ca104d457d78b8caed1 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Tue, 26 Aug 2014 12:25:44 -0700 Subject: [PATCH 14/14] builder: Refactors according to @tiborvass's comments Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/builder.go | 15 -------- builder/dispatchers.go | 34 ++++++++--------- builder/evaluator.go | 84 ++++++++++++++++++++++-------------------- builder/internals.go | 82 +++++++++++++++++++++-------------------- builder/job.go | 4 +- builder/support.go | 2 +- 6 files changed, 106 insertions(+), 115 deletions(-) delete mode 100644 builder/builder.go diff --git a/builder/builder.go b/builder/builder.go deleted file mode 100644 index 685f27b061..0000000000 --- a/builder/builder.go +++ /dev/null @@ -1,15 +0,0 @@ -package builder - -import ( - "github.com/docker/docker/runconfig" -) - -// Create a new builder. See -func NewBuilder(opts *BuildOpts) *BuildFile { - return &BuildFile{ - Dockerfile: nil, - Config: &runconfig.Config{}, - Options: opts, - TmpContainers: map[string]struct{}{}, - } -} diff --git a/builder/dispatchers.go b/builder/dispatchers.go index fe0b5be579..8abdb51d8a 100644 --- a/builder/dispatchers.go +++ b/builder/dispatchers.go @@ -18,7 +18,7 @@ import ( ) // dispatch with no layer / parsing. This is effectively not a command. 
-func nullDispatch(b *BuildFile, args []string, attributes map[string]bool) error { +func nullDispatch(b *Builder, args []string, attributes map[string]bool) error { return nil } @@ -27,7 +27,7 @@ func nullDispatch(b *BuildFile, args []string, attributes map[string]bool) error // Sets the environment variable foo to bar, also makes interpolation // in the dockerfile available from the next statement on via ${foo}. // -func env(b *BuildFile, args []string, attributes map[string]bool) error { +func env(b *Builder, args []string, attributes map[string]bool) error { if len(args) != 2 { return fmt.Errorf("ENV accepts two arguments") } @@ -48,7 +48,7 @@ func env(b *BuildFile, args []string, attributes map[string]bool) error { // MAINTAINER some text // // Sets the maintainer metadata. -func maintainer(b *BuildFile, args []string, attributes map[string]bool) error { +func maintainer(b *Builder, args []string, attributes map[string]bool) error { if len(args) != 1 { return fmt.Errorf("MAINTAINER requires only one argument") } @@ -62,7 +62,7 @@ func maintainer(b *BuildFile, args []string, attributes map[string]bool) error { // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling // exist here. If you do not wish to have this automatic handling, use COPY. // -func add(b *BuildFile, args []string, attributes map[string]bool) error { +func add(b *Builder, args []string, attributes map[string]bool) error { if len(args) != 2 { return fmt.Errorf("ADD requires two arguments") } @@ -74,7 +74,7 @@ func add(b *BuildFile, args []string, attributes map[string]bool) error { // // Same as 'ADD' but without the tar and remote url handling. 
// -func dispatchCopy(b *BuildFile, args []string, attributes map[string]bool) error { +func dispatchCopy(b *Builder, args []string, attributes map[string]bool) error { if len(args) != 2 { return fmt.Errorf("COPY requires two arguments") } @@ -86,16 +86,16 @@ func dispatchCopy(b *BuildFile, args []string, attributes map[string]bool) error // // This sets the image the dockerfile will build on top of. // -func from(b *BuildFile, args []string, attributes map[string]bool) error { +func from(b *Builder, args []string, attributes map[string]bool) error { if len(args) != 1 { return fmt.Errorf("FROM requires one argument") } name := args[0] - image, err := b.Options.Daemon.Repositories().LookupImage(name) + image, err := b.Daemon.Repositories().LookupImage(name) if err != nil { - if b.Options.Daemon.Graph().IsNotExist(err) { + if b.Daemon.Graph().IsNotExist(err) { image, err = b.pullImage(name) } @@ -118,7 +118,7 @@ func from(b *BuildFile, args []string, attributes map[string]bool) error { // special cases. search for 'OnBuild' in internals.go for additional special // cases. // -func onbuild(b *BuildFile, args []string, attributes map[string]bool) error { +func onbuild(b *Builder, args []string, attributes map[string]bool) error { triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) switch triggerInstruction { case "ONBUILD": @@ -137,7 +137,7 @@ func onbuild(b *BuildFile, args []string, attributes map[string]bool) error { // // Set the working directory for future RUN/CMD/etc statements. 
// -func workdir(b *BuildFile, args []string, attributes map[string]bool) error { +func workdir(b *Builder, args []string, attributes map[string]bool) error { if len(args) != 1 { return fmt.Errorf("WORKDIR requires exactly one argument") } @@ -165,7 +165,7 @@ func workdir(b *BuildFile, args []string, attributes map[string]bool) error { // RUN echo hi # sh -c echo hi // RUN [ "echo", "hi" ] # echo hi // -func run(b *BuildFile, args []string, attributes map[string]bool) error { +func run(b *Builder, args []string, attributes map[string]bool) error { args = handleJsonArgs(args, attributes) if b.image == "" { @@ -220,7 +220,7 @@ func run(b *BuildFile, args []string, attributes map[string]bool) error { // Set the default command to run in the container (which may be empty). // Argument handling is the same as RUN. // -func cmd(b *BuildFile, args []string, attributes map[string]bool) error { +func cmd(b *Builder, args []string, attributes map[string]bool) error { b.Config.Cmd = handleJsonArgs(args, attributes) if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil { @@ -239,7 +239,7 @@ func cmd(b *BuildFile, args []string, attributes map[string]bool) error { // Handles command processing similar to CMD and RUN, only b.Config.Entrypoint // is initialized at NewBuilder time instead of through argument parsing. // -func entrypoint(b *BuildFile, args []string, attributes map[string]bool) error { +func entrypoint(b *Builder, args []string, attributes map[string]bool) error { b.Config.Entrypoint = handleJsonArgs(args, attributes) // if there is no cmd in current Dockerfile - cleanup cmd @@ -258,7 +258,7 @@ func entrypoint(b *BuildFile, args []string, attributes map[string]bool) error { // Expose ports for links and port mappings. This all ends up in // b.Config.ExposedPorts for runconfig. 
// -func expose(b *BuildFile, args []string, attributes map[string]bool) error { +func expose(b *Builder, args []string, attributes map[string]bool) error { portsTab := args if b.Config.ExposedPorts == nil { @@ -285,7 +285,7 @@ func expose(b *BuildFile, args []string, attributes map[string]bool) error { // Set the user to 'foo' for future commands and when running the // ENTRYPOINT/CMD at container run time. // -func user(b *BuildFile, args []string, attributes map[string]bool) error { +func user(b *Builder, args []string, attributes map[string]bool) error { if len(args) != 1 { return fmt.Errorf("USER requires exactly one argument") } @@ -299,7 +299,7 @@ func user(b *BuildFile, args []string, attributes map[string]bool) error { // Expose the volume /foo for use. Will also accept the JSON form, but either // way requires exactly one argument. // -func volume(b *BuildFile, args []string, attributes map[string]bool) error { +func volume(b *Builder, args []string, attributes map[string]bool) error { if len(args) != 1 { return fmt.Errorf("Volume cannot be empty") } @@ -319,6 +319,6 @@ func volume(b *BuildFile, args []string, attributes map[string]bool) error { } // INSERT is no longer accepted, but we still parse it. -func insert(b *BuildFile, args []string, attributes map[string]bool) error { +func insert(b *Builder, args []string, attributes map[string]bool) error { return fmt.Errorf("INSERT has been deprecated. 
Please use ADD instead") } diff --git a/builder/evaluator.go b/builder/evaluator.go index ec8edc98c8..33d8f080e0 100644 --- a/builder/evaluator.go +++ b/builder/evaluator.go @@ -20,11 +20,9 @@ package builder import ( - "bytes" "errors" "fmt" "io" - "io/ioutil" "os" "path" "strings" @@ -42,10 +40,10 @@ var ( ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") ) -var evaluateTable map[string]func(*BuildFile, []string, map[string]bool) error +var evaluateTable map[string]func(*Builder, []string, map[string]bool) error func init() { - evaluateTable = map[string]func(*BuildFile, []string, map[string]bool) error{ + evaluateTable = map[string]func(*Builder, []string, map[string]bool) error{ "env": env, "maintainer": maintainer, "add": add, @@ -66,23 +64,7 @@ func init() { // internal struct, used to maintain configuration of the Dockerfile's // processing as it evaluates the parsing result. -type BuildFile struct { - Dockerfile *parser.Node // the syntax tree of the dockerfile - Config *runconfig.Config // runconfig for cmd, run, entrypoint etc. - Options *BuildOpts // see below - - // both of these are controlled by the Remove and ForceRemove options in BuildOpts - TmpContainers map[string]struct{} // a map of containers used for removes - - image string // image name for commit processing - maintainer string // maintainer name. could probably be removed. - cmdSet bool // indicates is CMD was set in current Dockerfile - context *tarsum.TarSum // the context is a tarball that is uploaded by the client - contextPath string // the path of the temporary directory the local context is unpacked to (server side) - -} - -type BuildOpts struct { +type Builder struct { Daemon *daemon.Daemon Engine *engine.Engine @@ -104,6 +86,19 @@ type BuildOpts struct { // Deprecated, original writer used for ImagePull. To be removed. OutOld io.Writer StreamFormatter *utils.StreamFormatter + + Config *runconfig.Config // runconfig for cmd, run, entrypoint etc. 
+ + // both of these are controlled by the Remove and ForceRemove options in BuildOpts + TmpContainers map[string]struct{} // a map of containers used for removes + + dockerfile *parser.Node // the syntax tree of the dockerfile + image string // image name for commit processing + maintainer string // maintainer name. could probably be removed. + cmdSet bool // indicates is CMD was set in current Dockerfile + context *tarsum.TarSum // the context is a tarball that is uploaded by the client + contextPath string // the path of the temporary directory the local context is unpacked to (server side) + } // Run the builder with the context. This is the lynchpin of this package. This @@ -118,38 +113,48 @@ type BuildOpts struct { // processing. // * Print a happy message and return the image ID. // -func (b *BuildFile) Run(context io.Reader) (string, error) { +func (b *Builder) Run(context io.Reader) (string, error) { if err := b.readContext(context); err != nil { return "", err } filename := path.Join(b.contextPath, "Dockerfile") - if _, err := os.Stat(filename); os.IsNotExist(err) { + + fi, err := os.Stat(filename) + if os.IsNotExist(err) { return "", fmt.Errorf("Cannot build a directory without a Dockerfile") } - fileBytes, err := ioutil.ReadFile(filename) - if err != nil { - return "", err - } - if len(fileBytes) == 0 { + if fi.Size() == 0 { return "", ErrDockerfileEmpty } - ast, err := parser.Parse(bytes.NewReader(fileBytes)) + + f, err := os.Open(filename) if err != nil { return "", err } - b.Dockerfile = ast + defer f.Close() - for i, n := range b.Dockerfile.Children { + ast, err := parser.Parse(f) + if err != nil { + return "", err + } + + b.dockerfile = ast + + // some initializations that would not have been supplied by the caller. 
+ b.Config = &runconfig.Config{} + b.TmpContainers = map[string]struct{}{} + + for i, n := range b.dockerfile.Children { if err := b.dispatch(i, n); err != nil { - if b.Options.ForceRemove { + if b.ForceRemove { b.clearTmp() } return "", err } - fmt.Fprintf(b.Options.OutStream, " ---> %s\n", utils.TruncateID(b.image)) - if b.Options.Remove { + fmt.Fprintf(b.OutStream, " ---> %s\n", utils.TruncateID(b.image)) + if b.Remove { b.clearTmp() } } @@ -158,7 +163,7 @@ func (b *BuildFile) Run(context io.Reader) (string, error) { return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?\n") } - fmt.Fprintf(b.Options.OutStream, "Successfully built %s\n", utils.TruncateID(b.image)) + fmt.Fprintf(b.OutStream, "Successfully built %s\n", utils.TruncateID(b.image)) return b.image, nil } @@ -168,7 +173,7 @@ func (b *BuildFile) Run(context io.Reader) (string, error) { // Child[Node, Node, Node] where Child is from parser.Node.Children and each // node comes from parser.Node.Next. This forms a "line" with a statement and // arguments and we process them in this normalized form by hitting -// evaluateTable with the leaf nodes of the command and the BuildFile object. +// evaluateTable with the leaf nodes of the command and the Builder object. // // ONBUILD is a special case; in this case the parser will emit: // Child[Node, Child[Node, Node...]] where the first node is the literal @@ -176,14 +181,13 @@ func (b *BuildFile) Run(context io.Reader) (string, error) { // such as `RUN` in ONBUILD RUN foo. There is special case logic in here to // deal with that, at least until it becomes more of a general concern with new // features. 
-func (b *BuildFile) dispatch(stepN int, ast *parser.Node) error { +func (b *Builder) dispatch(stepN int, ast *parser.Node) error { cmd := ast.Value attrs := ast.Attributes strs := []string{} msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd)) if cmd == "onbuild" { - fmt.Fprintf(b.Options.OutStream, "%#v\n", ast.Next.Children[0].Value) ast = ast.Next.Children[0] strs = append(strs, b.replaceEnv(ast.Value)) msg += " " + ast.Value @@ -195,7 +199,7 @@ func (b *BuildFile) dispatch(stepN int, ast *parser.Node) error { msg += " " + ast.Value } - fmt.Fprintf(b.Options.OutStream, "%s\n", msg) + fmt.Fprintln(b.OutStream, msg) // XXX yes, we skip any cmds that are not valid; the parser should have // picked these out already. @@ -203,7 +207,7 @@ func (b *BuildFile) dispatch(stepN int, ast *parser.Node) error { return f(b, strs, attrs) } - fmt.Fprintf(b.Options.ErrStream, "# Skipping unknown instruction %s\n", strings.ToUpper(cmd)) + fmt.Fprintf(b.ErrStream, "# Skipping unknown instruction %s\n", strings.ToUpper(cmd)) return nil } diff --git a/builder/internals.go b/builder/internals.go index 51b2b63ab9..1767d7d9dd 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -30,7 +30,7 @@ import ( "github.com/docker/docker/utils" ) -func (b *BuildFile) readContext(context io.Reader) error { +func (b *Builder) readContext(context io.Reader) error { tmpdirPath, err := ioutil.TempDir("", "docker-build") if err != nil { return err @@ -50,7 +50,7 @@ func (b *BuildFile) readContext(context io.Reader) error { return nil } -func (b *BuildFile) commit(id string, autoCmd []string, comment string) error { +func (b *Builder) commit(id string, autoCmd []string, comment string) error { if b.image == "" { return fmt.Errorf("Please provide a source image with `from` prior to commit") } @@ -68,15 +68,15 @@ func (b *BuildFile) commit(id string, autoCmd []string, comment string) error { return nil } - container, warnings, err := b.Options.Daemon.Create(b.Config, "") + container, 
warnings, err := b.Daemon.Create(b.Config, "") if err != nil { return err } for _, warning := range warnings { - fmt.Fprintf(b.Options.OutStream, " ---> [Warning] %s\n", warning) + fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning) } b.TmpContainers[container.ID] = struct{}{} - fmt.Fprintf(b.Options.OutStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) + fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) id = container.ID if err := container.Mount(); err != nil { @@ -84,7 +84,7 @@ func (b *BuildFile) commit(id string, autoCmd []string, comment string) error { } defer container.Unmount() } - container := b.Options.Daemon.Get(id) + container := b.Daemon.Get(id) if container == nil { return fmt.Errorf("An error occured while creating the container") } @@ -93,7 +93,7 @@ func (b *BuildFile) commit(id string, autoCmd []string, comment string) error { autoConfig := *b.Config autoConfig.Cmd = autoCmd // Commit the container - image, err := b.Options.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig) + image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig) if err != nil { return err } @@ -101,7 +101,7 @@ func (b *BuildFile) commit(id string, autoCmd []string, comment string) error { return nil } -func (b *BuildFile) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error { +func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error { if b.context == nil { return fmt.Errorf("No context given. 
Impossible to use %s", cmdName) } @@ -200,7 +200,7 @@ func (b *BuildFile) runContextCommand(args []string, allowRemote bool, allowDeco } // Hash path and check the cache - if b.Options.UtilizeCache { + if b.UtilizeCache { var ( hash string sums = b.context.GetSums() @@ -244,7 +244,7 @@ func (b *BuildFile) runContextCommand(args []string, allowRemote bool, allowDeco } // Create the container - container, _, err := b.Options.Daemon.Create(b.Config, "") + container, _, err := b.Daemon.Create(b.Config, "") if err != nil { return err } @@ -268,27 +268,27 @@ func (b *BuildFile) runContextCommand(args []string, allowRemote bool, allowDeco return nil } -func (b *BuildFile) pullImage(name string) (*imagepkg.Image, error) { +func (b *Builder) pullImage(name string) (*imagepkg.Image, error) { remote, tag := parsers.ParseRepositoryTag(name) - pullRegistryAuth := b.Options.AuthConfig - if len(b.Options.AuthConfigFile.Configs) > 0 { + pullRegistryAuth := b.AuthConfig + if len(b.AuthConfigFile.Configs) > 0 { // The request came with a full auth config file, we prefer to use that endpoint, _, err := registry.ResolveRepositoryName(remote) if err != nil { return nil, err } - resolvedAuth := b.Options.AuthConfigFile.ResolveAuthConfig(endpoint) + resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(endpoint) pullRegistryAuth = &resolvedAuth } - job := b.Options.Engine.Job("pull", remote, tag) - job.SetenvBool("json", b.Options.StreamFormatter.Json()) + job := b.Engine.Job("pull", remote, tag) + job.SetenvBool("json", b.StreamFormatter.Json()) job.SetenvBool("parallel", true) job.SetenvJson("authConfig", pullRegistryAuth) - job.Stdout.Add(b.Options.OutOld) + job.Stdout.Add(b.OutOld) if err := job.Run(); err != nil { return nil, err } - image, err := b.Options.Daemon.Repositories().LookupImage(name) + image, err := b.Daemon.Repositories().LookupImage(name) if err != nil { return nil, err } @@ -296,7 +296,7 @@ func (b *BuildFile) pullImage(name string) (*imagepkg.Image, error) { return 
image, nil } -func (b *BuildFile) processImageFrom(img *imagepkg.Image) error { +func (b *Builder) processImageFrom(img *imagepkg.Image) error { b.image = img.ID if img.Config != nil { @@ -309,7 +309,7 @@ func (b *BuildFile) processImageFrom(img *imagepkg.Image) error { // Process ONBUILD triggers if they exist if nTriggers := len(b.Config.OnBuild); nTriggers != 0 { - fmt.Fprintf(b.Options.ErrStream, "# Executing %d build triggers\n", nTriggers) + fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers) } // Copy the ONBUILD triggers, and remove them from the config, since the config will be commited. @@ -330,7 +330,8 @@ func (b *BuildFile) processImageFrom(img *imagepkg.Image) error { } // FIXME we have to run the evaluator manually here. This does not belong - // in this function. + // in this function. Once removed, the init() in evaluator.go should no + // longer be necessary. if f, ok := evaluateTable[strings.ToLower(stepInstruction)]; ok { if err := f(b, splitStep[1:], nil); err != nil { @@ -344,17 +345,17 @@ func (b *BuildFile) processImageFrom(img *imagepkg.Image) error { return nil } -// probeCache checks to see if image-caching is enabled (`b.Options.UtilizeCache`) +// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`) // and if so attempts to look up the current `b.image` and `b.Config` pair -// in the current server `b.Options.Daemon`. If an image is found, probeCache returns +// in the current server `b.Daemon`. If an image is found, probeCache returns // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there // is any error, it returns `(false, err)`. 
-func (b *BuildFile) probeCache() (bool, error) { - if b.Options.UtilizeCache { - if cache, err := b.Options.Daemon.ImageGetCached(b.image, b.Config); err != nil { +func (b *Builder) probeCache() (bool, error) { + if b.UtilizeCache { + if cache, err := b.Daemon.ImageGetCached(b.image, b.Config); err != nil { return false, err } else if cache != nil { - fmt.Fprintf(b.Options.OutStream, " ---> Using cache\n") + fmt.Fprintf(b.OutStream, " ---> Using cache\n") log.Debugf("[BUILDER] Use cached version") b.image = cache.ID return true, nil @@ -365,19 +366,20 @@ func (b *BuildFile) probeCache() (bool, error) { return false, nil } -func (b *BuildFile) create() (*daemon.Container, error) { +func (b *Builder) create() (*daemon.Container, error) { if b.image == "" { return nil, fmt.Errorf("Please provide a source image with `from` prior to run") } b.Config.Image = b.image // Create the container - c, _, err := b.Options.Daemon.Create(b.Config, "") + c, _, err := b.Daemon.Create(b.Config, "") if err != nil { return nil, err } + b.TmpContainers[c.ID] = struct{}{} - fmt.Fprintf(b.Options.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) + fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) // override the entry point that may have been picked up from the base image c.Path = b.Config.Cmd[0] @@ -386,16 +388,16 @@ func (b *BuildFile) create() (*daemon.Container, error) { return c, nil } -func (b *BuildFile) run(c *daemon.Container) error { +func (b *Builder) run(c *daemon.Container) error { var errCh chan error - if b.Options.Verbose { + if b.Verbose { errCh = utils.Go(func() error { // FIXME: call the 'attach' job so that daemon.Attach can be made private // // FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach // but without hijacking for stdin. Also, with attach there can be race // condition because of some output already was printed before it. 
- return <-b.Options.Daemon.Attach(c, nil, nil, b.Options.OutStream, b.Options.ErrStream) + return <-b.Daemon.Attach(c, nil, nil, b.OutStream, b.ErrStream) }) } @@ -422,7 +424,7 @@ func (b *BuildFile) run(c *daemon.Container) error { return nil } -func (b *BuildFile) checkPathForAddition(orig string) error { +func (b *Builder) checkPathForAddition(orig string) error { origPath := path.Join(b.contextPath, orig) origPath, err := filepath.EvalSymlinks(origPath) if err != nil { @@ -443,7 +445,7 @@ func (b *BuildFile) checkPathForAddition(orig string) error { return nil } -func (b *BuildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error { +func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error { var ( err error destExists = true @@ -548,14 +550,14 @@ func fixPermissions(destination string, uid, gid int) error { }) } -func (b *BuildFile) clearTmp() { +func (b *Builder) clearTmp() { for c := range b.TmpContainers { - tmp := b.Options.Daemon.Get(c) - if err := b.Options.Daemon.Destroy(tmp); err != nil { - fmt.Fprintf(b.Options.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) + tmp := b.Daemon.Get(c) + if err := b.Daemon.Destroy(tmp); err != nil { + fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) } else { delete(b.TmpContainers, c) - fmt.Fprintf(b.Options.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c)) + fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c)) } } } diff --git a/builder/job.go b/builder/job.go index e1233e0a08..1aa2c3b6b9 100644 --- a/builder/job.go +++ b/builder/job.go @@ -85,7 +85,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { sf := utils.NewStreamFormatter(job.GetenvBool("json")) - opts := &BuildOpts{ + builder := &Builder{ Daemon: b.Daemon, Engine: b.Engine, OutStream: &utils.StdoutFormater{ @@ 
-106,7 +106,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { AuthConfigFile: configFile, } - id, err := NewBuilder(opts).Run(context) + id, err := builder.Run(context) if err != nil { return job.Error(err) } diff --git a/builder/support.go b/builder/support.go index bae97e370f..de5d57b501 100644 --- a/builder/support.go +++ b/builder/support.go @@ -10,7 +10,7 @@ var ( ) // handle environment replacement. Used in dispatcher. -func (b *BuildFile) replaceEnv(str string) string { +func (b *Builder) replaceEnv(str string) string { for _, match := range TOKEN_ENV_INTERPOLATION.FindAllString(str, -1) { match = match[strings.Index(match, "$"):] matchKey := strings.Trim(match, "${}")