mirror of https://github.com/docker/docs.git
Merge pull request #7461 from erikh/rewrite_dockerfile_parser
Cleanup: Refactor Dockerfile parser
This commit is contained in:
commit
d9f8d3ea9f
|
@ -0,0 +1,324 @@
|
||||||
|
package builder
|
||||||
|
|
||||||
|
// This file contains the dispatchers for each command. Note that
|
||||||
|
// `nullDispatch` is not actually a command, but support for commands we parse
|
||||||
|
// but do nothing with.
|
||||||
|
//
|
||||||
|
// See evaluator.go for a higher level discussion of the whole evaluator
|
||||||
|
// package.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/docker/nat"
|
||||||
|
"github.com/docker/docker/pkg/log"
|
||||||
|
"github.com/docker/docker/runconfig"
|
||||||
|
)
|
||||||
|
|
||||||
|
// dispatch with no layer / parsing. This is effectively not a command.
|
||||||
|
func nullDispatch(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ENV foo bar
|
||||||
|
//
|
||||||
|
// Sets the environment variable foo to bar, also makes interpolation
|
||||||
|
// in the dockerfile available from the next statement on via ${foo}.
|
||||||
|
//
|
||||||
|
func env(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
if len(args) != 2 {
|
||||||
|
return fmt.Errorf("ENV accepts two arguments")
|
||||||
|
}
|
||||||
|
|
||||||
|
fullEnv := fmt.Sprintf("%s=%s", args[0], args[1])
|
||||||
|
|
||||||
|
for i, envVar := range b.Config.Env {
|
||||||
|
envParts := strings.SplitN(envVar, "=", 2)
|
||||||
|
if args[0] == envParts[0] {
|
||||||
|
b.Config.Env[i] = fullEnv
|
||||||
|
return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.Config.Env = append(b.Config.Env, fullEnv)
|
||||||
|
return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MAINTAINER some text <maybe@an.email.address>
|
||||||
|
//
|
||||||
|
// Sets the maintainer metadata.
|
||||||
|
func maintainer(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
if len(args) != 1 {
|
||||||
|
return fmt.Errorf("MAINTAINER requires only one argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
b.maintainer = args[0]
|
||||||
|
return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ADD foo /path
|
||||||
|
//
|
||||||
|
// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
|
||||||
|
// exist here. If you do not wish to have this automatic handling, use COPY.
|
||||||
|
//
|
||||||
|
func add(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
if len(args) != 2 {
|
||||||
|
return fmt.Errorf("ADD requires two arguments")
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.runContextCommand(args, true, true, "ADD")
|
||||||
|
}
|
||||||
|
|
||||||
|
// COPY foo /path
|
||||||
|
//
|
||||||
|
// Same as 'ADD' but without the tar and remote url handling.
|
||||||
|
//
|
||||||
|
func dispatchCopy(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
if len(args) != 2 {
|
||||||
|
return fmt.Errorf("COPY requires two arguments")
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.runContextCommand(args, false, false, "COPY")
|
||||||
|
}
|
||||||
|
|
||||||
|
// FROM imagename
|
||||||
|
//
|
||||||
|
// This sets the image the dockerfile will build on top of.
|
||||||
|
//
|
||||||
|
func from(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
if len(args) != 1 {
|
||||||
|
return fmt.Errorf("FROM requires one argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
name := args[0]
|
||||||
|
|
||||||
|
image, err := b.Daemon.Repositories().LookupImage(name)
|
||||||
|
if err != nil {
|
||||||
|
if b.Daemon.Graph().IsNotExist(err) {
|
||||||
|
image, err = b.pullImage(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// note that the top level err will still be !nil here if IsNotExist is
|
||||||
|
// not the error. This approach just simplifies hte logic a bit.
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.processImageFrom(image)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ONBUILD RUN echo yo
|
||||||
|
//
|
||||||
|
// ONBUILD triggers run when the image is used in a FROM statement.
|
||||||
|
//
|
||||||
|
// ONBUILD handling has a lot of special-case functionality, the heading in
|
||||||
|
// evaluator.go and comments around dispatch() in the same file explain the
|
||||||
|
// special cases. search for 'OnBuild' in internals.go for additional special
|
||||||
|
// cases.
|
||||||
|
//
|
||||||
|
func onbuild(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
|
||||||
|
switch triggerInstruction {
|
||||||
|
case "ONBUILD":
|
||||||
|
return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
|
||||||
|
case "MAINTAINER", "FROM":
|
||||||
|
return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
|
||||||
|
}
|
||||||
|
|
||||||
|
trigger := strings.Join(args, " ")
|
||||||
|
|
||||||
|
b.Config.OnBuild = append(b.Config.OnBuild, trigger)
|
||||||
|
return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
|
||||||
|
}
|
||||||
|
|
||||||
|
// WORKDIR /tmp
|
||||||
|
//
|
||||||
|
// Set the working directory for future RUN/CMD/etc statements.
|
||||||
|
//
|
||||||
|
func workdir(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
if len(args) != 1 {
|
||||||
|
return fmt.Errorf("WORKDIR requires exactly one argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
workdir := args[0]
|
||||||
|
|
||||||
|
if workdir[0] == '/' {
|
||||||
|
b.Config.WorkingDir = workdir
|
||||||
|
} else {
|
||||||
|
if b.Config.WorkingDir == "" {
|
||||||
|
b.Config.WorkingDir = "/"
|
||||||
|
}
|
||||||
|
b.Config.WorkingDir = filepath.Join(b.Config.WorkingDir, workdir)
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
|
||||||
|
}
|
||||||
|
|
||||||
|
// RUN some command yo
//
// run a command and commit the image. Args are automatically prepended with
// 'sh -c' in the event there is only one argument. The difference in
// processing:
//
// RUN echo hi          # sh -c echo hi
// RUN [ "echo", "hi" ] # echo hi
//
func run(b *Builder, args []string, attributes map[string]bool) error {
	// JSON-array form runs as-is; string form gets the "sh -c" wrapper.
	args = handleJsonArgs(args, attributes)

	if b.image == "" {
		return fmt.Errorf("Please provide a source image with `from` prior to run")
	}

	// Parse the command as if it were a `docker run <image> <args...>` line
	// so RUN inherits the same argument semantics.
	config, _, _, err := runconfig.Parse(append([]string{b.image}, args...), nil)
	if err != nil {
		return err
	}

	// Save the builder's Cmd so it can be restored after the run; the
	// temporary Cmd below is what the cache probe and the container use.
	cmd := b.Config.Cmd
	// set Cmd manually, this is special case only for Dockerfiles
	b.Config.Cmd = config.Cmd
	runconfig.Merge(b.Config, config)

	// Restore the original Cmd on every exit path.
	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

	log.Debugf("Command to be executed: %v", b.Config.Cmd)

	// probeCache keys off b.image + b.Config (including the Cmd we just set);
	// a hit means an identical RUN already produced an image we can reuse.
	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	c, err := b.create()
	if err != nil {
		return err
	}

	// Ensure that we keep the container mounted until the commit
	// to avoid unmounting and then mounting directly again
	// NOTE(review): the Mount error is discarded here — verify that is
	// intentional (commit would presumably fail later anyway).
	c.Mount()
	defer c.Unmount()

	err = b.run(c)
	if err != nil {
		return err
	}
	// Commit with the ORIGINAL cmd (not the temporary run Cmd) so the
	// resulting image keeps the user's configured command.
	if err := b.commit(c.ID, cmd, "run"); err != nil {
		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
// CMD foo
|
||||||
|
//
|
||||||
|
// Set the default command to run in the container (which may be empty).
|
||||||
|
// Argument handling is the same as RUN.
|
||||||
|
//
|
||||||
|
func cmd(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
b.Config.Cmd = handleJsonArgs(args, attributes)
|
||||||
|
|
||||||
|
if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
b.cmdSet = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ENTRYPOINT /usr/sbin/nginx
|
||||||
|
//
|
||||||
|
// Set the entrypoint (which defaults to sh -c) to /usr/sbin/nginx. Will
|
||||||
|
// accept the CMD as the arguments to /usr/sbin/nginx.
|
||||||
|
//
|
||||||
|
// Handles command processing similar to CMD and RUN, only b.Config.Entrypoint
|
||||||
|
// is initialized at NewBuilder time instead of through argument parsing.
|
||||||
|
//
|
||||||
|
func entrypoint(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
b.Config.Entrypoint = handleJsonArgs(args, attributes)
|
||||||
|
|
||||||
|
// if there is no cmd in current Dockerfile - cleanup cmd
|
||||||
|
if !b.cmdSet {
|
||||||
|
b.Config.Cmd = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %v", b.Config.Entrypoint)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EXPOSE 6667/tcp 7000/tcp
|
||||||
|
//
|
||||||
|
// Expose ports for links and port mappings. This all ends up in
|
||||||
|
// b.Config.ExposedPorts for runconfig.
|
||||||
|
//
|
||||||
|
func expose(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
portsTab := args
|
||||||
|
|
||||||
|
if b.Config.ExposedPorts == nil {
|
||||||
|
b.Config.ExposedPorts = make(nat.PortSet)
|
||||||
|
}
|
||||||
|
|
||||||
|
ports, _, err := nat.ParsePortSpecs(append(portsTab, b.Config.PortSpecs...))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for port := range ports {
|
||||||
|
if _, exists := b.Config.ExposedPorts[port]; !exists {
|
||||||
|
b.Config.ExposedPorts[port] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.Config.PortSpecs = nil
|
||||||
|
|
||||||
|
return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
|
||||||
|
}
|
||||||
|
|
||||||
|
// USER foo
|
||||||
|
//
|
||||||
|
// Set the user to 'foo' for future commands and when running the
|
||||||
|
// ENTRYPOINT/CMD at container run time.
|
||||||
|
//
|
||||||
|
func user(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
if len(args) != 1 {
|
||||||
|
return fmt.Errorf("USER requires exactly one argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
b.Config.User = args[0]
|
||||||
|
return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
|
||||||
|
}
|
||||||
|
|
||||||
|
// VOLUME /foo
|
||||||
|
//
|
||||||
|
// Expose the volume /foo for use. Will also accept the JSON form, but either
|
||||||
|
// way requires exactly one argument.
|
||||||
|
//
|
||||||
|
func volume(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
if len(args) != 1 {
|
||||||
|
return fmt.Errorf("Volume cannot be empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
volume := args
|
||||||
|
|
||||||
|
if b.Config.Volumes == nil {
|
||||||
|
b.Config.Volumes = map[string]struct{}{}
|
||||||
|
}
|
||||||
|
for _, v := range volume {
|
||||||
|
b.Config.Volumes[v] = struct{}{}
|
||||||
|
}
|
||||||
|
if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// INSERT is no longer accepted, but we still parse it.
|
||||||
|
func insert(b *Builder, args []string, attributes map[string]bool) error {
|
||||||
|
return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
|
||||||
|
}
|
|
@ -0,0 +1,213 @@
|
||||||
|
// builder is the evaluation step in the Dockerfile parse/evaluate pipeline.
|
||||||
|
//
|
||||||
|
// It incorporates a dispatch table based on the parser.Node values (see the
|
||||||
|
// parser package for more information) that are yielded from the parser itself.
|
||||||
|
// Calling NewBuilder with the BuildOpts struct can be used to customize the
|
||||||
|
// experience for execution purposes only. Parsing is controlled in the parser
|
||||||
|
// package, and this division of responsibility should be respected.
|
||||||
|
//
|
||||||
|
// Please see the jump table targets for the actual invocations, most of which
|
||||||
|
// will call out to the functions in internals.go to deal with their tasks.
|
||||||
|
//
|
||||||
|
// ONBUILD is a special case, which is covered in the onbuild() func in
|
||||||
|
// dispatchers.go.
|
||||||
|
//
|
||||||
|
// The evaluator uses the concept of "steps", which are usually each processable
|
||||||
|
// line in the Dockerfile. Each step is numbered and certain actions are taken
|
||||||
|
// before and after each step, such as creating an image ID and removing temporary
|
||||||
|
// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which
|
||||||
|
// includes its own set of steps (usually only one of them).
|
||||||
|
package builder
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/docker/builder/parser"
|
||||||
|
"github.com/docker/docker/daemon"
|
||||||
|
"github.com/docker/docker/engine"
|
||||||
|
"github.com/docker/docker/pkg/tarsum"
|
||||||
|
"github.com/docker/docker/registry"
|
||||||
|
"github.com/docker/docker/runconfig"
|
||||||
|
"github.com/docker/docker/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// ErrDockerfileEmpty is returned by Run when the Dockerfile exists in
	// the context but is zero bytes long.
	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
)

// evaluateTable maps lowercased Dockerfile instruction names to their
// dispatch functions (see dispatchers.go). It is populated in init().
var evaluateTable map[string]func(*Builder, []string, map[string]bool) error
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
evaluateTable = map[string]func(*Builder, []string, map[string]bool) error{
|
||||||
|
"env": env,
|
||||||
|
"maintainer": maintainer,
|
||||||
|
"add": add,
|
||||||
|
"copy": dispatchCopy, // copy() is a go builtin
|
||||||
|
"from": from,
|
||||||
|
"onbuild": onbuild,
|
||||||
|
"workdir": workdir,
|
||||||
|
"docker-version": nullDispatch, // we don't care about docker-version
|
||||||
|
"run": run,
|
||||||
|
"cmd": cmd,
|
||||||
|
"entrypoint": entrypoint,
|
||||||
|
"expose": expose,
|
||||||
|
"volume": volume,
|
||||||
|
"user": user,
|
||||||
|
"insert": insert,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// internal struct, used to maintain configuration of the Dockerfile's
// processing as it evaluates the parsing result.
type Builder struct {
	Daemon *daemon.Daemon
	Engine *engine.Engine

	// effectively stdio for the run. Because it is not stdio, I said
	// "Effectively". Do not use stdio anywhere in this package for any reason.
	OutStream io.Writer
	ErrStream io.Writer

	Verbose      bool
	UtilizeCache bool // when true, probeCache may reuse previously built layers

	// controls how images and containers are handled between steps.
	Remove      bool // remove intermediate containers after each successful step
	ForceRemove bool // remove intermediate containers even when a step fails

	AuthConfig     *registry.AuthConfig
	AuthConfigFile *registry.ConfigFile

	// Deprecated, original writer used for ImagePull. To be removed.
	OutOld          io.Writer
	StreamFormatter *utils.StreamFormatter

	Config *runconfig.Config // runconfig for cmd, run, entrypoint etc.

	// both of these are controlled by the Remove and ForceRemove options in BuildOpts
	TmpContainers map[string]struct{} // a map of containers used for removes

	dockerfile  *parser.Node   // the syntax tree of the dockerfile
	image       string         // image name for commit processing
	maintainer  string         // maintainer name. could probably be removed.
	cmdSet      bool           // indicates if CMD was set in current Dockerfile
	context     *tarsum.TarSum // the context is a tarball that is uploaded by the client
	contextPath string         // the path of the temporary directory the local context is unpacked to (server side)
}
|
||||||
|
|
||||||
|
// Run the builder with the context. This is the lynchpin of this package. This
|
||||||
|
// will (barring errors):
|
||||||
|
//
|
||||||
|
// * call readContext() which will set up the temporary directory and unpack
|
||||||
|
// the context into it.
|
||||||
|
// * read the dockerfile
|
||||||
|
// * parse the dockerfile
|
||||||
|
// * walk the parse tree and execute it by dispatching to handlers. If Remove
|
||||||
|
// or ForceRemove is set, additional cleanup around containers happens after
|
||||||
|
// processing.
|
||||||
|
// * Print a happy message and return the image ID.
|
||||||
|
//
|
||||||
|
func (b *Builder) Run(context io.Reader) (string, error) {
|
||||||
|
if err := b.readContext(context); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
filename := path.Join(b.contextPath, "Dockerfile")
|
||||||
|
|
||||||
|
fi, err := os.Stat(filename)
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return "", fmt.Errorf("Cannot build a directory without a Dockerfile")
|
||||||
|
}
|
||||||
|
if fi.Size() == 0 {
|
||||||
|
return "", ErrDockerfileEmpty
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.Open(filename)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
ast, err := parser.Parse(f)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
b.dockerfile = ast
|
||||||
|
|
||||||
|
// some initializations that would not have been supplied by the caller.
|
||||||
|
b.Config = &runconfig.Config{}
|
||||||
|
b.TmpContainers = map[string]struct{}{}
|
||||||
|
|
||||||
|
for i, n := range b.dockerfile.Children {
|
||||||
|
if err := b.dispatch(i, n); err != nil {
|
||||||
|
if b.ForceRemove {
|
||||||
|
b.clearTmp()
|
||||||
|
}
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b.OutStream, " ---> %s\n", utils.TruncateID(b.image))
|
||||||
|
if b.Remove {
|
||||||
|
b.clearTmp()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.image == "" {
|
||||||
|
return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(b.OutStream, "Successfully built %s\n", utils.TruncateID(b.image))
|
||||||
|
return b.image, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// This method is the entrypoint to all statement handling routines.
//
// Almost all nodes will have this structure:
// Child[Node, Node, Node] where Child is from parser.Node.Children and each
// node comes from parser.Node.Next. This forms a "line" with a statement and
// arguments and we process them in this normalized form by hitting
// evaluateTable with the leaf nodes of the command and the Builder object.
//
// ONBUILD is a special case; in this case the parser will emit:
// Child[Node, Child[Node, Node...]] where the first node is the literal
// "onbuild" and the child entrypoint is the command of the ONBUILD statement,
// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
// deal with that, at least until it becomes more of a general concern with new
// features.
func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
	cmd := ast.Value
	attrs := ast.Attributes
	strs := []string{}
	msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd))

	if cmd == "onbuild" {
		// Re-root the walk at the nested statement so the loop below
		// collects the ONBUILD'd instruction and its args as the argument
		// list passed to the onbuild dispatcher.
		ast = ast.Next.Children[0]
		strs = append(strs, b.replaceEnv(ast.Value))
		msg += " " + ast.Value
	}

	// Walk the sibling chain, collecting each argument with ${var}
	// interpolation applied. Note that ast is mutated as the cursor.
	for ast.Next != nil {
		ast = ast.Next
		strs = append(strs, b.replaceEnv(ast.Value))
		msg += " " + ast.Value
	}

	fmt.Fprintln(b.OutStream, msg)

	// XXX yes, we skip any cmds that are not valid; the parser should have
	// picked these out already.
	if f, ok := evaluateTable[cmd]; ok {
		return f(b, strs, attrs)
	}

	fmt.Fprintf(b.ErrStream, "# Skipping unknown instruction %s\n", strings.ToUpper(cmd))

	return nil
}
|
|
@ -0,0 +1,563 @@
|
||||||
|
package builder
|
||||||
|
|
||||||
|
// internals for handling commands. Covers many areas and a lot of
|
||||||
|
// non-contiguous functionality. Please read the comments.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/docker/archive"
|
||||||
|
"github.com/docker/docker/daemon"
|
||||||
|
imagepkg "github.com/docker/docker/image"
|
||||||
|
"github.com/docker/docker/pkg/log"
|
||||||
|
"github.com/docker/docker/pkg/parsers"
|
||||||
|
"github.com/docker/docker/pkg/symlink"
|
||||||
|
"github.com/docker/docker/pkg/system"
|
||||||
|
"github.com/docker/docker/pkg/tarsum"
|
||||||
|
"github.com/docker/docker/registry"
|
||||||
|
"github.com/docker/docker/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (b *Builder) readContext(context io.Reader) error {
|
||||||
|
tmpdirPath, err := ioutil.TempDir("", "docker-build")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
decompressedStream, err := archive.DecompressStream(context)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
b.context = &tarsum.TarSum{Reader: decompressedStream, DisableCompression: true}
|
||||||
|
if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
b.contextPath = tmpdirPath
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// commit records the current build state as a new image layer and advances
// b.image to the new image's ID.
//
// If id is empty, a throwaway "#(nop)" container is created (or skipped
// entirely on a cache hit) just to carry the metadata change; otherwise the
// given container (e.g. one that just ran a RUN step) is committed.
// autoCmd becomes the committed image's Cmd; comment becomes the nop command
// text and therefore participates in the cache key.
func (b *Builder) commit(id string, autoCmd []string, comment string) error {
	if b.image == "" {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.Config.Image = b.image
	if id == "" {
		// Metadata-only commit: stage a nop Cmd for the cache probe and the
		// throwaway container, restoring the real Cmd on exit.
		cmd := b.Config.Cmd
		b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
		defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		if hit {
			// probeCache already set b.image to the cached image ID.
			return nil
		}

		container, warnings, err := b.Daemon.Create(b.Config, "")
		if err != nil {
			return err
		}
		for _, warning := range warnings {
			fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
		}
		// Track the container so clearTmp can remove it between steps.
		b.TmpContainers[container.ID] = struct{}{}
		fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
		id = container.ID

		// Keep the container mounted for the duration of the commit below.
		if err := container.Mount(); err != nil {
			return err
		}
		defer container.Unmount()
	}
	container := b.Daemon.Get(id)
	if container == nil {
		return fmt.Errorf("An error occured while creating the container")
	}

	// Note: Actually copy the struct
	autoConfig := *b.Config
	autoConfig.Cmd = autoCmd
	// Commit the container
	image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
	if err != nil {
		return err
	}
	// Subsequent steps build on top of the freshly committed image.
	b.image = image.ID
	return nil
}
|
||||||
|
|
||||||
|
// runContextCommand is the shared implementation of ADD and COPY.
//
// args is [source, dest]. allowRemote enables URL sources (ADD only);
// allowDecompression enables automatic tarball extraction (ADD only);
// cmdName ("ADD"/"COPY") is used in messages and the commit comment.
//
// Remote sources are downloaded into a temp dir inside the context and
// checksummed; local sources are checksummed from the context's tarsum.
// When caching is enabled and a checksum is available, an identical prior
// step short-circuits the copy entirely.
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) != 2 {
		return fmt.Errorf("Invalid %s format", cmdName)
	}

	orig := args[0]
	dest := args[1]

	// Stage a nop Cmd (used by the cache probe and the metadata commit);
	// the real Cmd is restored on exit.
	cmd := b.Config.Cmd
	b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
	b.Config.Image = b.image

	var (
		origPath   = orig
		destPath   = dest
		remoteHash string
		isRemote   bool
		decompress = true
	)

	isRemote = utils.IsURL(orig)
	if isRemote && !allowRemote {
		return fmt.Errorf("Source can't be an URL for %s", cmdName)
	} else if utils.IsURL(orig) {
		// Initiate the download
		resp, err := utils.Download(orig)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}
		defer os.RemoveAll(tmpDirName)

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, resp.Body); err != nil {
			tmpFile.Close()
			return err
		}
		tmpFile.Close()

		// Remove the mtime of the newly created tmp file
		// (zeroed timestamps keep the checksum below independent of
		// download time).
		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
			return err
		}

		// From here on, copy from the downloaded file relative to the
		// context directory rather than from the original URL.
		origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// Process the checksum
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum := &tarsum.TarSum{Reader: r, DisableCompression: true}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		remoteHash = tarSum.Sum(nil)
		r.Close()

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(dest, "/") {
			u, err := url.Parse(orig)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			destPath = dest + filename
		}
	}

	// Reject sources that escape the build context (e.g. "../../etc").
	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}

	// Hash path and check the cache
	if b.UtilizeCache {
		var (
			hash string
			sums = b.context.GetSums()
		)

		if remoteHash != "" {
			// Remote file: use the download checksum computed above.
			hash = remoteHash
		} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
			return err
		} else if fi.IsDir() {
			// Directory: hash the sorted per-file sums of everything under it.
			var subfiles []string
			for file, sum := range sums {
				absFile := path.Join(b.contextPath, file)
				absOrigPath := path.Join(b.contextPath, origPath)
				if strings.HasPrefix(absFile, absOrigPath) {
					subfiles = append(subfiles, sum)
				}
			}
			sort.Strings(subfiles)
			hasher := sha256.New()
			hasher.Write([]byte(strings.Join(subfiles, ",")))
			hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
		} else {
			// Single local file: normalize the path to match the tarsum keys.
			if origPath[0] == '/' && len(origPath) > 1 {
				origPath = origPath[1:]
			}
			origPath = strings.TrimPrefix(origPath, "./")
			if h, ok := sums[origPath]; ok {
				hash = "file:" + h
			}
		}
		// Embed the content hash in the cache key so changed content busts
		// the cache even when the paths are unchanged.
		b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)}
		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		// If we do not have a hash, never use the cache
		if hit && hash != "" {
			return nil
		}
	}

	// Create the container
	container, _, err := b.Daemon.Create(b.Config, "")
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	// Remote downloads are copied verbatim even for ADD; only local ADD
	// sources are auto-decompressed.
	if !allowDecompression || isRemote {
		decompress = false
	}
	if err := b.addContext(container, origPath, destPath, decompress); err != nil {
		return err
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil {
		return err
	}
	return nil
}
|
||||||
|
|
||||||
|
// pullImage pulls the named image (repo[:tag]) via the engine's "pull" job
// and then looks it up in the daemon's repository store, returning the
// resolved image.
func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	pullRegistryAuth := b.AuthConfig
	// NOTE(review): assumes b.AuthConfigFile is always non-nil — confirm
	// against the Builder's construction site.
	if len(b.AuthConfigFile.Configs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		endpoint, _, err := registry.ResolveRepositoryName(remote)
		if err != nil {
			return nil, err
		}
		resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(endpoint)
		pullRegistryAuth = &resolvedAuth
	}
	job := b.Engine.Job("pull", remote, tag)
	job.SetenvBool("json", b.StreamFormatter.Json())
	job.SetenvBool("parallel", true)
	job.SetenvJson("authConfig", pullRegistryAuth)
	// Pull progress goes to the deprecated writer (see Builder.OutOld).
	job.Stdout.Add(b.OutOld)
	if err := job.Run(); err != nil {
		return nil, err
	}
	// Resolve the freshly pulled image by its original name.
	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}
|
||||||
|
|
||||||
|
// processImageFrom makes img the base of the build: it adopts the image's
// config as the builder's config, seeds a default PATH when the environment
// is empty, and replays any ONBUILD triggers stored on the image.
func (b *Builder) processImageFrom(img *imagepkg.Image) error {
	b.image = img.ID

	// Inherit the base image's config wholesale when it has one.
	if img.Config != nil {
		b.Config = img.Config
	}

	if len(b.Config.Env) == 0 {
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}

	// FIXME rewrite this so that builder/parser is used; right now steps in
	// onbuild are muted because we have no good way to represent the step
	// number
	for _, step := range onBuildTriggers {
		// Naive whitespace split: the first token is the instruction, the
		// rest are its arguments.
		splitStep := strings.Split(step, " ")
		stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " "))
		// The same instructions forbidden at ONBUILD-declaration time (see
		// onbuild in dispatchers.go) are rejected again here at replay time.
		switch stepInstruction {
		case "ONBUILD":
			return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step)
		case "MAINTAINER", "FROM":
			return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step)
		}

		// FIXME we have to run the evaluator manually here. This does not belong
		// in this function. Once removed, the init() in evaluator.go should no
		// longer be necessary.

		if f, ok := evaluateTable[strings.ToLower(stepInstruction)]; ok {
			if err := f(b, splitStep[1:], nil); err != nil {
				return err
			}
		} else {
			return fmt.Errorf("%s doesn't appear to be a valid Dockerfile instruction", splitStep[0])
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
|
||||||
|
// and if so attempts to look up the current `b.image` and `b.Config` pair
|
||||||
|
// in the current server `b.Daemon`. If an image is found, probeCache returns
|
||||||
|
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
|
||||||
|
// is any error, it returns `(false, err)`.
|
||||||
|
func (b *Builder) probeCache() (bool, error) {
|
||||||
|
if b.UtilizeCache {
|
||||||
|
if cache, err := b.Daemon.ImageGetCached(b.image, b.Config); err != nil {
|
||||||
|
return false, err
|
||||||
|
} else if cache != nil {
|
||||||
|
fmt.Fprintf(b.OutStream, " ---> Using cache\n")
|
||||||
|
log.Debugf("[BUILDER] Use cached version")
|
||||||
|
b.image = cache.ID
|
||||||
|
return true, nil
|
||||||
|
} else {
|
||||||
|
log.Debugf("[BUILDER] Cache miss")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Builder) create() (*daemon.Container, error) {
|
||||||
|
if b.image == "" {
|
||||||
|
return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
|
||||||
|
}
|
||||||
|
b.Config.Image = b.image
|
||||||
|
|
||||||
|
// Create the container
|
||||||
|
c, _, err := b.Daemon.Create(b.Config, "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
b.TmpContainers[c.ID] = struct{}{}
|
||||||
|
fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
|
||||||
|
|
||||||
|
// override the entry point that may have been picked up from the base image
|
||||||
|
c.Path = b.Config.Cmd[0]
|
||||||
|
c.Args = b.Config.Cmd[1:]
|
||||||
|
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Builder) run(c *daemon.Container) error {
|
||||||
|
var errCh chan error
|
||||||
|
if b.Verbose {
|
||||||
|
errCh = utils.Go(func() error {
|
||||||
|
// FIXME: call the 'attach' job so that daemon.Attach can be made private
|
||||||
|
//
|
||||||
|
// FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach
|
||||||
|
// but without hijacking for stdin. Also, with attach there can be race
|
||||||
|
// condition because of some output already was printed before it.
|
||||||
|
return <-b.Daemon.Attach(c, nil, nil, b.OutStream, b.ErrStream)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
//start the container
|
||||||
|
if err := c.Start(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if errCh != nil {
|
||||||
|
if err := <-errCh; err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for it to finish
|
||||||
|
if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 {
|
||||||
|
err := &utils.JSONError{
|
||||||
|
Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
|
||||||
|
Code: ret,
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Builder) checkPathForAddition(orig string) error {
|
||||||
|
origPath := path.Join(b.contextPath, orig)
|
||||||
|
origPath, err := filepath.EvalSymlinks(origPath)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return fmt.Errorf("%s: no such file or directory", orig)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !strings.HasPrefix(origPath, b.contextPath) {
|
||||||
|
return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
|
||||||
|
}
|
||||||
|
if _, err := os.Stat(origPath); err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return fmt.Errorf("%s: no such file or directory", orig)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// addContext copies `orig` (a path inside the build context) to `dest`
// inside the container's root filesystem. Directories are copied
// recursively; when decompress is true, recognizable archives are
// unpacked instead of copied; ownership of everything created is reset
// to root (0:0) via fixPermissions.
func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = path.Join(b.contextPath, orig)
		destPath   = path.Join(container.RootfsPath(), dest)
	)

	// Resolve symlinks in the destination within the rootfs scope so the
	// write cannot land outside the container filesystem.
	if destPath != container.RootfsPath() {
		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
		if err != nil {
			return err
		}
	}

	// Preserve the trailing '/'
	if strings.HasSuffix(dest, "/") || dest == "." {
		destPath = destPath + "/"
	}

	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		// Destination does not exist yet; remember that, so we know later
		// whether the copy lands *at* destPath or *inside* it.
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}

	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive
		// to support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving. First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in / .
		tarDest := destPath
		if strings.HasSuffix(tarDest, "/") {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		if err := archive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			// Untar failed for a reason other than "not an archive";
			// log it and fall through to a plain file copy.
			log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	// Plain file copy: ensure the parent directory exists, then copy
	// preserving attributes via a tar round-trip.
	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := archive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}

	resPath := destPath
	if destExists && destStat.IsDir() {
		// Copying a file into a pre-existing directory places it inside;
		// fix permissions on the resulting file, not on the directory.
		resPath = path.Join(destPath, path.Base(origPath))
	}

	return fixPermissions(resPath, 0, 0)
}
|
||||||
|
|
||||||
|
func copyAsDirectory(source, destination string, destinationExists bool) error {
|
||||||
|
if err := archive.CopyWithTar(source, destination); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if destinationExists {
|
||||||
|
files, err := ioutil.ReadDir(source)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range files {
|
||||||
|
if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return fixPermissions(destination, 0, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// fixPermissions recursively chowns destination to uid:gid. Symlinks are
// changed with Lchown (not followed); paths that vanish mid-walk are
// ignored rather than treated as errors.
func fixPermissions(destination string, uid, gid int) error {
	return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
		// Propagate errors encountered while walking. The previous version
		// ignored the incoming err, silently skipping unreadable subtrees
		// and touching paths that had already failed to stat.
		if err != nil {
			if os.IsNotExist(err) {
				return nil
			}
			return err
		}
		if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
			return err
		}
		return nil
	})
}
|
||||||
|
|
||||||
|
func (b *Builder) clearTmp() {
|
||||||
|
for c := range b.TmpContainers {
|
||||||
|
tmp := b.Daemon.Get(c)
|
||||||
|
if err := b.Daemon.Destroy(tmp); err != nil {
|
||||||
|
fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
|
||||||
|
} else {
|
||||||
|
delete(b.TmpContainers, c)
|
||||||
|
fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,118 @@
|
||||||
|
package builder
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/docker/archive"
|
||||||
|
"github.com/docker/docker/daemon"
|
||||||
|
"github.com/docker/docker/engine"
|
||||||
|
"github.com/docker/docker/pkg/parsers"
|
||||||
|
"github.com/docker/docker/registry"
|
||||||
|
"github.com/docker/docker/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BuilderJob wires the Dockerfile builder into the engine's job system,
// exposing image builds as the "build" job.
type BuilderJob struct {
	Engine *engine.Engine // engine the "build" job is registered on
	Daemon *daemon.Daemon // daemon used to create containers and store images
}
|
||||||
|
|
||||||
|
// Install registers the "build" job handler on the engine.
func (b *BuilderJob) Install() {
	b.Engine.Register("build", b.CmdBuild)
}
|
||||||
|
|
||||||
|
// CmdBuild is the engine handler for the "build" job. It resolves the
// build context (a tarball on stdin, a git repository, or a remote
// Dockerfile URL), constructs a Builder configured from the job's
// environment, runs the build, and optionally tags the resulting image.
func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s\n", job.Name)
	}
	var (
		remoteURL      = job.Getenv("remote")
		repoName       = job.Getenv("t")
		suppressOutput = job.GetenvBool("q")
		noCache        = job.GetenvBool("nocache")
		rm             = job.GetenvBool("rm")
		forceRm        = job.GetenvBool("forcerm")
		authConfig     = &registry.AuthConfig{}
		configFile     = &registry.ConfigFile{}
		tag            string
		context        io.ReadCloser
	)
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("configFile", configFile)
	// Split a possible ":tag" suffix off the -t argument.
	repoName, tag = parsers.ParseRepositoryTag(repoName)

	if remoteURL == "" {
		// No remote: the build context is streamed in as a tarball on stdin.
		context = ioutil.NopCloser(job.Stdin)
	} else if utils.IsGIT(remoteURL) {
		if !strings.HasPrefix(remoteURL, "git://") {
			remoteURL = "https://" + remoteURL
		}
		root, err := ioutil.TempDir("", "docker-build-git")
		if err != nil {
			return job.Error(err)
		}
		defer os.RemoveAll(root)

		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
			return job.Errorf("Error trying to use git: %s (%s)", err, output)
		}

		// Tar the clone so it can be consumed like a stdin context.
		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return job.Error(err)
		}
		context = c
	} else if utils.IsURL(remoteURL) {
		// Remote URL: fetch a single Dockerfile and wrap it in a tar context.
		f, err := utils.Download(remoteURL)
		if err != nil {
			return job.Error(err)
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return job.Error(err)
		}
		c, err := archive.Generate("Dockerfile", string(dockerFile))
		if err != nil {
			return job.Error(err)
		}
		context = c
	}
	// NOTE(review): if remoteURL is non-empty but neither a git nor an HTTP
	// URL, context is still nil here and this deferred Close will panic —
	// confirm callers can never pass such a value.
	defer context.Close()

	sf := utils.NewStreamFormatter(job.GetenvBool("json"))

	builder := &Builder{
		Daemon: b.Daemon,
		Engine: b.Engine,
		OutStream: &utils.StdoutFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &utils.StderrFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !suppressOutput,
		UtilizeCache:    !noCache,
		Remove:          rm,
		ForceRemove:     forceRm,
		OutOld:          job.Stdout,
		StreamFormatter: sf,
		AuthConfig:      authConfig,
		AuthConfigFile:  configFile,
	}

	id, err := builder.Run(context)
	if err != nil {
		return job.Error(err)
	}

	if repoName != "" {
		// Tag the built image; tag may be empty, leaving the default.
		b.Daemon.Repositories().Set(repoName, tag, id, false)
	}
	return engine.StatusOK
}
|
|
@ -0,0 +1,32 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/docker/docker/builder/parser"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var f *os.File
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if len(os.Args) < 2 {
|
||||||
|
fmt.Println("please supply filename(s)")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fn := range os.Args[1:] {
|
||||||
|
f, err = os.Open(fn)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ast, err := parser.Parse(f)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
} else {
|
||||||
|
fmt.Print(ast.Dump())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,131 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
// line parsers are dispatch calls that parse a single unit of text into a
|
||||||
|
// Node object which contains the whole statement. Dockerfiles have varied
|
||||||
|
// (but not usually unique, see ONBUILD for a unique example) parsing rules
|
||||||
|
// per-command, and these unify the processing in a way that makes it
|
||||||
|
// manageable.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// errDockerfileJSONNesting is returned when a JSON argument list
	// contains a nested array, which Dockerfile statements do not allow.
	errDockerfileJSONNesting = errors.New("You may not nest arrays in Dockerfile statements.")
)
|
||||||
|
|
||||||
|
// ignore the current argument. This will still leave a command parsed, but
|
||||||
|
// will not incorporate the arguments into the ast.
|
||||||
|
func parseIgnore(rest string) (*Node, map[string]bool, error) {
|
||||||
|
return &Node{}, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// used for onbuild. Could potentially be used for anything that represents a
|
||||||
|
// statement with sub-statements.
|
||||||
|
//
|
||||||
|
// ONBUILD RUN foo bar -> (onbuild (run foo bar))
|
||||||
|
//
|
||||||
|
func parseSubCommand(rest string) (*Node, map[string]bool, error) {
|
||||||
|
_, child, err := parseLine(rest)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Node{Children: []*Node{child}}, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse environment like statements. Note that this does *not* handle
|
||||||
|
// variable interpolation, which will be handled in the evaluator.
|
||||||
|
func parseEnv(rest string) (*Node, map[string]bool, error) {
|
||||||
|
node := &Node{}
|
||||||
|
rootnode := node
|
||||||
|
strs := TOKEN_WHITESPACE.Split(rest, 2)
|
||||||
|
node.Value = strs[0]
|
||||||
|
node.Next = &Node{}
|
||||||
|
node.Next.Value = strs[1]
|
||||||
|
|
||||||
|
return rootnode, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parses a whitespace-delimited set of arguments. The result is effectively a
|
||||||
|
// linked list of string arguments.
|
||||||
|
func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) {
|
||||||
|
node := &Node{}
|
||||||
|
rootnode := node
|
||||||
|
prevnode := node
|
||||||
|
for _, str := range TOKEN_WHITESPACE.Split(rest, -1) { // use regexp
|
||||||
|
prevnode = node
|
||||||
|
node.Value = str
|
||||||
|
node.Next = &Node{}
|
||||||
|
node = node.Next
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX to get around regexp.Split *always* providing an empty string at the
|
||||||
|
// end due to how our loop is constructed, nil out the last node in the
|
||||||
|
// chain.
|
||||||
|
prevnode.Next = nil
|
||||||
|
|
||||||
|
return rootnode, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parsestring just wraps the string in quotes and returns a working node.
|
||||||
|
func parseString(rest string) (*Node, map[string]bool, error) {
|
||||||
|
n := &Node{}
|
||||||
|
n.Value = rest
|
||||||
|
return n, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseJSON converts JSON arrays to an AST.
|
||||||
|
func parseJSON(rest string) (*Node, map[string]bool, error) {
|
||||||
|
var (
|
||||||
|
myJson []interface{}
|
||||||
|
next = &Node{}
|
||||||
|
orignext = next
|
||||||
|
prevnode = next
|
||||||
|
)
|
||||||
|
|
||||||
|
if err := json.Unmarshal([]byte(rest), &myJson); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, str := range myJson {
|
||||||
|
switch str.(type) {
|
||||||
|
case string:
|
||||||
|
case float64:
|
||||||
|
str = strconv.FormatFloat(str.(float64), 'G', -1, 64)
|
||||||
|
default:
|
||||||
|
return nil, nil, errDockerfileJSONNesting
|
||||||
|
}
|
||||||
|
next.Value = str.(string)
|
||||||
|
next.Next = &Node{}
|
||||||
|
prevnode = next
|
||||||
|
next = next.Next
|
||||||
|
}
|
||||||
|
|
||||||
|
prevnode.Next = nil
|
||||||
|
|
||||||
|
return orignext, map[string]bool{"json": true}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseMaybeJSON determines if the argument appears to be a JSON array. If
|
||||||
|
// so, passes to parseJSON; if not, quotes the result and returns a single
|
||||||
|
// node.
|
||||||
|
func parseMaybeJSON(rest string) (*Node, map[string]bool, error) {
|
||||||
|
rest = strings.TrimSpace(rest)
|
||||||
|
|
||||||
|
node, attrs, err := parseJSON(rest)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
return node, attrs, nil
|
||||||
|
}
|
||||||
|
if err == errDockerfileJSONNesting {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
node = &Node{}
|
||||||
|
node.Value = rest
|
||||||
|
return node, nil, nil
|
||||||
|
}
|
|
@ -0,0 +1,127 @@
|
||||||
|
// This package implements a parser and parse tree dumper for Dockerfiles.
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"io"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Node is a structure used to represent a parse tree.
//
// In the node there are three fields, Value, Next, and Children. Value is the
// current token's string value. Next is always the next non-child token, and
// children contains all the children. Here's an example:
//
// (value next (child child-next child-next-next) next-next)
//
// This data structure is frankly pretty lousy for handling complex languages,
// but lucky for us the Dockerfile isn't very complicated. This structure
// works a little more effectively than a "proper" parse tree for our needs.
//
type Node struct {
	Value      string          // actual content
	Next       *Node           // the next item in the current sexp
	Children   []*Node         // the children of this sexp
	Attributes map[string]bool // special attributes for this node, set by the line parsers (e.g. "json")
}
|
||||||
|
|
||||||
|
var (
	// dispatch maps a lowercased Dockerfile command name to its line
	// parser; populated in init() below.
	dispatch map[string]func(string) (*Node, map[string]bool, error)

	// Token patterns used while scanning lines: runs of intra-line
	// whitespace, a trailing backslash continuation, and comment lines.
	TOKEN_WHITESPACE        = regexp.MustCompile(`[\t\v\f\r ]+`)
	TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\$`)
	TOKEN_COMMENT           = regexp.MustCompile(`^#.*$`)
)
|
||||||
|
|
||||||
|
func init() {
	// Dispatch Table. see line_parsers.go for the parse functions.
	// The command is parsed and mapped to the line parser. The line parser
	// receives the arguments but not the command, and returns an AST after
	// reformulating the arguments according to the rules in the parser
	// functions. Errors are propagated up by Parse() and the resulting AST can
	// be incorporated directly into the existing AST as a next.
	dispatch = map[string]func(string) (*Node, map[string]bool, error){
		"user":           parseString,
		"onbuild":        parseSubCommand,
		"workdir":        parseString,
		"env":            parseEnv,
		"maintainer":     parseString,
		"docker-version": parseString,
		"from":           parseString,
		"add":            parseStringsWhitespaceDelimited,
		"copy":           parseStringsWhitespaceDelimited,
		"run":            parseMaybeJSON,
		"cmd":            parseMaybeJSON,
		"entrypoint":     parseMaybeJSON,
		"expose":         parseStringsWhitespaceDelimited,
		"volume":         parseMaybeJSON,
		"insert":         parseIgnore,
	}
}
|
||||||
|
|
||||||
|
// parse a line and return the remainder.
|
||||||
|
func parseLine(line string) (string, *Node, error) {
|
||||||
|
if line = stripComments(line); line == "" {
|
||||||
|
return "", nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if TOKEN_LINE_CONTINUATION.MatchString(line) {
|
||||||
|
line = TOKEN_LINE_CONTINUATION.ReplaceAllString(line, "")
|
||||||
|
return line, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd, args := splitCommand(line)
|
||||||
|
|
||||||
|
node := &Node{}
|
||||||
|
node.Value = cmd
|
||||||
|
|
||||||
|
sexp, attrs, err := fullDispatch(cmd, args)
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
node.Next = sexp
|
||||||
|
node.Attributes = attrs
|
||||||
|
|
||||||
|
return "", node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// The main parse routine. Handles an io.ReadWriteCloser and returns the root
|
||||||
|
// of the AST.
|
||||||
|
func Parse(rwc io.Reader) (*Node, error) {
|
||||||
|
root := &Node{}
|
||||||
|
scanner := bufio.NewScanner(rwc)
|
||||||
|
|
||||||
|
for scanner.Scan() {
|
||||||
|
line, child, err := parseLine(strings.TrimSpace(scanner.Text()))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if line != "" && child == nil {
|
||||||
|
for scanner.Scan() {
|
||||||
|
newline := strings.TrimSpace(scanner.Text())
|
||||||
|
|
||||||
|
if newline == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
line, child, err = parseLine(line + newline)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if child != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if child != nil {
|
||||||
|
root.Children = append(root.Children, child)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return root, nil
|
||||||
|
}
|
|
@ -0,0 +1,79 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Fixture roots: testDir holds Dockerfiles paired with an expected
// "result" dump; negativeTestDir holds Dockerfiles that must fail to parse.
const testDir = "testfiles"
const negativeTestDir = "testfiles-negative"
|
||||||
|
|
||||||
|
// getDirs returns the directory entries of dir, failing the test on any
// filesystem error.
func getDirs(t *testing.T, dir string) []os.FileInfo {
	d, err := os.Open(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer d.Close()

	entries, err := d.Readdir(0)
	if err != nil {
		t.Fatal(err)
	}
	return entries
}
|
||||||
|
|
||||||
|
func TestTestNegative(t *testing.T) {
|
||||||
|
for _, dir := range getDirs(t, negativeTestDir) {
|
||||||
|
dockerfile := filepath.Join(negativeTestDir, dir.Name(), "Dockerfile")
|
||||||
|
|
||||||
|
df, err := os.Open(dockerfile)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = Parse(df)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("No error parsing broken dockerfile for %s", dir.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
df.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestTestData parses each fixture under testfiles and compares the AST
// dump byte-for-byte against the stored "result" file.
func TestTestData(t *testing.T) {
	for _, dir := range getDirs(t, testDir) {
		dockerfile := filepath.Join(testDir, dir.Name(), "Dockerfile")
		resultfile := filepath.Join(testDir, dir.Name(), "result")

		df, err := os.Open(dockerfile)
		if err != nil {
			t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error())
		}

		rf, err := os.Open(resultfile)
		if err != nil {
			t.Fatalf("Result file missing for %s: %s", dir.Name(), err.Error())
		}

		ast, err := Parse(df)
		if err != nil {
			t.Fatalf("Error parsing %s's dockerfile: %s", dir.Name(), err.Error())
		}

		content, err := ioutil.ReadAll(rf)
		if err != nil {
			t.Fatalf("Error reading %s's result file: %s", dir.Name(), err.Error())
		}

		// The dump must match the recorded result exactly (whitespace
		// and quoting included).
		if ast.Dump() != string(content) {
			t.Fatalf("%s: AST dump of dockerfile does not match result", dir.Name())
		}

		df.Close()
		rf.Close()
	}
}
|
|
@ -0,0 +1 @@
|
||||||
|
CMD [ "echo", [ "nested json" ] ]
|
|
@ -0,0 +1,25 @@
|
||||||
|
FROM brimstone/ubuntu:14.04
|
||||||
|
|
||||||
|
MAINTAINER brimstone@the.narro.ws
|
||||||
|
|
||||||
|
# TORUN -v /var/run/docker.sock:/var/run/docker.sock
|
||||||
|
|
||||||
|
ENV GOPATH /go
|
||||||
|
|
||||||
|
# Set our command
|
||||||
|
ENTRYPOINT ["/usr/local/bin/consuldock"]
|
||||||
|
|
||||||
|
# Install the packages we need, clean up after them and us
|
||||||
|
RUN apt-get update \
|
||||||
|
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
|
||||||
|
&& apt-get install -y --no-install-recommends git golang ca-certificates \
|
||||||
|
&& apt-get clean \
|
||||||
|
&& rm -rf /var/lib/apt/lists \
|
||||||
|
|
||||||
|
&& go get -v github.com/brimstone/consuldock \
|
||||||
|
&& mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \
|
||||||
|
|
||||||
|
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
|
||||||
|
&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
|
||||||
|
&& rm /tmp/dpkg.* \
|
||||||
|
&& rm -rf $GOPATH
|
|
@ -0,0 +1,5 @@
|
||||||
|
(from "brimstone/ubuntu:14.04")
|
||||||
|
(maintainer "brimstone@the.narro.ws")
|
||||||
|
(env "GOPATH" "/go")
|
||||||
|
(entrypoint "/usr/local/bin/consuldock")
|
||||||
|
(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists && go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.* && rm -rf $GOPATH")
|
|
@ -0,0 +1,52 @@
|
||||||
|
FROM brimstone/ubuntu:14.04
|
||||||
|
|
||||||
|
CMD []
|
||||||
|
|
||||||
|
ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"]
|
||||||
|
|
||||||
|
EXPOSE 8500 8600 8400 8301 8302
|
||||||
|
|
||||||
|
RUN apt-get update \
|
||||||
|
&& apt-get install -y unzip wget \
|
||||||
|
&& apt-get clean \
|
||||||
|
&& rm -rf /var/lib/apt/lists
|
||||||
|
|
||||||
|
RUN cd /tmp \
|
||||||
|
&& wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
|
||||||
|
-O web_ui.zip \
|
||||||
|
&& unzip web_ui.zip \
|
||||||
|
&& mv dist /webui \
|
||||||
|
&& rm web_ui.zip
|
||||||
|
|
||||||
|
RUN apt-get update \
|
||||||
|
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
|
||||||
|
&& apt-get install -y --no-install-recommends unzip wget \
|
||||||
|
&& apt-get clean \
|
||||||
|
&& rm -rf /var/lib/apt/lists \
|
||||||
|
|
||||||
|
&& cd /tmp \
|
||||||
|
&& wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
|
||||||
|
-O web_ui.zip \
|
||||||
|
&& unzip web_ui.zip \
|
||||||
|
&& mv dist /webui \
|
||||||
|
&& rm web_ui.zip \
|
||||||
|
|
||||||
|
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
|
||||||
|
&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
|
||||||
|
&& rm /tmp/dpkg.*
|
||||||
|
|
||||||
|
ENV GOPATH /go
|
||||||
|
|
||||||
|
RUN apt-get update \
|
||||||
|
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
|
||||||
|
&& apt-get install -y --no-install-recommends git golang ca-certificates build-essential \
|
||||||
|
&& apt-get clean \
|
||||||
|
&& rm -rf /var/lib/apt/lists \
|
||||||
|
|
||||||
|
&& go get -v github.com/hashicorp/consul \
|
||||||
|
&& mv $GOPATH/bin/consul /usr/bin/consul \
|
||||||
|
|
||||||
|
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
|
||||||
|
&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
|
||||||
|
&& rm /tmp/dpkg.* \
|
||||||
|
&& rm -rf $GOPATH
|
|
@ -0,0 +1,9 @@
|
||||||
|
(from "brimstone/ubuntu:14.04")
|
||||||
|
(cmd "")
|
||||||
|
(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui")
|
||||||
|
(expose "8500" "8600" "8400" "8301" "8302")
|
||||||
|
(run "apt-get update && apt-get install -y unzip wget && apt-get clean && rm -rf /var/lib/apt/lists")
|
||||||
|
(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip")
|
||||||
|
(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.*")
|
||||||
|
(env "GOPATH" "/go")
|
||||||
|
(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists && go get -v github.com/hashicorp/consul && mv $GOPATH/bin/consul /usr/bin/consul && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.* && rm -rf $GOPATH")
|
|
@ -0,0 +1,54 @@
|
||||||
|
FROM cpuguy83/ubuntu
|
||||||
|
ENV NAGIOS_HOME /opt/nagios
|
||||||
|
ENV NAGIOS_USER nagios
|
||||||
|
ENV NAGIOS_GROUP nagios
|
||||||
|
ENV NAGIOS_CMDUSER nagios
|
||||||
|
ENV NAGIOS_CMDGROUP nagios
|
||||||
|
ENV NAGIOSADMIN_USER nagiosadmin
|
||||||
|
ENV NAGIOSADMIN_PASS nagios
|
||||||
|
ENV APACHE_RUN_USER nagios
|
||||||
|
ENV APACHE_RUN_GROUP nagios
|
||||||
|
ENV NAGIOS_TIMEZONE UTC
|
||||||
|
|
||||||
|
RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list
|
||||||
|
RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx
|
||||||
|
RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP )
|
||||||
|
RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )
|
||||||
|
|
||||||
|
ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz
|
||||||
|
RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf
|
||||||
|
ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/
|
||||||
|
RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install
|
||||||
|
|
||||||
|
RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars
|
||||||
|
RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default
|
||||||
|
|
||||||
|
RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo
|
||||||
|
|
||||||
|
RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf
|
||||||
|
|
||||||
|
RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs
|
||||||
|
RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg
|
||||||
|
RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg
|
||||||
|
RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf
|
||||||
|
|
||||||
|
RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \
|
||||||
|
sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg
|
||||||
|
RUN cp /etc/services /var/spool/postfix/etc/
|
||||||
|
|
||||||
|
RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix
|
||||||
|
ADD nagios.init /etc/sv/nagios/run
|
||||||
|
ADD apache.init /etc/sv/apache/run
|
||||||
|
ADD postfix.init /etc/sv/postfix/run
|
||||||
|
ADD postfix.stop /etc/sv/postfix/finish
|
||||||
|
|
||||||
|
ADD start.sh /usr/local/bin/start_nagios
|
||||||
|
|
||||||
|
ENV APACHE_LOCK_DIR /var/run
|
||||||
|
ENV APACHE_LOG_DIR /var/log/apache2
|
||||||
|
|
||||||
|
EXPOSE 80
|
||||||
|
|
||||||
|
VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"]
|
||||||
|
|
||||||
|
CMD ["/usr/local/bin/start_nagios"]
|
|
@ -0,0 +1,40 @@
|
||||||
|
(from "cpuguy83/ubuntu")
|
||||||
|
(env "NAGIOS_HOME" "/opt/nagios")
|
||||||
|
(env "NAGIOS_USER" "nagios")
|
||||||
|
(env "NAGIOS_GROUP" "nagios")
|
||||||
|
(env "NAGIOS_CMDUSER" "nagios")
|
||||||
|
(env "NAGIOS_CMDGROUP" "nagios")
|
||||||
|
(env "NAGIOSADMIN_USER" "nagiosadmin")
|
||||||
|
(env "NAGIOSADMIN_PASS" "nagios")
|
||||||
|
(env "APACHE_RUN_USER" "nagios")
|
||||||
|
(env "APACHE_RUN_GROUP" "nagios")
|
||||||
|
(env "NAGIOS_TIMEZONE" "UTC")
|
||||||
|
(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list")
|
||||||
|
(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx")
|
||||||
|
(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )")
|
||||||
|
(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )")
|
||||||
|
(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz")
|
||||||
|
(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf")
|
||||||
|
(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/")
|
||||||
|
(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install")
|
||||||
|
(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars")
|
||||||
|
(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default")
|
||||||
|
(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo")
|
||||||
|
(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf")
|
||||||
|
(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs")
|
||||||
|
(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
|
||||||
|
(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
|
||||||
|
(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf")
|
||||||
|
(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg")
|
||||||
|
(run "cp /etc/services /var/spool/postfix/etc/")
|
||||||
|
(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix")
|
||||||
|
(add "nagios.init" "/etc/sv/nagios/run")
|
||||||
|
(add "apache.init" "/etc/sv/apache/run")
|
||||||
|
(add "postfix.init" "/etc/sv/postfix/run")
|
||||||
|
(add "postfix.stop" "/etc/sv/postfix/finish")
|
||||||
|
(add "start.sh" "/usr/local/bin/start_nagios")
|
||||||
|
(env "APACHE_LOCK_DIR" "/var/run")
|
||||||
|
(env "APACHE_LOG_DIR" "/var/log/apache2")
|
||||||
|
(expose "80")
|
||||||
|
(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs")
|
||||||
|
(cmd "/usr/local/bin/start_nagios")
|
|
@ -0,0 +1,105 @@
|
||||||
|
# This file describes the standard way to build Docker, using docker
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
#
|
||||||
|
# # Assemble the full dev environment. This is slow the first time.
|
||||||
|
# docker build -t docker .
|
||||||
|
#
|
||||||
|
# # Mount your source in an interactive container for quick testing:
|
||||||
|
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
|
||||||
|
#
|
||||||
|
# # Run the test suite:
|
||||||
|
# docker run --privileged docker hack/make.sh test
|
||||||
|
#
|
||||||
|
# # Publish a release:
|
||||||
|
# docker run --privileged \
|
||||||
|
# -e AWS_S3_BUCKET=baz \
|
||||||
|
# -e AWS_ACCESS_KEY=foo \
|
||||||
|
# -e AWS_SECRET_KEY=bar \
|
||||||
|
# -e GPG_PASSPHRASE=gloubiboulga \
|
||||||
|
# docker hack/release.sh
|
||||||
|
#
|
||||||
|
# Note: Apparmor used to mess with privileged mode, but this is no longer
|
||||||
|
# the case. Therefore, you don't have to disable it anymore.
|
||||||
|
#
|
||||||
|
|
||||||
|
docker-version 0.6.1
|
||||||
|
FROM ubuntu:14.04
|
||||||
|
MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)
|
||||||
|
|
||||||
|
# Packaged dependencies
|
||||||
|
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
|
||||||
|
apt-utils \
|
||||||
|
aufs-tools \
|
||||||
|
automake \
|
||||||
|
btrfs-tools \
|
||||||
|
build-essential \
|
||||||
|
curl \
|
||||||
|
dpkg-sig \
|
||||||
|
git \
|
||||||
|
iptables \
|
||||||
|
libapparmor-dev \
|
||||||
|
libcap-dev \
|
||||||
|
libsqlite3-dev \
|
||||||
|
lxc=1.0* \
|
||||||
|
mercurial \
|
||||||
|
pandoc \
|
||||||
|
parallel \
|
||||||
|
reprepro \
|
||||||
|
ruby1.9.1 \
|
||||||
|
ruby1.9.1-dev \
|
||||||
|
s3cmd=1.1.0* \
|
||||||
|
--no-install-recommends
|
||||||
|
|
||||||
|
# Get lvm2 source for compiling statically
|
||||||
|
RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
|
||||||
|
# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
|
||||||
|
# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly
|
||||||
|
|
||||||
|
# Compile and install lvm2
|
||||||
|
RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
|
||||||
|
# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
|
||||||
|
|
||||||
|
# Install Go
|
||||||
|
RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz
|
||||||
|
ENV PATH /usr/local/go/bin:$PATH
|
||||||
|
ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
|
||||||
|
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
|
||||||
|
|
||||||
|
# Compile Go for cross compilation
|
||||||
|
ENV DOCKER_CROSSPLATFORMS \
|
||||||
|
linux/386 linux/arm \
|
||||||
|
darwin/amd64 darwin/386 \
|
||||||
|
freebsd/amd64 freebsd/386 freebsd/arm
|
||||||
|
# (set an explicit GOARM of 5 for maximum compatibility)
|
||||||
|
ENV GOARM 5
|
||||||
|
RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
|
||||||
|
|
||||||
|
# Grab Go's cover tool for dead-simple code coverage testing
|
||||||
|
RUN go get code.google.com/p/go.tools/cmd/cover
|
||||||
|
|
||||||
|
# TODO replace FPM with some very minimal debhelper stuff
|
||||||
|
RUN gem install --no-rdoc --no-ri fpm --version 1.0.2
|
||||||
|
|
||||||
|
# Get the "busybox" image source so we can build locally instead of pulling
|
||||||
|
RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox
|
||||||
|
|
||||||
|
# Setup s3cmd config
|
||||||
|
RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg
|
||||||
|
|
||||||
|
# Set user.email so crosbymichael's in-container merge commits go smoothly
|
||||||
|
RUN git config --global user.email 'docker-dummy@example.com'
|
||||||
|
|
||||||
|
# Add an unprivileged user to be used for tests which need it
|
||||||
|
RUN groupadd -r docker
|
||||||
|
RUN useradd --create-home --gid docker unprivilegeduser
|
||||||
|
|
||||||
|
VOLUME /var/lib/docker
|
||||||
|
WORKDIR /go/src/github.com/docker/docker
|
||||||
|
ENV DOCKER_BUILDTAGS apparmor selinux
|
||||||
|
|
||||||
|
# Wrap all commands in the "docker-in-docker" script to allow nested containers
|
||||||
|
ENTRYPOINT ["hack/dind"]
|
||||||
|
|
||||||
|
# Upload docker source
|
||||||
|
COPY . /go/src/github.com/docker/docker
|
|
@ -0,0 +1,25 @@
|
||||||
|
(docker-version "0.6.1")
|
||||||
|
(from "ubuntu:14.04")
|
||||||
|
(maintainer "Tianon Gravi <admwiggin@gmail.com> (@tianon)")
|
||||||
|
(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq apt-utils aufs-tools automake btrfs-tools build-essential curl dpkg-sig git iptables libapparmor-dev libcap-dev libsqlite3-dev lxc=1.0* mercurial pandoc parallel reprepro ruby1.9.1 ruby1.9.1-dev s3cmd=1.1.0* --no-install-recommends")
|
||||||
|
(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103")
|
||||||
|
(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper")
|
||||||
|
(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz")
|
||||||
|
(env "PATH" "/usr/local/go/bin:$PATH")
|
||||||
|
(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor")
|
||||||
|
(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1")
|
||||||
|
(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm darwin/amd64 darwin/386 freebsd/amd64 freebsd/386 freebsd/arm")
|
||||||
|
(env "GOARM" "5")
|
||||||
|
(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'")
|
||||||
|
(run "go get code.google.com/p/go.tools/cmd/cover")
|
||||||
|
(run "gem install --no-rdoc --no-ri fpm --version 1.0.2")
|
||||||
|
(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox")
|
||||||
|
(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg")
|
||||||
|
(run "git config --global user.email 'docker-dummy@example.com'")
|
||||||
|
(run "groupadd -r docker")
|
||||||
|
(run "useradd --create-home --gid docker unprivilegeduser")
|
||||||
|
(volume "/var/lib/docker")
|
||||||
|
(workdir "/go/src/github.com/docker/docker")
|
||||||
|
(env "DOCKER_BUILDTAGS" "apparmor selinux")
|
||||||
|
(entrypoint "hack/dind")
|
||||||
|
(copy "." "/go/src/github.com/docker/docker")
|
|
@ -0,0 +1,8 @@
|
||||||
|
FROM ubuntu:14.04
|
||||||
|
MAINTAINER Erik \\Hollensbe <erik@hollensbe.org>\"
|
||||||
|
|
||||||
|
RUN apt-get \update && \
|
||||||
|
apt-get \"install znc -y
|
||||||
|
ADD \conf\\" /.znc
|
||||||
|
|
||||||
|
CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ]
|
|
@ -0,0 +1,5 @@
|
||||||
|
(from "ubuntu:14.04")
|
||||||
|
(maintainer "Erik \\\\Hollensbe <erik@hollensbe.org>\\\"")
|
||||||
|
(run "apt-get \\update && apt-get \\\"install znc -y")
|
||||||
|
(add "\\conf\\\\\"" "/.znc")
|
||||||
|
(cmd "/usr\\\"/bin/znc" "-f" "-r")
|
|
@ -0,0 +1,15 @@
|
||||||
|
FROM ubuntu:14.04
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install wget -y
|
||||||
|
RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb
|
||||||
|
RUN dpkg -i influxdb_latest_amd64.deb
|
||||||
|
RUN rm -r /opt/influxdb/shared
|
||||||
|
|
||||||
|
VOLUME /opt/influxdb/shared
|
||||||
|
|
||||||
|
CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml
|
||||||
|
|
||||||
|
EXPOSE 8083
|
||||||
|
EXPOSE 8086
|
||||||
|
EXPOSE 8090
|
||||||
|
EXPOSE 8099
|
|
@ -0,0 +1,11 @@
|
||||||
|
(from "ubuntu:14.04")
|
||||||
|
(run "apt-get update && apt-get install wget -y")
|
||||||
|
(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb")
|
||||||
|
(run "dpkg -i influxdb_latest_amd64.deb")
|
||||||
|
(run "rm -r /opt/influxdb/shared")
|
||||||
|
(volume "/opt/influxdb/shared")
|
||||||
|
(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml")
|
||||||
|
(expose "8083")
|
||||||
|
(expose "8086")
|
||||||
|
(expose "8090")
|
||||||
|
(expose "8099")
|
|
@ -0,0 +1 @@
|
||||||
|
CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]"
|
|
@ -0,0 +1 @@
|
||||||
|
(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"")
|
|
@ -0,0 +1 @@
|
||||||
|
CMD '["echo", "Well, JSON in a string is JSON too?"]'
|
|
@ -0,0 +1 @@
|
||||||
|
(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'")
|
|
@ -0,0 +1 @@
|
||||||
|
CMD ['echo','single quotes are invalid JSON']
|
|
@ -0,0 +1 @@
|
||||||
|
(cmd "['echo','single quotes are invalid JSON']")
|
|
@ -0,0 +1 @@
|
||||||
|
CMD ["echo", "Please, close the brackets when you're done"
|
|
@ -0,0 +1 @@
|
||||||
|
(cmd "[\"echo\", \"Please, close the brackets when you're done\"")
|
|
@ -0,0 +1 @@
|
||||||
|
CMD ["echo", "look ma, no quote!]
|
|
@ -0,0 +1 @@
|
||||||
|
(cmd "[\"echo\", \"look ma, no quote!]")
|
|
@ -0,0 +1,7 @@
|
||||||
|
FROM ubuntu:14.04
|
||||||
|
MAINTAINER James Turnbull "james@example.com"
|
||||||
|
ENV REFRESHED_AT 2014-06-01
|
||||||
|
RUN apt-get update
|
||||||
|
RUN apt-get -y install redis-server redis-tools
|
||||||
|
EXPOSE 6379
|
||||||
|
ENTRYPOINT [ "/usr/bin/redis-server" ]
|
|
@ -0,0 +1,7 @@
|
||||||
|
(from "ubuntu:14.04")
|
||||||
|
(maintainer "James Turnbull \"james@example.com\"")
|
||||||
|
(env "REFRESHED_AT" "2014-06-01")
|
||||||
|
(run "apt-get update")
|
||||||
|
(run "apt-get -y install redis-server redis-tools")
|
||||||
|
(expose "6379")
|
||||||
|
(entrypoint "/usr/bin/redis-server")
|
|
@ -0,0 +1,48 @@
|
||||||
|
FROM busybox:buildroot-2014.02
|
||||||
|
|
||||||
|
MAINTAINER docker <docker@docker.io>
|
||||||
|
|
||||||
|
ONBUILD RUN ["echo", "test"]
|
||||||
|
ONBUILD RUN echo test
|
||||||
|
ONBUILD COPY . /
|
||||||
|
|
||||||
|
|
||||||
|
# RUN Commands \
|
||||||
|
# linebreak in comment \
|
||||||
|
RUN ["ls", "-la"]
|
||||||
|
RUN ["echo", "'1234'"]
|
||||||
|
RUN echo "1234"
|
||||||
|
RUN echo 1234
|
||||||
|
RUN echo '1234' && \
|
||||||
|
echo "456" && \
|
||||||
|
echo 789
|
||||||
|
RUN sh -c 'echo root:testpass \
|
||||||
|
> /tmp/passwd'
|
||||||
|
RUN mkdir -p /test /test2 /test3/test
|
||||||
|
|
||||||
|
# ENV \
|
||||||
|
ENV SCUBA 1 DUBA 3
|
||||||
|
ENV SCUBA "1 DUBA 3"
|
||||||
|
|
||||||
|
# CMD \
|
||||||
|
CMD ["echo", "test"]
|
||||||
|
CMD echo test
|
||||||
|
CMD echo "test"
|
||||||
|
CMD echo 'test'
|
||||||
|
CMD echo 'test' | wc -
|
||||||
|
|
||||||
|
#EXPOSE\
|
||||||
|
EXPOSE 3000
|
||||||
|
EXPOSE 9000 5000 6000
|
||||||
|
|
||||||
|
USER docker
|
||||||
|
USER docker:root
|
||||||
|
|
||||||
|
VOLUME ["/test"]
|
||||||
|
VOLUME ["/test", "/test2"]
|
||||||
|
VOLUME /test3
|
||||||
|
|
||||||
|
WORKDIR /test
|
||||||
|
|
||||||
|
ADD . /
|
||||||
|
COPY . copy
|
|
@ -0,0 +1,29 @@
|
||||||
|
(from "busybox:buildroot-2014.02")
|
||||||
|
(maintainer "docker <docker@docker.io>")
|
||||||
|
(onbuild (run "echo" "test"))
|
||||||
|
(onbuild (run "echo test"))
|
||||||
|
(onbuild (copy "." "/"))
|
||||||
|
(run "ls" "-la")
|
||||||
|
(run "echo" "'1234'")
|
||||||
|
(run "echo \"1234\"")
|
||||||
|
(run "echo 1234")
|
||||||
|
(run "echo '1234' && echo \"456\" && echo 789")
|
||||||
|
(run "sh -c 'echo root:testpass > /tmp/passwd'")
|
||||||
|
(run "mkdir -p /test /test2 /test3/test")
|
||||||
|
(env "SCUBA" "1 DUBA 3")
|
||||||
|
(env "SCUBA" "\"1 DUBA 3\"")
|
||||||
|
(cmd "echo" "test")
|
||||||
|
(cmd "echo test")
|
||||||
|
(cmd "echo \"test\"")
|
||||||
|
(cmd "echo 'test'")
|
||||||
|
(cmd "echo 'test' | wc -")
|
||||||
|
(expose "3000")
|
||||||
|
(expose "9000" "5000" "6000")
|
||||||
|
(user "docker")
|
||||||
|
(user "docker:root")
|
||||||
|
(volume "/test")
|
||||||
|
(volume "/test" "/test2")
|
||||||
|
(volume "/test3")
|
||||||
|
(workdir "/test")
|
||||||
|
(add "." "/")
|
||||||
|
(copy "." "copy")
|
|
@ -0,0 +1,16 @@
|
||||||
|
FROM ubuntu:14.04
|
||||||
|
|
||||||
|
RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y
|
||||||
|
ADD .muttrc /
|
||||||
|
ADD .offlineimaprc /
|
||||||
|
ADD .tmux.conf /
|
||||||
|
ADD mutt /.mutt
|
||||||
|
ADD vim /.vim
|
||||||
|
ADD vimrc /.vimrc
|
||||||
|
ADD crontab /etc/crontab
|
||||||
|
RUN chmod 644 /etc/crontab
|
||||||
|
RUN mkdir /Mail
|
||||||
|
RUN mkdir /.offlineimap
|
||||||
|
RUN echo "export TERM=screen-256color" >/.zshenv
|
||||||
|
|
||||||
|
CMD setsid cron; tmux -2
|
|
@ -0,0 +1,14 @@
|
||||||
|
(from "ubuntu:14.04")
|
||||||
|
(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y")
|
||||||
|
(add ".muttrc" "/")
|
||||||
|
(add ".offlineimaprc" "/")
|
||||||
|
(add ".tmux.conf" "/")
|
||||||
|
(add "mutt" "/.mutt")
|
||||||
|
(add "vim" "/.vim")
|
||||||
|
(add "vimrc" "/.vimrc")
|
||||||
|
(add "crontab" "/etc/crontab")
|
||||||
|
(run "chmod 644 /etc/crontab")
|
||||||
|
(run "mkdir /Mail")
|
||||||
|
(run "mkdir /.offlineimap")
|
||||||
|
(run "echo \"export TERM=screen-256color\" >/.zshenv")
|
||||||
|
(cmd "setsid cron; tmux -2")
|
|
@ -0,0 +1,7 @@
|
||||||
|
FROM ubuntu:14.04
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install libcap2-bin mumble-server -y
|
||||||
|
|
||||||
|
ADD ./mumble-server.ini /etc/mumble-server.ini
|
||||||
|
|
||||||
|
CMD /usr/sbin/murmurd
|
|
@ -0,0 +1,4 @@
|
||||||
|
(from "ubuntu:14.04")
|
||||||
|
(run "apt-get update && apt-get install libcap2-bin mumble-server -y")
|
||||||
|
(add "./mumble-server.ini" "/etc/mumble-server.ini")
|
||||||
|
(cmd "/usr/sbin/murmurd")
|
|
@ -0,0 +1,14 @@
|
||||||
|
FROM ubuntu:14.04
|
||||||
|
MAINTAINER Erik Hollensbe <erik@hollensbe.org>
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install nginx-full -y
|
||||||
|
RUN rm -rf /etc/nginx
|
||||||
|
ADD etc /etc/nginx
|
||||||
|
RUN chown -R root:root /etc/nginx
|
||||||
|
RUN /usr/sbin/nginx -qt
|
||||||
|
RUN mkdir /www
|
||||||
|
|
||||||
|
CMD ["/usr/sbin/nginx"]
|
||||||
|
|
||||||
|
VOLUME /www
|
||||||
|
EXPOSE 80
|
|
@ -0,0 +1,11 @@
|
||||||
|
(from "ubuntu:14.04")
|
||||||
|
(maintainer "Erik Hollensbe <erik@hollensbe.org>")
|
||||||
|
(run "apt-get update && apt-get install nginx-full -y")
|
||||||
|
(run "rm -rf /etc/nginx")
|
||||||
|
(add "etc" "/etc/nginx")
|
||||||
|
(run "chown -R root:root /etc/nginx")
|
||||||
|
(run "/usr/sbin/nginx -qt")
|
||||||
|
(run "mkdir /www")
|
||||||
|
(cmd "/usr/sbin/nginx")
|
||||||
|
(volume "/www")
|
||||||
|
(expose "80")
|
|
@ -0,0 +1,23 @@
|
||||||
|
FROM ubuntu:12.04
|
||||||
|
|
||||||
|
EXPOSE 27015
|
||||||
|
EXPOSE 27005
|
||||||
|
EXPOSE 26901
|
||||||
|
EXPOSE 27020
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y
|
||||||
|
RUN mkdir -p /steam
|
||||||
|
RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam
|
||||||
|
ADD ./script /steam/script
|
||||||
|
RUN /steam/steamcmd.sh +runscript /steam/script
|
||||||
|
RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf
|
||||||
|
RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf
|
||||||
|
ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg
|
||||||
|
ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg
|
||||||
|
ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg
|
||||||
|
RUN rm -r /steam/tf2/tf/addons/sourcemod/configs
|
||||||
|
ADD ./configs /steam/tf2/tf/addons/sourcemod/configs
|
||||||
|
RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en
|
||||||
|
RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en
|
||||||
|
|
||||||
|
CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill
|
|
@ -0,0 +1,20 @@
|
||||||
|
(from "ubuntu:12.04")
|
||||||
|
(expose "27015")
|
||||||
|
(expose "27005")
|
||||||
|
(expose "26901")
|
||||||
|
(expose "27020")
|
||||||
|
(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y")
|
||||||
|
(run "mkdir -p /steam")
|
||||||
|
(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam")
|
||||||
|
(add "./script" "/steam/script")
|
||||||
|
(run "/steam/steamcmd.sh +runscript /steam/script")
|
||||||
|
(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf")
|
||||||
|
(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf")
|
||||||
|
(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg")
|
||||||
|
(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg")
|
||||||
|
(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg")
|
||||||
|
(run "rm -r /steam/tf2/tf/addons/sourcemod/configs")
|
||||||
|
(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs")
|
||||||
|
(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en")
|
||||||
|
(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en")
|
||||||
|
(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill")
|
|
@ -0,0 +1,9 @@
|
||||||
|
FROM ubuntu:14.04
|
||||||
|
|
||||||
|
RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y
|
||||||
|
|
||||||
|
ADD .weechat /.weechat
|
||||||
|
ADD .tmux.conf /
|
||||||
|
RUN echo "export TERM=screen-256color" >/.zshenv
|
||||||
|
|
||||||
|
CMD zsh -c weechat
|
|
@ -0,0 +1,6 @@
|
||||||
|
(from "ubuntu:14.04")
|
||||||
|
(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y")
|
||||||
|
(add ".weechat" "/.weechat")
|
||||||
|
(add ".tmux.conf" "/")
|
||||||
|
(run "echo \"export TERM=screen-256color\" >/.zshenv")
|
||||||
|
(cmd "zsh -c weechat")
|
|
@ -0,0 +1,7 @@
|
||||||
|
FROM ubuntu:14.04
|
||||||
|
MAINTAINER Erik Hollensbe <erik@hollensbe.org>
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install znc -y
|
||||||
|
ADD conf /.znc
|
||||||
|
|
||||||
|
CMD [ "/usr/bin/znc", "-f", "-r" ]
|
|
@ -0,0 +1,5 @@
|
||||||
|
(from "ubuntu:14.04")
|
||||||
|
(maintainer "Erik Hollensbe <erik@hollensbe.org>")
|
||||||
|
(run "apt-get update && apt-get install znc -y")
|
||||||
|
(add "conf" "/.znc")
|
||||||
|
(cmd "/usr/bin/znc" "-f" "-r")
|
|
@ -0,0 +1,86 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// QuoteString trims the input, escapes any embedded double quotes and
// backslashes, then wraps the whole thing in double quotes. Very useful for
// generating argument output in nodes.
//
// Backslashes are escaped before quotes so that the escape characters
// introduced for quotes are not themselves re-escaped.
func QuoteString(str string) string {
	result := strings.TrimSpace(str)
	result = strings.Replace(result, `\`, `\\`, -1)
	result = strings.Replace(result, `"`, `\"`, -1)

	return `"` + result + `"`
}
|
||||||
|
|
||||||
|
// dumps the AST defined by `node` as a list of sexps. Returns a string
|
||||||
|
// suitable for printing.
|
||||||
|
func (node *Node) Dump() string {
|
||||||
|
str := ""
|
||||||
|
str += node.Value
|
||||||
|
|
||||||
|
for _, n := range node.Children {
|
||||||
|
str += "(" + n.Dump() + ")\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
if node.Next != nil {
|
||||||
|
for n := node.Next; n != nil; n = n.Next {
|
||||||
|
if len(n.Children) > 0 {
|
||||||
|
str += " " + n.Dump()
|
||||||
|
} else {
|
||||||
|
str += " " + QuoteString(n.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.TrimSpace(str)
|
||||||
|
}
|
||||||
|
|
||||||
|
// performs the dispatch based on the two primal strings, cmd and args. Please
|
||||||
|
// look at the dispatch table in parser.go to see how these dispatchers work.
|
||||||
|
func fullDispatch(cmd, args string) (*Node, map[string]bool, error) {
|
||||||
|
if _, ok := dispatch[cmd]; !ok {
|
||||||
|
return nil, nil, fmt.Errorf("'%s' is not a valid dockerfile command", cmd)
|
||||||
|
}
|
||||||
|
|
||||||
|
sexp, attrs, err := dispatch[cmd](args)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return sexp, attrs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// splitCommand takes a single line of text and parses out the cmd and args,
|
||||||
|
// which are used for dispatching to more exact parsing functions.
|
||||||
|
func splitCommand(line string) (string, string) {
|
||||||
|
cmdline := TOKEN_WHITESPACE.Split(line, 2)
|
||||||
|
cmd := strings.ToLower(cmdline[0])
|
||||||
|
// the cmd should never have whitespace, but it's possible for the args to
|
||||||
|
// have trailing whitespace.
|
||||||
|
return cmd, strings.TrimSpace(cmdline[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
// covers comments and empty lines. Lines should be trimmed before passing to
|
||||||
|
// this function.
|
||||||
|
func stripComments(line string) string {
|
||||||
|
// string is already trimmed at this point
|
||||||
|
if TOKEN_COMMENT.MatchString(line) {
|
||||||
|
return TOKEN_COMMENT.ReplaceAllString(line, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
return line
|
||||||
|
}
|
|
@ -0,0 +1,37 @@
|
||||||
|
package builder
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// TOKEN_ENV_INTERPOLATION matches environment references of the form
	// $NAME or ${NAME}. The leading group captures either a run of escaped
	// backslashes, a single non-backslash character, a word boundary, or the
	// start of the string, so that backslash-escaped dollars are left alone.
	// NOTE(review): the closing brace is optional ("}?"), so "${NAME" also
	// matches — confirm this permissiveness is intended.
	TOKEN_ENV_INTERPOLATION = regexp.MustCompile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)")
)
|
||||||
|
|
||||||
|
// handle environment replacement. Used in dispatcher.
|
||||||
|
func (b *Builder) replaceEnv(str string) string {
|
||||||
|
for _, match := range TOKEN_ENV_INTERPOLATION.FindAllString(str, -1) {
|
||||||
|
match = match[strings.Index(match, "$"):]
|
||||||
|
matchKey := strings.Trim(match, "${}")
|
||||||
|
|
||||||
|
for _, keyval := range b.Config.Env {
|
||||||
|
tmp := strings.SplitN(keyval, "=", 2)
|
||||||
|
if tmp[0] == matchKey {
|
||||||
|
str = strings.Replace(str, match, tmp[1], -1)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return str
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleJsonArgs normalizes instruction arguments. When the instruction was
// written in JSON (exec) form, args is already the exact argv and is returned
// as-is; otherwise the words are joined into one command string to be run via
// "/bin/sh -c".
func handleJsonArgs(args []string, attributes map[string]bool) []string {
	if attributes != nil && attributes["json"] {
		return args
	}

	// literal string command, not an exec array. The previous
	// append([]string{...}) with a single argument was a no-op wrapper
	// around the literal.
	return []string{"/bin/sh", "-c", strings.Join(args, " ")}
}
|
1006
daemon/build.go
1006
daemon/build.go
File diff suppressed because it is too large
Load Diff
|
@ -101,7 +101,6 @@ func (daemon *Daemon) Install(eng *engine.Engine) error {
|
||||||
// FIXME: remove ImageDelete's dependency on Daemon, then move to graph/
|
// FIXME: remove ImageDelete's dependency on Daemon, then move to graph/
|
||||||
for name, method := range map[string]engine.Handler{
|
for name, method := range map[string]engine.Handler{
|
||||||
"attach": daemon.ContainerAttach,
|
"attach": daemon.ContainerAttach,
|
||||||
"build": daemon.CmdBuild,
|
|
||||||
"commit": daemon.ContainerCommit,
|
"commit": daemon.ContainerCommit,
|
||||||
"container_changes": daemon.ContainerChanges,
|
"container_changes": daemon.ContainerChanges,
|
||||||
"container_copy": daemon.ContainerCopy,
|
"container_copy": daemon.ContainerCopy,
|
||||||
|
|
|
@ -5,6 +5,7 @@ package main
|
||||||
import (
|
import (
|
||||||
"log"
|
"log"
|
||||||
|
|
||||||
|
"github.com/docker/docker/builder"
|
||||||
"github.com/docker/docker/builtins"
|
"github.com/docker/docker/builtins"
|
||||||
"github.com/docker/docker/daemon"
|
"github.com/docker/docker/daemon"
|
||||||
_ "github.com/docker/docker/daemon/execdriver/lxc"
|
_ "github.com/docker/docker/daemon/execdriver/lxc"
|
||||||
|
@ -48,6 +49,10 @@ func mainDaemon() {
|
||||||
if err := d.Install(eng); err != nil {
|
if err := d.Install(eng); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
b := &builder.BuilderJob{eng, d}
|
||||||
|
b.Install()
|
||||||
|
|
||||||
// after the daemon is done setting up we can tell the api to start
|
// after the daemon is done setting up we can tell the api to start
|
||||||
// accepting connections
|
// accepting connections
|
||||||
if err := eng.Job("acceptconnections").Run(); err != nil {
|
if err := eng.Job("acceptconnections").Run(); err != nil {
|
||||||
|
|
|
@ -685,10 +685,11 @@ func TestBuildRelativeWorkdir(t *testing.T) {
|
||||||
|
|
||||||
func TestBuildEnv(t *testing.T) {
|
func TestBuildEnv(t *testing.T) {
|
||||||
name := "testbuildenv"
|
name := "testbuildenv"
|
||||||
expected := "[PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]"
|
expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]"
|
||||||
defer deleteImages(name)
|
defer deleteImages(name)
|
||||||
_, err := buildImage(name,
|
_, err := buildImage(name,
|
||||||
`FROM busybox
|
`FROM busybox
|
||||||
|
ENV PATH /test:$PATH
|
||||||
ENV PORT 2375
|
ENV PORT 2375
|
||||||
RUN [ $(env | grep PORT) = 'PORT=2375' ]`,
|
RUN [ $(env | grep PORT) = 'PORT=2375' ]`,
|
||||||
true)
|
true)
|
||||||
|
@ -1708,6 +1709,9 @@ func TestBuildEnvUsage(t *testing.T) {
|
||||||
name := "testbuildenvusage"
|
name := "testbuildenvusage"
|
||||||
defer deleteImages(name)
|
defer deleteImages(name)
|
||||||
dockerfile := `FROM busybox
|
dockerfile := `FROM busybox
|
||||||
|
ENV PATH $HOME/bin:$PATH
|
||||||
|
ENV PATH /tmp:$PATH
|
||||||
|
RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ]
|
||||||
ENV FOO /foo/baz
|
ENV FOO /foo/baz
|
||||||
ENV BAR /bar
|
ENV BAR /bar
|
||||||
ENV BAZ $BAR
|
ENV BAZ $BAR
|
||||||
|
@ -1717,7 +1721,8 @@ RUN [ "$FOOPATH" = "$PATH:/foo/baz" ]
|
||||||
ENV FROM hello/docker/world
|
ENV FROM hello/docker/world
|
||||||
ENV TO /docker/world/hello
|
ENV TO /docker/world/hello
|
||||||
ADD $FROM $TO
|
ADD $FROM $TO
|
||||||
RUN [ "$(cat $TO)" = "hello" ]`
|
RUN [ "$(cat $TO)" = "hello" ]
|
||||||
|
`
|
||||||
ctx, err := fakeContext(dockerfile, map[string]string{
|
ctx, err := fakeContext(dockerfile, map[string]string{
|
||||||
"hello/docker/world": "hello",
|
"hello/docker/world": "hello",
|
||||||
})
|
})
|
||||||
|
|
Loading…
Reference in New Issue