Merge pull request #2263 from tianon/hack-release

Update hack/release.sh process to automatically invoke hack/make.sh and bail on building/testing issues
This commit is contained in:
Tianon Gravi 2013-10-23 15:31:16 -07:00
commit f0dbdba5c0
5 changed files with 95 additions and 21 deletions

1
.gitignore vendored
View File

@ -17,3 +17,4 @@ docs/_templates
bundles/ bundles/
.hg/ .hg/
.git/ .git/
vendor/pkg/

View File

@ -27,6 +27,31 @@ do
[ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
mountpoint -q $CGROUP/$SUBSYS || mountpoint -q $CGROUP/$SUBSYS ||
mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
# The two following sections address a bug which manifests itself
# by a cryptic "lxc-start: no ns_cgroup option specified" when
# trying to start containers within a container.
# The bug seems to appear when the cgroup hierarchies are not
# mounted on the exact same directories in the host, and in the
# container.
# Named, control-less cgroups are mounted with "-o name=foo"
# (and appear as such under /proc/<pid>/cgroup) but are usually
# mounted on a directory named "foo" (without the "name=" prefix).
# Systemd and OpenRC (and possibly others) both create such a
# cgroup. To avoid the aforementioned bug, we symlink "foo" to
# "name=foo". This shouldn't have any adverse effect.
echo $SUBSYS | grep -q ^name= && {
NAME=$(echo $SUBSYS | sed s/^name=//)
ln -s $SUBSYS $CGROUP/$NAME
}
# Likewise, on at least one system, it has been reported that
# systemd would mount the CPU and CPU accounting controllers
# (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
# but on a directory called "cpu,cpuacct" (note the inversion
# in the order of the groups). This tries to work around it.
[ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
done done
# Note: as I write those lines, the LXC userland tools cannot setup # Note: as I write those lines, the LXC userland tools cannot setup
@ -38,7 +63,7 @@ grep -qw devices /proc/1/cgroup ||
echo "WARNING: it looks like the 'devices' cgroup is not mounted." echo "WARNING: it looks like the 'devices' cgroup is not mounted."
# Now, close extraneous file descriptors. # Now, close extraneous file descriptors.
pushd /proc/self/fd pushd /proc/self/fd >/dev/null
for FD in * for FD in *
do do
case "$FD" in case "$FD" in
@ -51,9 +76,10 @@ do
;; ;;
esac esac
done done
popd popd >/dev/null
# Mount /tmp # Mount /tmp
mount -t tmpfs none /tmp mount -t tmpfs none /tmp
exec $* [ "$1" ] && exec "$@"
echo "You probably want to run hack/make.sh, or maybe a shell?"

View File

@ -1,4 +1,5 @@
#!/bin/bash #!/bin/bash
set -e
# This script builds various binary artifacts from a checkout of the docker # This script builds various binary artifacts from a checkout of the docker
# source code. # source code.
@ -19,7 +20,7 @@
# "docker run hack/make.sh" in the resulting container image. # "docker run hack/make.sh" in the resulting container image.
# #
set -e set -o pipefail
# We're a nice, sexy, little shell script, and people might try to run us; # We're a nice, sexy, little shell script, and people might try to run us;
# but really, they shouldn't. We want to be in a container! # but really, they shouldn't. We want to be in a container!
@ -32,8 +33,8 @@ grep -q "$RESOLVCONF" /proc/mounts || {
# List of bundles to create when no argument is passed # List of bundles to create when no argument is passed
DEFAULT_BUNDLES=( DEFAULT_BUNDLES=(
test
binary binary
test
ubuntu ubuntu
) )
@ -66,7 +67,7 @@ main() {
fi fi
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if [ $# -lt 1 ]; then if [ $# -lt 1 ]; then
bundles=($DEFAULT_BUNDLES) bundles=(${DEFAULT_BUNDLES[@]})
else else
bundles=($@) bundles=($@)
fi fi

View File

@ -14,6 +14,7 @@ bundle_test() {
for test_dir in $(find_test_dirs); do ( for test_dir in $(find_test_dirs); do (
set -x set -x
cd $test_dir cd $test_dir
go test -i
go test -v -ldflags "$LDFLAGS" $TESTFLAGS go test -v -ldflags "$LDFLAGS" $TESTFLAGS
) done ) done
} 2>&1 | tee $DEST/test.log } 2>&1 | tee $DEST/test.log

View File

@ -1,4 +1,5 @@
#!/bin/sh #!/bin/bash
set -e
# This script looks for bundles built by make.sh, and releases them on a # This script looks for bundles built by make.sh, and releases them on a
# public S3 bucket. # public S3 bucket.
@ -9,11 +10,11 @@
# official Dockerfile at the root of the Docker source code. The Dockerfile, # official Dockerfile at the root of the Docker source code. The Dockerfile,
# make.sh and release.sh should all be from the same source code revision. # make.sh and release.sh should all be from the same source code revision.
set -e set -o pipefail
# Print a usage message and exit. # Print a usage message and exit.
usage() { usage() {
cat <<EOF cat >&2 <<'EOF'
To run, I need: To run, I need:
- to be in a container generated by the Dockerfile at the top of the Docker - to be in a container generated by the Dockerfile at the top of the Docker
repository; repository;
@ -26,11 +27,12 @@ To run, I need:
- a generous amount of good will and nice manners. - a generous amount of good will and nice manners.
The canonical way to run me is to run the image produced by the Dockerfile: e.g.:" The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"
docker run -e AWS_S3_BUCKET=get-staging.docker.io \\ docker run -e AWS_S3_BUCKET=get-staging.docker.io \
AWS_ACCESS_KEY=AKI1234... \\ -e AWS_ACCESS_KEY=AKI1234... \
AWS_SECRET_KEY=sEs4mE... \\ -e AWS_SECRET_KEY=sEs4mE... \
GPG_PASSPHRASE=m0resEs4mE... \\ -e GPG_PASSPHRASE=m0resEs4mE... \
f0058411 -i -t -privileged \
docker ./hack/release.sh
EOF EOF
exit 1 exit 1
} }
@ -39,8 +41,41 @@ EOF
[ "$AWS_ACCESS_KEY" ] || usage [ "$AWS_ACCESS_KEY" ] || usage
[ "$AWS_SECRET_KEY" ] || usage [ "$AWS_SECRET_KEY" ] || usage
[ "$GPG_PASSPHRASE" ] || usage [ "$GPG_PASSPHRASE" ] || usage
[ -d /go/src/github.com/dotcloud/docker/ ] || usage [ -d /go/src/github.com/dotcloud/docker ] || usage
cd /go/src/github.com/dotcloud/docker/ cd /go/src/github.com/dotcloud/docker
[ -x hack/make.sh ] || usage
RELEASE_BUNDLES=(
binary
ubuntu
)
if [ "$1" != '--release-regardless-of-test-failure' ]; then
RELEASE_BUNDLES=( test "${RELEASE_BUNDLES[@]}" )
fi
if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
echo >&2
echo >&2 'The build or tests appear to have failed.'
echo >&2
echo >&2 'You, as the release maintainer, now have a couple options:'
echo >&2 '- delay release and fix issues'
echo >&2 '- delay release and fix issues'
echo >&2 '- did we mention how important this is? issues need fixing :)'
echo >&2
echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
echo >&2 ' really knows all the hairy problems at hand with the current release'
echo >&2 ' issues) may bypass this checking by running this script again with the'
echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
echo >&2 ' running the test suite, and will only build the binaries and packages. Please'
echo >&2 ' avoid using this if at all possible.'
echo >&2
echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
echo >&2 ' should be used. If there are release issues, we should always err on the'
echo >&2 ' side of caution.'
echo >&2
exit 1
fi
VERSION=$(cat VERSION) VERSION=$(cat VERSION)
BUCKET=$AWS_S3_BUCKET BUCKET=$AWS_S3_BUCKET
@ -81,6 +116,10 @@ s3_url() {
# 1. A full APT repository is published at $BUCKET/ubuntu/ # 1. A full APT repository is published at $BUCKET/ubuntu/
# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/info # 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/info
release_ubuntu() { release_ubuntu() {
[ -e bundles/$VERSION/ubuntu ] || {
echo >&2 './hack/make.sh must be run before release_ubuntu'
exit 1
}
# Make sure that we have our keys # Make sure that we have our keys
mkdir -p /.gnupg/ mkdir -p /.gnupg/
s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true
@ -116,8 +155,7 @@ EOF
reprepro -b $APTDIR includedeb docker $DEBFILE reprepro -b $APTDIR includedeb docker $DEBFILE
# Sign # Sign
for F in $(find $APTDIR -name Release) for F in $(find $APTDIR -name Release); do
do
gpg -u releasedocker --passphrase $GPG_PASSPHRASE \ gpg -u releasedocker --passphrase $GPG_PASSPHRASE \
--armor --sign --detach-sign \ --armor --sign --detach-sign \
--output $F.gpg $F --output $F.gpg $F
@ -137,13 +175,20 @@ echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
curl $(s3_url)/gpg | apt-key add - curl $(s3_url)/gpg | apt-key add -
# Install docker # Install docker
apt-get update ; apt-get install -y lxc-docker apt-get update ; apt-get install -y lxc-docker
#
# Alternatively, just use the curl-able install.sh script provided at $(s3_url)
#
EOF EOF
echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu/info" echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu/info"
} }
# Upload a static binary to S3 # Upload a static binary to S3
release_binary() { release_binary() {
[ -e bundles/$VERSION ] [ -e bundles/$VERSION/binary/docker-$VERSION ] || {
echo >&2 './hack/make.sh must be run before release_binary'
exit 1
}
S3DIR=s3://$BUCKET/builds/Linux/x86_64 S3DIR=s3://$BUCKET/builds/Linux/x86_64
s3cmd --acl-public put bundles/$VERSION/binary/docker-$VERSION $S3DIR/docker-$VERSION s3cmd --acl-public put bundles/$VERSION/binary/docker-$VERSION $S3DIR/docker-$VERSION
cat <<EOF | write_to_s3 s3://$BUCKET/builds/info cat <<EOF | write_to_s3 s3://$BUCKET/builds/info