Create dev-upload tasks for faster uploads during dev builds

Justin SB 2018-12-20 10:55:19 -05:00
parent ede358c19b
commit a3a48e9078
6 changed files with 88 additions and 12 deletions


@@ -15,6 +15,7 @@
 DOCKER_REGISTRY?=gcr.io/must-override
 S3_BUCKET?=s3://must-override/
+UPLOAD_DEST?=$(S3_BUCKET)
 GCS_LOCATION?=gs://must-override
 GCS_URL=$(GCS_LOCATION:gs://%=https://storage.googleapis.com/%)
 LATEST_FILE?=latest-ci.txt
@@ -48,6 +49,9 @@ GCFLAGS?=
 # See http://stackoverflow.com/questions/18136918/how-to-get-current-relative-directory-of-your-makefile
 MAKEDIR:=$(strip $(shell dirname "$(realpath $(lastword $(MAKEFILE_LIST)))"))
+UPLOAD=$(MAKEDIR)/hack/upload
 # Unexport environment variables that can affect tests and are not used in builds
 unexport AWS_ACCESS_KEY_ID AWS_REGION AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN CNI_VERSION_URL DNS_IGNORE_NS_CHECK DNSCONTROLLER_IMAGE DO_ACCESS_TOKEN GOOGLE_APPLICATION_CREDENTIALS
 unexport KOPS_BASE_URL KOPS_CLUSTER_NAME KOPS_RUN_OBSOLETE_VERSION KOPS_STATE_STORE KOPS_STATE_S3_ACL KUBE_API_VERSIONS NODEUP_URL OPENSTACK_CREDENTIAL_FILE PROTOKUBE_IMAGE SKIP_PACKAGE_UPDATE
@@ -80,7 +84,7 @@ ifndef VERSION
 # we never cross versions.
 #
 # We expect that if you are uploading nodeup/protokube, you will set
-# VERSION (along with S3_BUCKET), either directly or by setting CI=1
+# VERSION (along with UPLOAD_DEST), either directly or by setting CI=1
 ifndef CI
 VERSION=${KOPS_RELEASE_VERSION}
 else
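
For illustration, the two ways this expectation can be met (the bucket name and version here are placeholders, not values from the commit; the `else` branch of the `ifndef CI` block, not shown in this hunk, derives a CI version):

```
# Set VERSION explicitly along with the destination...
make dev-upload UPLOAD_DEST=s3://my-dev-bucket VERSION=dev

# ...or set CI=1 and let the Makefile pick the version
CI=1 make dev-upload UPLOAD_DEST=s3://my-dev-bucket
```
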
@@ -747,7 +751,7 @@ push-node-authorizer:
 bazel-protokube-export:
 	mkdir -p ${BAZELIMAGES}
 	bazel build --action_env=PROTOKUBE_TAG=${PROTOKUBE_TAG} --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //images:protokube.tar
-	cp bazel-bin/images/protokube.tar ${BAZELIMAGES}/protokube.tar
+	cp -fp bazel-bin/images/protokube.tar ${BAZELIMAGES}/protokube.tar
 	gzip --force --fast ${BAZELIMAGES}/protokube.tar
 	(${SHASUMCMD} ${BAZELIMAGES}/protokube.tar.gz | cut -d' ' -f1) > ${BAZELIMAGES}/protokube.tar.gz.sha1
@@ -789,3 +793,37 @@ update-machine-types: #Update machine_types.go
 	go build -o hack/machine_types/machine_types ${KOPS_ROOT}/hack/machine_types/
 	hack/machine_types/machine_types --out upup/pkg/fi/cloudup/awsup/machine_types.go
 	go fmt upup/pkg/fi/cloudup/awsup/machine_types.go
+
+#-----------------------------------------------------------
+# development targets
+
+# dev-upload-nodeup uploads nodeup to UPLOAD_DEST
+.PHONY: dev-upload-nodeup
+dev-upload-nodeup: bazel-crossbuild-nodeup
+	mkdir -p ${BAZELUPLOAD}/kops/${VERSION}/linux/amd64/
+	cp -fp bazel-bin/cmd/nodeup/linux_amd64_pure_stripped/nodeup ${BAZELUPLOAD}/kops/${VERSION}/linux/amd64/nodeup
+	(${SHASUMCMD} ${BAZELUPLOAD}/kops/${VERSION}/linux/amd64/nodeup | cut -d' ' -f1) > ${BAZELUPLOAD}/kops/${VERSION}/linux/amd64/nodeup.sha1
+	${UPLOAD} ${BAZELUPLOAD}/ ${UPLOAD_DEST}
+
+# dev-upload-protokube uploads the protokube image to UPLOAD_DEST
+.PHONY: dev-upload-protokube
+dev-upload-protokube: bazel-protokube-export
+	mkdir -p ${BAZELUPLOAD}/kops/${VERSION}/images/
+	cp -fp ${BAZELIMAGES}/protokube.tar.gz ${BAZELUPLOAD}/kops/${VERSION}/images/protokube.tar.gz
+	cp -fp ${BAZELIMAGES}/protokube.tar.gz.sha1 ${BAZELUPLOAD}/kops/${VERSION}/images/protokube.tar.gz.sha1
+	${UPLOAD} ${BAZELUPLOAD}/ ${UPLOAD_DEST}
+
+# dev-copy-utils copies utils from a recent release
+# We don't currently have a bazel build for them, and the build is pretty slow, but they change rarely.
+.PHONY: dev-copy-utils
+dev-copy-utils:
+	mkdir -p ${BAZELUPLOAD}/kops/${VERSION}/linux/amd64/
+	cd ${BAZELUPLOAD}/kops/${VERSION}/linux/amd64/; wget -N https://kubeupv2.s3.amazonaws.com/kops/1.11.0-alpha.1/linux/amd64/utils.tar.gz
+	cd ${BAZELUPLOAD}/kops/${VERSION}/linux/amd64/; wget -N https://kubeupv2.s3.amazonaws.com/kops/1.11.0-alpha.1/linux/amd64/utils.tar.gz.sha1
+	${UPLOAD} ${BAZELUPLOAD}/ ${UPLOAD_DEST}
+
+# dev-upload does a faster build and uploads to UPLOAD_DEST (S3 or GCS)
+# It copies utils instead of building them
+.PHONY: dev-upload
+dev-upload: dev-upload-nodeup dev-upload-protokube dev-copy-utils
+	echo "Done"


@@ -186,10 +186,9 @@ and then push nodeup using:
 ```
 export S3_BUCKET_NAME=<yourbucketname>
-make kops-install upload S3_BUCKET=s3://${S3_BUCKET_NAME} VERSION=dev
-export KOPS_BASE_URL=https://${S3_BUCKET_NAME}.s3.amazonaws.com/kops/dev/
+make kops-install dev-upload UPLOAD_DEST=s3://${S3_BUCKET_NAME}
+export KOPS_BASE_URL=https://${S3_BUCKET_NAME}.s3.amazonaws.com/kops/${KOPS_VERSION}/
 kops create cluster <clustername> --zones us-east-1b
 ...
 ```


@@ -56,7 +56,7 @@ make dns-controller-push DOCKER_REGISTRY=kope
 ```
 # export AWS_PROFILE=??? # If needed
-make upload S3_BUCKET=s3://kubeupv2
+make upload UPLOAD_DEST=s3://kubeupv2
 ```
 ## Tag new version


@@ -71,16 +71,27 @@ This isn't yet terribly useful, though - it just shows how to replicate the
 existing job, but not with your custom code. To test a custom `kops` build, you
 can do the following:
+
+To use S3:
 ```
 # cd to your kops repo
 export S3_BUCKET_NAME=<yourbucketname>
-make kops-install upload S3_BUCKET=s3://${S3_BUCKET_NAME} VERSION=dev
-export KOPS_BASE_URL=https://${S3_BUCKET_NAME}.s3.amazonaws.com/kops/dev/
-kops create cluster <clustername> --zones us-east-1b
+make kops-install dev-upload UPLOAD_DEST=s3://${S3_BUCKET_NAME}
+KOPS_VERSION=`bazel run //cmd/kops version | cut -f2 -d ' '`
+export KOPS_BASE_URL=https://${S3_BUCKET_NAME}.s3.amazonaws.com/kops/${KOPS_VERSION}/
 ```
+
+To use GCS:
+```
+export GCS_BUCKET_NAME=kops-dev-${USER}
+make kops-install dev-upload UPLOAD_DEST=gs://${GCS_BUCKET_NAME}
+KOPS_VERSION=`bazel run //cmd/kops version | cut -f2 -d ' '`
+export KOPS_BASE_URL=https://${GCS_BUCKET_NAME}.storage.googleapis.com/kops/${KOPS_VERSION}/
+```
+
+You can then create a cluster using `kops create cluster <clustername> --zones us-east-1b`.
 
 Then follow the test directions above.
 
 To override the test list for the job, you need to be familiar with the


@@ -95,7 +95,7 @@ echo "Starting build"
 # removing CI=1 because it forces a new upload every time
 # export CI=1
-make && S3_BUCKET=s3://${NODEUP_BUCKET} make upload
+make && UPLOAD_DEST=s3://${NODEUP_BUCKET} make upload
 # removing make test since it relies on the files in the bucket
 # && make test

hack/upload (new executable file)

@@ -0,0 +1,28 @@
+#!/bin/bash -e
+
+# Sync a local directory to an s3:// or gs:// destination
+SRC=$1
+DEST=$2
+
+if [[ -z "${SRC}" ]]; then
+  echo "syntax: $0 <src> <dest>"
+  exit 1
+fi
+
+if [[ -z "${DEST}" ]]; then
+  echo "syntax: $0 <src> <dest>"
+  exit 1
+fi
+
+if [[ "${DEST:0:5}" == "s3://" ]]; then
+  aws s3 sync --acl public-read "${SRC}" "${DEST}"
+  exit 0
+fi
+
+if [[ "${DEST:0:5}" == "gs://" ]]; then
+  gsutil -h "Cache-Control:private,max-age=0" rsync -r -a public-read "${SRC}" "${DEST}"
+  exit 0
+fi
+
+echo "Unsupported destination - supports s3:// and gs:// urls: ${DEST}"
+exit 1
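
For reference, a hypothetical direct invocation of this helper (the staging directory and bucket names are placeholders; the Makefile targets above call it with `${BAZELUPLOAD}/` and `${UPLOAD_DEST}`):

```
hack/upload ./upload-staging/ s3://my-dev-bucket/
hack/upload ./upload-staging/ gs://my-dev-bucket/
```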