Compare commits

..

2 Commits

Author SHA1 Message Date
Natalie Arellano aa4bbaca87
Merge pull request #614 from buildpacks/bump-imgutil
Bump imgutil
2021-05-10 17:04:58 +00:00
Natalie Arellano 79698166a1 Bump imgutil
Signed-off-by: Natalie Arellano <narellano@vmware.com>
2021-05-10 12:19:52 -04:00
681 changed files with 15138 additions and 35065 deletions

View File

@ -2,14 +2,12 @@
name: Bug name: Bug
about: Bug report about: Bug report
title: '' title: ''
labels: type/bug, status/triage labels: status/triage, type/bug
assignees: '' assignees: ''
--- ---
### Summary ### Summary
<!-- Please provide a general summary of the issue. --> <!--- Please provide a general summary of the issue. -->
--- ---
@ -17,20 +15,17 @@ assignees: ''
### Reproduction ### Reproduction
##### Steps ##### Steps
<!-- What steps should be taken to reproduce the issue? --> <!--- What steps should be taken to reproduce the issue? -->
1. 1.
2. 2.
3. 3.
##### Current behavior ##### Current behavior
<!-- What happened? Logs, etc. could go here. --> <!--- What happened? Logs, etc. could go here. -->
##### Expected behavior
<!-- What did you expect to happen? -->
##### Expected
<!--- What did you expect to happen? -->
--- ---
@ -38,15 +33,10 @@ assignees: ''
### Context ### Context
##### lifecycle version ##### lifecycle version
<!-- If you can find this, it helps us pin down the issue. For example, run `pack builder inspect <builder name>` which should report the lifecycle version in question. --> <!--- If you can find this, it helps us pin down the issue. For example, run `pack inspect-builder BUILDER` which should report the lifecycle version in question. -->
##### platform version(s) ##### platform version(s)
<!-- For example run `pack report` and `docker info` and copy output here, redacting any sensitive information. --> <!--- For example run `pack report` and `docker info` and copy output here. -->
##### anything else? ##### anything else?
<!-- Add any other context that may help (e.g., Tekton task version, kpack version, etc.). --> <!--- Tekton task version, kpack version, etc. -->

View File

@ -1,26 +0,0 @@
---
name: Chore
about: Suggest a chore that will help contributors and doesn't affect end users
title: ''
labels: type/chore, status/triage
assignees: ''
---
### Summary
<!-- Please describe why this chore matters, who will enjoy it and how. -->
---
### Proposal
<!-- How do you think the chore should be implemented? -->
---
### Context
<!-- Add any other context that may help. -->

View File

@ -1,33 +0,0 @@
---
name: Feature request
about: Suggest a new feature or an improvement to existing functionality
title: ''
labels: type/enhancement, status/triage
assignees: ''
---
### Summary
<!-- Please describe the feature and why it matters. -->
---
### Proposal
<!-- How do you think the feature should be implemented? -->
---
### Related
<!-- If this feature addresses an RFC, please provide the RFC number below. -->
RFC #___
---
### Context
<!-- Add any other context that may help. -->

View File

@ -1,16 +0,0 @@
version: 2
updates:
- package-ecosystem: gomod
directory: "/"
schedule:
interval: weekly
groups:
# Group all minor/patch go dependencies into a single PR.
go-dependencies:
update-types:
- "minor"
- "patch"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: weekly

View File

@ -1,25 +0,0 @@
<!-- 🎉🎉🎉 Thank you for the PR!!! 🎉🎉🎉 -->
### Summary
<!-- Please describe your changes at a high level. -->
#### Release notes
<!-- Please provide 1-2 sentences for release notes. -->
<!-- Example: When using platform API `0.7` or greater, the `creator` logs the expected phase header for the analyze phase -->
---
### Related
<!-- If this PR addresses an issue, please provide the issue number below. -->
Resolves #___
---
### Context
<!-- Add any other context that may help reviewers (e.g., code that requires special attention, etc.). -->

View File

@ -11,17 +11,14 @@ on:
- 'release/**' - 'release/**'
jobs: jobs:
test-linux-amd64: test-linux:
runs-on: ubuntu-latest runs-on: ubuntu-18.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v2
- name: Set up go
uses: actions/setup-go@v2
with: with:
fetch-depth: '0' go-version: '1.15'
- name: Setup go
uses: actions/setup-go@v5
with:
check-latest: true
go-version-file: 'go.mod'
- name: Install jq - name: Install jq
run: | run: |
mkdir -p deps/bin mkdir -p deps/bin
@ -29,201 +26,56 @@ jobs:
chmod +x deps/bin/jq chmod +x deps/bin/jq
echo "${PWD}/deps/bin" >> $GITHUB_PATH echo "${PWD}/deps/bin" >> $GITHUB_PATH
- name: Test - name: Test
env:
TEST_COVERAGE: 1
run: make test run: make test
- name: Upload coverage to Codecov test-windows:
uses: codecov/codecov-action@v5 runs-on: windows-latest
with:
token: ${{ secrets.CODECOV_TOKEN }}
file: ./out/tests/coverage-unit.txt
flags: unit,os_linux
fail_ci_if_error: true
verbose: true
test-linux-arm64:
runs-on: linux-arm64
steps: steps:
- uses: actions/checkout@v4 - name: Set git to use LF and symlinks
run: |
git config --global core.autocrlf false
git config --global core.eol lf
git config --global core.symlinks true
- uses: actions/checkout@v2
- name: Set up go
uses: actions/setup-go@v2
with: with:
fetch-depth: '0' go-version: '1.15'
- name: Setup go - name: Install jq
uses: actions/setup-go@v5 run: choco install jq
with:
check-latest: true
go-version-file: 'go.mod'
- name: Test - name: Test
run: | run: make test
make format || true build:
make test
build-and-publish:
needs: needs:
- test-linux-amd64 - test-linux
- test-linux-arm64 - test-windows
runs-on: ubuntu-latest runs-on: ubuntu-18.04
permissions:
id-token: write
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v2
with: with:
fetch-depth: 0 # fetch all history for all branches and tags fetch-depth: 0
- name: Setup go - name: Set up go
uses: actions/setup-go@v5 uses: actions/setup-go@v2
with: with:
check-latest: true go-version: '1.15'
go-version-file: 'go.mod'
- name: Install Cosign
uses: sigstore/cosign-installer@v3
- name: Set version
run: |
echo "LIFECYCLE_VERSION=$(go run tools/version/main.go)" | tee -a $GITHUB_ENV version.txt
- uses: actions/upload-artifact@v4
with:
name: version
path: version.txt
- name: Set tag
run: |
echo "LIFECYCLE_IMAGE_TAG=$(git describe --always --abbrev=7)" >> tag.txt
- uses: actions/upload-artifact@v4
with:
name: tag
path: tag.txt
- name: Build - name: Build
run: | run: |
make clean make clean
make build make build
make package make package
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v2
with: with:
name: lifecycle-linux-x86-64 name: lifecycle-linux-x86-64
path: out/lifecycle-v*+linux.x86-64.tgz path: out/lifecycle-v*+linux.x86-64.tgz
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v2
with: with:
name: lifecycle-linux-x86-64-sha256 name: lifecycle-windows-x86-64
path: out/lifecycle-v*+linux.x86-64.tgz.sha256 path: out/lifecycle-v*+windows.x86-64.tgz
- uses: actions/upload-artifact@v4
with:
name: lifecycle-linux-arm64
path: out/lifecycle-v*+linux.arm64.tgz
- uses: actions/upload-artifact@v4
with:
name: lifecycle-linux-arm64-sha256
path: out/lifecycle-v*+linux.arm64.tgz.sha256
- uses: actions/upload-artifact@v4
with:
name: lifecycle-linux-ppc64le
path: out/lifecycle-v*+linux.ppc64le.tgz
- uses: actions/upload-artifact@v4
with:
name: lifecycle-linux-ppc64le-sha256
path: out/lifecycle-v*+linux.ppc64le.tgz.sha256
- uses: actions/upload-artifact@v4
with:
name: lifecycle-linux-s390x
path: out/lifecycle-v*+linux.s390x.tgz
- uses: actions/upload-artifact@v4
with:
name: lifecycle-linux-s390x-sha256
path: out/lifecycle-v*+linux.s390x.tgz.sha256
- name: Generate SBOM JSON
uses: CycloneDX/gh-gomod-generate-sbom@v2
with:
args: mod -licenses -json -output lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json
version: ^v1
- uses: actions/upload-artifact@v4
with:
name: lifecycle-bom-cdx
path: lifecycle-v*-bom.cdx.json
- name: Calculate SBOM sha
run: |
shasum -a 256 lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json > lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json.sha256
- uses: actions/upload-artifact@v4
with:
name: lifecycle-bom-cdx-sha256
path: lifecycle-v*-bom.cdx.json.sha256
- uses: azure/docker-login@v2
if: github.event_name == 'push'
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- uses: actions/download-artifact@v5
with:
name: tag
- name: Set env
run: |
cat tag.txt >> $GITHUB_ENV
- name: Publish images - name: Publish images
if: github.event_name == 'push' if: github.event_name == 'push'
run: | run: |
DOCKER_CLI_EXPERIMENTAL=enabled echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin
LIFECYCLE_IMAGE_TAG=$(git describe --always --abbrev=7) LIFECYCLE_IMAGE_TAG=$(git describe --always --dirty)
go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.x86-64.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux
LINUX_AMD64_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.x86-64.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-x86-64 | awk '{print $NF}') go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+windows.x86-64.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-windows -os windows
echo "LINUX_AMD64_SHA: $LINUX_AMD64_SHA" DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG} buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-windows
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}
LINUX_ARM64_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.arm64.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-arm64 -arch arm64 | awk '{print $NF}')
echo "LINUX_ARM64_SHA: $LINUX_ARM64_SHA"
LINUX_PPC64LE_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.ppc64le.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-ppc64le -arch ppc64le | awk '{print $NF}')
echo "LINUX_PPC64LE_SHA: LINUX_PPC64LE_SHA"
LINUX_S390X_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.s390x.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-s390x -arch s390x | awk '{print $NF}')
echo "LINUX_S390X_SHA: $LINUX_S390X_SHA"
docker manifest create buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG} \
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-x86-64@${LINUX_AMD64_SHA} \
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-arm64@${LINUX_ARM64_SHA} \
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-ppc64le@${LINUX_PPC64LE_SHA} \
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-s390x@${LINUX_S390X_SHA}
MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG})
echo "MANIFEST_SHA: $MANIFEST_SHA"
cosign sign -r -y \
-a tag=${LIFECYCLE_IMAGE_TAG} \
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}@${MANIFEST_SHA}
cosign verify \
--certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" \
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
-a tag=${LIFECYCLE_IMAGE_TAG} \
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}
- name: Scan image
if: github.event_name == 'push'
uses: anchore/scan-action@v6
with:
image: buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}
pack-acceptance-linux:
if: github.event_name == 'push'
needs: build-and-publish
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
repository: 'buildpacks/pack'
path: 'pack'
ref: 'main'
fetch-depth: 0 # fetch all history for all branches and tags
- name: Setup go
uses: actions/setup-go@v5
with:
go-version-file: 'pack/go.mod'
- uses: actions/download-artifact@v5
with:
name: version
- uses: actions/download-artifact@v5
with:
name: tag
- name: Set env
run: |
cat version.txt >> $GITHUB_ENV
cat tag.txt >> $GITHUB_ENV
- uses: actions/download-artifact@v5
with:
name: lifecycle-linux-x86-64
path: pack
- name: Run pack acceptance
run: |
cd pack
git checkout $(git describe --abbrev=0 --tags) # check out the latest tag
LIFECYCLE_PATH="../lifecycle-v${{ env.LIFECYCLE_VERSION }}+linux.x86-64.tgz" \
LIFECYCLE_IMAGE="buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}" \
make acceptance

View File

@ -1,127 +0,0 @@
name: check-latest-release
on:
schedule:
- cron: 0 2 * * 1,4
workflow_dispatch: {}
jobs:
check-release:
runs-on:
- ubuntu-latest
permissions:
issues: write
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
check-latest: true
go-version-file: 'go.mod'
- name: Get previous release tag
id: get-previous-release-tag
uses: actions/github-script@v6
with:
github-token: ${{secrets.GITHUB_TOKEN}}
result-encoding: string
script: |
return github.rest.repos.getLatestRelease({
owner: "buildpacks",
repo: "lifecycle",
}).then(result => {
return result.data.tag_name
})
- name: Read go and release versions
id: read-versions
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
#!/usr/bin/env bash
set -euo pipefail
LATEST_GO_VERSION=$(go version | cut -d ' ' -f 3)
LATEST_RELEASE_VERSION=${{ steps.get-previous-release-tag.outputs.result }}
wget https://github.com/buildpacks/lifecycle/releases/download/$LATEST_RELEASE_VERSION/lifecycle-$LATEST_RELEASE_VERSION+linux.x86-64.tgz -O lifecycle.tgz
tar xzf lifecycle.tgz
LATEST_RELEASE_GO_VERSION=$(go version ./lifecycle/lifecycle | cut -d ' ' -f 2)
echo "latest-go-version=${LATEST_GO_VERSION}" >> "$GITHUB_OUTPUT"
echo "latest-release-go-version=${LATEST_RELEASE_GO_VERSION}" >> "$GITHUB_OUTPUT"
LATEST_RELEASE_VERSION=$(echo $LATEST_RELEASE_VERSION | cut -d \v -f 2)
echo "latest-release-version=${LATEST_RELEASE_VERSION}" >> "$GITHUB_OUTPUT"
- name: Create issue if needed
if: ${{ steps.read-versions.outputs.latest-go-version != steps.read-versions.outputs.latest-release-go-version }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
#!/usr/bin/env bash
set -euo pipefail
title="Upgrade lifecycle to ${{ steps.read-versions.outputs.latest-go-version }}"
label=${{ steps.read-versions.outputs.latest-go-version }}
# Create label to use for exact search
gh label create "$label" || true
search_output=$(gh issue list --search "$title" --label "$label")
body="Latest lifecycle release v${{ steps.read-versions.outputs.latest-release-version }} is built with Go version ${{ steps.read-versions.outputs.latest-release-go-version }}; newer version ${{ steps.read-versions.outputs.latest-go-version }} is available."
if [ -z "${search_output// }" ]
then
echo "No issues matched search; creating new issue..."
gh issue create \
--label "type/bug" \
--label "status/triage" \
--label "$label" \
--title "$title" \
--body "$body"
else
echo "Found matching issues:"
echo $search_output
fi
- name: Scan latest release image
id: scan-image
uses: anchore/scan-action@v6
with:
image: buildpacksio/lifecycle:${{ steps.read-versions.outputs.latest-release-version }}
fail-build: true
severity-cutoff: medium
output-format: json
- name: Create issue if needed
if: failure() && steps.scan-image.outcome == 'failure'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
#!/usr/bin/env bash
set -euo pipefail
title="CVE(s) found in v${{ steps.read-versions.outputs.latest-release-version }}"
label=cve
# Create label to use for exact search
gh label create "$label" || true
search_output=$(gh issue list --search "$title" --label "$label")
GITHUB_WORKFLOW_URL=https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID
body="Latest lifecycle release v${{ steps.read-versions.outputs.latest-release-version }} triggered CVE(s) from Grype. For further details, see: $GITHUB_WORKFLOW_URL json: $(cat ${{ steps.scan-image.outputs.json }} | jq '.matches[] | .vulnerability | {id, severity, description}' )"
if [ -z "${search_output// }" ]
then
echo "No issues matched search; creating new issue..."
gh issue create \
--label "type/bug" \
--label "status/triage" \
--label "$label" \
--title "$title" \
--body "$body"
else
echo "Found matching issues:"
echo $search_output
fi

View File

@ -6,10 +6,8 @@ on:
jobs: jobs:
draft-release: draft-release:
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions:
contents: write
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v2
- name: Install jq - name: Install jq
run: | run: |
mkdir -p deps/bin mkdir -p deps/bin
@ -24,16 +22,16 @@ jobs:
exit 1 exit 1
fi fi
echo "LIFECYCLE_VERSION=$version" >> $GITHUB_ENV echo "LIFECYCLE_VERSION=$version" >> $GITHUB_ENV
- name: Determine download urls for linux-x86-64, linux-arm64, linux-ppc64le, linux-s390x shell: bash
- name: Determine download urls for linux and windows
id: artifact-urls id: artifact-urls
# FIXME: this script should be updated to work with actions/github-script@v6 uses: actions/github-script@v3.0.0
uses: actions/github-script@v3
with: with:
github-token: ${{ secrets.GITHUB_TOKEN }} github-token: ${{secrets.GITHUB_TOKEN}}
script: | script: |
return github.actions return github.actions
.listRepoWorkflows({ .listRepoWorkflows({
owner: "${{ github.repository_owner }}", owner: "buildpacks",
repo: "lifecycle", repo: "lifecycle",
}) })
.then(workflows_result => { .then(workflows_result => {
@ -47,7 +45,7 @@ jobs:
}) })
.then(workflow_id => { .then(workflow_id => {
return github.actions.listWorkflowRunsForRepo({ return github.actions.listWorkflowRunsForRepo({
owner: "${{ github.repository_owner }}", owner: "buildpacks",
repo: "lifecycle", repo: "lifecycle",
workflow_id: workflow_id, workflow_id: workflow_id,
branch: "release/${{ env.LIFECYCLE_VERSION }}", branch: "release/${{ env.LIFECYCLE_VERSION }}",
@ -65,7 +63,7 @@ jobs:
}) })
.then(workflow_runid => { .then(workflow_runid => {
return github.actions.listWorkflowRunArtifacts({ return github.actions.listWorkflowRunArtifacts({
owner: "${{ github.repository_owner }}", owner: "buildpacks",
repo: "lifecycle", repo: "lifecycle",
run_id: workflow_runid run_id: workflow_runid
}) })
@ -73,137 +71,80 @@ jobs:
.then(artifacts_result => { .then(artifacts_result => {
let tuples = artifacts_result.data.artifacts let tuples = artifacts_result.data.artifacts
.map(artifact => [artifact.name, artifact.archive_download_url]); .map(artifact => [artifact.name, artifact.archive_download_url]);
let urlList = new Array(); let urlMap = new Map();
tuples.forEach(function(tuple) { tuples.forEach(function(tuple) {
if (tuple[0].includes("lifecycle-")) { if (tuple[0].includes("linux")) {
urlList.push(tuple[1]); urlMap.set("linux", tuple[1])
}
if (tuple[0].includes("windows")) {
urlMap.set("windows", tuple[1])
} }
}) })
if (urlList.length === 0) { if (urlMap.size === 0) {
throw "no artifacts found" throw "no artifacts found"
} }
if (urlList.length != 10) { if (urlMap.size != 2) {
// found too many artifacts throw "there should be exactly two artifacts"
// list them and throw
console.log(urlList);
throw "there should be exactly 10 artifacts, found " + urlList.length + " artifacts"
} }
return urlList.join(",") return Object.fromEntries(urlMap.entries())
}) })
- name: Download artifacts - name: Download linux artifact
run: | run: |
mkdir artifacts url=$(echo '${{ steps.artifact-urls.outputs.result }}' | jq -r .linux )
echo "ARTIFACTS_PATH=$PWD/artifacts" >> $GITHUB_ENV curl -sL -w 'RESP_CODE:%{response_code}\n' \
--header 'Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
urls=$(echo '${{ steps.artifact-urls.outputs.result }}' | jq -r . ) -o artifact-linux.zip $url
mkdir artifact-linux
for url in $(echo $urls | tr "," "\n"); do unzip -d artifact-linux artifact-linux.zip
curl -sL -w 'RESP_CODE:%{response_code}\n' \ lifecycle_path=$(ls artifact-linux/lifecycle-*linux.x86-64.tgz)
--header 'Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \ echo "ARTIFACT_LINUX_PATH=$PWD/$lifecycle_path" >> $GITHUB_ENV
-o tmp-artifact.zip $url - name: Download windows artifact
unzip -d artifacts tmp-artifact.zip
rm tmp-artifact.zip
done
- name: Combine checksums
run: | run: |
cd ${{ env.ARTIFACTS_PATH }} url=$(echo '${{ steps.artifact-urls.outputs.result }}' | jq -r .windows )
cat *.sha256 | sort > lifecycle-v${{ env.LIFECYCLE_VERSION }}-checksums.txt curl -sL -w 'RESP_CODE:%{response_code}\n' \
rm *.sha256 --header 'Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
- name: Set pre-release kind -o artifact-windows.zip $url
if: "contains(env.LIFECYCLE_VERSION, 'rc') || contains(env.LIFECYCLE_VERSION, 'pre')" # e.g., 0.99.0-rc.1 mkdir artifact-windows
run: | unzip -d artifact-windows artifact-windows.zip
echo "RELEASE_KIND=pre-release" >> $GITHUB_ENV lifecycle_path=$(ls artifact-windows/lifecycle-*windows.x86-64.tgz)
- name: Set release kind echo "ARTIFACT_WINDOWS_PATH=$PWD/$lifecycle_path" >> $GITHUB_ENV
if: "!contains(env.LIFECYCLE_VERSION, 'rc') && !contains(env.LIFECYCLE_VERSION, 'pre')" - name: Create Release
run: | id: create_release
echo "RELEASE_KIND=release" >> $GITHUB_ENV uses: actions/create-release@latest
- name: Get previous release tag
id: get-previous-release-tag
uses: actions/github-script@v6
with:
github-token: ${{secrets.GITHUB_TOKEN}}
result-encoding: string
script: |
return github.rest.repos.getLatestRelease({
owner: "buildpacks",
repo: "lifecycle",
}).then(result => {
return result.data.tag_name
})
- name: Setup go
uses: actions/setup-go@v5
with:
check-latest: true
go-version-file: 'go.mod'
- name: Get go version
id: get-go-version
run: |
mkdir tmp
tar xzvf ${{ env.ARTIFACTS_PATH }}/lifecycle-v${{ env.LIFECYCLE_VERSION }}+linux.x86-64.tgz -C tmp/
echo "GO_VERSION=$(go version tmp/lifecycle/lifecycle | cut -d ' ' -f 2 | sed -e 's/^go//')" >> $GITHUB_ENV
- name: Set release body text
run: |
cat << EOF > body.txt
# lifecycle v${{ env.LIFECYCLE_VERSION }}
Welcome to v${{ env.LIFECYCLE_VERSION }}, a ${{ env.RELEASE_KIND }} of the Cloud Native Buildpacks Lifecycle.
## Prerequisites
The lifecycle runs as a normal user in a series of unprivileged containers. To export images and cache image layers, it requires access to a Docker (compatible) daemon **or** an OCI registry.
## Install
Extract the .tgz file and copy the lifecycle binaries into a [build image](https://github.com/buildpacks/spec/blob/main/platform.md#build-image). The build image can then be orchestrated by a platform implementation such as the [pack CLI](https://github.com/buildpack/pack) or [tekton](https://github.com/tektoncd/catalog/tree/main/task/buildpacks).
## Lifecycle Image
An OCI image containing the lifecycle binaries is available at buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}.
## Features
* TODO
* Updates go to version ${{ env.GO_VERSION }}
## Bugfixes
* TODO
## Chores
* TODO
**Full Changelog**: https://github.com/buildpacks/lifecycle/compare/${{ steps.get-previous-release-tag.outputs.result }}...release/${{ env.LIFECYCLE_VERSION }}
## Contributors
We'd like to acknowledge that this release wouldn't be as good without the help of the following amazing contributors:
TODO
EOF
- name: Create pre-release
if: "contains(env.LIFECYCLE_VERSION, 'rc') || contains(env.LIFECYCLE_VERSION, 'pre')" # e.g., 0.99.0-rc.1
run: |
cd ${{ env.ARTIFACTS_PATH }}
gh release create v${{ env.LIFECYCLE_VERSION }} \
$(ls | sort | paste -sd " " -) \
--draft \
--notes-file ../body.txt \
--prerelease \
--target $GITHUB_REF_NAME \
--title "lifecycle v${{ env.LIFECYCLE_VERSION }}"
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Create release with:
if: "!contains(env.LIFECYCLE_VERSION, 'rc') && !contains(env.LIFECYCLE_VERSION, 'pre')" tag_name: v${{ env.LIFECYCLE_VERSION }}
run: | release_name: lifecycle v${{ env.LIFECYCLE_VERSION }}
cd ${{ env.ARTIFACTS_PATH }} draft: true
gh release create v${{ env.LIFECYCLE_VERSION }} \ prerelease: false
$(ls | sort | paste -sd " " -) \ body: |
--draft \ # lifecycle v${{ env.LIFECYCLE_VERSION }}
--notes-file ../body.txt \
--target $GITHUB_REF_NAME \ Welcome to `v${{ env.LIFECYCLE_VERSION }}`, a **beta** release of the Cloud Native Buildpack Lifecycle.
--title "lifecycle v${{ env.LIFECYCLE_VERSION }}"
## Prerequisites
The lifecycle runs as a normal user in a series of unprivileged containers. To export images and cache image layers, it requires access to a Docker daemon **or** Docker registry.
## Install
Extract the `.tgz` file and copy the lifecycle binaries into a [build stack base image](https://github.com/buildpack/spec/blob/master/platform.md#stacks). The build image can then be orchestrated by a platform implementation such as the [pack CLI](https://github.com/buildpack/pack) or [tekton](https://github.com/tektoncd/catalog/blob/master/task/buildpacks/0.1/README.md).
- name: Upload Release Asset - linux
uses: actions/upload-release-asset@v1
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ${{ env.ARTIFACT_LINUX_PATH }}
asset_name: lifecycle-v${{ env.LIFECYCLE_VERSION }}+linux.x86-64.tgz
asset_content_type: application/gzip
- name: Upload Release Asset - windows
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ${{ env.ARTIFACT_WINDOWS_PATH }}
asset_name: lifecycle-v${{ env.LIFECYCLE_VERSION }}+windows.x86-64.tgz
asset_content_type: application/gzip

View File

@ -3,126 +3,58 @@ name: post-release
on: on:
release: release:
types: types:
- published # trigger for releases and pre-releases - published
jobs: jobs:
retag-lifecycle-images: retag-lifecycle-images-linux:
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions:
id-token: write
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v2
- name: Setup go - name: Docker login
uses: actions/setup-go@v5
with:
check-latest: true
go-version-file: 'go.mod'
- name: Install crane
run: | run: |
go install github.com/google/go-containerregistry/cmd/crane@latest echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin
- name: Install cosign
uses: sigstore/cosign-installer@v3
- uses: azure/docker-login@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Set env - name: Set env
run: | run: |
echo "LIFECYCLE_VERSION=$(echo ${{ github.event.release.tag_name }} | cut -d "v" -f2)" >> $GITHUB_ENV echo "LIFECYCLE_VERSION=$(echo ${{ github.event.release.tag_name }} | cut -d "v" -f2)" >> $GITHUB_ENV
echo "LIFECYCLE_IMAGE_TAG=$(git describe --always --abbrev=7)" >> $GITHUB_ENV echo "LIFECYCLE_IMAGE_TAG=$(git describe --always --dirty)" >> $GITHUB_ENV
- name: Verify lifecycle images - name: Retag release candidate lifecycle images
run: | run: |
LINUX_AMD64_SHA=$(cosign verify --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" --certificate-oidc-issuer https://token.actions.githubusercontent.com buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-x86-64 | jq -r .[0].critical.image.\"docker-manifest-digest\") docker pull buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux
echo "LINUX_AMD64_SHA: $LINUX_AMD64_SHA" docker image tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux
echo "LINUX_AMD64_SHA=$LINUX_AMD64_SHA" >> $GITHUB_ENV docker image tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux buildpacksio/lifecycle:latest-linux
docker push buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux
LINUX_ARM64_SHA=$(cosign verify --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" --certificate-oidc-issuer https://token.actions.githubusercontent.com buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64 | jq -r .[0].critical.image.\"docker-manifest-digest\") docker push buildpacksio/lifecycle:latest-linux
echo "LINUX_ARM64_SHA: $LINUX_ARM64_SHA" retag-lifecycle-images-windows:
echo "LINUX_ARM64_SHA=$LINUX_ARM64_SHA" >> $GITHUB_ENV runs-on: windows-latest
steps:
LINUX_PPC64LE_SHA=$(cosign verify --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" --certificate-oidc-issuer https://token.actions.githubusercontent.com buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-ppc64le | jq -r .[0].critical.image.\"docker-manifest-digest\") - uses: actions/checkout@v2
echo "LINUX_PPC64LE_SHA: $LINUX_PPC64LE_SHA" - name: Docker login
echo "LINUX_PPC64LE_SHA=$LINUX_PPC64LE_SHA" >> $GITHUB_ENV
LINUX_S390X_SHA=$(cosign verify --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" --certificate-oidc-issuer https://token.actions.githubusercontent.com buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-s390x | jq -r .[0].critical.image.\"docker-manifest-digest\")
echo "LINUX_S390X_SHA: $LINUX_S390X_SHA"
echo "LINUX_S390X_SHA=$LINUX_S390X_SHA" >> $GITHUB_ENV
- name: Download SBOM
run: | run: |
gh release download --pattern '*-bom.cdx.json' ${{ github.event.release.tag_name }} echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin
env: - name: Set env
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Retag lifecycle images & create manifest list - semver
run: | run: |
DOCKER_CLI_EXPERIMENTAL=enabled echo "LIFECYCLE_VERSION=$(echo ${{ github.event.release.tag_name }} | cut -d "v" -f2)" >> $env:GITHUB_ENV
echo "LIFECYCLE_IMAGE_TAG=$(git describe --always --dirty)" >> $env:GITHUB_ENV
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-x86-64@${{ env.LINUX_AMD64_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-x86-64 - name: Retag release candidate lifecycle images
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-arm64
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-ppc64le
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-s390x@${{ env.LINUX_S390X_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-s390x
docker manifest create buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }} \
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-x86-64@${{ env.LINUX_AMD64_SHA }} \
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} \
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} \
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-s390x@${{ env.LINUX_S390X_SHA }}
MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }})
echo "MANIFEST_SHA: $MANIFEST_SHA"
cosign sign -r -y \
-a tag=${{ env.LIFECYCLE_VERSION }} \
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}@${MANIFEST_SHA}
cosign verify \
--certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/post-release.yml" \
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
-a tag=${{ env.LIFECYCLE_VERSION }} \
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}
cosign attach sbom --sbom ./*-bom.cdx.json --type cyclonedx buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}
cosign sign -r -y \
-a tag=${{ env.LIFECYCLE_VERSION }} --attachment sbom \
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}@${MANIFEST_SHA}
cosign verify \
--certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/post-release.yml" \
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
-a tag=${{ env.LIFECYCLE_VERSION }} --attachment sbom \
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}
- name: Retag lifecycle images & create manifest list - latest
if: "!contains(env.LIFECYCLE_VERSION, 'rc') && !contains(env.LIFECYCLE_VERSION, 'pre')"
run: | run: |
DOCKER_CLI_EXPERIMENTAL=enabled docker pull buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-windows
docker image tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-windows buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-windows
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-x86-64@${{ env.LINUX_AMD64_SHA }} latest-linux-x86-64 docker image tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-windows buildpacksio/lifecycle:latest-windows
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} latest-linux-arm64 docker push buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-windows
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} latest-linux-ppc64le docker push buildpacksio/lifecycle:latest-windows
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-s390x@${{ env.LINUX_S390X_SHA }} latest-linux-s390x create-manifest-lists:
runs-on: ubuntu-latest
docker manifest create buildpacksio/lifecycle:latest \ needs: [retag-lifecycle-images-linux, retag-lifecycle-images-windows]
buildpacksio/lifecycle:latest-linux-x86-64@${{ env.LINUX_AMD64_SHA }} \ steps:
buildpacksio/lifecycle:latest-linux-arm64@${{ env.LINUX_ARM64_SHA }} \ - name: Docker login
buildpacksio/lifecycle:latest-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} \ run: |
buildpacksio/lifecycle:latest-linux-s390x@${{ env.LINUX_S390X_SHA }} echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin
- name: Set env
MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:latest) run: |
echo "MANIFEST_SHA: $MANIFEST_SHA" echo "LIFECYCLE_VERSION=$(echo ${{ github.event.release.tag_name }} | cut -d "v" -f2)" >> $GITHUB_ENV
- name: Create lifecycle image manifest lists
cosign sign -r -y \ run: |
-a tag=latest \ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }} buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-windows
buildpacksio/lifecycle:latest@${MANIFEST_SHA} DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}
cosign verify \ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create buildpacksio/lifecycle:latest buildpacksio/lifecycle:latest-linux buildpacksio/lifecycle:latest-windows
--certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/post-release.yml" \ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push buildpacksio/lifecycle:latest
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
-a tag=latest \
buildpacksio/lifecycle:latest
cosign attach sbom --sbom ./*-bom.cdx.json --type cyclonedx buildpacksio/lifecycle:latest
cosign sign -r -y \
-a tag=${{ env.LIFECYCLE_VERSION }} --attachment sbom \
buildpacksio/lifecycle:latest@${MANIFEST_SHA}
cosign verify \
--certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/post-release.yml" \
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
-a tag=${{ env.LIFECYCLE_VERSION }} --attachment sbom \
buildpacksio/lifecycle:latest

View File

@ -1,87 +0,0 @@
name: test-s390x
on:
push:
branches:
- main
- 'release/**'
pull_request:
branches:
- main
- 'release/**'
jobs:
test-linux-s390x:
if: (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/release*')
runs-on: ubuntu-latest
env:
ZVSI_FP_NAME: bp-floating-ci-${{ github.run_id }}
ZVSI_INSTANCE_NAME: bp-zvsi-ci-${{ github.run_id }}
ZVSI_ZONE_NAME: ca-tor-1
ZVSI_PROFILE_NAME: bz2-4x16
strategy:
fail-fast: false
steps:
- uses: actions/checkout@v4
- name: install ibmcli and setup ibm login
run: |
curl -fsSL https://clis.cloud.ibm.com/install/linux | sh
ibmcloud login -q --apikey ${{ secrets.IBMCLOUD_API_KEY }} -r ca-tor
ibmcloud plugin install vpc-infrastructure
- name: Creation of ZVSI
id: ZVSI
run: |
#creation of zvsi
ibmcloud is instance-create $ZVSI_INSTANCE_NAME ${{ secrets.ZVSI_VPC }} $ZVSI_ZONE_NAME $ZVSI_PROFILE_NAME ${{ secrets.ZVSI_SUBNET }} --image ${{ secrets.ZVSI_IMAGE }} --keys ${{ secrets.ZVSI_KEY }} --resource-group-id ${{ secrets.ZVSI_RG_ID }} --primary-network-interface "{\"name\":\"eth0\",\"allow_ip_spoofing\":false,\"subnet\": {\"name\":\"${{ secrets.ZVSI_SUBNET }}\"},\"security_groups\":[{\"id\":\"${{ secrets.ZVSI_SG }}\"}]}"
#Reserving a floating ip to the ZVSI
ibmcloud is floating-ip-reserve $ZVSI_FP_NAME --zone $ZVSI_ZONE_NAME --resource-group-id ${{ secrets.ZVSI_RG_ID }} --in $ZVSI_INSTANCE_NAME
#Bouding the Floating ip to the ZVSI
ibmcloud is floating-ip-update $ZVSI_FP_NAME --nic eth0 --in $ZVSI_INSTANCE_NAME
sleep 60
#Saving the Floating IP to login ZVSI
ZVSI_HOST=$(ibmcloud is floating-ip $ZVSI_FP_NAME | awk '/Address/{print $2}')
echo $ZVSI_HOST
echo "IP=${ZVSI_HOST}" >> $GITHUB_OUTPUT
- name: Status of ZVSI
run: |
check=$(ibmcloud is ins| awk '/'$ZVSI_INSTANCE_NAME'/{print $3}')
while [[ $check != "running" ]]
do
check=$(ibmcloud is ins | awk '/'$ZVSI_INSTANCE_NAME'/{print $3}')
if [[ $check == 'failed' ]]
then
echo "Failed to run the ZVSI"
break
fi
done
- name: Install dependencies and run all tests on s390x ZVSI
uses: appleboy/ssh-action@v1.2.2
env:
GH_REPOSITORY: ${{ github.server_url }}/${{ github.repository }}
GH_REF: ${{ github.ref }}
with:
host: ${{ steps.ZVSI.outputs.IP }}
username: ${{ secrets.ZVSI_SSH_USER }}
key: ${{ secrets.ZVSI_PR_KEY }}
envs: GH_REPOSITORY,GH_REF
command_timeout: 100m
script: |
apt-get update -y
apt-get install -y wget curl git make gcc jq docker.io
wget https://go.dev/dl/go1.24.6.linux-s390x.tar.gz
rm -rf /usr/local/go && tar -C /usr/local -xzf go1.24.6.linux-s390x.tar.gz
export PATH=$PATH:/usr/local/go/bin
git clone ${GH_REPOSITORY} lifecycle
cd lifecycle && git checkout ${GH_REF}
go env
export PATH=$PATH:~/go/bin
make format || true
make test
- name: Cleanup ZVSI
if: ${{ steps.ZVSI.conclusion == 'success' && always() }}
run: |
#Delete the created ZVSI
ibmcloud is instance-delete $ZVSI_INSTANCE_NAME --force
sleep 20
#Release the created FP
ibmcloud is floating-ip-release $ZVSI_FP_NAME --force

10
.gitignore vendored
View File

@ -3,15 +3,7 @@
*.coverprofile *.coverprofile
*.test *.test
*~ *~
.tool-versions
/out /out
.vscode
acceptance/testdata/*/**/container/cnb/lifecycle/* acceptance/testdata/*/**/container/cnb/lifecycle/*
acceptance/testdata/*/**/container/docker-config/* acceptance/testdata/*/**/container/docker-config/config.json
acceptance/testdata/exporter/container/cnb/run.toml
acceptance/testdata/exporter/container/layers/*analyzed.toml
acceptance/testdata/exporter/container/other_layers/*analyzed.toml
acceptance/testdata/restorer/container/layers/*analyzed.toml

View File

@ -1,19 +0,0 @@
tasks:
# allow socket to be writable by all
# (necessary for acceptance tests / calls from within container)
- init: chmod ugo+w /var/run/docker.sock
# build linux to install dependencies
- init: make tidy build-linux
github:
prebuilds:
master: true
branches: true
pullRequests: true
pullRequestsFromForks: true
addCheck: true
vscode:
extensions:
- golang.go
- ms-azuretools.vscode-docker

View File

@ -1,5 +0,0 @@
ignore:
- vulnerability: CVE-2015-5237 # false positive, see https://github.com/anchore/grype/issues/558
- vulnerability: CVE-2021-22570 # false positive, see https://github.com/anchore/grype/issues/558
- vulnerability: CVE-2024-41110 # non-impactful as we only use docker as a client
- vulnerability: GHSA-v23v-6jw2-98fq # non-impactful as we only use docker as a client

View File

@ -1,6 +0,0 @@
{
"go.testTimeout": "10m",
"go.testFlags": [
"-v"
]
}

View File

@ -1,42 +0,0 @@
## Policies
This repository adheres to the following project policies:
- [Code of Conduct][code-of-conduct] - How we should act with each other.
- [Contributing][contributing] - General contributing standards.
- [Security][security] - Reporting security concerns.
- [Support][support] - Getting support.
## Contributing to this repository
### Welcome
We welcome contributions to this repository! To get a sense of what the team is currently focusing on, check out our [milestones](https://github.com/buildpacks/lifecycle/milestones). Issues labeled [good first issue](https://github.com/buildpacks/lifecycle/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) and issues in our [docs repo](https://github.com/buildpacks/docs/issues?q=is%3Aissue+is%3Aopen+label%3Ateam%2Fimplementations) are great places to get started, but you are welcome to work on any issue that interests you. For issues requiring a greater degree of coordination, such as those labeled `status/needs-discussion` or that are part of larger epics, please reach out in the #implementation channel in [Slack](https://slack.buildpacks.io/).
### Development
Aside from the policies above, you may find [DEVELOPMENT.md](DEVELOPMENT.md) helpful in developing in this repository.
### Background
Here are some topics that might be helpful in further understanding the lifecycle:
* Cloud Native Buildpacks platform api spec
* Example platforms: [pack CLI](https://github.com/buildpack/pack), [Tekton](https://github.com/tektoncd/catalog/blob/master/task/buildpacks/0.1/README.md)
* Cloud Native Buildpacks buildpack api spec
* Example buildpack providers: [Google](https://github.com/GoogleCloudPlatform/buildpacks), [Heroku](https://www.heroku.com/), [Paketo](https://paketo.io/)
* The Open Container Initiative (OCI) and [OCI image spec](https://github.com/opencontainers/image-spec)
* Questions to deepen understanding:
* What are the different [lifecycle phases](https://buildpacks.io/docs/concepts/components/lifecycle/)? What is the purpose of each phase?
* What is a [builder](https://buildpacks.io/docs/concepts/components/builder/)? Is it required to run the lifecycle?
* What is the [untrusted builder workflow](https://medium.com/buildpacks/faster-more-secure-builds-with-pack-0-11-0-4d0c633ca619)? Why do we have this flow?
* What is the [launcher](https://github.com/buildpacks/spec/blob/main/platform.md#launch)? Why do we have a launcher?
* What does a [buildpack](https://buildpacks.io/docs/concepts/components/buildpack/) do? Where does it write data? How does it communicate with the lifecycle?
* What does a [platform](https://buildpacks.io/docs/concepts/components/platform/) do? What things does it know about that the lifecycle does not? How does it communicate with the lifecycle?
* What is a [stack](https://buildpacks.io/docs/concepts/components/stack/)? Who produces stacks? Why is the stack concept important for the lifecycle?
[code-of-conduct]: https://github.com/buildpacks/.github/blob/main/CODE_OF_CONDUCT.md
[contributing]: https://github.com/buildpacks/.github/blob/main/CONTRIBUTING.md
[security]: https://github.com/buildpacks/.github/blob/main/SECURITY.md
[support]: https://github.com/buildpacks/.github/blob/main/SUPPORT.md
[pull-request-process]: https://github.com/buildpacks/.github/blob/main/CONTRIBUTIONS.md#pull-request-process

View File

@ -1,105 +0,0 @@
# Development
## Prerequisites
* [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
* macOS: _(built-in)_
* Windows:
* `choco install git -y`
* `git config --global core.autocrlf false`
* [Go](https://golang.org/doc/install)
* macOS: `brew install go`
* Windows: `choco install golang -y`
* [Docker](https://www.docker.com/products/docker-desktop)
* [jq](https://stedolan.github.io/jq/) and [yj](https://github.com/sclevine/yj) utilities
* macOS: `brew install jq yj`
* Windows:
* `choco insall jq -y`
* `go get github.com/sclevine/yj`
* Make (and build tools)
* macOS: `xcode-select --install`
* Windows:
* `choco install cygwin make -y`
* `[Environment]::SetEnvironmentVariable("PATH", "C:\tools\cygwin\bin;$ENV:PATH", "MACHINE")`
### Caveats
* The acceptance tests require the docker daemon to be able to communicate with a local containerized insecure registry. On Docker Desktop 3.3.x, this may result in failures such as: `Expected nil: push response: : Get http://localhost:<port>/v2/: dial tcp [::1]:<port>: connect: connection refused`. To fix these failures, it may be necessary to add the following to the Docker Desktop Engine config:
* macOS: Docker > Preferences > Docker Engine:
```
"insecure-registries": [
"<my-host-ip>/32"
]
```
### Testing GitHub actions on forks
The lifecycle release process involves chaining a series of GitHub actions together such that:
* The "build" workflow creates the artifacts
* .tgz files containing the lifecycle binaries, shasums for the .tgz files, an SBOM, etc.
* OCI images containing the lifecycle binaries, tagged with their commit sha (for more information, see RELEASE.md)
* The "draft-release" workflow finds the artifacts and downloads them, creating the draft release
* The "post-release" workflow re-tags the OCI images that were created during the "build" workflow with the release version
It can be rather cumbersome to test changes to these workflows, as they are heavily intertwined. Thus we recommend forking the buildpacks/lifecycle repository in GitHub and running through the entire release process end-to-end.
For the fork, it is necessary to add the following secrets:
* DOCKER_PASSWORD (if not using ghcr.io)
* DOCKER_USERNAME (if not using ghcr.io)
The tools/test-fork.sh script can be used to update the source code to reflect the state of the fork.
It can be invoked like so: `./tools/test-fork.sh <registry repo name>`
## Tasks
To test, build, and package binaries into an archive, simply run:
```bash
$ make all
```
This will create archives at `out/lifecycle-<LIFECYCLE_VERSION>+linux.x86-64.tgz` and `out/lifecycle-<LIFECYCLE_VERSION>+windows.x86-64.tgz`.
`LIFECYCLE_VERSION` defaults to the value returned by `git describe --tags` if not on a release branch (for more information about the release process, see [RELEASE](RELEASE.md)). It can be changed by prepending `LIFECYCLE_VERSION=<some version>` to the
`make` command. For example:
```bash
$ LIFECYCLE_VERSION=1.2.3 make all
```
Steps can also be run individually as shown below.
### Test
Formats, vets, and tests the code.
```bash
$ make test
```
#### Mocks
We use mock generators like most golang projects to help with our testing. To make new mocks:
```bash
$ make generate
$ make format lint
```
This is because the mock generator will make a larger diff that the formatter will fix.
### Build
Builds binaries to `out/linux/lifecycle/` and `out/windows/lifecycle/`.
```bash
$ make build
```
> To clean the `out/` directory, run `make clean`.
### Package
Creates archives at `out/lifecycle-<LIFECYCLE_VERSION>+linux.x86-64.tgz` and `out/lifecycle-<LIFECYCLE_VERSION>+windows.x86-64.tgz`, using the contents of the
`out/linux/lifecycle/` directory, for the given (or default) `LIFECYCLE_VERSION`.
```bash
$ make package
```

View File

@ -1,44 +0,0 @@
# Quick reference
This image is maintained by the [Cloud Native Buildpacks project](https://buildpacks.io/). The maintainers can be contacted via the [Cloud Native Buildpacks Slack](https://slack.buildpacks.io/), or by opening an issue on the `buildpacks/lifecycle` [GitHub repo](https://github.com/buildpacks/lifecycle).
# Supported tags
Supported tags are semver-versioned manifest lists - e.g., `0.12.0` or `0.12.0-rc.1`, pointing to one of the following os/architectures:
* `linux/amd64`
* `linux/arm64`
# About this image
Images are built in [GitHub actions](https://github.com/buildpacks/lifecycle/actions) and signed with [`cosign`](https://github.com/sigstore/cosign). To verify:
* Run:
```
cosign version # must be at least 2.0.0
cosign verify \
--certificate-identity-regexp "https://github.com/buildpacks/lifecycle/.github/workflows/post-release.yml" \
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
buildpacksio/lifecycle:<tag>
```
A CycloneDX SBOM is "attached" to the image and signed with [`cosign`](https://github.com/sigstore/cosign). To verify:
* Run:
```
cosign version # must be at least 2.0.0
cosign verify \
--certificate-identity-regexp "https://github.com/buildpacks/lifecycle/.github/workflows/post-release.yml" \
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
-a tag=<tag> -attachment sbom \
buildpacksio/lifecycle:<tag>
cosign download sbom buildpacksio/lifecycle:<tag>
```
# Using this image
With [pack](https://github.com/buildpack/pack):
* `pack build <target> --lifecycle-image buildpacksio/lifecycle:<tag>`
With [tekton](https://github.com/tektoncd/catalog/tree/main/task/buildpacks-phases/0.2):
* Provide as param `LIFECYCLE_IMAGE` in taskrun
***
[Source](https://github.com/buildpacks/lifecycle/blob/main/IMAGE.md) for this page

291
Makefile
View File

@ -17,8 +17,8 @@ else
LIFECYCLE_IMAGE_TAG?=$(LIFECYCLE_VERSION) LIFECYCLE_IMAGE_TAG?=$(LIFECYCLE_VERSION)
endif endif
ACCEPTANCE_TIMEOUT?=2400s
GOCMD?=go GOCMD?=go
GOARCH?=amd64
GOENV=GOARCH=$(GOARCH) CGO_ENABLED=0 GOENV=GOARCH=$(GOARCH) CGO_ENABLED=0
LIFECYCLE_DESCRIPTOR_PATH?=lifecycle.toml LIFECYCLE_DESCRIPTOR_PATH?=lifecycle.toml
SCM_REPO?=github.com/buildpacks/lifecycle SCM_REPO?=github.com/buildpacks/lifecycle
@ -30,6 +30,8 @@ LDFLAGS+=-X 'github.com/buildpacks/lifecycle/cmd.Version=$(LIFECYCLE_VERSION)'
GOBUILD:=go build $(GOFLAGS) -ldflags "$(LDFLAGS)" GOBUILD:=go build $(GOFLAGS) -ldflags "$(LDFLAGS)"
GOTEST=$(GOCMD) test $(GOFLAGS) GOTEST=$(GOCMD) test $(GOFLAGS)
BUILD_DIR?=$(PWD)$/out BUILD_DIR?=$(PWD)$/out
LINUX_COMPILATION_IMAGE?=golang:1.15-alpine
WINDOWS_COMPILATION_IMAGE?=golang:1.15-windowsservercore-1809
SOURCE_COMPILATION_IMAGE?=lifecycle-img SOURCE_COMPILATION_IMAGE?=lifecycle-img
BUILD_CTR?=lifecycle-ctr BUILD_CTR?=lifecycle-ctr
DOCKER_CMD?=make test DOCKER_CMD?=make test
@ -38,131 +40,155 @@ GOFILES := $(shell $(GOCMD) run tools$/lister$/main.go)
all: test build package all: test build package
GOOS_ARCHS = linux/amd64 linux/arm64 linux/ppc64le linux/s390x darwin/amd64 darwin/arm64 build: build-linux build-windows
build: build-linux-amd64 build-linux-arm64 build-linux-ppc64le build-linux-s390x build-linux: build-linux-lifecycle build-linux-symlinks build-linux-launcher
build-windows: build-windows-lifecycle build-windows-symlinks build-windows-launcher
build-image-linux-amd64: build-linux-amd64 package-linux-amd64 build-image-linux: build-linux package-linux
build-image-linux-amd64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.x86-64.tgz build-image-linux: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.x86-64.tgz
build-image-linux-amd64: build-image-linux:
$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch amd64 -tag lifecycle:$(LIFECYCLE_IMAGE_TAG) $(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
build-image-linux-arm64: build-linux-arm64 package-linux-arm64 build-image-windows: build-windows package-windows
build-image-linux-arm64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.arm64.tgz build-image-windows: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+windows.x86-64.tgz
build-image-linux-arm64: build-image-windows:
$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch arm64 -tag lifecycle:$(LIFECYCLE_IMAGE_TAG) $(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os windows -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
build-image-linux-ppc64le: build-linux-ppc64le package-linux-ppc64le build-linux-lifecycle: $(BUILD_DIR)/linux/lifecycle/lifecycle
build-image-linux-ppc64le: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.ppc64le.tgz
build-image-linux-ppc64le:
$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch ppc64le -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
build-image-linux-s390x: build-linux-s390x package-linux-s390x docker-compilation-image-linux:
build-image-linux-s390x: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.s390x.tgz docker build ./tools --build-arg from_image=$(LINUX_COMPILATION_IMAGE) --tag $(SOURCE_COMPILATION_IMAGE)
build-image-linux-s390x:
$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch s390x -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
define build_targets
build-$(1)-$(2): build-$(1)-$(2)-lifecycle build-$(1)-$(2)-symlinks build-$(1)-$(2)-launcher
build-$(1)-$(2)-lifecycle: $(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle $(BUILD_DIR)/linux/lifecycle/lifecycle: export GOOS:=linux
$(BUILD_DIR)/linux/lifecycle/lifecycle: OUT_DIR:=$(BUILD_DIR)/$(GOOS)/lifecycle
$(BUILD_DIR)/linux/lifecycle/lifecycle: GOENV:=GOARCH=$(GOARCH) CGO_ENABLED=1
$(BUILD_DIR)/linux/lifecycle/lifecycle: docker-compilation-image-linux
$(BUILD_DIR)/linux/lifecycle/lifecycle: $(GOFILES)
$(BUILD_DIR)/linux/lifecycle/lifecycle:
@echo "> Building lifecycle/lifecycle for linux..."
mkdir -p $(OUT_DIR)
docker run \
--workdir=/lifecycle \
--volume $(OUT_DIR):/out \
--volume $(PWD):/lifecycle \
--volume gocache:/go \
$(SOURCE_COMPILATION_IMAGE) \
sh -c '$(GOENV) $(GOBUILD) -o /out/lifecycle -a ./cmd/lifecycle'
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: export GOOS:=$(1) build-linux-launcher: $(BUILD_DIR)/linux/lifecycle/launcher
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: export GOARCH:=$(2)
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: OUT_DIR?=$$(BUILD_DIR)/$$(GOOS)-$$(GOARCH)/lifecycle
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: $$(GOFILES)
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle:
@echo "> Building lifecycle/lifecycle for $$(GOOS)/$$(GOARCH)..."
mkdir -p $$(OUT_DIR)
$$(GOENV) $$(GOBUILD) -o $$(OUT_DIR)/lifecycle -a ./cmd/lifecycle
build-$(1)-$(2)-symlinks: export GOOS:=$(1) $(BUILD_DIR)/linux/lifecycle/launcher: export GOOS:=linux
build-$(1)-$(2)-symlinks: export GOARCH:=$(2) $(BUILD_DIR)/linux/lifecycle/launcher: OUT_DIR?=$(BUILD_DIR)/$(GOOS)/lifecycle
build-$(1)-$(2)-symlinks: OUT_DIR?=$$(BUILD_DIR)/$$(GOOS)-$$(GOARCH)/lifecycle $(BUILD_DIR)/linux/lifecycle/launcher: $(GOFILES)
build-$(1)-$(2)-symlinks: $(BUILD_DIR)/linux/lifecycle/launcher:
@echo "> Creating phase symlinks for $$(GOOS)/$$(GOARCH)..." @echo "> Building lifecycle/launcher for linux..."
ln -sf lifecycle $$(OUT_DIR)/detector mkdir -p $(OUT_DIR)
ln -sf lifecycle $$(OUT_DIR)/analyzer $(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher
ln -sf lifecycle $$(OUT_DIR)/restorer test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 3
ln -sf lifecycle $$(OUT_DIR)/builder
ln -sf lifecycle $$(OUT_DIR)/exporter
ln -sf lifecycle $$(OUT_DIR)/rebaser
ln -sf lifecycle $$(OUT_DIR)/creator
ln -sf lifecycle $$(OUT_DIR)/extender
build-$(1)-$(2)-launcher: $$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher build-linux-symlinks: export GOOS:=linux
build-linux-symlinks: OUT_DIR?=$(BUILD_DIR)/$(GOOS)/lifecycle
build-linux-symlinks:
@echo "> Creating phase symlinks for linux..."
ln -sf lifecycle $(OUT_DIR)/detector
ln -sf lifecycle $(OUT_DIR)/analyzer
ln -sf lifecycle $(OUT_DIR)/restorer
ln -sf lifecycle $(OUT_DIR)/builder
ln -sf lifecycle $(OUT_DIR)/exporter
ln -sf lifecycle $(OUT_DIR)/rebaser
ln -sf lifecycle $(OUT_DIR)/creator
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: export GOOS:=$(1) build-windows-lifecycle: $(BUILD_DIR)/windows/lifecycle/lifecycle.exe
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: export GOARCH:=$(2)
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: OUT_DIR?=$$(BUILD_DIR)/$$(GOOS)-$$(GOARCH)/lifecycle
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: $$(GOFILES)
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher:
@echo "> Building lifecycle/launcher for $$(GOOS)/$$(GOARCH)..."
mkdir -p $$(OUT_DIR)
$$(GOENV) $$(GOBUILD) -o $$(OUT_DIR)/launcher -a ./cmd/launcher
test $$$$(du -m $$(OUT_DIR)/launcher|cut -f 1) -le 3
endef
$(foreach ga,$(GOOS_ARCHS),$(eval $(call build_targets,$(word 1, $(subst /, ,$(ga))),$(word 2, $(subst /, ,$(ga)))))) $(BUILD_DIR)/windows/lifecycle/lifecycle.exe: export GOOS:=windows
$(BUILD_DIR)/windows/lifecycle/lifecycle.exe: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)$/lifecycle
$(BUILD_DIR)/windows/lifecycle/lifecycle.exe: $(GOFILES)
$(BUILD_DIR)/windows/lifecycle/lifecycle.exe:
@echo "> Building lifecycle/lifecycle for Windows..."
$(GOBUILD) -o $(OUT_DIR)$/lifecycle.exe -a .$/cmd$/lifecycle
generate-sbom: run-syft-linux-amd64 run-syft-linux-arm64 run-syft-linux-ppc64le run-syft-linux-s390x build-windows-launcher: $(BUILD_DIR)/windows/lifecycle/launcher.exe
run-syft-linux-amd64: install-syft $(BUILD_DIR)/windows/lifecycle/launcher.exe: export GOOS:=windows
run-syft-linux-amd64: export GOOS:=linux $(BUILD_DIR)/windows/lifecycle/launcher.exe: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)$/lifecycle
run-syft-linux-amd64: export GOARCH:=amd64 $(BUILD_DIR)/windows/lifecycle/launcher.exe: $(GOFILES)
run-syft-linux-amd64: $(BUILD_DIR)/windows/lifecycle/launcher.exe:
@echo "> Running syft..." @echo "> Building lifecycle/launcher for Windows..."
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.cdx.json $(GOBUILD) -o $(OUT_DIR)$/launcher.exe -a .$/cmd$/launcher
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.cdx.json
run-syft-linux-arm64: install-syft build-windows-symlinks: export GOOS:=windows
run-syft-linux-arm64: export GOOS:=linux build-windows-symlinks: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)$/lifecycle
run-syft-linux-arm64: export GOARCH:=arm64 build-windows-symlinks:
run-syft-linux-arm64: @echo "> Creating phase symlinks for Windows..."
@echo "> Running syft..." ifeq ($(OS),Windows_NT)
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.cdx.json call del $(OUT_DIR)$/detector.exe
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.cdx.json call del $(OUT_DIR)$/analyzer.exe
call del $(OUT_DIR)$/restorer.exe
call del $(OUT_DIR)$/builder.exe
call del $(OUT_DIR)$/exporter.exe
call del $(OUT_DIR)$/rebaser.exe
call del $(OUT_DIR)$/creator.exe
call mklink $(OUT_DIR)$/detector.exe lifecycle.exe
call mklink $(OUT_DIR)$/analyzer.exe lifecycle.exe
call mklink $(OUT_DIR)$/restorer.exe lifecycle.exe
call mklink $(OUT_DIR)$/builder.exe lifecycle.exe
call mklink $(OUT_DIR)$/exporter.exe lifecycle.exe
call mklink $(OUT_DIR)$/rebaser.exe lifecycle.exe
call mklink $(OUT_DIR)$/creator.exe lifecycle.exe
else
ln -sf lifecycle.exe $(OUT_DIR)$/detector.exe
ln -sf lifecycle.exe $(OUT_DIR)$/analyzer.exe
ln -sf lifecycle.exe $(OUT_DIR)$/restorer.exe
ln -sf lifecycle.exe $(OUT_DIR)$/builder.exe
ln -sf lifecycle.exe $(OUT_DIR)$/exporter.exe
ln -sf lifecycle.exe $(OUT_DIR)$/rebaser.exe
ln -sf lifecycle.exe $(OUT_DIR)$/creator.exe
endif
run-syft-linux-ppc64le: install-syft build-darwin: build-darwin-lifecycle build-darwin-launcher
run-syft-linux-ppc64le: export GOOS:=linux
run-syft-linux-ppc64le: export GOARCH:=ppc64le
run-syft-linux-ppc64le:
@echo "> Running syft..."
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.cdx.json
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.cdx.json
run-syft-linux-s390x: install-syft build-darwin-lifecycle: $(BUILD_DIR)/darwin/lifecycle/lifecycle
run-syft-linux-s390x: export GOOS:=linux $(BUILD_DIR)/darwin/lifecycle/lifecycle: export GOOS:=darwin
run-syft-linux-s390x: export GOARCH:=s390x $(BUILD_DIR)/darwin/lifecycle/lifecycle: OUT_DIR:=$(BUILD_DIR)/$(GOOS)/lifecycle
run-syft-linux-s390x: $(BUILD_DIR)/darwin/lifecycle/lifecycle: $(GOFILES)
@echo "> Running syft..." $(BUILD_DIR)/darwin/lifecycle/lifecycle:
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.cdx.json @echo "> Building lifecycle for macos..."
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.cdx.json $(GOENV) $(GOBUILD) -o $(OUT_DIR)/lifecycle -a ./cmd/lifecycle
@echo "> Creating lifecycle symlinks for macos..."
ln -sf lifecycle $(OUT_DIR)/detector
ln -sf lifecycle $(OUT_DIR)/analyzer
ln -sf lifecycle $(OUT_DIR)/restorer
ln -sf lifecycle $(OUT_DIR)/builder
ln -sf lifecycle $(OUT_DIR)/exporter
ln -sf lifecycle $(OUT_DIR)/rebaser
install-syft: build-darwin-launcher: $(BUILD_DIR)/darwin/lifecycle/launcher
@echo "> Installing syft..." $(BUILD_DIR)/darwin/lifecycle/launcher: export GOOS:=darwin
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin $(BUILD_DIR)/darwin/lifecycle/launcher: OUT_DIR:=$(BUILD_DIR)/$(GOOS)/lifecycle
$(BUILD_DIR)/darwin/lifecycle/launcher: $(GOFILES)
define install-go-tool $(BUILD_DIR)/darwin/lifecycle/launcher:
@echo "> Installing $(1)..." @echo "> Building launcher for macos..."
$(GOCMD) install $(1)@$(shell $(GOCMD) list -m -f '{{.Version}}' $(2)) mkdir -p $(OUT_DIR)
endef $(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher
test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 4
install-goimports: install-goimports:
@echo "> Installing goimports..." @echo "> Installing goimports..."
$(call install-go-tool,golang.org/x/tools/cmd/goimports,golang.org/x/tools) cd tools && $(GOCMD) install golang.org/x/tools/cmd/goimports
install-yj: install-yj:
@echo "> Installing yj..." @echo "> Installing yj..."
$(call install-go-tool,github.com/sclevine/yj,github.com/sclevine/yj) cd tools && $(GOCMD) install github.com/sclevine/yj
install-mockgen: install-mockgen:
@echo "> Installing mockgen..." @echo "> Installing mockgen..."
$(call install-go-tool,github.com/golang/mock/mockgen,github.com/golang/mock) cd tools && $(GOCMD) install github.com/golang/mock/mockgen
install-golangci-lint: install-golangci-lint:
@echo "> Installing golangci-lint..." @echo "> Installing golangci-lint..."
$(call install-go-tool,github.com/golangci/golangci-lint/v2/cmd/golangci-lint,github.com/golangci/golangci-lint/v2) cd tools && $(GOCMD) install github.com/golangci/golangci-lint/cmd/golangci-lint
lint: install-golangci-lint lint: install-golangci-lint
@echo "> Linting code..." @echo "> Linting code..."
@ -180,65 +206,52 @@ format: install-goimports
tidy: tidy:
@echo "> Tidying modules..." @echo "> Tidying modules..."
$(GOCMD) mod tidy $(GOCMD) mod tidy
cd tools && $(GOCMD) mod tidy
test: unit acceptance test: unit acceptance
# append coverage arguments
ifeq ($(TEST_COVERAGE), 1)
unit: GOTESTFLAGS:=$(GOTESTFLAGS) -coverprofile=./out/tests/coverage-unit.txt -covermode=atomic
endif
unit: out
unit: UNIT_PACKAGES=$(shell $(GOCMD) list ./... | grep -v acceptance) unit: UNIT_PACKAGES=$(shell $(GOCMD) list ./... | grep -v acceptance)
unit: format lint tidy install-yj unit: format lint tidy install-yj
@echo "> Running unit tests..." @echo "> Running unit tests..."
$(GOTEST) $(GOTESTFLAGS) -v -count=1 $(UNIT_PACKAGES) $(GOTEST) -v -count=1 $(UNIT_PACKAGES)
out: acceptance: format lint tidy
@mkdir out || (exit 0)
mkdir out$/tests || (exit 0)
acceptance: format tidy
@echo "> Running acceptance tests..." @echo "> Running acceptance tests..."
$(GOTEST) -v -count=1 -tags=acceptance -timeout=$(ACCEPTANCE_TIMEOUT) ./acceptance/... $(GOTEST) -v -count=1 -tags=acceptance ./acceptance/...
clean: clean:
@echo "> Cleaning workspace..." @echo "> Cleaning workspace..."
rm -rf $(BUILD_DIR) rm -rf $(BUILD_DIR)
package: generate-sbom package-linux-amd64 package-linux-arm64 package-linux-ppc64le package-linux-s390x package: package-linux package-windows
package-linux-amd64: GOOS:=linux package-linux: GOOS:=linux
package-linux-amd64: GOARCH:=amd64 package-linux: INPUT_DIR:=$(BUILD_DIR)/$(GOOS)/lifecycle
package-linux-amd64: INPUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle package-linux: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).x86-64.tgz
package-linux-amd64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).x86-64.tgz package-linux: PACKAGER=./tools/packager/main.go
package-linux-amd64: PACKAGER=./tools/packager/main.go package-linux:
package-linux-amd64: @echo "> Packaging lifecycle for $(GOOS)..."
@echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..."
$(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION) $(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION)
package-linux-arm64: GOOS:=linux package-windows: GOOS:=windows
package-linux-arm64: GOARCH:=arm64 package-windows: INPUT_DIR:=$(BUILD_DIR)$/$(GOOS)$/lifecycle
package-linux-arm64: INPUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle package-windows: ARCHIVE_PATH=$(BUILD_DIR)$/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).x86-64.tgz
package-linux-arm64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).arm64.tgz package-windows: PACKAGER=.$/tools$/packager$/main.go
package-linux-arm64: PACKAGER=./tools/packager/main.go package-windows:
package-linux-arm64: @echo "> Packaging lifecycle for $(GOOS)..."
@echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..."
$(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION) $(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION)
package-linux-ppc64le: GOOS:=linux # Ensure workdir is clean and build image from .git
package-linux-ppc64le: GOARCH:=ppc64le docker-build-source-image-windows: $(GOFILES)
package-linux-ppc64le: INPUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle docker-build-source-image-windows:
package-linux-ppc64le: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).ppc64le.tgz $(if $(shell git status --short), @echo Uncommitted changes. Refusing to run. && exit 1)
package-linux-ppc64le: PACKAGER=./tools/packager/main.go docker build -f tools/Dockerfile.windows --tag $(SOURCE_COMPILATION_IMAGE) --build-arg image_tag=$(WINDOWS_COMPILATION_IMAGE) --cache-from=$(SOURCE_COMPILATION_IMAGE) --isolation=process --quiet .git
package-linux-ppc64le:
@echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..." docker-run-windows: docker-build-source-image-windows
$(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION) docker-run-windows:
@echo "> Running '$(DOCKER_CMD)' in docker windows..."
@docker volume rm -f lifecycle-out
docker run -v lifecycle-out:c:/lifecycle/out -e LIFECYCLE_VERSION -e PLATFORM_API -e BUILDPACK_API -v gopathcache:c:/gopath -v '\\.\pipe\docker_engine:\\.\pipe\docker_engine' --isolation=process --interactive --tty --rm $(SOURCE_COMPILATION_IMAGE) $(DOCKER_CMD)
docker run -v lifecycle-out:c:/lifecycle/out --rm $(SOURCE_COMPILATION_IMAGE) tar -cf- out | tar -xf-
@docker volume rm -f lifecycle-out
package-linux-s390x: GOOS:=linux
package-linux-s390x: GOARCH:=s390x
package-linux-s390x: INPUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
package-linux-s390x: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).s390x.tgz
package-linux-s390x: PACKAGER=./tools/packager/main.go
package-linux-s390x:
@echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..."
$(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION)

View File

@ -2,58 +2,40 @@
[![Build Status](https://github.com/buildpacks/lifecycle/workflows/build/badge.svg)](https://github.com/buildpacks/lifecycle/actions) [![Build Status](https://github.com/buildpacks/lifecycle/workflows/build/badge.svg)](https://github.com/buildpacks/lifecycle/actions)
[![GoDoc](https://godoc.org/github.com/buildpacks/lifecycle?status.svg)](https://godoc.org/github.com/buildpacks/lifecycle) [![GoDoc](https://godoc.org/github.com/buildpacks/lifecycle?status.svg)](https://godoc.org/github.com/buildpacks/lifecycle)
[![codecov](https://codecov.io/gh/buildpacks/lifecycle/branch/main/graph/badge.svg)](https://codecov.io/gh/buildpacks/lifecycle/tree/main)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4748/badge)](https://bestpractices.coreinfrastructure.org/projects/4748)
[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/buildpacks/lifecycle)
A reference implementation of the [Cloud Native Buildpacks specification](https://github.com/buildpacks/spec). A reference implementation of the [Cloud Native Buildpacks specification](https://github.com/buildpacks/spec).
## Supported APIs ## Supported APIs
| Lifecycle Version | Platform APIs | Buildpack APIs | Lifecycle Version | Platform APIs | Buildpack APIs |
|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------| ------------------|--------------------------------------------------------|----------------|
| 0.20.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12], [0.13][p/0.13], [0.14][p/0.14] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10], [0.11][b/0.11] | 0.11.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6]
| 0.19.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12], [0.13][p/0.13] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10], [0.11][b/0.11] | 0.10.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5]
| 0.18.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10] | 0.9.x | [0.3][p/0.3], [0.4][p/0.4] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4]
| 0.17.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10] | 0.8.x | [0.3][p/0.3] | [0.2][b/0.2]
| 0.16.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9] | 0.7.x | [0.2][p/0.2] | [0.2][b/0.2]
| 0.15.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9] | 0.6.x | [0.2][p/0.2] | [0.2][b/0.2]
| 0.14.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8] |
| 0.13.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7] |
[b/0.2]: https://github.com/buildpacks/spec/blob/buildpack/v0.2/buildpack.md [b/0.2]: https://github.com/buildpacks/spec/blob/buildpack/v0.2/buildpack.md
[b/0.3]: https://github.com/buildpacks/spec/tree/buildpack/v0.3/buildpack.md [b/0.3]: https://github.com/buildpacks/spec/tree/buildpack/v0.3/buildpack.md
[b/0.4]: https://github.com/buildpacks/spec/tree/buildpack/v0.4/buildpack.md [b/0.4]: https://github.com/buildpacks/spec/tree/buildpack/v0.4/buildpack.md
[b/0.5]: https://github.com/buildpacks/spec/tree/buildpack/v0.5/buildpack.md [b/0.5]: https://github.com/buildpacks/spec/tree/buildpack/v0.5/buildpack.md
[b/0.6]: https://github.com/buildpacks/spec/tree/buildpack/v0.6/buildpack.md [b/0.6]: https://github.com/buildpacks/spec/tree/buildpack/v0.6/buildpack.md
[b/0.7]: https://github.com/buildpacks/spec/tree/buildpack/v0.7/buildpack.md
[b/0.8]: https://github.com/buildpacks/spec/tree/buildpack/v0.8/buildpack.md
[b/0.9]: https://github.com/buildpacks/spec/tree/buildpack/v0.9/buildpack.md
[b/0.10]: https://github.com/buildpacks/spec/tree/buildpack/v0.10/buildpack.md
[b/0.11]: https://github.com/buildpacks/spec/tree/buildpack/v0.11/buildpack.md
[p/0.2]: https://github.com/buildpacks/spec/blob/platform/v0.2/platform.md [p/0.2]: https://github.com/buildpacks/spec/blob/platform/v0.2/platform.md
[p/0.3]: https://github.com/buildpacks/spec/blob/platform/v0.3/platform.md [p/0.3]: https://github.com/buildpacks/spec/blob/platform/v0.3/platform.md
[p/0.4]: https://github.com/buildpacks/spec/blob/platform/v0.4/platform.md [p/0.4]: https://github.com/buildpacks/spec/blob/platform/v0.4/platform.md
[p/0.5]: https://github.com/buildpacks/spec/blob/platform/v0.5/platform.md [p/0.5]: https://github.com/buildpacks/spec/blob/platform/v0.5/platform.md
[p/0.6]: https://github.com/buildpacks/spec/blob/platform/v0.6/platform.md [p/0.6]: https://github.com/buildpacks/spec/blob/platform/v0.6/platform.md
[p/0.7]: https://github.com/buildpacks/spec/blob/platform/v0.7/platform.md
[p/0.8]: https://github.com/buildpacks/spec/blob/platform/v0.8/platform.md
[p/0.9]: https://github.com/buildpacks/spec/blob/platform/v0.9/platform.md
[p/0.10]: https://github.com/buildpacks/spec/blob/platform/v0.10/platform.md
[p/0.11]: https://github.com/buildpacks/spec/blob/platform/v0.11/platform.md
[p/0.12]: https://github.com/buildpacks/spec/blob/platform/v0.12/platform.md
[p/0.13]: https://github.com/buildpacks/spec/blob/platform/v0.13/platform.md
[p/0.14]: https://github.com/buildpacks/spec/blob/platform/v0.14/platform.md
\* denotes unreleased version \* denotes unreleased version
## Usage ## Commands
### Build ### Build
Either: Either:
* `analyzer` - Reads metadata from the previous image and ensures registry access.
* `detector` - Chooses buildpacks (via `/bin/detect`) and produces a build plan. * `detector` - Chooses buildpacks (via `/bin/detect`) and produces a build plan.
* `restorer` - Restores layer metadata from the previous image and from the cache, and restores cached layers. * `analyzer` - Restores layer metadata from the previous image and from the cache.
* `restorer` - Restores cached layers.
* `builder` - Executes buildpacks (via `/bin/build`). * `builder` - Executes buildpacks (via `/bin/build`).
* `exporter` - Creates an image and caches layers. * `exporter` - Creates an image and caches layers.
@ -68,7 +50,46 @@ Or:
* `rebaser` - Creates an image from a previous image with updated base layers. * `rebaser` - Creates an image from a previous image with updated base layers.
## Contributing ## Development
- [CONTRIBUTING](CONTRIBUTING.md) - Information on how to contribute and grow your understanding of the lifecycle. To test, build, and package binaries into an archive, simply run:
- [DEVELOPMENT](DEVELOPMENT.md) - Further detail to help you during the development process.
- [RELEASE](RELEASE.md) - Further details about our release process. ```bash
$ make all
```
This will create an archive at `out/lifecycle-<LIFECYCLE_VERSION>+linux.x86-64.tgz`.
`LIFECYCLE_VERSION` defaults to the value returned by `git describe --tags` if not on a release branch (for more information about the release process, see [RELEASE](RELEASE.md). It can be changed by prepending `LIFECYCLE_VERSION=<some version>` to the
`make` command. For example:
```bash
$ LIFECYCLE_VERSION=1.2.3 make all
```
Steps can also be run individually as shown below.
### Test
Formats, vets, and tests the code.
```bash
$ make test
```
### Build
Builds binaries to `out/linux/lifecycle/`.
```bash
$ make build
```
> To clean the `out/` directory, run `make clean`.
### Package
Creates an archive at `out/lifecycle-<LIFECYCLE_VERSION>+linux.x86-64.tgz`, using the contents of the
`out/linux/lifecycle/` directory, for the given (or default) `LIFECYCLE_VERSION`.
```bash
$ make package
```

View File

@ -1,73 +1,8 @@
# Release Finalization ## Release Finalization
## Types of releases To cut a release:
1. Create a release branch in the format `release/0.99.0`. New commits to this branch will trigger the `build` workflow and produce a lifecycle image: `buildpacksio/lifecycle:<commit sha>`.
#### New minor 1. If applicable, ensure the README is updated with the latest supported apis (example PR: https://github.com/buildpacks/lifecycle/pull/509/files)
* For newly supported Platform or Buildpack API versions, or breaking changes (e.g., API deprecations). 1. When ready to cut the release, manually trigger the `draft-release` workflow: Actions -> draft-release -> Run workflow -> Use workflow from branch: `release/0.99.0`. This will create a draft release on GitHub using the artifacts from the `build` workflow run for the latest commit on the release branch.
1. Edit the release notes as necessary and perform any manual validation of the artifacts
#### Pre-release aka release candidate 1. When ready to publish the release, edit the release page and click "Publish release". This will trigger the `post-release` workflow that will re-tag the lifecycle image from `buildpacksio/lifecycle:<commit sha>` to `buildpacksio/lifecycle:0.99.0`.
* Ideally we should ship a pre-release (waiting a few days for folks to try it out) before we ship a new minor.
* We typically don't ship pre-releases for patches or backports.
#### New patch
* For go version updates, CVE fixes / dependency bumps, bug fixes, etc.
* Review the latest commits on `main` to determine if any are unacceptable for a patch - if there are commits that should be excluded, branch off the latest tag for the current minor and cherry-pick commits over.
#### Backport
* New patch for an old minor. Typically, to help folks out who haven't yet upgraded from [unsupported APIs](https://github.com/buildpacks/rfcs/blob/main/text/0110-deprecate-apis.md).
* For go version updates, CVE fixes / dependency bumps, bug fixes, etc.
* Branch off the latest tag for the desired minor.
## Release Finalization Steps
### Step 1 - Prepare
Determine the type of release ([new minor](#new-minor), [pre-release](#pre-release-aka-release-candidate), [new patch](#new-patch), or [backport](#backport)) and prepare the branch accordingly.
**To prepare the release branch:**
1. Check open PRs for any dependabot updates that should be merged.
1. Create a release branch in the format `release/0.99.0-rc.1` (for pre-releases) or `release/0.99.0` (for final releases).
* New commits to this branch will trigger the `build` workflow and produce a lifecycle image: `buildpacksio/lifecycle:<commit sha>`.
1. If applicable, ensure the README is updated with the latest supported apis (example PR: https://github.com/buildpacks/lifecycle/pull/550).
* For final releases (not pre-releases), remove the pre-release note (`*`) for the latest apis.
**For final releases (not pre-releases):**
1. Ensure the relevant spec APIs have been released.
1. Ensure the `lifecycle/0.99.0` milestone on the [docs repo](https://github.com/buildpacks/docs/blob/main/RELEASE.md#lump-changes) is complete, such that every new feature in the lifecycle is fully explained in the `release/lifecycle/0.99` branch on the docs repo, and [migration guides](https://github.com/buildpacks/docs/tree/main/content/docs/reference/spec/migration) (if relevant) are included.
### Step 2 - Publish the Release
1. Manually trigger the `draft-release` workflow: Actions -> draft-release -> Run workflow -> Use workflow from branch: `release/<release version>`. This will create a draft release on GitHub using the artifacts from the `build` workflow run for the latest commit on the release branch.
1. Edit the release notes as necessary.
1. Perform any manual validation of the artifacts as necessary (usually none).
1. Edit the release page and click "Publish release".
* This will trigger the `post-release` workflow that will re-tag the lifecycle image from `buildpacksio/lifecycle:<commit sha>` to `buildpacksio/lifecycle:<release version>`.
* For final releases ONLY, this will also re-tag the lifecycle image from `buildpacksio/lifecycle:<commit sha>` to `buildpacksio/lifecycle:latest`.
### Step 3 - Follow-up
**For pre-releases:**
* Ask the relevant teams to try out the pre-released artifacts.
**For final releases:**
* Update the `main` branch to remove the pre-release note in [README.md](https://github.com/buildpacks/lifecycle/blob/main/README.md) and/or merge `release/0.99.0` into `main`.
* Ask the learning team to merge the `release/lifecycle/0.99` branch into `main` on the docs repo.
## Go version updates
Go version updates should be released as a [new minor](#new-minor) or [new patch](#new-patch) release.
### New Patch
If the go patch is in [actions/go-versions](https://github.com/actions/go-versions/pulls?q=is%3Apr+is%3Aclosed) then CI should pull it in automatically without any action needed.
We simply need to create the release branch and let the pipeline run.
### New Minor
We typically do this when the existing patch version exceeds 6 - e.g., `1.22.6`. This means we have about 6 months to upgrade before the current minor becomes unsupported due to the introduction of the new n+2 minor.
#### Steps
1. Update go.mod
1. Search for the old `major.minor`, there are a few files that need to be updated (example PR: https://github.com/buildpacks/lifecycle/pull/1405/files)
1. Update the linter to a version that supports the current `major.minor`
1. Fix any lint errors as necessary

View File

@ -1,7 +1,7 @@
package acceptance package acceptance
import ( import (
"fmt" "io/ioutil"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
@ -27,18 +27,17 @@ var (
func TestVersion(t *testing.T) { func TestVersion(t *testing.T) {
var err error var err error
buildDir, err = os.MkdirTemp("", "lifecycle-acceptance") buildDir, err = ioutil.TempDir("", "lifecycle-acceptance")
h.AssertNil(t, err) h.AssertNil(t, err)
defer func() { defer func() {
h.AssertNil(t, os.RemoveAll(buildDir)) h.AssertNil(t, os.RemoveAll(buildDir))
}() }()
outDir := filepath.Join(buildDir, fmt.Sprintf("%s-%s", runtime.GOOS, runtime.GOARCH), "lifecycle") outDir := filepath.Join(buildDir, runtime.GOOS, "lifecycle")
h.AssertNil(t, os.MkdirAll(outDir, 0755)) h.AssertNil(t, os.MkdirAll(outDir, 0755))
h.MakeAndCopyLifecycle(t, h.MakeAndCopyLifecycle(t,
runtime.GOOS, runtime.GOOS,
runtime.GOARCH,
outDir, outDir,
"LIFECYCLE_VERSION=some-version", "LIFECYCLE_VERSION=some-version",
"SCM_COMMIT="+expectedCommit, "SCM_COMMIT="+expectedCommit,
@ -141,7 +140,6 @@ func testVersion(t *testing.T, when spec.G, it spec.S) {
w(tc.description, func() { w(tc.description, func() {
it("only prints the version", func() { it("only prints the version", func() {
cmd := lifecycleCmd(tc.command, tc.args...) cmd := lifecycleCmd(tc.command, tc.args...)
cmd.Env = []string{fmt.Sprintf("CNB_PLATFORM_API=%s", api.Platform.Latest().String())}
output, err := cmd.CombinedOutput() output, err := cmd.CombinedOutput()
if err != nil { if err != nil {
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err) t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
@ -155,5 +153,5 @@ func testVersion(t *testing.T, when spec.G, it spec.S) {
} }
func lifecycleCmd(phase string, args ...string) *exec.Cmd { func lifecycleCmd(phase string, args ...string) *exec.Cmd {
return exec.Command(filepath.Join(buildDir, fmt.Sprintf("%s-%s", runtime.GOOS, runtime.GOARCH), "lifecycle", phase), args...) // #nosec G204 return exec.Command(filepath.Join(buildDir, runtime.GOOS, "lifecycle", phase), args...) // #nosec G204
} }

File diff suppressed because it is too large Load Diff

View File

@ -1,535 +0,0 @@
package acceptance
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"testing"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/buildpacks/lifecycle/api"
"github.com/buildpacks/lifecycle/platform/files"
h "github.com/buildpacks/lifecycle/testhelpers"
)
var (
builderDockerContext = filepath.Join("testdata", "builder")
builderBinaryDir = filepath.Join("testdata", "builder", "container", "cnb", "lifecycle")
builderImage = "lifecycle/acceptance/builder"
builderDaemonOS, builderDaemonArch string
)
func TestBuilder(t *testing.T) {
info, err := h.DockerCli(t).Info(context.TODO())
h.AssertNil(t, err)
// These variables are clones of the variables in analyzer_test.go.
// You can find the same variables there without `builder` prefix.
// These lines are added for supporting windows tests.
builderDaemonOS = info.OSType
builderDaemonArch = info.Architecture
if builderDaemonArch == "x86_64" {
builderDaemonArch = "amd64"
} else if builderDaemonArch == "aarch64" {
builderDaemonArch = "arm64"
}
h.MakeAndCopyLifecycle(t, builderDaemonOS, builderDaemonArch, builderBinaryDir)
h.DockerBuild(t,
builderImage,
builderDockerContext,
h.WithArgs("--build-arg", fmt.Sprintf("cnb_platform_api=%s", api.Platform.Latest())),
h.WithFlags(
"-f", filepath.Join(builderDockerContext, dockerfileName),
),
)
defer h.DockerImageRemove(t, builderImage)
spec.Run(t, "acceptance-builder", testBuilder, spec.Parallel(), spec.Report(report.Terminal{}))
}
func testBuilder(t *testing.T, when spec.G, it spec.S) {
var copyDir, containerName, cacheVolume string
it.Before(func() {
containerName = "test-container-" + h.RandString(10)
var err error
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
h.AssertNil(t, err)
})
it.After(func() {
if h.DockerContainerExists(t, containerName) {
h.Run(t, exec.Command("docker", "rm", containerName))
}
if h.DockerVolumeExists(t, cacheVolume) {
h.DockerVolumeRemove(t, cacheVolume)
}
os.RemoveAll(copyDir)
})
// .../cmd/lifecycle/builder.go#Args
when("called with arguments", func() {
it("errors", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
builderImage,
"some-arg",
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to parse arguments: received unexpected arguments"
h.AssertStringContains(t, string(output), expected)
})
})
// .../cmd/lifecycle/builder.go#Privileges
when("running as a root", func() {
it("errors", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--user",
"root",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
builderImage,
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to build: refusing to run as root"
h.AssertStringContains(t, string(output), expected)
})
})
when("correct and full group.toml and plan.toml", func() {
it("succeeds", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
ctrPath("/layers"),
builderImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
),
)
// check builder metadata.toml for success test
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
})
})
when("writing metadata.toml", func() {
it("writes and reads successfully", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
ctrPath("/layers"),
builderImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
),
)
// check builder metadata.toml for success test
contents, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
// prevent regression of inline table serialization
h.AssertStringDoesNotContain(t, contents, "processes =")
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
h.AssertEq(t, len(md.Processes), 1)
h.AssertEq(t, md.Processes[0].Type, "hello")
h.AssertEq(t, len(md.Processes[0].Command.Entries), 1)
h.AssertEq(t, md.Processes[0].Command.Entries[0], "echo world")
h.AssertEq(t, len(md.Processes[0].Args), 1)
h.AssertEq(t, md.Processes[0].Args[0], "arg1")
h.AssertEq(t, md.Processes[0].Direct, true)
h.AssertEq(t, md.Processes[0].WorkingDirectory, "")
h.AssertEq(t, md.Processes[0].Default, false)
})
when("the platform < 0.10", func() {
it("writes and reads successfully", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
ctrPath("/layers"),
builderImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API=0.9",
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
),
)
// check builder metadata.toml for success test
contents, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
// prevent regression of inline table serialization
h.AssertStringDoesNotContain(t, contents, "processes =")
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
h.AssertEq(t, len(md.Processes), 1)
h.AssertEq(t, md.Processes[0].Type, "hello")
h.AssertEq(t, len(md.Processes[0].Command.Entries), 1)
h.AssertEq(t, md.Processes[0].Command.Entries[0], "echo world")
h.AssertEq(t, len(md.Processes[0].Args), 1)
h.AssertEq(t, md.Processes[0].Args[0], "arg1")
h.AssertEq(t, md.Processes[0].Direct, true)
h.AssertEq(t, md.Processes[0].WorkingDirectory, "")
h.AssertEq(t, md.Processes[0].Default, false)
})
})
})
when("-group contains extensions", func() {
it("includes the provided extensions in <layers>/config/metadata.toml", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
ctrPath("/layers"),
builderImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/group_with_ext.toml",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
),
)
// check builder metadata.toml for success test
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
h.AssertStringContains(t, md.Extensions[0].API, "0.10")
h.AssertStringContains(t, md.Extensions[0].ID, "hello_world")
h.AssertStringContains(t, md.Extensions[0].Version, "0.0.1")
})
})
when("invalid input files", func() {
// .../cmd/lifecycle/builder.go#readData
when("group.toml", func() {
when("not found", func() {
it("errors", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
builderImage,
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to read group file: open /layers/group.toml: no such file or directory"
h.AssertStringContains(t, string(output), expected)
})
})
when("empty", func() {
it("succeeds", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
ctrPath("/layers"),
builderImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/empty_group.toml",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
),
)
// check builder metadata.toml for success test
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
h.AssertEq(t, len(md.Processes), 0)
})
})
when("invalid", func() {
it("errors", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/wrong_group.toml",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
builderImage,
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to read group file: toml: line 1: expected '.' or '=', but got 'a' instead"
h.AssertStringContains(t, string(output), expected)
})
})
// .../cmd/lifecycle/builder.go#Exec
when("invalid builpack api", func() {
it("errors", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/invalid_buildpack_api_group.toml",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
builderImage,
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "parse buildpack API '<nil>' for buildpack 'hello_world@0.0.1'"
h.AssertStringContains(t, string(output), expected)
})
})
})
// .../cmd/lifecycle/builder.go#readData
when("plan.toml", func() {
when("not found", func() {
it("errors", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
builderImage,
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to read plan file: open /layers/plan.toml: no such file or directory"
h.AssertStringContains(t, string(output), expected)
})
})
when("empty", func() {
it("succeeds", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
ctrPath("/layers"),
builderImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/empty_plan.toml",
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
),
)
// check builder metadata.toml for success test
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
})
})
when("invalid", func() {
it("errors", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/wrong_plan.toml",
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
builderImage,
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to read plan file: toml: line 1: expected '.' or '=', but got 'a' instead"
h.AssertStringContains(t, string(output), expected)
})
})
})
})
when("determining the location of input files", func() {
// .../cmd/lifecycle/builder.go#Args
when("group.toml path is not specified", func() {
it("will look for group.toml in the provided layers directory", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
ctrPath("/layers"),
builderImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_LAYERS_DIR=/layers/different_layer_dir_from_env",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan_buildpack_2.toml",
),
)
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers/different_layer_dir_from_env/config/metadata.toml"))
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world_2")
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.2")
})
})
// .../cmd/lifecycle/builder.go#Args
when("plan.toml path is not specified", func() {
it("will look for plan.toml in the provided layers directory", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
ctrPath("/layers"),
builderImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_LAYERS_DIR=/layers/different_layer_dir_from_env",
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group_buildpack2.toml",
),
)
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers/different_layer_dir_from_env/config/metadata.toml"))
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world_2")
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.2")
})
})
})
when("CNB_APP_DIR is set", func() {
it("sets the buildpacks' working directory to CNB_APP_DIR", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
"--env", "CNB_APP_DIR=/env_folders/different_cnb_app_dir_from_env",
builderImage,
)
output, err := command.CombinedOutput()
h.AssertNil(t, err)
expected := "CNB_APP_DIR: /env_folders/different_cnb_app_dir_from_env"
h.AssertStringContains(t, string(output), expected)
})
})
when("CNB_BUILDPACKS_DIR is set", func() {
it("uses buildpacks from CNB_BUILDPACKS_DIR", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
"--env", "CNB_BUILDPACKS_DIR=/env_folders/different_buildpack_dir_from_env",
builderImage,
)
output, err := command.CombinedOutput()
h.AssertNil(t, err)
expected := "CNB_BUILDPACK_DIR: /env_folders/different_buildpack_dir_from_env"
h.AssertStringContains(t, string(output), expected)
})
})
when("CNB_LAYERS_DIR is set", func() {
it("CNB_LAYERS_DIR is a parent of the buildpack layers dir", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
"--env", "CNB_LAYERS_DIR=/layers/different_layer_dir_from_env",
builderImage,
)
output, err := command.CombinedOutput()
h.AssertNil(t, err)
expected := "LAYERS_DIR: /layers/different_layer_dir_from_env/hello_world"
h.AssertStringContains(t, string(output), expected)
})
})
when("CNB_PLAN_PATH is set", func() {
it("provides the buildpack a filtered version of the plan found at CNB_PLAN_PATH", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/different_plan_from_env.toml",
builderImage,
)
output, err := command.CombinedOutput()
h.AssertNil(t, err)
expected := "name = \"different_plan_from_env.toml_reqires_subset_content\""
h.AssertStringContains(t, string(output), expected)
})
})
when("CNB_PLATFORM_DIR is set", func() {
it("CNB_PLATFORM_DIR is successfully transmitted to build script", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
"--env", "CNB_PLATFORM_DIR=/env_folders/different_platform_dir_from_env",
builderImage,
)
output, err := command.CombinedOutput()
h.AssertNil(t, err)
expected := "PLATFORM_DIR: /env_folders/different_platform_dir_from_env"
h.AssertStringContains(t, string(output), expected)
})
})
when("It runs", func() {
it("sets CNB_TARGET_* vars", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_LAYERS_DIR=/layers/03_layer",
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan_buildpack_3.toml",
builderImage,
)
output, err := command.CombinedOutput()
fmt.Println(string(output))
h.AssertNil(t, err)
h.AssertStringContains(t, string(output), "CNB_TARGET_ARCH: amd64")
h.AssertStringContains(t, string(output), "CNB_TARGET_ARCH_VARIANT: some-variant")
h.AssertStringContains(t, string(output), "CNB_TARGET_OS: linux")
h.AssertStringContains(t, string(output), "CNB_TARGET_DISTRO_NAME: ubuntu")
h.AssertStringContains(t, string(output), "CNB_TARGET_DISTRO_VERSION: some-cute-version")
})
})
}
// getBuilderMetadata reads the build metadata file at path and returns both its
// raw contents and the parsed files.BuildMetadata for the latest Platform API.
// It fails the test if the file is unreadable, empty, or cannot be parsed.
func getBuilderMetadata(t *testing.T, path string) (string, *files.BuildMetadata) {
	t.Helper()
	contents, err := os.ReadFile(path)
	h.AssertNil(t, err) // previously discarded — a read failure surfaced as a confusing length assertion
	h.AssertEq(t, len(contents) > 0, true)
	buildMD, err := files.Handler.ReadBuildMetadata(path, api.MustParse(latestPlatformAPI))
	h.AssertNil(t, err)
	return string(contents), buildMD
}

View File

@ -1,445 +0,0 @@
//go:build acceptance
package acceptance
import (
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/buildpacks/lifecycle/internal/path"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/buildpacks/lifecycle/api"
h "github.com/buildpacks/lifecycle/testhelpers"
)
// Shared state for the creator acceptance suite, populated once by TestCreator
// before the per-Platform-API spec suites run in parallel.
var (
	createImage          string // tag of the Docker image the creator is invoked in
	createRegAuthConfig  string // value for CNB_REGISTRY_AUTH against the test registry
	createRegNetwork     string // Docker network that can reach the test registry
	creatorPath          string // path to the creator binary inside the container
	createDaemonFixtures *daemonImageFixtures
	createRegFixtures    *regImageFixtures
	createTest           *PhaseTest
)
// TestCreator provisions the creator test image, daemon, and registry fixtures
// once, then runs the creator suite for every supported Platform API version
// in parallel against that shared environment.
func TestCreator(t *testing.T) {
	createTest = NewPhaseTest(t, "creator", filepath.Join("testdata", "creator"))
	createTest.Start(t)
	defer createTest.Stop(t)

	// Expose the phase-test handles through the package-level vars the suites read.
	createImage = createTest.testImageRef
	creatorPath = createTest.containerBinaryPath
	createRegAuthConfig = createTest.targetRegistry.authConfig
	createRegNetwork = createTest.targetRegistry.network
	createDaemonFixtures = createTest.targetDaemon.fixtures
	createRegFixtures = createTest.targetRegistry.fixtures

	for _, apiVersion := range api.Platform.Supported {
		suiteName := "acceptance-creator/" + apiVersion.String()
		spec.Run(t, suiteName, testCreatorFunc(apiVersion.String()), spec.Parallel(), spec.Report(report.Terminal{}))
	}
}
// testCreatorFunc returns a spec suite exercising the creator binary for one
// Platform API version: flag handling (-run, -order), export to a daemon and
// to a registry, SBOM restoration across cached/clear-cache rebuilds, and
// export to an OCI layout directory.
func testCreatorFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
	return func(t *testing.T, when spec.G, it spec.S) {
		var createdImageName string
		when("called with run", func() {
			it("uses the provided run.toml path", func() {
				h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept -run")
				cmd := exec.Command(
					"docker", "run", "--rm",
					"--env", "CNB_PLATFORM_API="+platformAPI,
					"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
					"--network", createRegNetwork,
					createImage,
					ctrPath(creatorPath),
					"-run", "/cnb/run.toml",
					createRegFixtures.SomeAppImage,
				) // #nosec G204
				output, err := cmd.CombinedOutput()
				h.AssertNotNil(t, err)
				// The fixture run.toml names no accessible run image, so resolution must fail.
				expected := "failed to resolve inputs: failed to find accessible run image"
				h.AssertStringContains(t, string(output), expected)
			})
		})
		when("detected order contains extensions", func() {
			it("errors", func() {
				h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.10"), "")
				cmd := exec.Command(
					"docker", "run", "--rm",
					"--env", "CNB_PLATFORM_API="+platformAPI,
					"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
					"--network", createRegNetwork,
					createImage,
					ctrPath(creatorPath),
					"-log-level", "debug",
					"-order", "/cnb/order-with-extensions.toml",
					"-run-image", createRegFixtures.ReadOnlyRunImage,
					createRegFixtures.SomeAppImage,
				) // #nosec G204
				output, err := cmd.CombinedOutput()
				h.AssertNotNil(t, err)
				expected := "detected order contains extensions which is not supported by the creator"
				h.AssertStringContains(t, string(output), expected)
			})
		})
		when("daemon case", func() {
			it.After(func() {
				h.DockerImageRemove(t, createdImageName)
			})
			it("creates app", func() {
				createFlags := []string{"-daemon"}
				createFlags = append(createFlags, []string{"-run-image", createRegFixtures.ReadOnlyRunImage}...)
				createArgs := append([]string{ctrPath(creatorPath)}, createFlags...)
				createdImageName = "some-created-image-" + h.RandString(10)
				createArgs = append(createArgs, createdImageName)
				output := h.DockerRun(t,
					createImage,
					h.WithFlags(append(
						dockerSocketMount,
						"--env", "CNB_PLATFORM_API="+platformAPI,
						"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
						"--network", createRegNetwork,
					)...),
					h.WithArgs(createArgs...),
				)
				h.AssertStringContains(t, output, "Saving "+createdImageName)
				assertImageOSAndArch(t, createdImageName, createTest)
				// Launch the created image to confirm buildpack-provided env vars survive export.
				output = h.DockerRun(t,
					createdImageName,
					h.WithFlags(
						"--entrypoint", "/cnb/lifecycle/launcher",
					),
					h.WithArgs("env"),
				)
				h.AssertStringContains(t, output, "SOME_VAR=some-val") // set by buildpack
			})
		})
		when("registry case", func() {
			it.After(func() {
				h.DockerImageRemove(t, createdImageName)
			})
			it("creates app", func() {
				var createFlags []string
				createFlags = append(createFlags, []string{"-run-image", createRegFixtures.ReadOnlyRunImage}...)
				createArgs := append([]string{ctrPath(creatorPath)}, createFlags...)
				createdImageName = createTest.RegRepoName("some-created-image-" + h.RandString(10))
				createArgs = append(createArgs, createdImageName)
				output := h.DockerRun(t,
					createImage,
					h.WithFlags(
						"--env", "CNB_PLATFORM_API="+platformAPI,
						"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
						"--network", createRegNetwork,
					),
					h.WithArgs(createArgs...),
				)
				h.AssertStringContains(t, output, "Saving "+createdImageName)
				// The image was pushed to the registry, not the daemon; pull before inspecting.
				h.Run(t, exec.Command("docker", "pull", createdImageName))
				assertImageOSAndArch(t, createdImageName, createTest)
				output = h.DockerRun(t,
					createdImageName,
					h.WithFlags(
						"--entrypoint", "/cnb/lifecycle/launcher",
					),
					h.WithArgs("env"),
				)
				h.AssertStringContains(t, output, "SOME_VAR=some-val") // set by buildpack
			})
		})
		when("multiple builds", func() {
			var (
				container1     string
				container2     string
				container3     string
				container4     string
				dirBuild1      string
				dirBuild2      string
				dirCache       string
				dirLaunchCache string
				dirRun1        string
				dirRun2        string
				imageName      string
			)
			it.Before(func() {
				// assign container names
				for _, cPtr := range []*string{&container1, &container2, &container3, &container4} {
					*cPtr = "test-container-" + h.RandString(10)
				}
				// create temp dirs
				for _, dirPtr := range []*string{&dirCache, &dirLaunchCache, &dirBuild1, &dirRun1, &dirBuild2, &dirRun2} {
					dir, err := os.MkdirTemp("", "creator-acceptance")
					h.AssertNil(t, err)
					h.AssertNil(t, os.Chmod(dir, 0777)) // Override umask
					// Resolve temp dir so it can be properly mounted by the Docker daemon.
					*dirPtr, err = filepath.EvalSymlinks(dir)
					h.AssertNil(t, err)
				}
				// assign image name
				imageName = "some-created-image-" + h.RandString(10)
			})
			it.After(func() {
				// remove containers if needed
				for _, container := range []string{container1, container2, container3, container4} {
					if h.DockerContainerExists(t, container) {
						h.Run(t, exec.Command("docker", "rm", container))
					}
				}
				// remove temp dirs
				for _, dir := range []string{dirCache, dirLaunchCache, dirBuild1, dirRun1, dirBuild2, dirRun2} {
					_ = os.RemoveAll(dir)
				}
				// remove image
				h.DockerImageRemove(t, imageName)
			})
			when("multiple builds", func() {
				var (
					createFlags          []string
					createArgs           []string
					duration1, duration2 time.Duration
				)
				it.Before(func() {
					h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.8"), "Platform API < 0.8 does not support standardized SBOM")
					createFlags = []string{"-daemon"}
					createFlags = append(createFlags, []string{
						"-run-image", createRegFixtures.ReadOnlyRunImage,
						"-cache-dir", ctrPath("/cache"),
						"-launch-cache", ctrPath("/launch-cache"),
						"-log-level", "debug",
					}...)
					createArgs = append([]string{ctrPath(creatorPath)}, createFlags...)
					createArgs = append(createArgs, imageName)
					startTime := time.Now()
					// first build
					output := h.DockerRunAndCopy(t,
						container1,
						dirBuild1,
						ctrPath("/layers"),
						createImage,
						h.WithFlags(append(
							dockerSocketMount,
							"--env", "CNB_PLATFORM_API="+platformAPI,
							"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
							"--network", createRegNetwork,
							"--volume", dirCache+":"+ctrPath("/cache"),
							"--volume", dirLaunchCache+":"+ctrPath("/launch-cache"),
						)...),
						h.WithArgs(createArgs...),
					)
					duration1 = time.Since(startTime) // was time.Now().Sub(startTime)
					t.Logf("First build duration: %s", duration1)
					// Nothing is cached yet, so no layer SBOMs should be restored.
					h.AssertStringDoesNotContain(t, output, "restored with content")
					h.AssertPathExists(t, filepath.Join(dirBuild1, "layers", "sbom", "build", "samples_hello-world", "sbom.cdx.json"))
					h.AssertPathExists(t, filepath.Join(dirBuild1, "layers", "sbom", "build", "samples_hello-world", "some-build-layer", "sbom.cdx.json"))
					// first run
					output = h.DockerRunAndCopy(t,
						container2,
						dirRun1,
						ctrPath("/layers"),
						imageName,
						h.WithFlags(
							"--entrypoint", "/cnb/lifecycle/launcher",
						),
						h.WithArgs("env"),
					)
					// The app image must carry launch SBOMs but never build/cache SBOMs.
					h.AssertPathExists(t, filepath.Join(dirRun1, "layers", "sbom", "launch", "samples_hello-world", "sbom.cdx.json"))
					h.AssertPathExists(t, filepath.Join(dirRun1, "layers", "sbom", "launch", "samples_hello-world", "some-launch-cache-layer", "sbom.cdx.json"))
					h.AssertPathExists(t, filepath.Join(dirRun1, "layers", "sbom", "launch", "samples_hello-world", "some-layer", "sbom.cdx.json"))
					h.AssertPathDoesNotExist(t, filepath.Join(dirRun1, "layers", "sbom", "build"))
					h.AssertPathDoesNotExist(t, filepath.Join(dirRun1, "layers", "sbom", "cache"))
				})
				when("rebuild with cache", func() {
					it("exports SBOM in the app image", func() {
						startTime := time.Now()
						// second build
						output := h.DockerRunAndCopy(t,
							container3,
							dirBuild2,
							ctrPath("/layers"),
							createImage,
							h.WithFlags(append(
								dockerSocketMount,
								"--env", "CNB_PLATFORM_API="+platformAPI,
								"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
								"--network", createRegNetwork,
								// ctrPath for consistency with the first build (was a bare ":/cache").
								"--volume", dirCache+":"+ctrPath("/cache"),
								"--volume", dirLaunchCache+":"+ctrPath("/launch-cache"),
							)...),
							h.WithArgs(createArgs...),
						)
						// check that launch cache was used
						duration2 = time.Since(startTime) // was time.Now().Sub(startTime)
						t.Logf("Second build duration: %s", duration2)
						if duration2+100*time.Millisecond >= duration1 { // was time.Duration(0.1*float64(time.Second))
							t.Logf("Second build output: %s", output)
							t.Fatalf("Expected second build to complete 0.1s faster than first build; first build took %s, second build took %s", duration1, duration2)
						}
						h.AssertStringContains(t, output, "some-layer.sbom.cdx.json restored with content: {\"key\": \"some-launch-true-bom-content\"}")
						h.AssertStringContains(t, output, "some-cache-layer.sbom.cdx.json restored with content: {\"key\": \"some-cache-true-bom-content\"}")
						h.AssertStringContains(t, output, "some-launch-cache-layer.sbom.cdx.json restored with content: {\"key\": \"some-launch-true-cache-true-bom-content\"}")
						h.AssertStringContains(t, output, "Reusing layer 'buildpacksio/lifecycle:launch.sbom'")
						h.AssertPathExists(t, filepath.Join(dirBuild2, "layers", "sbom", "build", "samples_hello-world", "sbom.cdx.json"))
						h.AssertPathExists(t, filepath.Join(dirBuild2, "layers", "sbom", "build", "samples_hello-world", "some-build-layer", "sbom.cdx.json"))
						t.Log("restores store.toml")
						h.AssertStringContains(t, output, "store.toml restored with content: [metadata]")
						// second run
						output = h.DockerRunAndCopy(t,
							container4,
							dirRun2,
							ctrPath("/layers"),
							imageName,
							h.WithFlags(
								"--entrypoint", "/cnb/lifecycle/launcher",
							),
							h.WithArgs("env"),
						)
						// BUG FIX: these previously asserted against dirRun1 (the first run's
						// copy), so the second run's SBOM export was never actually checked.
						h.AssertPathExists(t, filepath.Join(dirRun2, "layers", "sbom", "launch", "samples_hello-world", "sbom.cdx.json"))
						h.AssertPathExists(t, filepath.Join(dirRun2, "layers", "sbom", "launch", "samples_hello-world", "some-launch-cache-layer", "sbom.cdx.json"))
						h.AssertPathExists(t, filepath.Join(dirRun2, "layers", "sbom", "launch", "samples_hello-world", "some-layer", "sbom.cdx.json"))
						h.AssertPathDoesNotExist(t, filepath.Join(dirRun2, "layers", "sbom", "build"))
						h.AssertPathDoesNotExist(t, filepath.Join(dirRun2, "layers", "sbom", "cache"))
					})
				})
				when("rebuild with clear cache", func() {
					it("exports SBOM in the app image", func() {
						createArgs = append([]string{ctrPath(creatorPath)}, append(createFlags, "-skip-restore")...)
						createArgs = append(createArgs, imageName)
						// second build
						output := h.DockerRunAndCopy(t,
							container3,
							dirBuild2,
							ctrPath("/layers"),
							createImage,
							h.WithFlags(append(
								dockerSocketMount,
								"--env", "CNB_PLATFORM_API="+platformAPI,
								"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
								"--network", createRegNetwork,
								// ctrPath for consistency with the first build (was a bare ":/cache").
								"--volume", dirCache+":"+ctrPath("/cache"),
								"--volume", dirLaunchCache+":"+ctrPath("/launch-cache"),
							)...),
							h.WithArgs(createArgs...),
						)
						// -skip-restore: layer SBOMs must NOT be restored from cache.
						h.AssertStringDoesNotContain(t, output, "some-layer.sbom.cdx.json restored with content: {\"key\": \"some-launch-true-bom-content\"}")
						h.AssertStringDoesNotContain(t, output, "some-cache-layer.sbom.cdx.json restored with content: {\"key\": \"some-cache-true-bom-content\"}")
						h.AssertStringDoesNotContain(t, output, "some-launch-cache-layer.sbom.cdx.json restored with content: {\"key\": \"some-launch-true-cache-true-bom-content\"}")
						// check that store.toml was restored
						if api.MustParse(platformAPI).AtLeast("0.10") {
							h.AssertStringContains(t, output, "store.toml restored with content: [metadata]")
						} else {
							h.AssertStringDoesNotContain(t, output, "store.toml restored with content")
						}
					})
				})
			})
		})
		when("layout case", func() {
			var (
				containerName string
				err           error
				layoutDir     string
				tmpDir        string
			)
			when("experimental mode is enabled", func() {
				it.Before(func() {
					// creates the directory to save all the OCI images on disk
					tmpDir, err = os.MkdirTemp("", "layout")
					h.AssertNil(t, err)
					containerName = "test-container-" + h.RandString(10)
					layoutDir = filepath.Join(path.RootDir, "layout-repo")
				})
				it.After(func() {
					if h.DockerContainerExists(t, containerName) {
						h.Run(t, exec.Command("docker", "rm", containerName))
					}
					h.DockerImageRemove(t, createdImageName)
					// removes all images created
					os.RemoveAll(tmpDir)
				})
				it("creates app", func() {
					h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
					var createFlags []string
					createFlags = append(createFlags, []string{"-layout", "-layout-dir", layoutDir, "-run-image", "busybox"}...)
					createArgs := append([]string{ctrPath(creatorPath)}, createFlags...)
					createdImageName = "some-created-image-" + h.RandString(10)
					createArgs = append(createArgs, createdImageName)
					output := h.DockerRunAndCopy(t, containerName, tmpDir, layoutDir, createImage,
						h.WithFlags(
							"--env", "CNB_PLATFORM_API="+platformAPI,
							"--env", "CNB_EXPERIMENTAL_MODE=warn",
						),
						h.WithArgs(createArgs...))
					h.AssertStringContains(t, output, "Saving /layout-repo/index.docker.io/library/"+createdImageName+"/latest")
					index := h.ReadIndexManifest(t, filepath.Join(tmpDir, layoutDir, "index.docker.io", "library", createdImageName+"/latest"))
					h.AssertEq(t, len(index.Manifests), 1)
				})
			})
			when("experimental mode is not enabled", func() {
				it("errors", func() {
					h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
					cmd := exec.Command(
						"docker", "run", "--rm",
						"--env", "CNB_PLATFORM_API="+platformAPI,
						createImage,
						ctrPath(creatorPath),
						"-layout",
						"-layout-dir", layoutDir,
						"-run-image", "busybox",
						"some-image",
					) // #nosec G204
					output, err := cmd.CombinedOutput()
					h.AssertNotNil(t, err)
					expected := "experimental features are disabled by CNB_EXPERIMENTAL_MODE=error"
					h.AssertStringContains(t, string(output), expected)
				})
			})
		})
	}
}

View File

@ -1,46 +1,41 @@
//go:build acceptance // +build acceptance
package acceptance package acceptance
import ( import (
"context"
"fmt" "fmt"
"io/ioutil"
"math/rand"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"testing" "testing"
"time"
"github.com/BurntSushi/toml"
"github.com/sclevine/spec" "github.com/sclevine/spec"
"github.com/sclevine/spec/report" "github.com/sclevine/spec/report"
"github.com/buildpacks/lifecycle/api" "github.com/buildpacks/lifecycle/api"
"github.com/buildpacks/lifecycle/cmd" "github.com/buildpacks/lifecycle/buildpack"
"github.com/buildpacks/lifecycle/platform/files" "github.com/buildpacks/lifecycle/platform"
h "github.com/buildpacks/lifecycle/testhelpers" h "github.com/buildpacks/lifecycle/testhelpers"
) )
var ( var (
detectDockerContext = filepath.Join("testdata", "detector") detectDockerContext = filepath.Join("testdata", "detector")
detectorBinaryDir = filepath.Join("testdata", "detector", "container", "cnb", "lifecycle") detectorBinaryDir = filepath.Join("testdata", "detector", "container", "cnb", "lifecycle")
detectImage = "lifecycle/acceptance/detector" detectImage = "lifecycle/acceptance/detector"
userID = "1234" userID = "1234"
detectorDaemonOS, detectorDaemonArch string
) )
func TestDetector(t *testing.T) { func TestDetector(t *testing.T) {
info, err := h.DockerCli(t).Info(context.TODO()) h.SkipIf(t, runtime.GOOS == "windows", "Detector acceptance tests are not yet supported on Windows")
h.AssertNil(t, err)
detectorDaemonOS = info.OSType rand.Seed(time.Now().UTC().UnixNano())
detectorDaemonArch = info.Architecture
if detectorDaemonArch == "x86_64" {
detectorDaemonArch = "amd64"
}
if detectorDaemonArch == "aarch64" {
detectorDaemonArch = "arm64"
}
h.MakeAndCopyLifecycle(t, detectorDaemonOS, detectorDaemonArch, detectorBinaryDir) h.MakeAndCopyLifecycle(t, "linux", detectorBinaryDir)
h.DockerBuild(t, h.DockerBuild(t,
detectImage, detectImage,
detectDockerContext, detectDockerContext,
@ -48,77 +43,412 @@ func TestDetector(t *testing.T) {
) )
defer h.DockerImageRemove(t, detectImage) defer h.DockerImageRemove(t, detectImage)
for _, platformAPI := range api.Platform.Supported { spec.Run(t, "acceptance-detector", testDetector, spec.Parallel(), spec.Report(report.Terminal{}))
if platformAPI.LessThan("0.12") {
continue
}
spec.Run(t, "acceptance-detector/"+platformAPI.String(), testDetectorFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{}))
}
} }
func testDetectorFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) { func testDetector(t *testing.T, when spec.G, it spec.S) {
return func(t *testing.T, when spec.G, it spec.S) { when("called with arguments", func() {
when("called with arguments", func() { it("errors", func() {
it("errors", func() { command := exec.Command(
command := exec.Command( "docker",
"docker", "run",
"run", "--rm",
"--rm", "--env", "CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "CNB_PLATFORM_API="+platformAPI, detectImage,
"some-arg",
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to parse arguments: received unexpected arguments"
h.AssertStringContains(t, string(output), expected)
})
})
when("running as a root", func() {
it("errors", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--user",
"root",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
detectImage,
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to build: refusing to run as root"
h.AssertStringContains(t, string(output), expected)
})
})
when("read buildpack order file failed", func() {
it("errors", func() {
// no order.toml file in the default search locations
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
detectImage,
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to read buildpack order file"
h.AssertStringContains(t, string(output), expected)
})
})
when("no buildpack group passed detection", func() {
it("errors", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_ORDER_PATH=/cnb/orders/empty_order.toml",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
detectImage,
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
failErr, ok := err.(*exec.ExitError)
if !ok {
t.Fatalf("expected an error of type exec.ExitError")
}
h.AssertEq(t, failErr.ExitCode(), 20) // platform code for cmd.FailedDetect
expected := "No buildpack groups passed detection."
h.AssertStringContains(t, string(output), expected)
})
})
when("there is a buildpack group that pass detection", func() {
var copyDir, containerName string
it.Before(func() {
containerName = "test-container-" + h.RandString(10)
var err error
copyDir, err = ioutil.TempDir("", "test-docker-copy-")
h.AssertNil(t, err)
})
it.After(func() {
if h.DockerContainerExists(t, containerName) {
h.Run(t, exec.Command("docker", "rm", containerName))
}
os.RemoveAll(copyDir)
})
it("writes group.toml and plan.toml", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
detectImage,
"/layers",
h.WithFlags("--user", userID,
"--env", "CNB_ORDER_PATH=/cnb/orders/simple_order.toml",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
),
h.WithArgs(),
)
// check group.toml
tempGroupToml := filepath.Join(copyDir, "layers", "group.toml")
var buildpackGroup buildpack.Group
_, err := toml.DecodeFile(tempGroupToml, &buildpackGroup)
h.AssertNil(t, err)
h.AssertEq(t, buildpackGroup.Group[0].ID, "simple_buildpack")
h.AssertEq(t, buildpackGroup.Group[0].Version, "simple_buildpack_version")
// check plan.toml
tempPlanToml := filepath.Join(copyDir, "layers", "plan.toml")
var buildPlan platform.BuildPlan
_, err = toml.DecodeFile(tempPlanToml, &buildPlan)
h.AssertNil(t, err)
h.AssertEq(t, buildPlan.Entries[0].Providers[0].ID, "simple_buildpack")
h.AssertEq(t, buildPlan.Entries[0].Providers[0].Version, "simple_buildpack_version")
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Name, "some_requirement")
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Metadata["some_metadata_key"], "some_metadata_val")
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Metadata["version"], "some_version")
})
})
when("environment variables are provided for buildpack and app directories and for the output files", func() {
var copyDir, containerName string
it.Before(func() {
containerName = "test-container-" + h.RandString(10)
var err error
copyDir, err = ioutil.TempDir("", "test-docker-copy-")
h.AssertNil(t, err)
})
it.After(func() {
if h.DockerContainerExists(t, containerName) {
h.Run(t, exec.Command("docker", "rm", containerName))
}
os.RemoveAll(copyDir)
})
it("writes group.toml and plan.toml in the right location and with the right names", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
detectImage,
"/layers",
h.WithFlags("--user", userID,
"--env", "CNB_ORDER_PATH=/cnb/orders/always_detect_order.toml",
"--env", "CNB_BUILDPACKS_DIR=/cnb/custom_buildpacks",
"--env", "CNB_APP_DIR=/custom_workspace",
"--env", "CNB_GROUP_PATH=./custom_group.toml",
"--env", "CNB_PLAN_PATH=./custom_plan.toml",
"--env", "CNB_PLATFORM_DIR=/custom_platform",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
),
h.WithArgs("-log-level=debug"),
)
// check group.toml
tempGroupToml := filepath.Join(copyDir, "layers", "custom_group.toml")
var buildpackGroup buildpack.Group
_, err := toml.DecodeFile(tempGroupToml, &buildpackGroup)
h.AssertNil(t, err)
h.AssertEq(t, buildpackGroup.Group[0].ID, "always_detect_buildpack")
h.AssertEq(t, buildpackGroup.Group[0].Version, "always_detect_buildpack_version")
// check plan.toml - should be empty since we're using always_detect_order.toml so there is no "actual plan"
tempPlanToml := filepath.Join(copyDir, "layers", "custom_plan.toml")
planContents, err := ioutil.ReadFile(tempPlanToml)
h.AssertNil(t, err)
h.AssertEq(t, len(planContents) == 0, true)
// check platform directory
logs := h.Run(t, exec.Command("docker", "logs", containerName))
expectedPlatformPath := "platform_path: /custom_platform"
expectedAppDir := "app_dir: /custom_workspace"
h.AssertStringContains(t, logs, expectedPlatformPath)
h.AssertStringContains(t, logs, expectedAppDir)
})
})
when("-order is provided", func() {
var copyDir, containerName, expectedOrderTOMLPath string
it.Before(func() {
containerName = "test-container-" + h.RandString(10)
var err error
copyDir, err = ioutil.TempDir("", "test-docker-copy-")
h.AssertNil(t, err)
simpleOrderTOML := filepath.Join("testdata", "detector", "container", "cnb", "orders", "simple_order.toml")
expectedOrderTOMLPath, err = filepath.Abs(simpleOrderTOML)
h.AssertNil(t, err)
})
it.After(func() {
if h.DockerContainerExists(t, containerName) {
h.Run(t, exec.Command("docker", "rm", containerName))
}
os.RemoveAll(copyDir)
})
when("the order.toml exists", func() {
it("processes the provided order.toml", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
detectImage, detectImage,
"some-arg", "/layers",
h.WithFlags("--user", userID,
"--volume", expectedOrderTOMLPath+":/custom/order.toml",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
),
h.WithArgs(
"-log-level=debug",
"-order=/custom/order.toml",
),
) )
// check group.toml
tempGroupToml := filepath.Join(copyDir, "layers", "group.toml")
var buildpackGroup buildpack.Group
_, err := toml.DecodeFile(tempGroupToml, &buildpackGroup)
h.AssertNil(t, err)
h.AssertEq(t, buildpackGroup.Group[0].ID, "simple_buildpack")
h.AssertEq(t, buildpackGroup.Group[0].Version, "simple_buildpack_version")
})
})
when("the order.toml does not exist", func() {
it("errors", func() {
command := exec.Command("docker", "run",
"--user", userID,
"--rm",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
detectImage,
"-order=/custom/order.toml")
output, err := command.CombinedOutput() output, err := command.CombinedOutput()
h.AssertNotNil(t, err) h.AssertNotNil(t, err)
expected := "failed to parse arguments: received unexpected arguments" expected := "failed to read buildpack order file: open /custom/order.toml: no such file or directory"
h.AssertStringContains(t, string(output), expected) h.AssertStringContains(t, string(output), expected)
}) })
}) })
when("running as a root", func() { })
it("errors", func() {
command := exec.Command( when("-order is not provided", func() {
"docker", var copyDir, containerName, expectedOrderTOMLPath, otherOrderTOMLPath string
"run",
"--rm", it.Before(func() {
"--user", containerName = "test-container-" + h.RandString(10)
"root", var err error
"--env", "CNB_PLATFORM_API="+platformAPI, copyDir, err = ioutil.TempDir("", "test-docker-copy-")
h.AssertNil(t, err)
simpleOrderTOML := filepath.Join("testdata", "detector", "container", "cnb", "orders", "simple_order.toml")
expectedOrderTOMLPath, err = filepath.Abs(simpleOrderTOML)
h.AssertNil(t, err)
alwaysDetectOrderTOML := filepath.Join("testdata", "detector", "container", "cnb", "orders", "always_detect_order.toml")
otherOrderTOMLPath, err = filepath.Abs(alwaysDetectOrderTOML)
h.AssertNil(t, err)
})
it.After(func() {
if h.DockerContainerExists(t, containerName) {
h.Run(t, exec.Command("docker", "rm", containerName))
}
os.RemoveAll(copyDir)
})
when("/cnb/order.toml and /layers/order.toml are present", func() {
it("prefers /layers/order.toml", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
detectImage, detectImage,
"/layers",
h.WithFlags("--user", userID,
"--volume", expectedOrderTOMLPath+":/layers/order.toml",
"--volume", otherOrderTOMLPath+":/cnb/order.toml",
"--env", "CNB_ORDER_PATH=",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
),
h.WithArgs("-log-level=debug"),
) )
output, err := command.CombinedOutput()
h.AssertNotNil(t, err) // check group.toml
expected := "failed to detect: refusing to run as root" tempGroupToml := filepath.Join(copyDir, "layers", "group.toml")
h.AssertStringContains(t, string(output), expected) var buildpackGroup buildpack.Group
_, err := toml.DecodeFile(tempGroupToml, &buildpackGroup)
h.AssertNil(t, err)
h.AssertEq(t, buildpackGroup.Group[0].ID, "simple_buildpack")
h.AssertEq(t, buildpackGroup.Group[0].Version, "simple_buildpack_version")
}) })
}) })
when("read buildpack order file failed", func() { when("only /cnb/order.toml is present", func() {
it("errors", func() { it("processes /cnb/order.toml", func() {
// no order.toml file in the default search locations h.DockerRunAndCopy(t,
command := exec.Command( containerName,
"docker", copyDir,
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+platformAPI,
detectImage, detectImage,
"/layers",
h.WithFlags("--user", userID,
"--volume", expectedOrderTOMLPath+":/cnb/order.toml",
"--env", "CNB_ORDER_PATH=",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
),
h.WithArgs("-log-level=debug"),
) )
output, err := command.CombinedOutput()
h.AssertNotNil(t, err) // check group.toml
expected := "failed to initialize detector: reading order" tempGroupToml := filepath.Join(copyDir, "layers", "group.toml")
h.AssertStringContains(t, string(output), expected) var buildpackGroup buildpack.Group
_, err := toml.DecodeFile(tempGroupToml, &buildpackGroup)
h.AssertNil(t, err)
h.AssertEq(t, buildpackGroup.Group[0].ID, "simple_buildpack")
h.AssertEq(t, buildpackGroup.Group[0].Version, "simple_buildpack_version")
}) })
}) })
when("only /layers/order.toml is present", func() {
it("processes /layers/order.toml", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
detectImage,
"/layers",
h.WithFlags("--user", userID,
"--volume", expectedOrderTOMLPath+":/layers/order.toml",
"--env", "CNB_ORDER_PATH=",
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
),
h.WithArgs("-log-level=debug"),
)
// check group.toml
tempGroupToml := filepath.Join(copyDir, "layers", "group.toml")
var buildpackGroup buildpack.Group
_, err := toml.DecodeFile(tempGroupToml, &buildpackGroup)
h.AssertNil(t, err)
h.AssertEq(t, buildpackGroup.Group[0].ID, "simple_buildpack")
h.AssertEq(t, buildpackGroup.Group[0].Version, "simple_buildpack_version")
})
})
when("platform api < 0.6", func() {
when("/cnb/order.toml and /layers/order.toml are present", func() {
it("only processes /cnb/order.toml", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
detectImage,
"/layers",
h.WithFlags("--user", userID,
"--volume", expectedOrderTOMLPath+":/cnb/order.toml",
"--volume", otherOrderTOMLPath+":/layers/order.toml",
"--env", "CNB_PLATFORM_API=0.5",
"--env", "CNB_ORDER_PATH=",
),
h.WithArgs("-log-level=debug"),
)
// check group.toml
tempGroupToml := filepath.Join(copyDir, "layers", "group.toml")
var buildpackGroup buildpack.Group
_, err := toml.DecodeFile(tempGroupToml, &buildpackGroup)
h.AssertNil(t, err)
h.AssertEq(t, buildpackGroup.Group[0].ID, "simple_buildpack")
h.AssertEq(t, buildpackGroup.Group[0].Version, "simple_buildpack_version")
})
})
when("only /layers/order.toml is present", func() {
it("errors", func() {
command := exec.Command("docker", "run",
"--user", userID,
"--volume", otherOrderTOMLPath+":/layers/order.toml",
"--env", "CNB_PLATFORM_API=0.5",
"--env", "CNB_ORDER_PATH=",
"--rm", detectImage)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to read buildpack order file: open /cnb/order.toml: no such file or directory"
h.AssertStringContains(t, string(output), expected)
})
})
})
})
when("platform api < 0.6", func() {
when("no buildpack group passed detection", func() { when("no buildpack group passed detection", func() {
it("errors and exits with the expected code", func() { it("errors", func() {
command := exec.Command( command := exec.Command(
"docker", "docker",
"run", "run",
"--rm", "--rm",
"--env", "CNB_ORDER_PATH=/cnb/orders/fail_detect_order.toml", "--env", "CNB_ORDER_PATH=/cnb/orders/empty_order.toml",
"--env", "CNB_PLATFORM_API="+platformAPI, "--env", "CNB_PLATFORM_API=0.5",
detectImage, detectImage,
) )
output, err := command.CombinedOutput() output, err := command.CombinedOutput()
@ -127,296 +457,10 @@ func testDetectorFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
if !ok { if !ok {
t.Fatalf("expected an error of type exec.ExitError") t.Fatalf("expected an error of type exec.ExitError")
} }
h.AssertEq(t, failErr.ExitCode(), 20) // platform code for failed detect h.AssertEq(t, failErr.ExitCode(), 100) // platform code for cmd.FailedDetect
expected := "No buildpack groups passed detection."
expected1 := `======== Output: fail_detect_buildpack@some_version ======== h.AssertStringContains(t, string(output), expected)
Opted out of detection
======== Results ========
fail: fail_detect_buildpack@some_version`
h.AssertStringContains(t, string(output), expected1)
expected2 := "No buildpack groups passed detection."
h.AssertStringContains(t, string(output), expected2)
}) })
}) })
})
when("there is a buildpack group that passes detection", func() {
var copyDir, containerName string
it.Before(func() {
containerName = "test-container-" + h.RandString(10)
var err error
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
h.AssertNil(t, err)
})
it.After(func() {
if h.DockerContainerExists(t, containerName) {
h.Run(t, exec.Command("docker", "rm", containerName))
}
os.RemoveAll(copyDir)
})
it("writes group.toml and plan.toml at the default locations", func() {
output := h.DockerRunAndCopy(t,
containerName,
copyDir,
"/layers",
detectImage,
h.WithFlags("--user", userID,
"--env", "CNB_ORDER_PATH=/cnb/orders/simple_order.toml",
"--env", "CNB_PLATFORM_API="+platformAPI,
),
h.WithArgs(),
)
// check group.toml
foundGroupTOML := filepath.Join(copyDir, "layers", "group.toml")
group, err := files.Handler.ReadGroup(foundGroupTOML)
h.AssertNil(t, err)
h.AssertEq(t, group.Group[0].ID, "simple_buildpack")
h.AssertEq(t, group.Group[0].Version, "simple_buildpack_version")
// check plan.toml
foundPlanTOML := filepath.Join(copyDir, "layers", "plan.toml")
buildPlan, err := files.Handler.ReadPlan(foundPlanTOML)
h.AssertNil(t, err)
h.AssertEq(t, buildPlan.Entries[0].Providers[0].ID, "simple_buildpack")
h.AssertEq(t, buildPlan.Entries[0].Providers[0].Version, "simple_buildpack_version")
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Name, "some_requirement")
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Metadata["some_metadata_key"], "some_metadata_val")
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Metadata["version"], "some_version")
// check output
h.AssertStringContains(t, output, "simple_buildpack simple_buildpack_version")
h.AssertStringDoesNotContain(t, output, "======== Results ========") // log output is info level as detect passed
})
})
when("environment variables are provided for buildpack and app directories and for the output files", func() {
var copyDir, containerName string
it.Before(func() {
containerName = "test-container-" + h.RandString(10)
var err error
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
h.AssertNil(t, err)
})
it.After(func() {
if h.DockerContainerExists(t, containerName) {
h.Run(t, exec.Command("docker", "rm", containerName))
}
os.RemoveAll(copyDir)
})
it("writes group.toml and plan.toml in the right locations and with the right names", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
"/layers",
detectImage,
h.WithFlags("--user", userID,
"--env", "CNB_ORDER_PATH=/cnb/orders/always_detect_order.toml",
"--env", "CNB_BUILDPACKS_DIR=/cnb/custom_buildpacks",
"--env", "CNB_APP_DIR=/custom_workspace",
"--env", "CNB_GROUP_PATH=./custom_group.toml",
"--env", "CNB_PLAN_PATH=./custom_plan.toml",
"--env", "CNB_PLATFORM_DIR=/custom_platform",
"--env", "CNB_PLATFORM_API="+platformAPI,
),
h.WithArgs("-log-level=debug"),
)
// check group.toml
foundGroupTOML := filepath.Join(copyDir, "layers", "custom_group.toml")
group, err := files.Handler.ReadGroup(foundGroupTOML)
h.AssertNil(t, err)
h.AssertEq(t, group.Group[0].ID, "always_detect_buildpack")
h.AssertEq(t, group.Group[0].Version, "always_detect_buildpack_version")
// check plan.toml - should be empty since we're using always_detect_order.toml so there is no "actual plan"
tempPlanToml := filepath.Join(copyDir, "layers", "custom_plan.toml")
planContents, err := os.ReadFile(tempPlanToml)
h.AssertNil(t, err)
h.AssertEq(t, len(planContents) == 0, true)
// check platform directory
logs := h.Run(t, exec.Command("docker", "logs", containerName))
expectedPlatformPath := "platform_path: /custom_platform"
expectedAppDir := "app_dir: /custom_workspace"
h.AssertStringContains(t, logs, expectedPlatformPath)
h.AssertStringContains(t, logs, expectedAppDir)
})
})
when("-order is provided", func() {
var copyDir, containerName, expectedOrderTOMLPath string
it.Before(func() {
containerName = "test-container-" + h.RandString(10)
var err error
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
h.AssertNil(t, err)
simpleOrderTOML := filepath.Join("testdata", "detector", "container", "cnb", "orders", "simple_order.toml")
expectedOrderTOMLPath, err = filepath.Abs(simpleOrderTOML)
h.AssertNil(t, err)
})
it.After(func() {
if h.DockerContainerExists(t, containerName) {
h.Run(t, exec.Command("docker", "rm", containerName))
}
os.RemoveAll(copyDir)
})
when("the order.toml exists", func() {
it("processes the provided order.toml", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
"/layers",
detectImage,
h.WithFlags("--user", userID,
"--volume", expectedOrderTOMLPath+":/custom/order.toml",
"--env", "CNB_PLATFORM_API="+platformAPI,
),
h.WithArgs(
"-log-level=debug",
"-order=/custom/order.toml",
),
)
// check group.toml
foundGroupTOML := filepath.Join(copyDir, "layers", "group.toml")
group, err := files.Handler.ReadGroup(foundGroupTOML)
h.AssertNil(t, err)
h.AssertEq(t, group.Group[0].ID, "simple_buildpack")
h.AssertEq(t, group.Group[0].Version, "simple_buildpack_version")
})
})
when("the order.toml does not exist", func() {
it("errors", func() {
command := exec.Command("docker", "run",
"--user", userID,
"--rm",
"--env", "CNB_PLATFORM_API="+platformAPI,
detectImage,
"-order=/custom/order.toml")
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to initialize detector: reading order: failed to read order file: open /custom/order.toml: no such file or directory"
h.AssertStringContains(t, string(output), expected)
})
})
when("the order.toml contains a buildpack using an unsupported api", func() {
it("errors", func() {
command := exec.Command("docker", "run",
"--user", userID,
"--rm",
"--env", "CNB_PLATFORM_API="+platformAPI,
detectImage,
"-order=/cnb/orders/bad_api.toml")
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
failErr, ok := err.(*exec.ExitError)
if !ok {
t.Fatalf("expected an error of type exec.ExitError")
}
h.AssertEq(t, failErr.ExitCode(), 12) // platform code for buildpack api error
expected := "buildpack API version '0.1' is incompatible with the lifecycle"
h.AssertStringContains(t, string(output), expected)
})
})
})
when("-order contains extensions", func() {
var containerName, copyDir, orderPath string
it.Before(func() {
containerName = "test-container-" + h.RandString(10)
var err error
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
h.AssertNil(t, err)
orderPath, err = filepath.Abs(filepath.Join("testdata", "detector", "container", "cnb", "orders", "order_with_ext.toml"))
h.AssertNil(t, err)
})
it.After(func() {
if h.DockerContainerExists(t, containerName) {
h.Run(t, exec.Command("docker", "rm", containerName))
}
os.RemoveAll(copyDir)
})
it("processes the provided order.toml", func() {
experimentalMode := "warn"
if api.MustParse(platformAPI).AtLeast("0.13") {
experimentalMode = "error"
}
output := h.DockerRunAndCopy(t,
containerName,
copyDir,
"/layers",
detectImage,
h.WithFlags(
"--user", userID,
"--volume", orderPath+":/layers/order.toml",
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode,
),
h.WithArgs(
"-analyzed=/layers/analyzed.toml",
"-extensions=/cnb/extensions",
"-generated=/layers/generated",
"-log-level=debug",
"-run=/layers/run.toml", // /cnb/run.toml is the default location of run.toml
),
)
t.Log("runs /bin/detect for buildpacks and extensions")
if api.MustParse(platformAPI).LessThan("0.13") {
h.AssertStringContains(t, output, "Platform requested experimental feature 'Dockerfiles'")
}
h.AssertStringContains(t, output, "FOO=val-from-build-config")
h.AssertStringContains(t, output, "simple_extension: output from /bin/detect")
t.Log("writes group.toml")
foundGroupTOML := filepath.Join(copyDir, "layers", "group.toml")
group, err := files.Handler.ReadGroup(foundGroupTOML)
h.AssertNil(t, err)
h.AssertEq(t, group.GroupExtensions[0].ID, "simple_extension")
h.AssertEq(t, group.GroupExtensions[0].Version, "simple_extension_version")
h.AssertEq(t, group.Group[0].ID, "buildpack_for_ext")
h.AssertEq(t, group.Group[0].Version, "buildpack_for_ext_version")
h.AssertEq(t, group.Group[0].Extension, false)
t.Log("writes plan.toml")
foundPlanTOML := filepath.Join(copyDir, "layers", "plan.toml")
buildPlan, err := files.Handler.ReadPlan(foundPlanTOML)
h.AssertNil(t, err)
h.AssertEq(t, len(buildPlan.Entries), 0) // this shows that the plan was filtered to remove `requires` provided by extensions
t.Log("runs /bin/generate for extensions")
h.AssertStringContains(t, output, "simple_extension: output from /bin/generate")
var dockerfilePath string
if api.MustParse(platformAPI).LessThan("0.13") {
t.Log("copies the generated Dockerfiles to the output directory")
dockerfilePath = filepath.Join(copyDir, "layers", "generated", "run", "simple_extension", "Dockerfile")
} else {
dockerfilePath = filepath.Join(copyDir, "layers", "generated", "simple_extension", "run.Dockerfile")
}
h.AssertPathExists(t, dockerfilePath)
contents, err := os.ReadFile(dockerfilePath)
h.AssertEq(t, string(contents), "FROM some-run-image-from-extension\n")
t.Log("records the new run image in analyzed.toml")
foundAnalyzedTOML := filepath.Join(copyDir, "layers", "analyzed.toml")
analyzedMD, err := files.Handler.ReadAnalyzed(foundAnalyzedTOML, cmd.DefaultLogger)
h.AssertNil(t, err)
h.AssertEq(t, analyzedMD.RunImage.Image, "some-run-image-from-extension")
})
})
}
} }

View File

@ -1,677 +0,0 @@
//go:build acceptance
package acceptance
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
"github.com/buildpacks/imgutil"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/pkg/errors"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/buildpacks/lifecycle/api"
"github.com/buildpacks/lifecycle/auth"
"github.com/buildpacks/lifecycle/cache"
"github.com/buildpacks/lifecycle/cmd"
"github.com/buildpacks/lifecycle/internal/fsutil"
"github.com/buildpacks/lifecycle/internal/path"
"github.com/buildpacks/lifecycle/platform/files"
h "github.com/buildpacks/lifecycle/testhelpers"
)
// Package-level state shared by the exporter acceptance tests.
// Populated once in TestExporter after the phase fixtures start, then read by
// the per-Platform-API spec functions.
var (
exportImage string // tag of the test image the exporter phase runs in
exportRegAuthConfig string // passed to containers as CNB_REGISTRY_AUTH
exportRegNetwork string // docker network of the ephemeral test registry
exporterPath string // path to the exporter binary inside the container
exportDaemonFixtures *daemonImageFixtures // fixtures pre-loaded into the test daemon
exportRegFixtures *regImageFixtures // fixtures pre-pushed to the test registry
exportTest *PhaseTest // shared harness for daemon + registry lifecycle
)
// TestExporter builds the exporter test image from testdata/exporter, starts
// the shared phase fixtures (test daemon + registry), and runs the exporter
// acceptance suite once per supported Platform API version, in parallel.
func TestExporter(t *testing.T) {
testImageDockerContext := filepath.Join("testdata", "exporter")
exportTest = NewPhaseTest(t, "exporter", testImageDockerContext)
// updateTOMLFixturesWithTestRegistry presumably points fixture TOMLs at the
// ephemeral registry before the image is built — defined elsewhere in the package.
exportTest.Start(t, updateTOMLFixturesWithTestRegistry)
defer exportTest.Stop(t)
// Cache handles to the started fixtures in package-level vars for the specs.
exportImage = exportTest.testImageRef
exporterPath = exportTest.containerBinaryPath
exportRegAuthConfig = exportTest.targetRegistry.authConfig
exportRegNetwork = exportTest.targetRegistry.network
exportDaemonFixtures = exportTest.targetDaemon.fixtures
exportRegFixtures = exportTest.targetRegistry.fixtures
for _, platformAPI := range api.Platform.Supported {
spec.Run(t, "acceptance-exporter/"+platformAPI.String(), testExporterFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{}))
}
}
// testExporterFunc returns the exporter acceptance spec suite bound to a single
// Platform API version. It covers: export to the docker daemon (including
// extension layers and SOURCE_DATE_EPOCH), export to a registry (including
// insecure registries, cache images/directories, and extensions), and
// experimental OCI-layout export.
// NOTE(review): the original indentation of this file was lost in this view;
// the token stream below is unchanged.
func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
return func(t *testing.T, when spec.G, it spec.S) {
when("daemon case", func() {
var exportedImageName string
it.After(func() {
// best-effort cleanup; the image may not exist if the test failed early
_, _, _ = h.RunE(exec.Command("docker", "rmi", exportedImageName)) // #nosec G204
})
it("app is created", func() {
exportFlags := []string{"-daemon", "-log-level", "debug"}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = "some-exported-image-" + h.RandString(10)
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRun(t,
exportImage,
h.WithFlags(append(
dockerSocketMount,
"--env", "CNB_PLATFORM_API="+platformAPI,
)...),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
// SBOM copying was introduced in Platform API 0.11
if api.MustParse(platformAPI).AtLeast("0.11") {
extensions := []string{"sbom.cdx.json", "sbom.spdx.json", "sbom.syft.json"}
for _, extension := range extensions {
h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM lifecycle.%s to %s", extension, filepath.Join(path.RootDir, "layers", "sbom", "build", "buildpacksio_lifecycle", extension)))
h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM launcher.%s to %s", extension, filepath.Join(path.RootDir, "layers", "sbom", "launch", "buildpacksio_lifecycle", "launcher", extension)))
}
} else {
h.AssertStringDoesNotContain(t, output, "Copying SBOM")
}
// image history entries are only populated from Platform API 0.12 on
if api.MustParse(platformAPI).AtLeast("0.12") {
expectedHistory := []string{
"Buildpacks Launcher Config",
"Buildpacks Application Launcher",
"Application Layer",
"Software Bill-of-Materials",
"Layer: 'corrupted-layer', Created by buildpack: corrupted_buildpack@corrupted_v1",
"Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1",
"", // run image layer
}
assertDaemonImageHasHistory(t, exportedImageName, expectedHistory)
} else {
assertDaemonImageDoesNotHaveHistory(t, exportedImageName)
}
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
})
when("using extensions", func() {
it.Before(func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
})
it("app is created from the extended run image", func() {
exportFlags := []string{
"-analyzed", "/layers/run-image-extended-analyzed.toml", // though the run image is a registry image, it also exists in the daemon with the same tag
"-daemon",
"-extended", "/layers/some-extended-dir",
"-log-level", "debug",
"-run", "/cnb/run.toml", // though the run image is a registry image, it also exists in the daemon with the same tag
}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = "some-exported-image-" + h.RandString(10)
exportArgs = append(exportArgs, exportedImageName)
// get run image top layer
inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportTest.targetRegistry.fixtures.ReadOnlyRunImage)
h.AssertNil(t, err)
layers := inspect.RootFS.Layers
runImageFixtureTopLayerSHA := layers[len(layers)-1]
runImageFixtureSHA := inspect.ID
experimentalMode := "warn"
if api.MustParse(platformAPI).AtLeast("0.13") {
experimentalMode = "error"
}
output := h.DockerRun(t,
exportImage,
h.WithFlags(append(
dockerSocketMount,
"--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode,
"--env", "CNB_PLATFORM_API="+platformAPI,
)...),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
// extension-created layers appear between buildpack layers and the run image layer
expectedHistory := []string{
"Buildpacks Launcher Config",
"Buildpacks Application Launcher",
"Application Layer",
"Software Bill-of-Materials",
"Layer: 'corrupted-layer', Created by buildpack: corrupted_buildpack@corrupted_v1",
"Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1",
"Layer: 'RUN mkdir /some-other-dir && echo some-data > /some-other-dir/some-file && echo some-data > /some-other-file', Created by extension: second-extension",
"Layer: 'RUN mkdir /some-dir && echo some-data > /some-dir/some-file && echo some-data > /some-file', Created by extension: first-extension",
"", // run image layer
}
assertDaemonImageHasHistory(t, exportedImageName, expectedHistory)
t.Log("bases the exported image on the extended run image")
inspect, _, err = h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportedImageName)
h.AssertNil(t, err)
h.AssertEq(t, inspect.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<sha>/blobs/sha256/<config>
t.Log("Adds extension layers")
type testCase struct {
expectedDiffID string
layerIndex int
}
testCases := []testCase{
{
expectedDiffID: "sha256:fb54d2566824d6630d94db0b008d9a544a94d3547a424f52e2fd282b648c0601", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing and zeroing timestamps
layerIndex: 1,
},
{
expectedDiffID: "sha256:1018c7d3584c4f7fa3ef4486d1a6a11b93956b9d8bfe0898a3e0fbd248c984d8", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing and zeroing timestamps
layerIndex: 2,
},
}
for _, tc := range testCases {
h.AssertEq(t, inspect.RootFS.Layers[tc.layerIndex], tc.expectedDiffID)
}
t.Log("sets the layers metadata label according to the new spec")
var lmd files.LayersMetadata
lmdJSON := inspect.Config.Labels["io.buildpacks.lifecycle.metadata"]
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd))
h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml
h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml
h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA)
h.AssertEq(t, lmd.RunImage.Reference, strings.TrimPrefix(runImageFixtureSHA, "sha256:"))
})
})
when("SOURCE_DATE_EPOCH is set", func() {
it("app is created with config CreatedAt set to SOURCE_DATE_EPOCH", func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9")
expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC)
exportFlags := []string{"-daemon"}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = "some-exported-image-" + h.RandString(10)
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRun(t,
exportImage,
h.WithFlags(append(
dockerSocketMount,
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--env", "SOURCE_DATE_EPOCH="+fmt.Sprintf("%d", expectedTime.Unix()),
"--network", exportRegNetwork,
)...),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, expectedTime)
})
})
})
when("registry case", func() {
var exportedImageName string
it.After(func() {
_, _, _ = h.RunE(exec.Command("docker", "rmi", exportedImageName)) // #nosec G204
})
it("app is created", func() {
var exportFlags []string
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
// pulling proves the image actually landed in the registry
h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
})
when("registry is insecure", func() {
it.Before(func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
})
it("uses http protocol", func() {
var exportFlags []string
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-insecure-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
insecureRegistry := "host.docker.internal/bar"
insecureAnalyzed := "/layers/analyzed_insecure.toml"
// the run is expected to fail; the error message proves http (not https) was attempted
_, _, err := h.DockerRunWithError(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_INSECURE_REGISTRIES="+insecureRegistry,
"--env", "CNB_ANALYZED_PATH="+insecureAnalyzed,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, err.Error(), "http://host.docker.internal")
})
})
when("SOURCE_DATE_EPOCH is set", func() {
it("app is created with config CreatedAt set to SOURCE_DATE_EPOCH", func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9")
expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC)
var exportFlags []string
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--env", "SOURCE_DATE_EPOCH="+fmt.Sprintf("%d", expectedTime.Unix()),
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, expectedTime)
})
})
// FIXME: move this out of the registry block
when("cache", func() {
when("image case", func() {
it("cache is created", func() {
cacheImageName := exportTest.RegRepoName("some-cache-image-" + h.RandString(10))
exportFlags := []string{"-cache-image", cacheImageName}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
// To detect whether the export of cacheImage and exportedImage is successful
h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
h.Run(t, exec.Command("docker", "pull", cacheImageName))
})
when("parallel export is enabled", func() {
it("cache is created", func() {
cacheImageName := exportTest.RegRepoName("some-cache-image-" + h.RandString(10))
exportFlags := []string{"-cache-image", cacheImageName, "-parallel"}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
h.Run(t, exec.Command("docker", "pull", cacheImageName))
})
})
when("cache is provided but no data was cached", func() {
it("cache is created with an empty layer", func() {
cacheImageName := exportTest.RegRepoName("some-empty-cache-image-" + h.RandString(10))
exportFlags := []string{"-cache-image", cacheImageName, "-layers", "/other_layers"}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
testEmptyLayerSHA := calculateEmptyLayerSha(t)
// Retrieve the cache image from the ephemeral registry
h.Run(t, exec.Command("docker", "pull", cacheImageName))
logger := cmd.DefaultLogger
subject, err := cache.NewImageCacheFromName(cacheImageName, authn.DefaultKeychain, logger, cache.NewImageDeleter(cache.NewImageComparer(), logger, api.MustParse(platformAPI).LessThan("0.13")))
h.AssertNil(t, err)
//Assert the cache image was created with an empty layer
layer, err := subject.RetrieveLayer(testEmptyLayerSHA)
h.AssertNil(t, err)
defer layer.Close()
})
})
})
when("directory case", func() {
when("original cache was corrupted", func() {
var cacheDir string
it.Before(func() {
var err error
cacheDir, err = os.MkdirTemp("", "cache")
h.AssertNil(t, err)
h.AssertNil(t, os.Chmod(cacheDir, 0777)) // Override umask
cacheFixtureDir := filepath.Join("testdata", "exporter", "cache-dir")
h.AssertNil(t, fsutil.Copy(cacheFixtureDir, cacheDir))
// We have to pre-create the tar files so that their digests do not change due to timestamps
// But, ':' in the filepath on Windows is not allowed
h.AssertNil(t, os.Rename(
filepath.Join(cacheDir, "committed", "sha256_258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar"),
filepath.Join(cacheDir, "committed", "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar"),
))
})
it.After(func() {
_ = os.RemoveAll(cacheDir)
})
it("overwrites the original layer", func() {
exportFlags := []string{
"-cache-dir", "/cache",
"-log-level", "debug",
}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
"--volume", fmt.Sprintf("%s:/cache", cacheDir),
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Skipping reuse for layer corrupted_buildpack:corrupted-layer: expected layer contents to have SHA 'sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59'; found 'sha256:9e0b77ed599eafdab8611f7eeefef084077f91f02f1da0a3870c7ff20a08bee8'")
h.AssertStringContains(t, output, "Saving "+exportedImageName)
h.Run(t, exec.Command("docker", "pull", exportedImageName))
defer h.Run(t, exec.Command("docker", "image", "rm", exportedImageName))
// Verify the app has the correct sha for the layer
inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportedImageName)
h.AssertNil(t, err)
var lmd files.LayersMetadata
lmdJSON := inspect.Config.Labels["io.buildpacks.lifecycle.metadata"]
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd))
h.AssertEq(t, lmd.Buildpacks[2].Layers["corrupted-layer"].SHA, "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59")
// Verify the cache has correct contents now
foundDiffID, err := func() (string, error) {
layerPath := filepath.Join(cacheDir, "committed", "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar")
layerRC, err := os.Open(layerPath)
if err != nil {
return "", err
}
defer func() {
_ = layerRC.Close()
}()
hasher := sha256.New()
if _, err = io.Copy(hasher, layerRC); err != nil {
return "", errors.Wrap(err, "hashing layer")
}
foundDiffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size())))
return foundDiffID, nil
}()
h.AssertNil(t, err)
h.AssertEq(t, foundDiffID, "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59")
})
})
})
})
when("using extensions", func() {
it.Before(func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
})
it("app is created from the extended run image", func() {
exportFlags := []string{
"-analyzed", "/layers/run-image-extended-analyzed.toml",
"-extended", "/layers/some-extended-dir",
"-log-level", "debug",
"-run", "/cnb/run.toml",
}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
// get run image SHA & top layer
ref, imageAuth, err := auth.ReferenceForRepoName(authn.DefaultKeychain, exportTest.targetRegistry.fixtures.ReadOnlyRunImage)
h.AssertNil(t, err)
remoteImage, err := remote.Image(ref, remote.WithAuth(imageAuth))
h.AssertNil(t, err)
layers, err := remoteImage.Layers()
h.AssertNil(t, err)
runImageFixtureTopLayerSHA, err := layers[len(layers)-1].DiffID()
h.AssertNil(t, err)
runImageFixtureSHA, err := remoteImage.Digest()
h.AssertNil(t, err)
experimentalMode := "warn"
if api.MustParse(platformAPI).AtLeast("0.13") {
experimentalMode = "error"
}
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode,
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
t.Log("bases the exported image on the extended run image")
ref, imageAuth, err = auth.ReferenceForRepoName(authn.DefaultKeychain, exportedImageName)
h.AssertNil(t, err)
remoteImage, err = remote.Image(ref, remote.WithAuth(imageAuth))
h.AssertNil(t, err)
configFile, err := remoteImage.ConfigFile()
h.AssertNil(t, err)
h.AssertEq(t, configFile.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<sha>/blobs/sha256/<config>
t.Log("Adds extension layers")
layers, err = remoteImage.Layers()
h.AssertNil(t, err)
type testCase struct {
expectedDigest string
layerIndex int
}
testCases := []testCase{
{
expectedDigest: "sha256:08e7ad5ce17cf5e5f70affe68b341a93de86ee2ba074932c3a05b8770f66d772", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing, zeroing timestamps, and re-compressing
layerIndex: 1,
},
{
expectedDigest: "sha256:0e74ef444ea437147e3fa0ce2aad371df5380c26b96875ae07b9b67f44cdb2ee", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing, zeroing timestamps, and re-compressing
layerIndex: 2,
},
}
for _, tc := range testCases {
layer := layers[tc.layerIndex]
digest, err := layer.Digest()
h.AssertNil(t, err)
h.AssertEq(t, digest.String(), tc.expectedDigest)
}
t.Log("sets the layers metadata label according to the new spec")
var lmd files.LayersMetadata
lmdJSON := configFile.Config.Labels["io.buildpacks.lifecycle.metadata"]
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd))
h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml
h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml
h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA.String())
h.AssertEq(t, lmd.RunImage.Reference, fmt.Sprintf("%s@%s", exportTest.targetRegistry.fixtures.ReadOnlyRunImage, runImageFixtureSHA.String()))
})
})
})
when("layout case", func() {
var (
containerName string
err error
layoutDir string
tmpDir string
exportedImageName string
)
when("experimental mode is enabled", func() {
it.Before(func() {
// create the directory to save all OCI images on disk
tmpDir, err = os.MkdirTemp("", "layout")
h.AssertNil(t, err)
containerName = "test-container-" + h.RandString(10)
})
it.After(func() {
if h.DockerContainerExists(t, containerName) {
h.Run(t, exec.Command("docker", "rm", containerName))
}
// removes all images created
os.RemoveAll(tmpDir)
})
when("using a custom layout directory", func() {
it.Before(func() {
exportedImageName = "my-custom-layout-app"
layoutDir = filepath.Join(path.RootDir, "my-layout-dir")
})
it("app is created", func() {
var exportFlags []string
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
exportFlags = append(exportFlags, []string{"-layout", "-layout-dir", layoutDir, "-analyzed", "/layers/layout-analyzed.toml"}...)
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRunAndCopy(t, containerName, tmpDir, layoutDir, exportImage,
h.WithFlags(
"--env", "CNB_EXPERIMENTAL_MODE=warn",
"--env", "CNB_PLATFORM_API="+platformAPI,
),
h.WithArgs(exportArgs...))
h.AssertStringContains(t, output, "Saving /my-layout-dir/index.docker.io/library/my-custom-layout-app/latest")
// assert the image was saved on disk in OCI layout format
index := h.ReadIndexManifest(t, filepath.Join(tmpDir, layoutDir, "index.docker.io", "library", exportedImageName, "latest"))
h.AssertEq(t, len(index.Manifests), 1)
})
})
})
when("experimental mode is not enabled", func() {
it.Before(func() {
layoutDir = filepath.Join(path.RootDir, "layout-dir")
})
it("errors", func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
cmd := exec.Command(
"docker", "run", "--rm",
"--env", "CNB_PLATFORM_API="+platformAPI,
exportImage,
ctrPath(exporterPath),
"-layout",
"-layout-dir", layoutDir,
"some-image",
) // #nosec G204
output, err := cmd.CombinedOutput()
h.AssertNotNil(t, err)
expected := "experimental features are disabled by CNB_EXPERIMENTAL_MODE=error"
h.AssertStringContains(t, string(output), expected)
})
})
})
}
}
// assertDaemonImageDoesNotHaveHistory verifies that every history entry of the
// daemon image repoName has been normalized: the created timestamp equals
// imgutil.NormalizedDateTime and the created-by command is empty.
func assertDaemonImageDoesNotHaveHistory(t *testing.T, repoName string) {
	entries, err := h.DockerCli(t).ImageHistory(context.TODO(), repoName)
	h.AssertNil(t, err)
	for _, entry := range entries {
		h.AssertEq(t, entry.Created, imgutil.NormalizedDateTime.Unix())
		h.AssertEq(t, entry.CreatedBy, "")
	}
}
// assertDaemonImageHasHistory verifies that the daemon image repoName carries
// exactly the expected created-by entries, in order, and that each entry's
// created timestamp has been normalized to imgutil.NormalizedDateTime.
func assertDaemonImageHasHistory(t *testing.T, repoName string, expectedHistory []string) {
	entries, err := h.DockerCli(t).ImageHistory(context.TODO(), repoName)
	h.AssertNil(t, err)
	h.AssertEq(t, len(entries), len(expectedHistory))
	for i, entry := range entries {
		h.AssertEq(t, entry.Created, imgutil.NormalizedDateTime.Unix())
		h.AssertEq(t, entry.CreatedBy, expectedHistory[i])
	}
}
// calculateEmptyLayerSha returns the "sha256:"-prefixed digest of an empty
// tar file, i.e. the diff ID an empty layer would have.
//
// The staging directory is registered with t.Cleanup so it is removed when
// the test completes (the original implementation leaked the temp dir).
func calculateEmptyLayerSha(t *testing.T) string {
	tmpDir, err := os.MkdirTemp("", "")
	h.AssertNil(t, err)
	t.Cleanup(func() {
		_ = os.RemoveAll(tmpDir) // best-effort removal; nothing to assert during teardown
	})
	testLayerEmptyPath := filepath.Join(tmpDir, "empty.tar")
	h.AssertNil(t, os.WriteFile(testLayerEmptyPath, []byte{}, 0600))
	return "sha256:" + h.ComputeSHA256ForFile(t, testLayerEmptyPath)
}

View File

@ -1,288 +0,0 @@
//go:build acceptance
package acceptance
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"testing"
"github.com/buildpacks/imgutil/layout/sparse"
"github.com/google/go-containerregistry/pkg/authn"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/buildpacks/lifecycle/api"
"github.com/buildpacks/lifecycle/auth"
"github.com/buildpacks/lifecycle/cmd"
"github.com/buildpacks/lifecycle/platform/files"
h "github.com/buildpacks/lifecycle/testhelpers"
)
// Package-level state shared by the extender acceptance tests; populated once
// in TestExtender before the spec suites run.
var (
	extendImage          string               // ref of the test image built for the extender phase
	extendRegAuthConfig  string               // auth config taken from the target registry
	extendRegNetwork     string               // docker network taken from the target registry
	extenderPath         string               // path of the extender binary inside the test container
	extendDaemonFixtures *daemonImageFixtures // daemon-side image fixtures
	extendRegFixtures    *regImageFixtures    // registry-side image fixtures
	extendTest           *PhaseTest           // shared phase-test harness
)

const (
	// Log message emitted by kaniko;
	// if we provide cache directory as an option, kaniko looks there for the base image as a tarball;
	// however the base image is in OCI layout format, so we fail to initialize the base image;
	// we manage to provide the base image because we override image.RetrieveRemoteImage,
	// but the log message could be confusing to end users, hence we check that it is not printed.
	msgErrRetrievingImageFromCache = "Error while retrieving image from cache"
)
// TestExtender builds the extender test image, wires the shared package-level
// fixtures, and runs the extender acceptance suite once per supported
// Platform API version (versions below 0.10 are skipped).
func TestExtender(t *testing.T) {
	testImageDockerContext := filepath.Join("testdata", "extender")
	extendTest = NewPhaseTest(t, "extender", testImageDockerContext)
	extendTest.Start(t)
	defer extendTest.Stop(t)

	// Expose the harness state through the package-level variables the specs read.
	extendImage = extendTest.testImageRef
	extenderPath = extendTest.containerBinaryPath
	extendRegAuthConfig = extendTest.targetRegistry.authConfig
	extendRegNetwork = extendTest.targetRegistry.network
	extendDaemonFixtures = extendTest.targetDaemon.fixtures
	extendRegFixtures = extendTest.targetRegistry.fixtures

	for _, platformAPI := range api.Platform.Supported {
		if platformAPI.LessThan("0.10") {
			continue // Platform API < 0.10 is not exercised by this suite
		}
		spec.Run(t, "acceptance-extender/"+platformAPI.String(), testExtenderFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{}))
	}
}
// testExtenderFunc returns the spec suite for a single Platform API version.
// It exercises the extender in "kaniko" mode for both the build image and the
// run image, and checks that cached layers are reused on the second run.
func testExtenderFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
	return func(t *testing.T, when spec.G, it spec.S) {
		// Directory of generated Dockerfiles; 0.13+ uses the variant with build contexts.
		var generatedDir = "/layers/generated"

		it.Before(func() {
			h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.10"), "")
			if api.MustParse(platformAPI).AtLeast("0.13") {
				generatedDir = "/layers/generated-with-contexts"
			}
		})

		when("kaniko case", func() {
			var extendedDir, kanikoDir, analyzedPath string

			it.Before(func() {
				var err error
				extendedDir, err = os.MkdirTemp("", "lifecycle-acceptance")
				h.AssertNil(t, err)
				kanikoDir, err = os.MkdirTemp("", "lifecycle-acceptance")
				h.AssertNil(t, err)

				// push base image to test registry
				h.Run(t, exec.Command("docker", "tag", extendImage, extendTest.RegRepoName(extendImage)))
				h.AssertNil(t, h.PushImage(h.DockerCli(t), extendTest.RegRepoName(extendImage), extendTest.targetRegistry.registry.EncodedLabeledAuth()))

				// mimic what the restorer would have done in the previous phase:

				// warm kaniko cache
				// get remote image
				os.Setenv("DOCKER_CONFIG", extendTest.targetRegistry.dockerConfigDir)
				ref, auth, err := auth.ReferenceForRepoName(authn.DefaultKeychain, extendTest.RegRepoName(extendImage))
				h.AssertNil(t, err)
				remoteImage, err := remote.Image(ref, remote.WithAuth(auth))
				h.AssertNil(t, err)
				baseImageHash, err := remoteImage.Digest()
				h.AssertNil(t, err)
				baseImageDigest := baseImageHash.String()
				baseCacheDir := filepath.Join(kanikoDir, "cache", "base")
				h.AssertNil(t, os.MkdirAll(baseCacheDir, 0755))
				// write sparse image (manifest and config only, no layer blobs)
				layoutImage, err := sparse.NewImage(filepath.Join(baseCacheDir, baseImageDigest), remoteImage)
				h.AssertNil(t, err)
				h.AssertNil(t, layoutImage.Save())

				// write image reference in analyzed.toml; both build and run image
				// point at the digest we just cached, with run-image extension enabled
				analyzedMD := files.Analyzed{
					BuildImage: &files.ImageIdentifier{
						Reference: fmt.Sprintf("%s@%s", extendTest.RegRepoName(extendImage), baseImageDigest),
					},
					RunImage: &files.RunImage{
						Reference: fmt.Sprintf("%s@%s", extendTest.RegRepoName(extendImage), baseImageDigest),
						Extend:    true,
					},
				}
				analyzedPath = h.TempFile(t, "", "analyzed.toml")
				h.AssertNil(t, files.Handler.WriteAnalyzed(analyzedPath, &analyzedMD, cmd.DefaultLogger))
			})

			it.After(func() {
				_ = os.RemoveAll(kanikoDir)
				_ = os.RemoveAll(extendedDir)
			})

			when("extending the build image", func() {
				it("succeeds", func() {
					extendArgs := []string{
						ctrPath(extenderPath),
						"-analyzed", "/layers/analyzed.toml",
						"-generated", generatedDir,
						"-log-level", "debug",
						"-gid", "1000",
						"-uid", "1234",
					}
					extendFlags := []string{
						"--env", "CNB_PLATFORM_API=" + platformAPI,
						"--volume", fmt.Sprintf("%s:/layers/analyzed.toml", analyzedPath),
						"--volume", fmt.Sprintf("%s:/kaniko", kanikoDir),
					}

					t.Log("first build extends the build image by running Dockerfile commands")
					firstOutput := h.DockerRunWithCombinedOutput(t,
						extendImage,
						h.WithFlags(extendFlags...),
						h.WithArgs(extendArgs...),
					)
					h.AssertStringDoesNotContain(t, firstOutput, msgErrRetrievingImageFromCache)
					h.AssertStringContains(t, firstOutput, "ca-certificates")
					h.AssertStringContains(t, firstOutput, "Hello Extensions buildpack\ncurl") // output by buildpack, shows that curl was installed on the build image
					t.Log("sets environment variables from the extended build image in the build context")
					h.AssertStringContains(t, firstOutput, "CNB_STACK_ID for buildpack: stack-id-from-ext-tree")
					h.AssertStringContains(t, firstOutput, "HOME for buildpack: /home/cnb")
					t.Log("cleans the kaniko directory")
					fis, err := os.ReadDir(kanikoDir)
					h.AssertNil(t, err)
					h.AssertEq(t, len(fis), 1) // 1: /kaniko/cache

					t.Log("second build extends the build image by pulling from the cache directory")
					secondOutput := h.DockerRunWithCombinedOutput(t,
						extendImage,
						h.WithFlags(extendFlags...),
						h.WithArgs(extendArgs...),
					)
					h.AssertStringDoesNotContain(t, secondOutput, msgErrRetrievingImageFromCache)
					h.AssertStringDoesNotContain(t, secondOutput, "ca-certificates")                                                           // shows that first cache layer was used
					h.AssertStringDoesNotContain(t, secondOutput, "No cached layer found for cmd RUN apt-get update && apt-get install -y tree") // shows that second cache layer was used
					h.AssertStringContains(t, secondOutput, "Hello Extensions buildpack\ncurl")                                                 // output by buildpack, shows that curl is still installed in the unpacked cached layer
				})
			})

			when("extending the run image", func() {
				it.Before(func() {
					h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not support run image extension")
				})

				it("succeeds", func() {
					extendArgs := []string{
						ctrPath(extenderPath),
						"-analyzed", "/layers/analyzed.toml",
						"-extended", "/layers/extended",
						"-generated", generatedDir,
						"-kind", "run",
						"-log-level", "debug",
						"-gid", "1000",
						"-uid", "1234",
					}
					extendFlags := []string{
						"--env", "CNB_PLATFORM_API=" + platformAPI,
						"--volume", fmt.Sprintf("%s:/layers/analyzed.toml", analyzedPath),
						"--volume", fmt.Sprintf("%s:/layers/extended", extendedDir),
						"--volume", fmt.Sprintf("%s:/kaniko", kanikoDir),
					}

					t.Log("first build extends the run image by running Dockerfile commands")
					firstOutput := h.DockerRunWithCombinedOutput(t,
						extendImage,
						h.WithFlags(extendFlags...),
						h.WithArgs(extendArgs...),
					)
					h.AssertStringDoesNotContain(t, firstOutput, msgErrRetrievingImageFromCache)
					h.AssertStringContains(t, firstOutput, "ca-certificates")
					h.AssertStringContains(t, firstOutput, "No cached layer found for cmd RUN apt-get update && apt-get install -y tree")
					t.Log("does not run the build phase")
					h.AssertStringDoesNotContain(t, firstOutput, "Hello Extensions buildpack\ncurl")
					t.Log("outputs extended image layers to the extended directory")
					images, err := os.ReadDir(filepath.Join(extendedDir, "run"))
					h.AssertNil(t, err)
					h.AssertEq(t, len(images), 1) // sha256:<extended image digest>
					assertExpectedImage(t, filepath.Join(extendedDir, "run", images[0].Name()), platformAPI)
					t.Log("cleans the kaniko directory")
					caches, err := os.ReadDir(kanikoDir)
					h.AssertNil(t, err)
					h.AssertEq(t, len(caches), 1) // 1: /kaniko/cache

					t.Log("second build extends the build image by pulling from the cache directory")
					secondOutput := h.DockerRunWithCombinedOutput(t,
						extendImage,
						h.WithFlags(extendFlags...),
						h.WithArgs(extendArgs...),
					)
					h.AssertStringDoesNotContain(t, secondOutput, msgErrRetrievingImageFromCache)
					h.AssertStringDoesNotContain(t, secondOutput, "ca-certificates")                                                           // shows that first cache layer was used
					h.AssertStringDoesNotContain(t, secondOutput, "No cached layer found for cmd RUN apt-get update && apt-get install -y tree") // shows that second cache layer was used
					t.Log("does not run the build phase")
					h.AssertStringDoesNotContain(t, secondOutput, "Hello Extensions buildpack\ncurl")
					t.Log("outputs extended image layers to the extended directory")
					images, err = os.ReadDir(filepath.Join(extendedDir, "run"))
					h.AssertNil(t, err)
					h.AssertEq(t, len(images), 1) // sha256:<first extended image digest>
					assertExpectedImage(t, filepath.Join(extendedDir, "run", images[0].Name()), platformAPI)
					t.Log("cleans the kaniko directory")
					caches, err = os.ReadDir(kanikoDir)
					h.AssertNil(t, err)
					h.AssertEq(t, len(caches), 1) // 1: /kaniko/cache
				})
			})
		})
	}
}
// assertExpectedImage checks the extended run image written to imagePath:
// it must be labeled non-rebasable, have one history entry per layer diff ID,
// and carry the layers contributed by the curl and tree extensions
// (Platform API 0.13+ additionally records a COPY layer per extension).
func assertExpectedImage(t *testing.T, imagePath, platformAPI string) {
	image, err := readOCI(imagePath)
	h.AssertNil(t, err)
	configFile, err := image.ConfigFile()
	h.AssertNil(t, err)
	h.AssertEq(t, configFile.Config.Labels["io.buildpacks.rebasable"], "false")
	layers, err := image.Layers()
	h.AssertNil(t, err)
	history := configFile.History
	h.AssertEq(t, len(history), len(configFile.RootFS.DiffIDs))

	// Expected layer count and extension-created history entries depend on API version.
	var wantLayers int
	var wantCreatedBy []string
	if api.MustParse(platformAPI).AtLeast("0.13") {
		wantLayers = 7 // base (3), curl (2), tree (2)
		wantCreatedBy = []string{
			"Layer: 'RUN apt-get update && apt-get install -y curl', Created by extension: curl",
			"Layer: 'COPY run-file /', Created by extension: curl",
			"Layer: 'RUN apt-get update && apt-get install -y tree', Created by extension: tree",
			"Layer: 'COPY shared-file /shared-run', Created by extension: tree",
		}
	} else {
		wantLayers = 5 // base (3), curl (1), tree (1)
		wantCreatedBy = []string{
			"Layer: 'RUN apt-get update && apt-get install -y curl', Created by extension: curl",
			"Layer: 'RUN apt-get update && apt-get install -y tree', Created by extension: tree",
		}
	}
	h.AssertEq(t, len(layers), wantLayers)
	for i, want := range wantCreatedBy {
		h.AssertEq(t, history[3+i].CreatedBy, want) // entries 0-2 belong to the base image
	}
}
// readOCI loads a v1.Image from the OCI layout directory at fromPath.
// The directory's base name is expected to be the image digest
// (e.g. "sha256:<hex>"), which selects the image within the layout.
func readOCI(fromPath string) (v1.Image, error) {
	ociLayout, err := layout.FromPath(fromPath)
	if err != nil {
		return nil, fmt.Errorf("getting layout from path: %w", err)
	}
	digest := filepath.Base(fromPath)
	hash, err := v1.NewHash(digest)
	if err != nil {
		return nil, fmt.Errorf("getting hash from reference '%s': %w", fromPath, err)
	}
	img, err := ociLayout.Image(hash)
	if err != nil {
		return nil, fmt.Errorf("getting image from hash '%s': %w", hash.String(), err)
	}
	return img, nil
}

View File

@ -1,9 +1,11 @@
package acceptance package acceptance
import ( import (
"context"
"fmt" "fmt"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"strings" "strings"
"testing" "testing"
@ -14,224 +16,330 @@ import (
) )
var ( var (
launchImage string launchImage = "lifecycle/acceptance/launcher"
launcherPath string launchDockerContext string
launchTest *PhaseTest launcherBinaryDir string
) )
func TestLauncher(t *testing.T) { func TestLauncher(t *testing.T) {
testImageDockerContext := filepath.Join("testdata", "launcher") info, err := h.DockerCli(t).Info(context.TODO())
launchTest = NewPhaseTest(t, "launcher", testImageDockerContext, withoutDaemonFixtures, withoutRegistry) h.AssertNil(t, err)
daemonOS = info.OSType
containerBinaryDir := filepath.Join("testdata", "launcher", "linux", "container", "cnb", "lifecycle") launchDockerContext = filepath.Join("testdata", "launcher")
withCustomContainerBinaryDir := func(_ *testing.T, phaseTest *PhaseTest) { if daemonOS == "windows" {
phaseTest.containerBinaryDir = containerBinaryDir launcherBinaryDir = filepath.Join("testdata", "launcher", "windows", "container", "cnb", "lifecycle")
} else {
launcherBinaryDir = filepath.Join("testdata", "launcher", "linux", "container", "cnb", "lifecycle")
} }
launchTest.Start(t, withCustomContainerBinaryDir)
defer launchTest.Stop(t)
launchImage = launchTest.testImageRef h.MakeAndCopyLauncher(t, daemonOS, launcherBinaryDir)
launcherPath = launchTest.containerBinaryPath
h.DockerBuild(t, launchImage, launchDockerContext, h.WithFlags("-f", filepath.Join(launchDockerContext, dockerfileName)))
defer h.DockerImageRemove(t, launchImage)
spec.Run(t, "acceptance", testLauncher, spec.Parallel(), spec.Report(report.Terminal{})) spec.Run(t, "acceptance", testLauncher, spec.Parallel(), spec.Report(report.Terminal{}))
} }
func testLauncher(t *testing.T, when spec.G, it spec.S) { func testLauncher(t *testing.T, when spec.G, it spec.S) {
when("exec.d", func() { when("Buildpack API >= 0.5", func() {
it("executes the binaries and modifies env before running profiles", func() { when("exec.d", func() {
cmd := exec.Command("docker", "run", "--rm", //nolint it("executes the binaries and modifies env before running profiles", func() {
"--env=CNB_PLATFORM_API=0.7", cmd := exec.Command("docker", "run", "--rm",
"--entrypoint=exec.d-checker"+exe, "--env=VAR_FROM_EXEC_D=orig-val",
"--env=VAR_FROM_EXEC_D=orig-val", launchImage, "exec.d-checker")
launchImage)
helper := "helper" + exe helper := "helper" + exe
execDHelper := ctrPath("/layers", execDBpDir, "some_layer/exec.d", helper) execDHelper := filepath.Join(rootDir, "layers", execDBpDir, "some_layer", "exec.d", helper)
execDCheckerHelper := ctrPath("/layers", execDBpDir, "some_layer/exec.d/exec.d-checker", helper) execDCheckerHelper := filepath.Join(rootDir, "layers", execDBpDir, "some_layer", "exec.d", "exec.d-checker", helper)
workDir := ctrPath("/workspace") workDir := filepath.Join(rootDir, "workspace")
expected := fmt.Sprintf("%s was executed\n", execDHelper) expected := fmt.Sprintf("%s was executed\n", execDHelper)
expected += fmt.Sprintf("Exec.d Working Dir: %s\n", workDir) expected += fmt.Sprintf("Exec.d Working Dir: %s\n", workDir)
expected += fmt.Sprintf("%s was executed\n", execDCheckerHelper) expected += fmt.Sprintf("%s was executed\n", execDCheckerHelper)
expected += fmt.Sprintf("Exec.d Working Dir: %s\n", workDir) expected += fmt.Sprintf("Exec.d Working Dir: %s\n", workDir)
expected += "sourced bp profile\n" expected += "sourced bp profile\n"
expected += "sourced app profile\n" expected += "sourced app profile\n"
expected += "VAR_FROM_EXEC_D: orig-val:val-from-exec.d:val-from-exec.d-for-process-type-exec.d-checker" expected += "VAR_FROM_EXEC_D: orig-val:val-from-exec.d:val-from-exec.d-for-process-type-exec.d-checker"
assertOutput(t, cmd, expected) assertOutput(t, cmd, expected)
})
}) })
}) })
when("entrypoint is a process", func() { when("Platform API >= 0.4", func() {
it("launches that process", func() { when("entrypoint is a process", func() {
cmd := exec.Command("docker", "run", "--rm", //nolint
"--entrypoint=web",
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
launchImage)
assertOutput(t, cmd, "Executing web process-type")
})
when("process contains a period", func() {
it("launches that process", func() { it("launches that process", func() {
cmd := exec.Command("docker", "run", "--rm", cmd := exec.Command("docker", "run", "--rm",
"--entrypoint=process.with.period"+exe, "--entrypoint=web",
"--env=CNB_PLATFORM_API="+latestPlatformAPI, "--env=CNB_PLATFORM_API="+latestPlatformAPI,
launchImage) launchImage)
assertOutput(t, cmd, "Executing process.with.period process-type") assertOutput(t, cmd, "Executing web process-type")
})
it("appends any args to the process args", func() {
cmd := exec.Command("docker", "run", "--rm",
"--entrypoint=web",
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
launchImage, "with user provided args")
if runtime.GOOS == "windows" {
assertOutput(t, cmd, `Executing web process-type "with user provided args"`)
} else {
assertOutput(t, cmd, "Executing web process-type with user provided args")
}
}) })
}) })
it("appends any args to the process args", func() { when("entrypoint is a not a process", func() {
cmd := exec.Command( //nolint it("builds a process from the arguments", func() {
"docker", "run", "--rm", cmd := exec.Command("docker", "run", "--rm",
"--entrypoint=web", "--entrypoint=launcher",
"--env=CNB_PLATFORM_API="+latestPlatformAPI, "--env=CNB_PLATFORM_API="+latestPlatformAPI,
launchImage, "with user provided args", launchImage, "--", "env")
) if runtime.GOOS == "windows" {
assertOutput(t, cmd, "Executing web process-type with user provided args") cmd = exec.Command("docker", "run", "--rm",
`--entrypoint=launcher`,
"--env=CNB_PLATFORM_API=0.4",
launchImage, "--", "cmd", "/c", "set",
)
}
assertOutput(t, cmd,
"SOME_VAR=some-bp-val",
"OTHER_VAR=other-bp-val",
)
})
})
when("CNB_PROCESS_TYPE is set", func() {
it("should warn", func() {
cmd := exec.Command("docker", "run", "--rm",
"--env=CNB_PROCESS_TYPE=direct-process",
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
"--env=CNB_NO_COLOR=true",
launchImage,
)
out, err := cmd.CombinedOutput()
h.AssertNotNil(t, err)
h.AssertStringContains(t, string(out), "Warning: CNB_PROCESS_TYPE is not supported in Platform API "+latestPlatformAPI)
h.AssertStringContains(t, string(out), `Warning: Run with ENTRYPOINT 'direct-process' to invoke the 'direct-process' process type`)
h.AssertStringContains(t, string(out), "ERROR: failed to launch: determine start command: when there is no default process a command is required")
})
}) })
}) })
when("entrypoint is a not a process", func() { when("Platform API < 0.4", func() {
it("builds a process from the arguments", func() { when("there is no CMD provided", func() {
cmd := exec.Command( //nolint when("CNB_PROCESS_TYPE is NOT set", func() {
"docker", "run", "--rm", it("web is the default process-type", func() {
"--entrypoint=launcher", cmd := exec.Command("docker", "run", "--rm", launchImage)
"--env=CNB_PLATFORM_API="+latestPlatformAPI, assertOutput(t, cmd, "Executing web process-type")
launchImage, "--", })
"env", })
)
assertOutput(t, cmd, when("CNB_PROCESS_TYPE is set", func() {
"SOME_VAR=some-bp-val", it("should run the specified CNB_PROCESS_TYPE", func() {
"OTHER_VAR=other-bp-val", cmd := exec.Command("docker", "run", "--rm", "--env", "CNB_PROCESS_TYPE=direct-process", launchImage)
) if runtime.GOOS == "windows" {
assertOutput(t, cmd, "Usage: ping")
} else {
assertOutput(t, cmd, "Executing direct-process process-type")
}
})
})
}) })
})
when("CNB_PROCESS_TYPE is set", func() { when("process-type provided in CMD", func() {
it("should warn", func() { it("launches that process-type", func() {
cmd := exec.Command("docker", "run", "--rm", launchImage, "direct-process")
expected := "Executing direct-process process-type"
if runtime.GOOS == "windows" {
expected = "Usage: ping"
}
assertOutput(t, cmd, expected)
})
it("sets env vars from process specific directories", func() {
cmd := exec.Command("docker", "run", "--rm", launchImage, "worker")
expected := "worker-process-val"
assertOutput(t, cmd, expected)
})
})
when("process is direct=false", func() {
when("the process type has no args", func() {
it("runs command as script", func() {
h.SkipIf(t, runtime.GOOS == "windows", "scripts are unsupported on windows")
cmd := exec.Command("docker", "run", "--rm",
"--env", "VAR1=val1",
"--env", "VAR2=val with space",
launchImage, "indirect-process-with-script",
)
assertOutput(t, cmd, "'val1' 'val with space'")
})
})
when("the process type has args", func() {
when("buildpack API 0.4", func() {
// buildpack API is determined by looking up the API of the process buildpack in metadata.toml
it("command and args become shell-parsed tokens in a script", func() {
var val2 string
if runtime.GOOS == "windows" {
val2 = `"val with space"` //windows values with spaces must contain quotes
} else {
val2 = "val with space"
}
cmd := exec.Command("docker", "run", "--rm",
"--env", "VAR1=val1",
"--env", "VAR2="+val2,
launchImage, "indirect-process-with-args",
) // #nosec G204
assertOutput(t, cmd, "'val1' 'val with space'")
})
})
when("buildpack API < 0.4", func() {
// buildpack API is determined by looking up the API of the process buildpack in metadata.toml
it("args become arguments to bash", func() {
h.SkipIf(t, runtime.GOOS == "windows", "scripts are unsupported on windows")
cmd := exec.Command("docker", "run", "--rm",
launchImage, "legacy-indirect-process-with-args",
)
assertOutput(t, cmd, "'arg' 'arg with spaces'")
})
it("script must be explicitly written to accept bash args", func() {
h.SkipIf(t, runtime.GOOS == "windows", "scripts are unsupported on windows")
cmd := exec.Command("docker", "run", "--rm",
launchImage, "legacy-indirect-process-with-incorrect-args",
)
output, err := cmd.CombinedOutput()
h.AssertNotNil(t, err)
h.AssertStringContains(t, string(output), "printf: usage: printf [-v var] format [arguments]")
})
})
})
it("sources scripts from process specific directories", func() {
cmd := exec.Command("docker", "run", "--rm", launchImage, "profile-checker")
expected := "sourced bp profile\nsourced bp profile-checker profile\nsourced app profile\nval-from-profile"
assertOutput(t, cmd, expected)
})
})
it("respects CNB_APP_DIR and CNB_LAYERS_DIR environment variables", func() {
cmd := exec.Command("docker", "run", "--rm", cmd := exec.Command("docker", "run", "--rm",
"--env=CNB_PROCESS_TYPE=direct-process", "--env", "CNB_APP_DIR=/other-app",
"--env=CNB_PLATFORM_API="+latestPlatformAPI, "--env", "CNB_LAYERS_DIR=/other-layers",
"--env=CNB_NO_COLOR=true", launchImage)
launchImage, assertOutput(t, cmd, "sourced other app profile\nExecuting other-layers web process-type")
)
out, err := cmd.CombinedOutput()
h.AssertNotNil(t, err)
h.AssertStringContains(t, string(out), "Warning: CNB_PROCESS_TYPE is not supported in Platform API "+latestPlatformAPI)
h.AssertStringContains(t, string(out), `Warning: Run with ENTRYPOINT 'direct-process' to invoke the 'direct-process' process type`)
h.AssertStringContains(t, string(out), "ERROR: failed to launch: determine start command: when there is no default process a command is required")
})
})
when("provided CMD is not a process-type", func() {
it("sources profiles and executes the command in a shell", func() {
cmd := exec.Command( //nolint
"docker", "run", "--rm",
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
launchImage,
"echo", "something",
)
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsomething")
}) })
it("sets env vars from layers", func() { when("provided CMD is not a process-type", func() {
cmd := exec.Command( //nolint it("sources profiles and executes the command in a shell", func() {
"docker", "run", "--rm", cmd := exec.Command("docker", "run", "--rm", launchImage, "echo", "something")
"--env=CNB_PLATFORM_API="+latestPlatformAPI, assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsomething")
launchImage, })
"echo", "$SOME_VAR", "$OTHER_VAR", "$WORKER_VAR",
) it("sets env vars from layers", func() {
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-bp-val other-bp-val worker-no-process-val") cmd := exec.Command("docker", "run", "--rm", launchImage, "echo", "$SOME_VAR", "$OTHER_VAR", "$WORKER_VAR")
if runtime.GOOS == "windows" {
cmd = exec.Command("docker", "run", "--rm", launchImage, "echo", "%SOME_VAR%", "%OTHER_VAR%", "%WORKER_VAR%")
}
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-bp-val other-bp-val worker-no-process-val")
})
it("passes through env vars from user, excluding excluded vars", func() {
args := []string{"echo", "$SOME_USER_VAR, $CNB_APP_DIR, $OTHER_VAR"}
if runtime.GOOS == "windows" {
args = []string{"echo", "%SOME_USER_VAR%, %CNB_APP_DIR%, %OTHER_VAR%"}
}
cmd := exec.Command("docker",
append(
[]string{
"run", "--rm",
"--env", "CNB_APP_DIR=/workspace",
"--env", "SOME_USER_VAR=some-user-val",
"--env", "OTHER_VAR=other-user-val",
launchImage,
},
args...)...,
) // #nosec G204
if runtime.GOOS == "windows" {
// windows values with spaces will contain quotes
// empty values on windows preserve variable names instead of interpolating to empty strings
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\n\"some-user-val, %CNB_APP_DIR%, other-user-val**other-bp-val\"")
} else {
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-user-val, , other-user-val**other-bp-val")
}
})
it("adds buildpack bin dirs to the path", func() {
cmd := exec.Command("docker", "run", "--rm", launchImage, "bp-executable")
assertOutput(t, cmd, "bp executable")
})
}) })
it("passes through env vars from user, excluding excluded vars", func() { when("CMD provided starts with --", func() {
args := []string{"echo", "$SOME_USER_VAR, $CNB_APP_DIR, $OTHER_VAR"} it("launches command directly", func() {
cmd := exec.Command("docker", if runtime.GOOS == "windows" {
append( cmd := exec.Command("docker", "run", "--rm", launchImage, "--", "ping", "/?")
[]string{ assertOutput(t, cmd, "Usage: ping")
"run", "--rm", } else {
"--env", "CNB_APP_DIR=" + ctrPath("/workspace"), cmd := exec.Command("docker", "run", "--rm", launchImage, "--", "echo", "something")
"--env=CNB_PLATFORM_API=" + latestPlatformAPI, assertOutput(t, cmd, "something")
}
})
it("sets env vars from layers", func() {
cmd := exec.Command("docker", "run", "--rm", launchImage, "--", "env")
if runtime.GOOS == "windows" {
cmd = exec.Command("docker", "run", "--rm", launchImage, "--", "cmd", "/c", "set")
}
assertOutput(t, cmd,
"SOME_VAR=some-bp-val",
"OTHER_VAR=other-bp-val",
)
})
it("passes through env vars from user, excluding excluded vars", func() {
cmd := exec.Command("docker", "run", "--rm",
"--env", "CNB_APP_DIR=/workspace",
"--env", "SOME_USER_VAR=some-user-val",
launchImage, "--",
"env",
)
if runtime.GOOS == "windows" {
cmd = exec.Command("docker", "run", "--rm",
"--env", "CNB_APP_DIR=/workspace",
"--env", "SOME_USER_VAR=some-user-val", "--env", "SOME_USER_VAR=some-user-val",
"--env", "OTHER_VAR=other-user-val", launchImage, "--",
launchImage, "cmd", "/c", "set",
}, )
args...)..., }
) // #nosec G204
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-user-val, , other-user-val**other-bp-val") output, err := cmd.CombinedOutput()
}) if err != nil {
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
}
expected := "SOME_USER_VAR=some-user-val"
if !strings.Contains(string(output), expected) {
t.Fatalf("failed to execute provided CMD:\n\t got: %s\n\t want: %s", output, expected)
}
it("adds buildpack bin dirs to the path", func() { if strings.Contains(string(output), "CNB_APP_DIR") {
cmd := exec.Command( //nolint t.Fatalf("env contained white listed env far CNB_APP_DIR:\n\t got: %s\n", output)
"docker", "run", "--rm", }
"--env=CNB_PLATFORM_API="+latestPlatformAPI, })
launchImage,
"bp-executable",
)
assertOutput(t, cmd, "bp executable")
})
})
when("CMD provided starts with --", func() { it("adds buildpack bin dirs to the path before looking up command", func() {
it("launches command directly", func() { cmd := exec.Command("docker", "run", "--rm", launchImage, "--", "bp-executable")
cmd := exec.Command( //nolint assertOutput(t, cmd, "bp executable")
"docker", "run", "--rm", })
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
launchImage, "--",
"echo", "something",
)
assertOutput(t, cmd, "something")
})
it("sets env vars from layers", func() {
cmd := exec.Command( //nolint
"docker", "run", "--rm",
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
launchImage, "--",
"env",
)
assertOutput(t, cmd,
"SOME_VAR=some-bp-val",
"OTHER_VAR=other-bp-val",
)
})
it("passes through env vars from user, excluding excluded vars", func() {
cmd := exec.Command( //nolint
"docker", "run", "--rm",
"--env", "CNB_APP_DIR=/workspace",
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "SOME_USER_VAR=some-user-val",
launchImage, "--",
"env",
)
output, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
}
expected := "SOME_USER_VAR=some-user-val"
if !strings.Contains(string(output), expected) {
t.Fatalf("failed to execute provided CMD:\n\t got: %s\n\t want: %s", output, expected)
}
if strings.Contains(string(output), "CNB_APP_DIR") {
t.Fatalf("env contained white listed env far CNB_APP_DIR:\n\t got: %s\n", output)
}
})
it("adds buildpack bin dirs to the path before looking up command", func() {
cmd := exec.Command( //nolint
"docker", "run", "--rm",
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
launchImage, "--",
"bp-executable",
)
assertOutput(t, cmd, "bp executable")
}) })
}) })
} }

View File

@ -1,522 +0,0 @@
package acceptance
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"strings"
"testing"
"time"
"github.com/docker/docker/api/types/image"
ih "github.com/buildpacks/imgutil/testhelpers"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/registry"
"github.com/buildpacks/lifecycle/auth"
"github.com/buildpacks/lifecycle/cmd"
"github.com/buildpacks/lifecycle/internal/encoding"
"github.com/buildpacks/lifecycle/platform"
"github.com/buildpacks/lifecycle/platform/files"
h "github.com/buildpacks/lifecycle/testhelpers"
)
// PhaseTest drives an acceptance test for a single lifecycle phase: it builds
// a test image containing the lifecycle binaries and manages the daemon- and
// registry-side fixtures the phase needs.
type PhaseTest struct {
	containerBinaryDir     string // The path to copy lifecycle binaries to before building the test image.
	containerBinaryPath    string // The path to invoke when running the test container.
	phaseName              string // The phase name, such as detect, analyze, restore, build, export, or create.
	testImageDockerContext string // The directory containing the Dockerfile for the test image.
	testImageRef           string // The test image to run.
	targetDaemon           *targetDaemon
	targetRegistry         *targetRegistry // The target registry to use. Remove by passing `withoutRegistry` to the constructor.
}

// targetDaemon describes the docker daemon the tests run against.
type targetDaemon struct {
	os       string // daemon OS type as reported by `docker info`
	arch     string // daemon architecture, normalized (x86_64 -> amd64, aarch64 -> arm64)
	fixtures *daemonImageFixtures
}

// daemonImageFixtures holds image references created in the daemon for a test run.
type daemonImageFixtures struct {
	AppImage   string
	CacheImage string
	RunImage   string
}

// targetRegistry bundles the test registry with the credentials and fixtures
// needed to interact with it.
type targetRegistry struct {
	authConfig      string // encoded auth config for the registry
	dockerConfigDir string // directory used as DOCKER_CONFIG when talking to the registry
	network         string // docker network value for the registry — NOTE(review): semantics set elsewhere; confirm against registry setup
	fixtures        *regImageFixtures
	registry        *ih.DockerRegistry
}

// regImageFixtures holds image references in the test registry; field names
// indicate the access permissions each fixture is intended to exercise.
type regImageFixtures struct {
	InaccessibleImage      string
	ReadOnlyAppImage       string
	ReadOnlyCacheImage     string
	ReadOnlyRunImage       string
	ReadWriteAppImage      string
	ReadWriteCacheImage    string
	ReadWriteOtherAppImage string
	SomeAppImage           string
	SomeCacheImage         string
}
// NewPhaseTest constructs a PhaseTest for phaseName with conventional defaults:
// lifecycle binaries staged under <context>/container/cnb/lifecycle, the binary
// invoked at /cnb/lifecycle/<phase>, and the image tagged
// lifecycle/acceptance/<phase>. Any phaseOp customizations are applied in order
// before the test is returned.
func NewPhaseTest(t *testing.T, phaseName, testImageDockerContext string, phaseOp ...func(*PhaseTest)) *PhaseTest {
	pt := &PhaseTest{
		containerBinaryDir:     filepath.Join(testImageDockerContext, "container", "cnb", "lifecycle"),
		containerBinaryPath:    "/cnb/lifecycle/" + phaseName,
		phaseName:              phaseName,
		targetDaemon:           newTargetDaemon(t),
		targetRegistry:         &targetRegistry{},
		testImageDockerContext: testImageDockerContext,
		testImageRef:           "lifecycle/acceptance/" + phaseName,
	}
	for _, customize := range phaseOp {
		customize(pt)
	}
	return pt
}
// newTargetDaemon queries the local Docker daemon for its OS and
// architecture, normalizing the architecture to Go's naming (amd64, arm64).
func newTargetDaemon(t *testing.T) *targetDaemon {
	info, err := h.DockerCli(t).Info(context.TODO())
	h.AssertNil(t, err)

	arch := info.Architecture
	switch arch {
	case "x86_64":
		arch = "amd64"
	case "aarch64":
		arch = "arm64"
	}

	return &targetDaemon{
		os:       info.OSType,
		arch:     arch,
		fixtures: nil,
	}
}
// RegRepoName qualifies repoName with the test registry's host and port.
func (p *PhaseTest) RegRepoName(repoName string) string {
	reg := p.targetRegistry.registry
	return reg.RepoName(repoName)
}
// Start prepares everything needed to run the phase under test:
//   - builds the daemon-side image fixtures
//   - starts the test registry (unless removed via withoutRegistry) and
//     copies its docker config into the image build context
//   - applies any caller-supplied setup ops
//   - compiles the lifecycle binaries for the target daemon's OS/arch, logs
//     their digest, writes fake SBOM files, and builds + smoke-runs the test
//     image
func (p *PhaseTest) Start(t *testing.T, phaseOp ...func(*testing.T, *PhaseTest)) {
	p.targetDaemon.createFixtures(t)

	if p.targetRegistry != nil {
		p.targetRegistry.start(t)
		// The container reads registry credentials from this directory at run time.
		containerDockerConfigDir := filepath.Join(p.testImageDockerContext, "container", "docker-config")
		h.AssertNil(t, os.RemoveAll(containerDockerConfigDir))
		h.AssertNil(t, os.MkdirAll(containerDockerConfigDir, 0755))
		h.RecursiveCopy(t, p.targetRegistry.dockerConfigDir, containerDockerConfigDir)
	}

	for _, op := range phaseOp {
		op(t, p)
	}

	h.MakeAndCopyLifecycle(t, p.targetDaemon.os, p.targetDaemon.arch, p.containerBinaryDir)

	// Log the digest of the freshly built lifecycle binary to aid debugging.
	hasher := sha256.New()
	f, err := os.Open(filepath.Join(p.containerBinaryDir, "lifecycle"+exe)) //#nosec G304
	h.AssertNil(t, err)
	_, err = io.Copy(hasher, f)
	h.AssertNil(t, err)
	h.AssertNil(t, f.Close()) // fix: the file handle was previously never closed
	t.Logf("Built lifecycle binary with digest: %s", hex.EncodeToString(hasher.Sum(nil)))

	copyFakeSboms(t)
	h.DockerBuild(
		t,
		p.testImageRef,
		p.testImageDockerContext,
		h.WithArgs("-f", filepath.Join(p.testImageDockerContext, dockerfileName)),
	)
	t.Logf("Using image %s with lifecycle version %s",
		p.testImageRef,
		h.DockerRun(
			t,
			p.testImageRef,
			h.WithFlags("--env", "CNB_PLATFORM_API="+latestPlatformAPI, "--entrypoint", ctrPath("/cnb/lifecycle/lifecycle"+exe)),
			h.WithArgs("-version"),
		))
}
// Stop tears down everything Start created: daemon fixtures, the test
// registry (plus the locally built copies of its images), and the test image.
func (p *PhaseTest) Stop(t *testing.T) {
	p.targetDaemon.removeFixtures(t)
	if p.targetRegistry != nil {
		p.targetRegistry.stop(t)
		// remove images that were built locally before being pushed to test registry
		cleanupDaemonFixtures(t, *p.targetRegistry.fixtures)
	}
	h.DockerImageRemove(t, p.testImageRef)
}
// createFixtures builds the daemon-side image fixtures (app, cache, and run
// images) once per test run; subsequent calls are no-ops.
func (d *targetDaemon) createFixtures(t *testing.T) {
	if d.fixtures != nil {
		return // already built, or explicitly disabled via withoutDaemonFixtures
	}

	appMeta := minifyMetadata(t, filepath.Join("testdata", "app_image_metadata.json"), files.LayersMetadata{})
	cacheMeta := minifyMetadata(t, filepath.Join("testdata", "cache_image_metadata.json"), platform.CacheMetadata{})

	// buildImage runs `docker build -t <tag> <extraArgs...> <context>`,
	// deduplicating the three near-identical invocations below.
	buildImage := func(tag, context string, extraArgs ...string) {
		args := append([]string{"build", "-t", tag}, extraArgs...)
		args = append(args, context)
		cmd := exec.Command("docker", args...) // #nosec G204
		h.Run(t, cmd)
	}

	var fixtures daemonImageFixtures

	fixtures.AppImage = "some-app-image-" + h.RandString(10)
	buildImage(fixtures.AppImage, filepath.Join("testdata", "app-image"),
		"--build-arg", "fromImage="+containerBaseImage,
		"--build-arg", "metadata="+appMeta,
	)

	fixtures.CacheImage = "some-cache-image-" + h.RandString(10)
	buildImage(fixtures.CacheImage, filepath.Join("testdata", "cache-image"),
		"--build-arg", "fromImage="+containerBaseImage,
		"--build-arg", "metadata="+cacheMeta,
	)

	fixtures.RunImage = "some-run-image-" + h.RandString(10)
	// NOTE(review): the run image is built from the cache-image context; this
	// matches the original code — confirm it is intentional.
	buildImage(fixtures.RunImage, filepath.Join("testdata", "cache-image"),
		"--build-arg", "fromImage="+containerBaseImage,
	)

	d.fixtures = &fixtures
}
// removeFixtures deletes the daemon image fixtures built by createFixtures.
// It is a no-op if fixtures were never created (fix: previously this
// dereferenced a nil pointer when Stop was called without Start).
func (d *targetDaemon) removeFixtures(t *testing.T) {
	if d.fixtures == nil {
		return
	}
	cleanupDaemonFixtures(t, *d.fixtures)
}
// start spins up an authenticated local test registry, points DOCKER_CONFIG
// at a temp credentials dir (a process-wide side effect, undone in stop),
// and pushes the registry image fixtures.
func (r *targetRegistry) start(t *testing.T) {
	var err error
	r.dockerConfigDir, err = os.MkdirTemp("", "test.docker.config.dir")
	h.AssertNil(t, err)

	// All phase tests share one in-memory registry handler; logging is discarded.
	sharedRegHandler := registry.New(registry.Logger(log.New(io.Discard, "", log.Lshortfile)))
	r.registry = ih.NewDockerRegistry(
		ih.WithAuth(r.dockerConfigDir),
		ih.WithSharedHandler(sharedRegHandler),
		ih.WithImagePrivileges(),
	)
	r.registry.Start(t)

	// if registry is listening on localhost, use host networking to allow containers to reach it
	r.network = "default"
	if r.registry.Host == "localhost" {
		r.network = "host"
	}

	// Save auth config
	os.Setenv("DOCKER_CONFIG", r.dockerConfigDir)
	r.authConfig, err = auth.BuildEnvVar(authn.DefaultKeychain, r.registry.RepoName("some-repo")) // repo name doesn't matter
	h.AssertNil(t, err)

	r.createFixtures(t)
}
// createFixtures builds and pushes the registry image fixtures, registering
// the access level (inaccessible / read-only / read-write / unrestricted)
// the test user has for each image.
func (r *targetRegistry) createFixtures(t *testing.T) {
	appMeta := minifyMetadata(t, filepath.Join("testdata", "app_image_metadata.json"), files.LayersMetadata{})
	cacheMeta := minifyMetadata(t, filepath.Join("testdata", "cache_image_metadata.json"), platform.CacheMetadata{})

	appImageContext := filepath.Join("testdata", "app-image")
	cacheImageContext := filepath.Join("testdata", "cache-image")

	// buildApp / buildCache push an image built from the respective context
	// with the standard base image and metadata, returning the registry ref.
	// These closures deduplicate seven near-identical call sites.
	buildApp := func(name string) string {
		return buildRegistryImage(t, name, appImageContext, r.registry,
			"--build-arg", "fromImage="+containerBaseImage,
			"--build-arg", "metadata="+appMeta,
		)
	}
	buildCache := func(name string) string {
		return buildRegistryImage(t, name, cacheImageContext, r.registry,
			"--build-arg", "fromImage="+containerBaseImage,
			"--build-arg", "metadata="+cacheMeta,
		)
	}

	var fixtures regImageFixtures

	// With Permissions
	fixtures.InaccessibleImage = r.registry.SetInaccessible("inaccessible-image")

	someReadOnlyAppName := "some-read-only-app-image-" + h.RandString(10)
	fixtures.ReadOnlyAppImage = buildApp(someReadOnlyAppName)
	r.registry.SetReadOnly(someReadOnlyAppName)

	someReadOnlyCacheImage := "some-read-only-cache-image-" + h.RandString(10)
	fixtures.ReadOnlyCacheImage = buildCache(someReadOnlyCacheImage)
	r.registry.SetReadOnly(someReadOnlyCacheImage)

	// The run image is built from the full base image without metadata and,
	// unlike the images above, takes its fixture value from SetReadOnly
	// (matching the original code).
	someRunImageName := "some-read-only-run-image-" + h.RandString(10)
	buildRegistryImage(t, someRunImageName, cacheImageContext, r.registry,
		"--build-arg", "fromImage="+containerBaseImageFull,
	)
	fixtures.ReadOnlyRunImage = r.registry.SetReadOnly(someRunImageName)

	readWriteAppName := "some-read-write-app-image-" + h.RandString(10)
	fixtures.ReadWriteAppImage = buildApp(readWriteAppName)
	r.registry.SetReadWrite(readWriteAppName)

	someReadWriteCacheName := "some-read-write-cache-image-" + h.RandString(10)
	fixtures.ReadWriteCacheImage = buildCache(someReadWriteCacheName)
	r.registry.SetReadWrite(someReadWriteCacheName)

	readWriteOtherAppName := "some-other-read-write-app-image-" + h.RandString(10)
	fixtures.ReadWriteOtherAppImage = buildApp(readWriteOtherAppName)
	r.registry.SetReadWrite(readWriteOtherAppName)

	// Without Permissions
	fixtures.SomeAppImage = buildApp("some-app-image-" + h.RandString(10))
	fixtures.SomeCacheImage = buildCache("some-cache-image-" + h.RandString(10))

	r.fixtures = &fixtures
}
// stop shuts down the test registry and undoes start's side effects
// (the DOCKER_CONFIG env var and the temp credentials directory).
func (r *targetRegistry) stop(t *testing.T) {
	r.registry.Stop(t)
	os.Unsetenv("DOCKER_CONFIG")
	os.RemoveAll(r.dockerConfigDir)
}
// buildRegistryImage builds an image from the given context, pushes it to
// the test registry, and returns the fully qualified registry reference.
func buildRegistryImage(t *testing.T, repoName, context string, registry *ih.DockerRegistry, buildArgs ...string) string {
	taggedRef := registry.RepoName(repoName)

	// Build locally, then push with the registry's encoded credentials.
	h.DockerBuild(t, taggedRef, context, h.WithArgs(buildArgs...))
	h.AssertNil(t, h.PushImage(h.DockerCli(t), taggedRef, registry.EncodedLabeledAuth()))

	return taggedRef
}
// cleanupDaemonFixtures removes every image named by the fields of the given
// fixtures struct, skipping empty values and "inaccessible" entries.
func cleanupDaemonFixtures(t *testing.T, fixtures interface{}) {
	fields := reflect.ValueOf(fixtures)
	for idx := 0; idx < fields.NumField(); idx++ {
		name := fmt.Sprintf("%v", fields.Field(idx).Interface())
		switch {
		case name == "":
			// unset fixture — nothing to remove
		case strings.Contains(name, "inaccessible"):
			// skip inaccessible placeholder entries
		default:
			h.DockerImageRemove(t, name)
		}
	}
}
// minifyMetadata reads the JSON file at path and round-trips it through the
// given struct value, returning compact JSON with whitespace stripped.
func minifyMetadata(t *testing.T, path string, metadataStruct interface{}) string {
	raw, err := os.ReadFile(path)
	h.AssertNil(t, err)

	// Unmarshal then re-marshal to drop insignificant whitespace.
	h.AssertNil(t, json.Unmarshal(raw, &metadataStruct))
	compact, err := json.Marshal(metadataStruct)
	h.AssertNil(t, err)

	return string(compact)
}
// withoutDaemonFixtures is a NewPhaseTest option that disables building
// daemon image fixtures (createFixtures sees a non-nil value and no-ops).
func withoutDaemonFixtures(phaseTest *PhaseTest) {
	phaseTest.targetDaemon.fixtures = &daemonImageFixtures{}
}
// withoutRegistry is a NewPhaseTest option that disables the test registry.
func withoutRegistry(phaseTest *PhaseTest) {
	phaseTest.targetRegistry = nil
}
func copyFakeSboms(t *testing.T) {
goos := runtime.GOOS
// Check Target Daemon != runtime.GOOS
if goos == "darwin" {
goos = "linux"
}
buildLifecycleDir, err := filepath.Abs(filepath.Join("..", "out", fmt.Sprintf("%s-%s", goos, runtime.GOARCH), "lifecycle"))
if err != nil {
t.Log("Fail to locate lifecycle directory")
}
extensions := SBOMExtensions()
components := SBOMComponents()
for _, component := range components {
for _, extension := range extensions {
if err := encoding.WriteJSON(filepath.Join(buildLifecycleDir, component+extension), "fake data"); err != nil {
t.Log("Fail to write:" + component + extension)
}
}
}
}
// SBOMExtensions returns the file extensions of the supported SBOM formats
// (CycloneDX, SPDX, and Syft), in that order.
func SBOMExtensions() []string {
	formats := []string{"cdx", "spdx", "syft"}
	exts := make([]string, 0, len(formats))
	for _, f := range formats {
		exts = append(exts, ".sbom."+f+".json")
	}
	return exts
}
// SBOMComponents returns the names of the binaries that carry SBOM files.
func SBOMComponents() []string {
	components := make([]string, 2)
	components[0] = "lifecycle"
	components[1] = "launcher"
	return components
}
// assertImageOSAndArch verifies that imageName was built for the target
// daemon's OS and architecture.
func assertImageOSAndArch(t *testing.T, imageName string, phaseTest *PhaseTest) { //nolint - these functions are in fact used, i promise
	imageInfo, err := h.DockerCli(t).ImageInspect(context.TODO(), imageName)
	h.AssertNil(t, err)

	daemon := phaseTest.targetDaemon
	h.AssertEq(t, imageInfo.Os, daemon.os)
	h.AssertEq(t, imageInfo.Architecture, daemon.arch)
}
// assertImageOSAndArchAndCreatedAt verifies imageName matches the target
// daemon's OS/arch and was created at expectedCreatedAt. If the image is not
// yet visible in the daemon, the inspect is retried once after a short delay.
func assertImageOSAndArchAndCreatedAt(t *testing.T, imageName string, phaseTest *PhaseTest, expectedCreatedAt time.Time) { //nolint
	inspect, err := h.DockerCli(t).ImageInspect(context.TODO(), imageName)
	if err != nil {
		// Dump the daemon's image list to help diagnose the failure.
		list, _ := h.DockerCli(t).ImageList(context.TODO(), image.ListOptions{})
		// fix: message previously referenced ImageInspectWithRaw, which is not
		// what is called; also route output through the test log, not stdout.
		t.Logf("Error encountered running ImageInspect. imageName: %s", imageName)
		t.Log(err)
		for _, value := range list {
			t.Logf("Image Name: %v", value)
		}
		if strings.Contains(err.Error(), "No such image") {
			t.Log("Image not found, retrying...")
			time.Sleep(1 * time.Second)
			inspect, err = h.DockerCli(t).ImageInspect(context.TODO(), imageName)
		}
	}
	h.AssertNil(t, err)
	h.AssertEq(t, inspect.Os, phaseTest.targetDaemon.os)
	h.AssertEq(t, inspect.Architecture, phaseTest.targetDaemon.arch)
	h.AssertEq(t, inspect.Created, expectedCreatedAt.Format(time.RFC3339))
}
// assertRunMetadata reads run metadata from path, asserting that the file is
// non-empty and parses cleanly.
func assertRunMetadata(t *testing.T, path string) *files.Run { //nolint
	raw, err := os.ReadFile(path)
	h.AssertNil(t, err)
	h.AssertEq(t, len(raw) > 0, true)

	runMD, err := files.Handler.ReadRun(path, cmd.DefaultLogger)
	h.AssertNil(t, err)

	return &runMD
}
// updateTOMLFixturesWithTestRegistry rewrites the *.toml.placeholder fixtures
// in the test image build context, substituting the test registry's read-only
// run image (or a fixed OCI-layout reference) for placeholder values, and
// writes each result without the ".placeholder" suffix. Placeholders that do
// not exist on disk are skipped.
func updateTOMLFixturesWithTestRegistry(t *testing.T, phaseTest *PhaseTest) { //nolint
	analyzedTOMLPlaceholders := []string{
		filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "analyzed.toml.placeholder"),
		filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "run-image-extended-analyzed.toml.placeholder"),
		filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "some-analyzed.toml.placeholder"),
		filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "some-extend-false-analyzed.toml.placeholder"),
		filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "some-extend-true-analyzed.toml.placeholder"),
		filepath.Join(phaseTest.testImageDockerContext, "container", "other_layers", "analyzed.toml.placeholder"),
	}
	runTOMLPlaceholders := []string{
		filepath.Join(phaseTest.testImageDockerContext, "container", "cnb", "run.toml.placeholder"),
	}
	layoutPlaceholders := []string{
		filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "layout-analyzed.toml.placeholder"),
	}

	// analyzed.toml placeholders: point the run image reference (and the image
	// field when it is the literal "REPLACE") at the test registry's run image.
	for _, pPath := range analyzedTOMLPlaceholders {
		if _, err := os.Stat(pPath); os.IsNotExist(err) {
			continue
		}
		analyzedMD := assertAnalyzedMetadata(t, pPath)
		if analyzedMD.RunImage != nil {
			analyzedMD.RunImage.Reference = phaseTest.targetRegistry.fixtures.ReadOnlyRunImage // don't override extend
			if analyzedMD.RunImage.Image == "REPLACE" {
				analyzedMD.RunImage.Image = phaseTest.targetRegistry.fixtures.ReadOnlyRunImage
			}
		}
		h.AssertNil(t, encoding.WriteTOML(strings.TrimSuffix(pPath, ".placeholder"), analyzedMD))
	}

	// run.toml placeholders: every listed image becomes the test run image.
	for _, pPath := range runTOMLPlaceholders {
		if _, err := os.Stat(pPath); os.IsNotExist(err) {
			continue
		}
		runMD := assertRunMetadata(t, pPath)
		for idx, image := range runMD.Images {
			image.Image = phaseTest.targetRegistry.fixtures.ReadOnlyRunImage
			runMD.Images[idx] = image
		}
		h.AssertNil(t, encoding.WriteTOML(strings.TrimSuffix(pPath, ".placeholder"), runMD))
	}

	// layout placeholders: replace the whole run image with a fixed OCI-layout
	// reference instead of a registry image.
	for _, pPath := range layoutPlaceholders {
		if _, err := os.Stat(pPath); os.IsNotExist(err) {
			continue
		}
		analyzedMD := assertAnalyzedMetadata(t, pPath)
		if analyzedMD.RunImage != nil {
			// Values from image acceptance/testdata/exporter/container/layout-repo in OCI layout format
			analyzedMD.RunImage = &files.RunImage{Reference: "/layout-repo/index.docker.io/library/busybox/latest@sha256:445c45cc89fdeb64b915b77f042e74ab580559b8d0d5ef6950be1c0265834c33"}
		}
		h.AssertNil(t, encoding.WriteTOML(strings.TrimSuffix(pPath, ".placeholder"), analyzedMD))
	}
}

View File

@ -1,58 +0,0 @@
//go:build acceptance
package acceptance
import (
"path/filepath"
"testing"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/buildpacks/lifecycle/api"
h "github.com/buildpacks/lifecycle/testhelpers"
)
// Package-level state shared by the rebaser acceptance specs, populated in
// TestRebaser before the specs run.
var (
	rebaserTest  *PhaseTest
	rebaserPath  string
	rebaserImage string
)
// TestRebaser builds the rebaser test image and runs the rebaser acceptance
// specs once per supported platform API version.
func TestRebaser(t *testing.T) {
	testImageDockerContextFolder := filepath.Join("testdata", "rebaser")
	rebaserTest = NewPhaseTest(t, "rebaser", testImageDockerContextFolder)
	rebaserTest.Start(t, updateTOMLFixturesWithTestRegistry)
	defer rebaserTest.Stop(t)

	rebaserImage = rebaserTest.testImageRef
	rebaserPath = rebaserTest.containerBinaryPath

	for _, platformAPI := range api.Platform.Supported {
		spec.Run(t, "acceptance-rebaser/"+platformAPI.String(), testRebaser(platformAPI.String()), spec.Sequential(), spec.Report(report.Terminal{}))
	}
}
// testRebaser returns the rebaser spec suite for a single platform API version.
func testRebaser(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
	return func(t *testing.T, when spec.G, it spec.S) {
		when("called with insecure registry flag", func() {
			it.Before(func() {
				// Insecure registry support requires Platform API >= 0.12.
				h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
			})
			it("should do an http request", func() {
				insecureRegistry := "host.docker.internal"
				rebaserOutputImageName := insecureRegistry + "/bar"
				_, _, err := h.DockerRunWithError(t,
					rebaserImage,
					h.WithFlags(
						"--env", "CNB_PLATFORM_API="+platformAPI,
						"--env", "CNB_INSECURE_REGISTRIES="+insecureRegistry,
					),
					h.WithArgs(ctrPath(rebaserPath), rebaserOutputImageName),
				)
				// The rebaser is expected to attempt plain HTTP against the
				// configured insecure registry and fail with that URL in the error.
				h.AssertStringContains(t, err.Error(), "http://host.docker.internal")
			})
		})
	}
}

View File

@ -1,338 +1,132 @@
//go:build acceptance // +build acceptance
package acceptance package acceptance
import ( import (
"io/ioutil"
"math/rand"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"testing" "testing"
"time"
"github.com/google/go-containerregistry/pkg/name"
"github.com/sclevine/spec" "github.com/sclevine/spec"
"github.com/sclevine/spec/report" "github.com/sclevine/spec/report"
"github.com/buildpacks/lifecycle/api"
"github.com/buildpacks/lifecycle/cmd"
"github.com/buildpacks/lifecycle/platform/files"
h "github.com/buildpacks/lifecycle/testhelpers" h "github.com/buildpacks/lifecycle/testhelpers"
) )
const emptyImageSHA = "03cbce912ef1a8a658f73c660ab9c539d67188622f00b15c4f15b89b884f0e10"
var ( var (
restoreImage string restoreDockerContext = filepath.Join("testdata", "restorer")
restoreRegAuthConfig string restorerBinaryDir = filepath.Join("testdata", "restorer", "container", "cnb", "lifecycle")
restoreRegNetwork string restorerImage = "lifecycle/acceptance/restorer"
restorerPath string
restoreDaemonFixtures *daemonImageFixtures
restoreRegFixtures *regImageFixtures
restoreTest *PhaseTest
) )
func TestRestorer(t *testing.T) { func TestRestorer(t *testing.T) {
testImageDockerContext := filepath.Join("testdata", "restorer") h.SkipIf(t, runtime.GOOS == "windows", "Restorer acceptance tests are not yet supported on Windows")
restoreTest = NewPhaseTest(t, "restorer", testImageDockerContext)
restoreTest.Start(t, updateTOMLFixturesWithTestRegistry)
defer restoreTest.Stop(t)
restoreImage = restoreTest.testImageRef rand.Seed(time.Now().UTC().UnixNano())
restorerPath = restoreTest.containerBinaryPath
restoreRegAuthConfig = restoreTest.targetRegistry.authConfig
restoreRegNetwork = restoreTest.targetRegistry.network
restoreDaemonFixtures = restoreTest.targetDaemon.fixtures
restoreRegFixtures = restoreTest.targetRegistry.fixtures
for _, platformAPI := range api.Platform.Supported { h.MakeAndCopyLifecycle(t, "linux", restorerBinaryDir)
spec.Run(t, "acceptance-restorer/"+platformAPI.String(), testRestorerFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{})) h.DockerBuild(t, restorerImage, restoreDockerContext)
} defer h.DockerImageRemove(t, restorerImage)
spec.Run(t, "acceptance-restorer", testRestorer, spec.Parallel(), spec.Report(report.Terminal{}))
} }
func testRestorerFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) { func testRestorer(t *testing.T, when spec.G, it spec.S) {
return func(t *testing.T, when spec.G, it spec.S) { when("called with arguments", func() {
var copyDir, containerName string it("errors", func() {
it.Before(func() { command := exec.Command("docker", "run", "--rm", restorerImage, "some-arg")
containerName = "test-container-" + h.RandString(10) output, err := command.CombinedOutput()
var err error h.AssertNotNil(t, err)
copyDir, err = os.MkdirTemp("", "test-docker-copy-") expected := "failed to parse arguments: received unexpected Args"
h.AssertStringContains(t, string(output), expected)
})
})
when("called without any cache flag", func() {
it("outputs it will not restore cache layer data", func() {
command := exec.Command("docker", "run", "--rm", "--env", "CNB_PLATFORM_API="+latestPlatformAPI, restorerImage)
output, err := command.CombinedOutput()
h.AssertNil(t, err) h.AssertNil(t, err)
expected := "Not restoring cached layer data, no cache flag specified"
h.AssertStringContains(t, string(output), expected)
}) })
})
it.After(func() { when("using cache-dir", func() {
if h.DockerContainerExists(t, containerName) { when("there is cache present from a previous build", func() {
h.Run(t, exec.Command("docker", "rm", containerName)) var copyDir, containerName string
}
_ = os.RemoveAll(copyDir)
})
when("called with arguments", func() { it.Before(func() {
it("errors", func() { containerName = "test-container-" + h.RandString(10)
command := exec.Command("docker", "run", "--rm", "--env", "CNB_PLATFORM_API="+platformAPI, restoreImage, "some-arg") var err error
output, err := command.CombinedOutput() copyDir, err = ioutil.TempDir("", "test-docker-copy-")
h.AssertNotNil(t, err)
expected := "failed to parse arguments: received unexpected Args"
h.AssertStringContains(t, string(output), expected)
})
})
when("called without any cache flag", func() {
it("outputs it will not restore cache layer data", func() {
command := exec.Command("docker", "run", "--rm", "--env", "CNB_PLATFORM_API="+platformAPI, restoreImage)
output, err := command.CombinedOutput()
h.AssertNil(t, err) h.AssertNil(t, err)
expected := "No cached data will be used, no cache specified"
h.AssertStringContains(t, string(output), expected)
}) })
})
when("analyzed.toml exists with app metadata", func() { it.After(func() {
it("restores app metadata", func() { if h.DockerContainerExists(t, containerName) {
h.Run(t, exec.Command("docker", "rm", containerName))
}
os.RemoveAll(copyDir)
})
it("restores cached layer data", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
restorerImage,
"/layers",
h.WithFlags("--env", "CNB_PLATFORM_API="+latestPlatformAPI),
h.WithArgs("-cache-dir", "/cache"),
)
// check restored cache file is present
cachedFile := filepath.Join(copyDir, "layers", "cacher_buildpack", "cached-layer", "data")
h.AssertPathExists(t, cachedFile)
// check restored cache file content is correct
contents, err := ioutil.ReadFile(cachedFile)
h.AssertNil(t, err)
h.AssertEq(t, string(contents), "cached-data\n")
})
it("does not restore cache=true layers not in cache", func() {
output := h.DockerRunAndCopy(t, output := h.DockerRunAndCopy(t,
containerName, containerName,
copyDir, copyDir,
ctrPath("/layers"), restorerImage,
restoreImage, "/layers",
h.WithFlags(append( h.WithFlags("--env", "CNB_PLATFORM_API="+latestPlatformAPI),
dockerSocketMount, h.WithArgs("-cache-dir", "/cache"),
"--env", "CNB_PLATFORM_API="+platformAPI,
)...),
h.WithArgs(),
) )
h.AssertStringContains(t, output, "Restoring metadata for \"some-buildpack-id:launch-layer\"") // check uncached layer is not restored
uncachedFile := filepath.Join(copyDir, "layers", "cacher_buildpack", "uncached-layer")
h.AssertPathDoesNotExist(t, uncachedFile)
// check output to confirm why this layer was not restored from cache
h.AssertStringContains(t, string(output), "Removing \"cacher_buildpack:layer-not-in-cache\", not in cache")
}) })
when("restores app metadata using an insecure registry", func() { it("does not restore unused buildpack layer data", func() {
it.Before(func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
})
it("does an http request ", func() {
insecureRegistry := "host.docker.internal"
_, _, err := h.DockerRunWithError(t,
restoreImage,
h.WithFlags(append(
dockerSocketMount,
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_INSECURE_REGISTRIES="+insecureRegistry,
"--env", "CNB_BUILD_IMAGE="+insecureRegistry+"/bar",
)...),
)
h.AssertStringContains(t, err.Error(), "http://host.docker.internal")
})
})
})
when("using cache-dir", func() {
when("there is cache present from a previous build", func() {
it("restores cached layer data", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
"/layers",
restoreImage,
h.WithFlags("--env", "CNB_PLATFORM_API="+platformAPI),
h.WithArgs("-cache-dir", "/cache"),
)
// check restored cache file is present
cachedFile := filepath.Join(copyDir, "layers", "cacher_buildpack", "cached-layer", "data")
h.AssertPathExists(t, cachedFile)
// check restored cache file content is correct
contents, err := os.ReadFile(cachedFile)
h.AssertNil(t, err)
h.AssertEq(t, string(contents), "cached-data\n")
})
it("does not restore cache=true layers not in cache", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
"/layers",
restoreImage,
h.WithFlags("--env", "CNB_PLATFORM_API="+platformAPI),
h.WithArgs("-cache-dir", "/cache"),
)
// check uncached layer is not restored
uncachedFile := filepath.Join(copyDir, "layers", "cacher_buildpack", "uncached-layer")
h.AssertPathDoesNotExist(t, uncachedFile)
})
it("does not restore layer data from unused buildpacks", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
"/layers",
restoreImage,
h.WithFlags("--env", "CNB_PLATFORM_API="+platformAPI),
h.WithArgs("-cache-dir", "/cache"),
)
// check no content is not present from unused buildpack
unusedBpLayer := filepath.Join(copyDir, "layers", "unused_buildpack")
h.AssertPathDoesNotExist(t, unusedBpLayer)
})
it("does not restore corrupted layer data", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
"/layers",
restoreImage,
h.WithFlags("--env", "CNB_PLATFORM_API="+platformAPI),
h.WithArgs("-cache-dir", "/cache"),
)
// check corrupted layer is not restored
corruptedFile := filepath.Join(copyDir, "layers", "corrupted_buildpack", "corrupted-layer")
h.AssertPathDoesNotExist(t, corruptedFile)
})
})
})
when("restoring builder image metadata for extensions", func() {
it("accepts -build-image and saves the metadata to /kaniko/cache", func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.10"), "Platform API < 0.10 does not restore builder image metadata")
h.DockerRunAndCopy(t, h.DockerRunAndCopy(t,
containerName, containerName,
copyDir, copyDir,
"/", restorerImage,
restoreImage, "/layers",
h.WithFlags( h.WithFlags("--env", "CNB_PLATFORM_API="+latestPlatformAPI),
"--env", "CNB_PLATFORM_API="+platformAPI, h.WithArgs("-cache-dir", "/cache"),
"--env", "DOCKER_CONFIG=/docker-config",
"--network", restoreRegNetwork,
),
h.WithArgs("-build-image", restoreRegFixtures.SomeCacheImage), // some-cache-image simulates a builder image in a registry
) )
t.Log("records builder image digest in analyzed.toml")
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "analyzed.toml"), cmd.DefaultLogger) // check no content is not present from unused buildpack
h.AssertNil(t, err) unusedBpLayer := filepath.Join(copyDir, "layers", "unused_buildpack")
h.AssertStringContains(t, analyzedMD.BuildImage.Reference, restoreRegFixtures.SomeCacheImage+"@sha256:") h.AssertPathDoesNotExist(t, unusedBpLayer)
t.Log("writes builder manifest and config to the kaniko cache")
ref, err := name.ParseReference(analyzedMD.BuildImage.Reference)
h.AssertNil(t, err)
fis, err := os.ReadDir(filepath.Join(copyDir, "kaniko", "cache", "base"))
h.AssertNil(t, err)
h.AssertEq(t, len(fis), 1)
h.AssertPathExists(t, filepath.Join(copyDir, "kaniko", "cache", "base", ref.Identifier(), "oci-layout"))
}) })
}) })
})
when("restoring run image metadata for extensions", func() {
it("saves metadata to /kaniko/cache", func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not restore run image metadata")
h.DockerRunAndCopy(t,
containerName,
copyDir,
"/",
restoreImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "DOCKER_CONFIG=/docker-config",
"--network", restoreRegNetwork,
),
h.WithArgs(
"-analyzed", "/layers/some-extend-true-analyzed.toml",
"-log-level", "debug",
),
)
t.Log("updates run image reference in analyzed.toml to include digest and target data")
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "some-extend-true-analyzed.toml"), cmd.DefaultLogger)
h.AssertNil(t, err)
h.AssertStringContains(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:")
h.AssertEq(t, analyzedMD.RunImage.Image, restoreRegFixtures.ReadOnlyRunImage)
h.AssertEq(t, analyzedMD.RunImage.TargetMetadata.OS, "linux")
t.Log("does not return the digest for an empty image")
h.AssertStringDoesNotContain(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:"+emptyImageSHA)
t.Log("writes run image manifest and config to the kaniko cache")
ref, err := name.ParseReference(analyzedMD.RunImage.Reference)
h.AssertNil(t, err)
fis, err := os.ReadDir(filepath.Join(copyDir, "kaniko", "cache", "base"))
h.AssertNil(t, err)
h.AssertEq(t, len(fis), 1)
h.AssertPathExists(t, filepath.Join(copyDir, "kaniko", "cache", "base", ref.Identifier(), "oci-layout"))
})
})
when("target data", func() {
it("updates run image reference in analyzed.toml to include digest and target data on newer platforms", func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.10"), "")
h.DockerRunAndCopy(t,
containerName,
copyDir,
"/",
restoreImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "DOCKER_CONFIG=/docker-config",
"--network", restoreRegNetwork,
),
h.WithArgs(
"-analyzed", "/layers/some-extend-false-analyzed.toml",
"-log-level", "debug",
),
)
if api.MustParse(platformAPI).AtLeast("0.12") {
t.Log("updates run image reference in analyzed.toml to include digest and target data")
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "some-extend-false-analyzed.toml"), cmd.DefaultLogger)
h.AssertNil(t, err)
h.AssertStringContains(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:")
h.AssertEq(t, analyzedMD.RunImage.Image, restoreRegFixtures.ReadOnlyRunImage)
h.AssertEq(t, analyzedMD.RunImage.TargetMetadata.OS, "linux")
t.Log("does not return the digest for an empty image")
h.AssertStringDoesNotContain(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:"+emptyImageSHA)
t.Log("does not write run image manifest and config to the kaniko cache")
fis, err := os.ReadDir(filepath.Join(copyDir, "kaniko"))
h.AssertNil(t, err)
h.AssertEq(t, len(fis), 1) // .gitkeep
} else {
t.Log("updates run image reference in analyzed.toml to include digest only")
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "some-extend-false-analyzed.toml"), cmd.DefaultLogger)
h.AssertNil(t, err)
h.AssertStringContains(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:")
h.AssertEq(t, analyzedMD.RunImage.Image, restoreRegFixtures.ReadOnlyRunImage)
h.AssertNil(t, analyzedMD.RunImage.TargetMetadata)
t.Log("does not return the digest for an empty image")
h.AssertStringDoesNotContain(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:"+emptyImageSHA)
}
})
when("-daemon", func() {
it("updates run image reference in analyzed.toml to include digest and target data on newer platforms", func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not support -daemon flag")
h.DockerRunAndCopy(t,
containerName,
copyDir,
"/",
restoreImage,
h.WithFlags(append(
dockerSocketMount,
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "DOCKER_CONFIG=/docker-config",
"--network", restoreRegNetwork,
)...),
h.WithArgs(
"-analyzed", "/layers/some-extend-false-analyzed.toml",
"-daemon",
"-log-level", "debug",
),
)
t.Log("updates run image reference in analyzed.toml to include digest and target data")
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "some-extend-false-analyzed.toml"), cmd.DefaultLogger)
h.AssertNil(t, err)
h.AssertStringDoesNotContain(t, analyzedMD.RunImage.Reference, "@sha256:") // daemon image ID
h.AssertEq(t, analyzedMD.RunImage.Image, restoreRegFixtures.ReadOnlyRunImage)
h.AssertEq(t, analyzedMD.RunImage.TargetMetadata.OS, "linux")
t.Log("does not write run image manifest and config to the kaniko cache")
fis, err := os.ReadDir(filepath.Join(copyDir, "kaniko"))
h.AssertNil(t, err)
h.AssertEq(t, len(fis), 1) // .gitkeep
})
})
})
}
} }

View File

@ -10,9 +10,6 @@ ENV CNB_USER_ID=2222
ENV CNB_GROUP_ID=3333 ENV CNB_GROUP_ID=3333
ARG cnb_platform_api
ENV CNB_PLATFORM_API=${cnb_platform_api}
RUN chown -R $CNB_USER_ID:$CNB_GROUP_ID /some-dir RUN chown -R $CNB_USER_ID:$CNB_GROUP_ID /some-dir
RUN chown -R $CNB_USER_ID:$CNB_GROUP_ID /layers RUN chown -R $CNB_USER_ID:$CNB_GROUP_ID /layers

View File

@ -1,4 +1,4 @@
[[group]] [[group]]
id = "some-buildpack-id" id = "some-buildpack-id"
version = "some-buildpack-version" version = "some-buildpack-version"
api = "0.10" api = "0.2"

View File

@ -1,4 +1,4 @@
[[group]] [[group]]
id = "some-other-buildpack-id" id = "some-other-buildpack-id"
version = "some-other-buildpack-version" version = "some-other-buildpack-version"
api = "0.10" api = "0.3"

View File

@ -1,4 +1,4 @@
[[group]] [[group]]
id = "another-buildpack-id" id = "another-buildpack-id"
version = "another-buildpack-version" version = "another-buildpack-version"
api = "0.10" api = "0.2"

View File

@ -1,6 +1,4 @@
ARG fromImage FROM ubuntu:bionic
FROM $fromImage
ARG metadata ARG metadata

View File

@ -1,6 +1,4 @@
ARG fromImage FROM ubuntu:bionic
FROM $fromImage
ARG metadata ARG metadata

View File

@ -1,5 +0,0 @@
[[order]]
[[order.group]]
id = "simple_buildpack"
version = "simple_buildpack_version"

View File

@ -1,2 +0,0 @@
[run-image]
image = "some-run-image"

View File

@ -1,5 +0,0 @@
[[images]]
image = "some-run-image-from-run-toml"
[[images]]
image = "some-other-run-image"

View File

@ -1,4 +0,0 @@
[[group]]
id = "some-other-buildpack-id"
version = "some-other-buildpack-version"
api = "0.1"

View File

@ -1 +0,0 @@
{"architecture":"amd64","config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["sh"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"65aaed1d1f89cd3cd5aac9137c4786831e99a845ad823496c6008a22a725c780","container_config":{"Hostname":"65aaed1d1f89","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"sh\"]"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"created":"2022-11-18T01:19:29.442257773Z","docker_version":"20.10.12","history":[{"created":"2022-11-18T01:19:29.321465538Z","created_by":"/bin/sh -c #(nop) ADD file:36d9f497f679d56737ac1379d93f7b6a2e4c814e38e868a5a8e719c4b226ef6e in / "},{"created":"2022-11-18T01:19:29.442257773Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:40cf597a9181e86497f4121c604f9f0ab208950a98ca21db883f26b0a548a2eb"]}}

View File

@ -1,16 +0,0 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 1457,
"digest": "sha256:9d5226e6ce3fb6aee2822206a5ef85f38c303d2b37bfc894b419fca2c0501269"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 772999,
"digest": "sha256:405fecb6a2fa4f29683f977e7e3b852bf6f8975a2aba647d234d2371894943da"
}
]
}

View File

@ -1,14 +0,0 @@
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 527,
"digest": "sha256:f75f3d1a317fc82c793d567de94fc8df2bece37acd5f2bd364a0d91a0d1f3dab",
"platform": {
"architecture": "amd64",
"os": "linux"
}
}
]
}

View File

@ -1,3 +0,0 @@
{
"imageLayoutVersion": "1.0.0"
}

View File

@ -1,17 +0,0 @@
FROM ubuntu:bionic
ARG cnb_uid=1234
ARG cnb_gid=1000
COPY ./container/ /
ENTRYPOINT ["/cnb/lifecycle/builder"]
RUN groupadd cnb --gid ${cnb_gid} && \
useradd --uid ${cnb_uid} --gid ${cnb_gid} -m -s /bin/bash cnb
RUN chown -R "${cnb_uid}:${cnb_gid}" "/layers"
WORKDIR /layers
USER ${cnb_uid}:${cnb_gid}

View File

@ -1,35 +0,0 @@
#!/usr/bin/env bash
set -eo pipefail
echo "---> Hello World buildpack"
# INPUT ARGUMENTS
platform_dir=$2
env_dir=${platform_dir}/env
layers_dir=$1
plan_path=$3
# CNB_APP_DIR
echo "CNB_APP_DIR: ${PWD}"
# PLATFORM DIR
echo "PLATFORM_DIR: ${platform_dir}"
# LAYERS
echo "LAYERS_DIR: ${layers_dir}"
# PLAN
echo "PLAN_PATH: ${plan_path}"
echo "plan contents:"
cat ${plan_path}
echo
# Set default start command
cat > "${layers_dir}/launch.toml" << EOL
[[processes]]
type = "hello"
command = ["echo world"]
args = ["arg1"]
EOL
echo "---> Done"

View File

@ -1,7 +0,0 @@
# Buildpack API version
api = "0.10"
# Buildpack ID and metadata
[buildpack]
id = "hello_world"
version = "0.0.1"

View File

@ -1,27 +0,0 @@
#!/usr/bin/env bash
set -eo pipefail
echo "---> Hello World 2 buildpack"
# INPUT ARGUMENTS
platform_dir=$2
env_dir=${platform_dir}/env
layers_dir=$1
plan_path=$3
# CNB_APP_DIR
echo "CNB_APP_DIR: ${PWD}"
# PLATFORM DIR
echo "PLATFORM_DIR: ${platform_dir}"
# LAYERS
echo "LAYERS_DIR: ${layers_dir}"
# PLAN
echo "PLAN_PATH: ${plan_path}"
echo "plan contents:"
cat ${plan_path}
echo
echo "---> Done"

View File

@ -1,7 +0,0 @@
# Buildpack API version
api = "0.10"
# Buildpack ID and metadata
[buildpack]
id = "hello_world_2"
version = "0.0.2"

View File

@ -1,33 +0,0 @@
#!/usr/bin/env bash
set -eo pipefail
echo "---> Hello World 3 buildpack"
# INPUT ARGUMENTS
platform_dir=$2
env_dir=${platform_dir}/env
layers_dir=$1
plan_path=$3
# CNB_APP_DIR
echo "CNB_APP_DIR: ${PWD}"
# PLATFORM DIR
echo "PLATFORM_DIR: ${platform_dir}"
# LAYERS
echo "LAYERS_DIR: ${layers_dir}"
# PLAN
echo "PLAN_PATH: ${plan_path}"
echo "plan contents:"
cat ${plan_path}
echo
echo "CNB_TARGET_ARCH:" `printenv CNB_TARGET_ARCH`
echo "CNB_TARGET_ARCH_VARIANT:" `printenv CNB_TARGET_ARCH_VARIANT`
echo "CNB_TARGET_OS:" `printenv CNB_TARGET_OS`
echo "CNB_TARGET_DISTRO_NAME:" `printenv CNB_TARGET_DISTRO_NAME`
echo "CNB_TARGET_DISTRO_VERSION:" `printenv CNB_TARGET_DISTRO_VERSION`
echo "---> Done"

View File

@ -1,7 +0,0 @@
# Buildpack API version
api = "0.10"
# Buildpack ID and metadata
[buildpack]
id = "hello_world_3"
version = "0.0.3"

View File

@ -1,4 +0,0 @@
[[group]]
api = "0.10"
id = "hello_world"
version = "0.0.1"

View File

@ -1,4 +0,0 @@
[[group]]
api = "0.10"
id = "hello_world_2"
version = "0.0.2"

View File

@ -1,9 +0,0 @@
[[group]]
api = "0.10"
id = "hello_world"
version = "0.0.1"
[[group-extensions]]
api = "0.10"
id = "hello_world"
version = "0.0.1"

View File

@ -1,5 +0,0 @@
[[group]]
api = "0.2wrongapiblabla"
id = "hello_world"
version = "0.0.1"

View File

@ -1,3 +0,0 @@
I am a group.toml file which not correct as context and syntax.
For more info please look at https://github.com/buildpacks/spec/blob/main/platform.md#grouptoml-toml

View File

@ -1,10 +0,0 @@
[[entries]]
[[entries.providers]]
id = "hello_world"
version = "0.0.1"
[[entries.requires]]
name = "different_plan_from_env.toml_reqires_subset_content"
[entries.requires.metadata]
# arbitrary data describing the required dependency

View File

@ -1,10 +0,0 @@
[[entries]]
[[entries.providers]]
id = "hello_world_2"
version = "0.0.2"
[[entries.requires]]
name = "different_plan_from_env.toml_reqires_subset_content"
[entries.requires.metadata]
# arbitrary data describing the required dependency

View File

@ -1,10 +0,0 @@
[[entries]]
[[entries.providers]]
id = "hello_world_3"
version = "0.0.3"
[[entries.requires]]
name = "03_plan.toml_requires_subset_content_idk"
[entries.requires.metadata]
# arbitrary data describing the required dependency

View File

@ -1,11 +0,0 @@
[[entries]]
[[entries.providers]]
id = "hello_world"
version = "0.0.1"
[[entries.requires]]
name = "different_plan_from_env.toml_reqires_subset_content"
[entries.requires.metadata]
# arbitrary data describing the required dependency

View File

@ -1,3 +0,0 @@
I am a plan.toml file which not correct as context and syntax.
For more info please look at https://github.com/buildpacks/spec/blob/main/platform.md#plantoml-toml

View File

@ -1,13 +0,0 @@
#!/usr/bin/env bash
set -eo pipefail
echo "---> Hello World buildpack"
# INPUT ARGUMENTS
platform_dir=$2
env_dir=${platform_dir}/env
layers_dir=$1
plan_path=$3
# acceptance test
echo "CNB_BUILDPACK_DIR: ${CNB_BUILDPACK_DIR}"

View File

@ -1,7 +0,0 @@
# Buildpack API version
api = "0.10"
# Buildpack ID and metadata
[buildpack]
id = "hello_world"
version = "0.0.1"

View File

@ -1,9 +0,0 @@
[run-image.target]
id = "my id"
os = "linux"
arch = "amd64"
arch-variant = "some-variant"
[run-image.target.distro]
name = "ubuntu"
version = "some-cute-version"

View File

@ -1,4 +0,0 @@
[[group]]
api = "0.10"
id = "hello_world_3"
version = "0.0.3"

View File

@ -1,6 +0,0 @@
[[entries]]
[[entries.providers]]
id = "hello_world_3"
version = "0.0.3"

View File

@ -1,6 +0,0 @@
[run-image]
[target]
id = "software"
os = "linux"
arch = "amd64"

View File

@ -1,4 +0,0 @@
[[group]]
api = "0.10"
id = "hello_world_2"
version = "0.0.2"

View File

@ -1,6 +0,0 @@
[[entries]]
[[entries.providers]]
id = "hello_world_2"
version = "0.0.2"

View File

@ -1,20 +0,0 @@
FROM ubuntu:bionic
ARG cnb_uid=1234
ARG cnb_gid=1000
ENV CNB_USER_ID=${cnb_uid}
ENV CNB_GROUP_ID=${cnb_gid}
COPY ./container/ /
RUN groupadd cnb --gid ${cnb_gid} && \
useradd --uid ${cnb_uid} --gid ${cnb_gid} -m -s /bin/bash cnb
# chown the directories so the tests do not have to run as root
RUN chown -R "${cnb_uid}:${cnb_gid}" "/layers"
RUN chown -R "${cnb_uid}:${cnb_gid}" "/layout-repo"
WORKDIR /layers
USER ${cnb_uid}:${cnb_gid}

View File

@ -1,106 +0,0 @@
#!/usr/bin/env bash
set -eo pipefail
echo "---> Hello World buildpack"
# INPUT ARGUMENTS
platform_dir=$2
env_dir=${platform_dir}/env
layers_dir=$1
plan_path=$3
if test -d /layers/sbom; then
echo "/layers/sbom should not exist during buildpack builds"
exit 1
fi
# LAYERS
echo " layers_dir: ${layers_dir}"
# launch=true layer
mkdir -p ${layers_dir}/some-layer/env
echo -n "some-val" > ${layers_dir}/some-layer/env/SOME_VAR
if test -f ${layers_dir}/some-layer.sbom.cdx.json; then
echo "${layers_dir}/some-layer.sbom.cdx.json restored with content: $(cat ${layers_dir}/some-layer.sbom.cdx.json)"
fi
echo -n "{\"key\": \"some-launch-true-bom-content\"}" > ${layers_dir}/some-layer.sbom.cdx.json
if test -f ${layers_dir}/some-layer.toml; then
# mimic not downloading new content
echo "nop"
else
# mimic downloading new content
sleep 1
fi
cat <<EOF > ${layers_dir}/some-layer.toml
[types]
launch = true
EOF
# cache=true layer
mkdir -p ${layers_dir}/some-cache-layer
if test -f ${layers_dir}/some-cache-layer.sbom.cdx.json; then
echo "${layers_dir}/some-cache-layer.sbom.cdx.json restored with content: $(cat ${layers_dir}/some-cache-layer.sbom.cdx.json)"
fi
echo -n "{\"key\": \"some-cache-true-bom-content\"}" > ${layers_dir}/some-cache-layer.sbom.cdx.json
cat <<EOF > ${layers_dir}/some-cache-layer.toml
[types]
cache = true
EOF
# launch=true cache=true layer
mkdir -p ${layers_dir}/some-launch-cache-layer
if test -f ${layers_dir}/some-launch-cache-layer.sbom.cdx.json; then
echo "${layers_dir}/some-launch-cache-layer.sbom.cdx.json restored with content: $(cat ${layers_dir}/some-launch-cache-layer.sbom.cdx.json)"
fi
echo -n "{\"key\": \"some-launch-true-cache-true-bom-content\"}" > ${layers_dir}/some-launch-cache-layer.sbom.cdx.json
cat <<EOF > ${layers_dir}/some-launch-cache-layer.toml
[types]
launch = true
cache = true
EOF
# build=true layer
mkdir -p ${layers_dir}/some-build-layer
if test -f ${layers_dir}/some-build-layer.sbom.cdx.json; then
echo "${layers_dir}/some-build-layer.sbom.cdx.json" should never be restored
exit 1
fi
echo -n "{\"key\": \"some-bom-content\"}" > ${layers_dir}/some-build-layer.sbom.cdx.json
cat <<EOF > ${layers_dir}/some-build-layer.toml
[types]
build = true
EOF
# launch bom
if test -f ${layers_dir}/launch.sbom.cdx.json; then
echo "${layers_dir}/launch.sbom.cdx.json should never be restored"
exit 1
fi
echo -n "{\"key\": \"some-bom-content\"}" > ${layers_dir}/launch.sbom.cdx.json
# build bom
if test -f ${layers_dir}/build.sbom.cdx.json; then
echo "${layers_dir}/build.sbom.cdx.json should never be restored"
exit 1
fi
echo -n "{\"key\": \"some-bom-content\"}" > ${layers_dir}/build.sbom.cdx.json
# store.toml
if test -f ${layers_dir}/store.toml; then
echo "${layers_dir}/store.toml restored with content: $(cat ${layers_dir}/store.toml)"
fi
printf "[metadata]\n\"some-key\" = \"some-value\"" > ${layers_dir}/store.toml

View File

@ -1,7 +0,0 @@
#!/usr/bin/env bash
set -eo pipefail
# 1. GET ARGS
plan_path=$2
exit 0

View File

@ -1,13 +0,0 @@
# Buildpack API version
api = "0.7"
# Buildpack ID and metadata
[buildpack]
id = "samples/hello-world"
version = "0.0.1"
name = "Hello World Buildpack"
sbom-formats = ["application/vnd.cyclonedx+json"]
# Stacks that the buildpack will work with
[[stacks]]
id = "*"

View File

@ -1,8 +0,0 @@
# Buildpack API version
api = "0.9"
# Extension ID and metadata
[extension]
id = "samples/hello-world"
version = "0.0.1"
name = "Hello World Extension"

View File

@ -1,9 +0,0 @@
[[order]]
[[order.group]]
id = "samples/hello-world"
version = "0.0.1"
[[order-extensions]]
[[order-extensions.group]]
id = "samples/hello-world"
version = "0.0.1"

View File

@ -1,4 +0,0 @@
[[order]]
[[order.group]]
id = "samples/hello-world"
version = "0.0.1"

View File

@ -1,5 +0,0 @@
[[images]]
image = "some-run-image-from-run-toml"
[[images]]
image = "some-other-run-image"

View File

@ -1 +0,0 @@
{"architecture":"amd64","config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["sh"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"65aaed1d1f89cd3cd5aac9137c4786831e99a845ad823496c6008a22a725c780","container_config":{"Hostname":"65aaed1d1f89","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"sh\"]"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"created":"2022-11-18T01:19:29.442257773Z","docker_version":"20.10.12","history":[{"created":"2022-11-18T01:19:29.321465538Z","created_by":"/bin/sh -c #(nop) ADD file:36d9f497f679d56737ac1379d93f7b6a2e4c814e38e868a5a8e719c4b226ef6e in / "},{"created":"2022-11-18T01:19:29.442257773Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:40cf597a9181e86497f4121c604f9f0ab208950a98ca21db883f26b0a548a2eb"]}}

View File

@ -1,16 +0,0 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 1457,
"digest": "sha256:9d5226e6ce3fb6aee2822206a5ef85f38c303d2b37bfc894b419fca2c0501269"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 772999,
"digest": "sha256:405fecb6a2fa4f29683f977e7e3b852bf6f8975a2aba647d234d2371894943da"
}
]
}

View File

@ -1,14 +0,0 @@
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 527,
"digest": "sha256:f75f3d1a317fc82c793d567de94fc8df2bece37acd5f2bd364a0d91a0d1f3dab",
"platform": {
"architecture": "amd64",
"os": "linux"
}
}
]
}

View File

@ -1,3 +0,0 @@
{
"imageLayoutVersion": "1.0.0"
}

View File

@ -1,4 +1,4 @@
FROM ubuntu:jammy FROM ubuntu:bionic
ARG cnb_uid=1234 ARG cnb_uid=1234
ARG cnb_gid=1000 ARG cnb_gid=1000

View File

@ -1 +0,0 @@
val-from-build-config

View File

@ -1,5 +0,0 @@
api = "0.1"
[buildpack]
id = "bad_api"
version = "bad_api_version"
name = "Bad API Buildpack"

Some files were not shown because too many files have changed in this diff Show More