Compare commits
No commits in common. "main" and "v0.15.1" have entirely different histories.
@@ -2,14 +2,12 @@
name: Bug
about: Bug report
title: ''
labels: type/bug, status/triage
labels: status/triage, type/bug
assignees: ''

---

### Summary
<!-- Please provide a general summary of the issue. -->
<!--- Please provide a general summary of the issue. -->

---

@@ -17,20 +15,17 @@ assignees: ''
### Reproduction

##### Steps
<!-- What steps should be taken to reproduce the issue? -->
<!--- What steps should be taken to reproduce the issue? -->

1.
2.
3.

##### Current behavior
<!-- What happened? Logs, etc. could go here. -->

##### Expected behavior
<!-- What did you expect to happen? -->
<!--- What happened? Logs, etc. could go here. -->

##### Expected
<!--- What did you expect to happen? -->

---

@@ -38,15 +33,10 @@ assignees: ''
### Context

##### lifecycle version
<!-- If you can find this, it helps us pin down the issue. For example, run `pack builder inspect <builder name>` which should report the lifecycle version in question. -->
<!--- If you can find this, it helps us pin down the issue. For example, run `pack inspect-builder BUILDER` which should report the lifecycle version in question. -->

##### platform version(s)
<!-- For example run `pack report` and `docker info` and copy output here, redacting any sensitive information. -->
<!--- For example run `pack report` and `docker info` and copy output here. -->

##### anything else?
<!-- Add any other context that may help (e.g., Tekton task version, kpack version, etc.). -->
<!--- Tekton task version, kpack version, etc. -->
@@ -7,20 +7,11 @@ assignees: ''

---

### Summary
<!-- Please describe why this chore matters, who will enjoy it and how. -->
### Description
<!-- A concise description of why this chore matters, who will enjoy it and how. -->

### Proposed solution
<!-- A clear and concise description of how you think the chore should be implemented. -->

---

### Proposal
<!-- How do you think the chore should be implemented? -->

---

### Context
<!-- Add any other context that may help. -->
### Additional context
<!-- Add any other context or screenshots about the chore that may help. -->
@@ -7,27 +7,14 @@ assignees: ''

---

### Summary
<!-- Please describe the feature and why it matters. -->
### Description
<!-- A concise description of what problem the feature solves and why solving it matters. -->

### Proposed solution
<!-- A clear and concise description of what you want to happen. -->

### Describe alternatives you've considered
<!-- A clear and concise description of any alternative solutions or features you've considered. -->

---

### Proposal
<!-- How do you think the feature should be implemented? -->

---

### Related
<!-- If this feature addresses an RFC, please provide the RFC number below. -->

RFC #___

---

### Context
<!-- Add any other context that may help. -->
### Additional context
<!-- Add any other context or screenshots about the feature request here. -->
@@ -3,14 +3,5 @@ updates:
  - package-ecosystem: gomod
    directory: "/"
    schedule:
      interval: weekly
    groups:
      # Group all minor/patch go dependencies into a single PR.
      go-dependencies:
        update-types:
          - "minor"
          - "patch"
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: weekly
      interval: daily
    open-pull-requests-limit: 10
@@ -1,25 +0,0 @@
<!-- 🎉🎉🎉 Thank you for the PR!!! 🎉🎉🎉 -->

### Summary
<!-- Please describe your changes at a high level. -->

#### Release notes
<!-- Please provide 1-2 sentences for release notes. -->
<!-- Example: When using platform API `0.7` or greater, the `creator` logs the expected phase header for the analyze phase -->

---

### Related
<!-- If this PR addresses an issue, please provide the issue number below. -->

Resolves #___

---

### Context
<!-- Add any other context that may help reviewers (e.g., code that requires special attention, etc.). -->
@ -14,14 +14,11 @@ jobs:
|
|||
test-linux-amd64:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
fetch-depth: '0'
|
||||
- name: Setup go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
check-latest: true
|
||||
go-version-file: 'go.mod'
|
||||
go-version: '1.18'
|
||||
- name: Install jq
|
||||
run: |
|
||||
mkdir -p deps/bin
|
||||
|
@ -33,9 +30,8 @@ jobs:
|
|||
TEST_COVERAGE: 1
|
||||
run: make test
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v5
|
||||
uses: codecov/codecov-action@v3
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
file: ./out/tests/coverage-unit.txt
|
||||
flags: unit,os_linux
|
||||
fail_ci_if_error: true
|
||||
|
@ -43,47 +39,91 @@ jobs:
|
|||
test-linux-arm64:
|
||||
runs-on: linux-arm64
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
fetch-depth: '0'
|
||||
- name: Setup go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
check-latest: true
|
||||
go-version-file: 'go.mod'
|
||||
go-version: '1.18'
|
||||
- name: Test
|
||||
run: |
|
||||
make format || true
|
||||
make test
|
||||
test-windows:
|
||||
runs-on: windows-2019
|
||||
steps:
|
||||
- name: Set git to use LF and symlinks
|
||||
run: |
|
||||
git config --global core.autocrlf false
|
||||
git config --global core.eol lf
|
||||
git config --global core.symlinks true
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
- name: Add runner IP to daemon insecure-registries and firewall
|
||||
shell: powershell
|
||||
run: |
|
||||
# Get IP from default gateway interface
|
||||
$IPAddress=(Get-NetIPAddress -InterfaceAlias ((Get-NetRoute "0.0.0.0/0").InterfaceAlias) -AddressFamily IPv4)[0].IPAddress
|
||||
|
||||
# Allow container-to-host registry traffic (from public interface, to the same interface)
|
||||
New-NetfirewallRule -DisplayName test-registry -LocalAddress $IPAddress -RemoteAddress $IPAddress
|
||||
|
||||
# create or update daemon config to allow host as insecure-registry
|
||||
$config=@{}
|
||||
if (Test-Path C:\ProgramData\docker\config\daemon.json) {
|
||||
$config=(Get-Content C:\ProgramData\docker\config\daemon.json | ConvertFrom-json)
|
||||
}
|
||||
$config | Add-Member -Force -Name "insecure-registries" -value @("$IPAddress/32") -MemberType NoteProperty
|
||||
ConvertTo-json $config | Out-File -Encoding ASCII C:\ProgramData\docker\config\daemon.json
|
||||
|
||||
Restart-Service docker
|
||||
|
||||
# dump docker info for auditing
|
||||
docker version
|
||||
docker info
|
||||
- name: Test
|
||||
env:
|
||||
TEST_COVERAGE: 1
|
||||
run: |
|
||||
make test
|
||||
- name: Prepare Codecov
|
||||
uses: crazy-max/ghaction-chocolatey@v2
|
||||
with:
|
||||
args: install codecov -y
|
||||
- name: Run Codecov
|
||||
run: |
|
||||
codecov.exe -f .\out\tests\coverage-unit.txt -v --flag os_windows
|
||||
build-and-publish:
|
||||
needs:
|
||||
- test-linux-amd64
|
||||
- test-linux-arm64
|
||||
- test-windows
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
id-token: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0 # fetch all history for all branches and tags
|
||||
- name: Setup go
|
||||
uses: actions/setup-go@v5
|
||||
- name: Set up go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
check-latest: true
|
||||
go-version-file: 'go.mod'
|
||||
go-version: '1.18'
|
||||
- name: Install Cosign
|
||||
uses: sigstore/cosign-installer@v3
|
||||
uses: sigstore/cosign-installer@v1.0.0
|
||||
with:
|
||||
cosign-release: 'v1.0.0'
|
||||
- name: Set version
|
||||
run: |
|
||||
echo "LIFECYCLE_VERSION=$(go run tools/version/main.go)" | tee -a $GITHUB_ENV version.txt
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: version
|
||||
path: version.txt
|
||||
- name: Set tag
|
||||
run: |
|
||||
echo "LIFECYCLE_IMAGE_TAG=$(git describe --always --abbrev=7)" >> tag.txt
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: tag
|
||||
path: tag.txt
|
||||
|
@ -92,65 +132,71 @@ jobs:
|
|||
make clean
|
||||
make build
|
||||
make package
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: lifecycle-linux-x86-64
|
||||
path: out/lifecycle-v*+linux.x86-64.tgz
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: lifecycle-linux-x86-64-sha256
|
||||
path: out/lifecycle-v*+linux.x86-64.tgz.sha256
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: lifecycle-linux-arm64
|
||||
path: out/lifecycle-v*+linux.arm64.tgz
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: lifecycle-linux-arm64-sha256
|
||||
path: out/lifecycle-v*+linux.arm64.tgz.sha256
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: lifecycle-linux-ppc64le
|
||||
path: out/lifecycle-v*+linux.ppc64le.tgz
|
||||
- uses: actions/upload-artifact@v4
|
||||
name: lifecycle-windows-x86-64
|
||||
path: out/lifecycle-v*+windows.x86-64.tgz
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: lifecycle-linux-ppc64le-sha256
|
||||
path: out/lifecycle-v*+linux.ppc64le.tgz.sha256
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: lifecycle-linux-s390x
|
||||
path: out/lifecycle-v*+linux.s390x.tgz
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: lifecycle-linux-s390x-sha256
|
||||
path: out/lifecycle-v*+linux.s390x.tgz.sha256
|
||||
name: lifecycle-windows-x86-64-sha256
|
||||
path: out/lifecycle-v*+windows.x86-64.tgz.sha256
|
||||
- name: Generate SBOM JSON
|
||||
uses: CycloneDX/gh-gomod-generate-sbom@v2
|
||||
uses: CycloneDX/gh-gomod-generate-sbom@v1
|
||||
with:
|
||||
args: mod -licenses -json -output lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json
|
||||
version: ^v1
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: lifecycle-bom-cdx
|
||||
path: lifecycle-v*-bom.cdx.json
|
||||
- name: Calculate SBOM sha
|
||||
run: |
|
||||
shasum -a 256 lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json > lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json.sha256
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: lifecycle-bom-cdx-sha256
|
||||
path: lifecycle-v*-bom.cdx.json.sha256
|
||||
- uses: azure/docker-login@v2
|
||||
- uses: azure/docker-login@v1
|
||||
if: github.event_name == 'push'
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- uses: actions/download-artifact@v5
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: tag
|
||||
- name: Set env
|
||||
run: |
|
||||
cat tag.txt >> $GITHUB_ENV
|
||||
- name: Rename cosign public key
|
||||
run: |
|
||||
cp cosign.pub lifecycle-v${{ env.LIFECYCLE_VERSION }}-cosign.pub
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: lifecycle-cosign-public-key
|
||||
path: lifecycle-v${{ env.LIFECYCLE_VERSION }}-cosign.pub
|
||||
- name: Calculate cosign sha
|
||||
run: |
|
||||
shasum -a 256 lifecycle-v${{ env.LIFECYCLE_VERSION }}-cosign.pub > lifecycle-v${{ env.LIFECYCLE_VERSION }}-cosign.pub.sha256
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: lifecycle-cosign-public-key-sha256
|
||||
path: lifecycle-v${{ env.LIFECYCLE_VERSION }}-cosign.pub.sha256
|
||||
- name: Publish images
|
||||
if: github.event_name == 'push'
|
||||
run: |
|
||||
|
@ -163,32 +209,25 @@ jobs:
|
|||
LINUX_ARM64_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.arm64.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-arm64 -arch arm64 | awk '{print $NF}')
|
||||
echo "LINUX_ARM64_SHA: $LINUX_ARM64_SHA"
|
||||
|
||||
LINUX_PPC64LE_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.ppc64le.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-ppc64le -arch ppc64le | awk '{print $NF}')
|
||||
echo "LINUX_PPC64LE_SHA: LINUX_PPC64LE_SHA"
|
||||
|
||||
LINUX_S390X_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.s390x.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-s390x -arch s390x | awk '{print $NF}')
|
||||
echo "LINUX_S390X_SHA: $LINUX_S390X_SHA"
|
||||
WINDOWS_AMD64_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+windows.x86-64.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-windows -os windows | awk '{print $NF}')
|
||||
echo "WINDOWS_AMD64_SHA: $WINDOWS_AMD64_SHA"
|
||||
|
||||
docker manifest create buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG} \
|
||||
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-x86-64@${LINUX_AMD64_SHA} \
|
||||
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-arm64@${LINUX_ARM64_SHA} \
|
||||
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-ppc64le@${LINUX_PPC64LE_SHA} \
|
||||
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-s390x@${LINUX_S390X_SHA}
|
||||
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-windows@${WINDOWS_AMD64_SHA}
|
||||
|
||||
MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG})
|
||||
echo "MANIFEST_SHA: $MANIFEST_SHA"
|
||||
|
||||
cosign sign -r -y \
|
||||
COSIGN_PASSWORD=${{ secrets.COSIGN_PASSWORD }} cosign sign -r \
|
||||
-key <(echo -n "${{ secrets.COSIGN_PRIVATE_KEY }}" | base64 --decode) \
|
||||
-a tag=${LIFECYCLE_IMAGE_TAG} \
|
||||
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}@${MANIFEST_SHA}
|
||||
cosign verify \
|
||||
--certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" \
|
||||
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
|
||||
-a tag=${LIFECYCLE_IMAGE_TAG} \
|
||||
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}
|
||||
cosign verify -key cosign.pub -a tag=${LIFECYCLE_IMAGE_TAG} buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}
|
||||
- name: Scan image
|
||||
if: github.event_name == 'push'
|
||||
uses: anchore/scan-action@v6
|
||||
uses: anchore/scan-action@v3
|
||||
with:
|
||||
image: buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}
|
||||
pack-acceptance-linux:
|
||||
|
@ -196,27 +235,27 @@ jobs:
|
|||
needs: build-and-publish
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: 'buildpacks/pack'
|
||||
path: 'pack'
|
||||
ref: 'main'
|
||||
fetch-depth: 0 # fetch all history for all branches and tags
|
||||
- name: Setup go
|
||||
uses: actions/setup-go@v5
|
||||
- name: Set up go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version-file: 'pack/go.mod'
|
||||
- uses: actions/download-artifact@v5
|
||||
go-version: '1.18'
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: version
|
||||
- uses: actions/download-artifact@v5
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: tag
|
||||
- name: Set env
|
||||
run: |
|
||||
cat version.txt >> $GITHUB_ENV
|
||||
cat tag.txt >> $GITHUB_ENV
|
||||
- uses: actions/download-artifact@v5
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: lifecycle-linux-x86-64
|
||||
path: pack
|
||||
|
@ -227,3 +266,70 @@ jobs:
|
|||
LIFECYCLE_PATH="../lifecycle-v${{ env.LIFECYCLE_VERSION }}+linux.x86-64.tgz" \
|
||||
LIFECYCLE_IMAGE="buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}" \
|
||||
make acceptance
|
||||
pack-acceptance-windows:
|
||||
if: github.event_name == 'push'
|
||||
needs: build-and-publish
|
||||
runs-on: windows-2019
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: 'buildpacks/pack'
|
||||
path: 'pack'
|
||||
ref: 'main'
|
||||
fetch-depth: 0 # fetch all history for all branches and tags
|
||||
- name: Set up go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
- name: Add runner IP to daemon insecure-registries and firewall
|
||||
shell: powershell
|
||||
run: |
|
||||
# Get IP from default gateway interface
|
||||
$IPAddress=(Get-NetIPAddress -InterfaceAlias ((Get-NetRoute "0.0.0.0/0").InterfaceAlias) -AddressFamily IPv4)[0].IPAddress
|
||||
|
||||
# Allow container-to-host registry traffic (from public interface, to the same interface)
|
||||
New-NetfirewallRule -DisplayName test-registry -LocalAddress $IPAddress -RemoteAddress $IPAddress
|
||||
|
||||
# create or update daemon config to allow host as insecure-registry
|
||||
$config=@{}
|
||||
if (Test-Path C:\ProgramData\docker\config\daemon.json) {
|
||||
$config=(Get-Content C:\ProgramData\docker\config\daemon.json | ConvertFrom-json)
|
||||
}
|
||||
$config | Add-Member -Force -Name "insecure-registries" -value @("$IPAddress/32") -MemberType NoteProperty
|
||||
ConvertTo-json $config | Out-File -Encoding ASCII C:\ProgramData\docker\config\daemon.json
|
||||
|
||||
Restart-Service docker
|
||||
|
||||
# dump docker info for auditing
|
||||
docker version
|
||||
docker info
|
||||
- name: Modify etc\hosts to include runner IP
|
||||
shell: powershell
|
||||
run: |
|
||||
$IPAddress=(Get-NetIPAddress -InterfaceAlias ((Get-NetRoute "0.0.0.0/0").InterfaceAlias) -AddressFamily IPv4)[0].IPAddress
|
||||
"# Modified by CNB: https://github.com/buildpacks/ci/tree/main/gh-runners/windows
|
||||
${IPAddress} host.docker.internal
|
||||
${IPAddress} gateway.docker.internal
|
||||
" | Out-File -Filepath C:\Windows\System32\drivers\etc\hosts -Encoding utf8
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: version
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: tag
|
||||
- name: Set env
|
||||
run: |
|
||||
cat version.txt >> $env:GITHUB_ENV
|
||||
cat tag.txt >> $env:GITHUB_ENV
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: lifecycle-windows-x86-64
|
||||
path: pack
|
||||
- name: Run pack acceptance
|
||||
run: |
|
||||
cd pack
|
||||
git checkout $(git describe --abbrev=0 --tags) # check out the latest tag
|
||||
$env:LIFECYCLE_PATH="..\lifecycle-v${{ env.LIFECYCLE_VERSION }}+windows.x86-64.tgz"
|
||||
$env:LIFECYCLE_IMAGE="buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}"
|
||||
make acceptance
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
name: check-latest-release
|
||||
name: Check latest lifecycle release
|
||||
|
||||
on:
|
||||
schedule:
|
||||
|
@ -9,29 +9,13 @@ jobs:
|
|||
check-release:
|
||||
runs-on:
|
||||
- ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
check-latest: true
|
||||
go-version-file: 'go.mod'
|
||||
- name: Get previous release tag
|
||||
id: get-previous-release-tag
|
||||
uses: actions/github-script@v6
|
||||
with:
|
||||
github-token: ${{secrets.GITHUB_TOKEN}}
|
||||
result-encoding: string
|
||||
script: |
|
||||
return github.rest.repos.getLatestRelease({
|
||||
owner: "buildpacks",
|
||||
repo: "lifecycle",
|
||||
}).then(result => {
|
||||
return result.data.tag_name
|
||||
})
|
||||
- name: Read go and release versions
|
||||
id: read-versions
|
||||
go-version: "1.18"
|
||||
- uses: actions/checkout@v3
|
||||
- name: Read go versions
|
||||
id: read-go
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
|
@ -41,19 +25,19 @@ jobs:
|
|||
|
||||
LATEST_GO_VERSION=$(go version | cut -d ' ' -f 3)
|
||||
|
||||
LATEST_RELEASE_VERSION=${{ steps.get-previous-release-tag.outputs.result }}
|
||||
LATEST_RELEASE_VERSION=$(gh release list -L 1 | cut -d $'\t' -f 1 | cut -d ' ' -f 2)
|
||||
|
||||
wget https://github.com/buildpacks/lifecycle/releases/download/$LATEST_RELEASE_VERSION/lifecycle-$LATEST_RELEASE_VERSION+linux.x86-64.tgz -O lifecycle.tgz
|
||||
tar xzf lifecycle.tgz
|
||||
LATEST_RELEASE_GO_VERSION=$(go version ./lifecycle/lifecycle | cut -d ' ' -f 2)
|
||||
|
||||
echo "latest-go-version=${LATEST_GO_VERSION}" >> "$GITHUB_OUTPUT"
|
||||
echo "latest-release-go-version=${LATEST_RELEASE_GO_VERSION}" >> "$GITHUB_OUTPUT"
|
||||
echo "::set-output name=latest-go-version::${LATEST_GO_VERSION}"
|
||||
echo "::set-output name=latest-release-go-version::${LATEST_RELEASE_GO_VERSION}"
|
||||
|
||||
LATEST_RELEASE_VERSION=$(echo $LATEST_RELEASE_VERSION | cut -d \v -f 2)
|
||||
echo "latest-release-version=${LATEST_RELEASE_VERSION}" >> "$GITHUB_OUTPUT"
|
||||
echo "::set-output name=latest-release-version::${LATEST_RELEASE_VERSION}"
|
||||
- name: Create issue if needed
|
||||
if: ${{ steps.read-versions.outputs.latest-go-version != steps.read-versions.outputs.latest-release-go-version }}
|
||||
if: ${{ steps.read-go.outputs.latest-go-version != steps.read-go.outputs.latest-release-go-version }}
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
|
@ -61,15 +45,15 @@ jobs:
|
|||
|
||||
set -euo pipefail
|
||||
|
||||
title="Upgrade lifecycle to ${{ steps.read-versions.outputs.latest-go-version }}"
|
||||
label=${{ steps.read-versions.outputs.latest-go-version }}
|
||||
title="Upgrade lifecycle to ${{ steps.read-go.outputs.latest-go-version }}"
|
||||
label=${{ steps.read-go.outputs.latest-go-version }}
|
||||
|
||||
# Create label to use for exact search
|
||||
gh label create "$label" || true
|
||||
|
||||
search_output=$(gh issue list --search "$title" --label "$label")
|
||||
|
||||
body="Latest lifecycle release v${{ steps.read-versions.outputs.latest-release-version }} is built with Go version ${{ steps.read-versions.outputs.latest-release-go-version }}; newer version ${{ steps.read-versions.outputs.latest-go-version }} is available."
|
||||
body="Latest lifecycle release v${{ steps.read-go.outputs.latest-release-version }} is built with Go version ${{ steps.read-go.outputs.latest-release-go-version }}; newer version ${{ steps.read-go.outputs.latest-go-version }} is available."
|
||||
|
||||
if [ -z "${search_output// }" ]
|
||||
then
|
||||
|
@ -86,12 +70,9 @@ jobs:
|
|||
fi
|
||||
- name: Scan latest release image
|
||||
id: scan-image
|
||||
uses: anchore/scan-action@v6
|
||||
uses: anchore/scan-action@v3
|
||||
with:
|
||||
image: buildpacksio/lifecycle:${{ steps.read-versions.outputs.latest-release-version }}
|
||||
fail-build: true
|
||||
severity-cutoff: medium
|
||||
output-format: json
|
||||
image: buildpacksio/lifecycle:${{ steps.read-go.outputs.latest-release-version }}
|
||||
- name: Create issue if needed
|
||||
if: failure() && steps.scan-image.outcome == 'failure'
|
||||
env:
|
||||
|
@ -101,7 +82,7 @@ jobs:
|
|||
|
||||
set -euo pipefail
|
||||
|
||||
title="CVE(s) found in v${{ steps.read-versions.outputs.latest-release-version }}"
|
||||
title="CVE(s) found"
|
||||
label=cve
|
||||
|
||||
# Create label to use for exact search
|
||||
|
@ -110,7 +91,7 @@ jobs:
|
|||
search_output=$(gh issue list --search "$title" --label "$label")
|
||||
|
||||
GITHUB_WORKFLOW_URL=https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID
|
||||
body="Latest lifecycle release v${{ steps.read-versions.outputs.latest-release-version }} triggered CVE(s) from Grype. For further details, see: $GITHUB_WORKFLOW_URL json: $(cat ${{ steps.scan-image.outputs.json }} | jq '.matches[] | .vulnerability | {id, severity, description}' )"
|
||||
body="Latest lifecycle release v${{ steps.read-go.outputs.latest-release-version }} triggered CVE(s) from Grype. For further details, see: $GITHUB_WORKFLOW_URL"
|
||||
|
||||
if [ -z "${search_output// }" ]
|
||||
then
|
||||
|
|
|
@ -6,10 +6,8 @@ on:
|
|||
jobs:
|
||||
draft-release:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v2
|
||||
- name: Install jq
|
||||
run: |
|
||||
mkdir -p deps/bin
|
||||
|
@ -24,10 +22,9 @@ jobs:
|
|||
exit 1
|
||||
fi
|
||||
echo "LIFECYCLE_VERSION=$version" >> $GITHUB_ENV
|
||||
- name: Determine download urls for linux-x86-64, linux-arm64, linux-ppc64le, linux-s390x
|
||||
- name: Determine download urls for linux-x86-64, linux-arm64 and windows
|
||||
id: artifact-urls
|
||||
# FIXME: this script should be updated to work with actions/github-script@v6
|
||||
uses: actions/github-script@v3
|
||||
uses: actions/github-script@v3.0.0
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
|
@ -83,10 +80,7 @@ jobs:
|
|||
throw "no artifacts found"
|
||||
}
|
||||
if (urlList.length != 10) {
|
||||
// found too many artifacts
|
||||
// list them and throw
|
||||
console.log(urlList);
|
||||
throw "there should be exactly 10 artifacts, found " + urlList.length + " artifacts"
|
||||
throw "there should be exactly ten artifacts"
|
||||
}
|
||||
return urlList.join(",")
|
||||
})
|
||||
|
@ -110,80 +104,34 @@ jobs:
|
|||
cat *.sha256 | sort > lifecycle-v${{ env.LIFECYCLE_VERSION }}-checksums.txt
|
||||
rm *.sha256
|
||||
- name: Set pre-release kind
|
||||
if: "contains(env.LIFECYCLE_VERSION, 'rc') || contains(env.LIFECYCLE_VERSION, 'pre')" # e.g., 0.99.0-rc.1
|
||||
if: contains(env.LIFECYCLE_VERSION, 'rc') # e.g., 0.99.0-rc.1
|
||||
run: |
|
||||
echo "RELEASE_KIND=pre-release" >> $GITHUB_ENV
|
||||
- name: Set release kind
|
||||
if: "!contains(env.LIFECYCLE_VERSION, 'rc') && !contains(env.LIFECYCLE_VERSION, 'pre')"
|
||||
if: "!contains(env.LIFECYCLE_VERSION, 'rc')"
|
||||
run: |
|
||||
echo "RELEASE_KIND=release" >> $GITHUB_ENV
|
||||
- name: Get previous release tag
|
||||
id: get-previous-release-tag
|
||||
uses: actions/github-script@v6
|
||||
with:
|
||||
github-token: ${{secrets.GITHUB_TOKEN}}
|
||||
result-encoding: string
|
||||
script: |
|
||||
return github.rest.repos.getLatestRelease({
|
||||
owner: "buildpacks",
|
||||
repo: "lifecycle",
|
||||
}).then(result => {
|
||||
return result.data.tag_name
|
||||
})
|
||||
- name: Setup go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
check-latest: true
|
||||
go-version-file: 'go.mod'
|
||||
- name: Get go version
|
||||
id: get-go-version
|
||||
run: |
|
||||
mkdir tmp
|
||||
tar xzvf ${{ env.ARTIFACTS_PATH }}/lifecycle-v${{ env.LIFECYCLE_VERSION }}+linux.x86-64.tgz -C tmp/
|
||||
echo "GO_VERSION=$(go version tmp/lifecycle/lifecycle | cut -d ' ' -f 2 | sed -e 's/^go//')" >> $GITHUB_ENV
|
||||
- name: Set release body text
|
||||
run: |
|
||||
cat << EOF > body.txt
|
||||
# lifecycle v${{ env.LIFECYCLE_VERSION }}
|
||||
|
||||
Welcome to v${{ env.LIFECYCLE_VERSION }}, a ${{ env.RELEASE_KIND }} of the Cloud Native Buildpacks Lifecycle.
|
||||
Welcome to v${{ env.LIFECYCLE_VERSION }}, a **beta** ${{ env.RELEASE_KIND }} of the Cloud Native Buildpacks Lifecycle.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
The lifecycle runs as a normal user in a series of unprivileged containers. To export images and cache image layers, it requires access to a Docker (compatible) daemon **or** an OCI registry.
|
||||
The lifecycle runs as a normal user in a series of unprivileged containers. To export images and cache image layers, it requires access to a Docker daemon **or** Docker registry.
|
||||
|
||||
## Install
|
||||
|
||||
Extract the .tgz file and copy the lifecycle binaries into a [build image](https://github.com/buildpacks/spec/blob/main/platform.md#build-image). The build image can then be orchestrated by a platform implementation such as the [pack CLI](https://github.com/buildpack/pack) or [tekton](https://github.com/tektoncd/catalog/tree/main/task/buildpacks).
|
||||
Extract the .tgz file and copy the lifecycle binaries into a [build stack base image](https://github.com/buildpack/spec/blob/master/platform.md#stacks). The build image can then be orchestrated by a platform implementation such as the [pack CLI](https://github.com/buildpack/pack) or [tekton](https://github.com/tektoncd/catalog/blob/master/task/buildpacks/0.1/README.md).
|
||||
|
||||
## Lifecycle Image
|
||||
|
||||
An OCI image containing the lifecycle binaries is available at buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}.
|
||||
|
||||
## Features
|
||||
|
||||
* TODO
|
||||
* Updates go to version ${{ env.GO_VERSION }}
|
||||
|
||||
## Bugfixes
|
||||
|
||||
* TODO
|
||||
|
||||
## Chores
|
||||
|
||||
* TODO
|
||||
|
||||
**Full Changelog**: https://github.com/buildpacks/lifecycle/compare/${{ steps.get-previous-release-tag.outputs.result }}...release/${{ env.LIFECYCLE_VERSION }}
|
||||
|
||||
## Contributors
|
||||
|
||||
We'd like to acknowledge that this release wouldn't be as good without the help of the following amazing contributors:
|
||||
|
||||
TODO
|
||||
|
||||
EOF
|
||||
- name: Create pre-release
|
||||
if: "contains(env.LIFECYCLE_VERSION, 'rc') || contains(env.LIFECYCLE_VERSION, 'pre')" # e.g., 0.99.0-rc.1
|
||||
- name: Create Pre Release
|
||||
if: contains(env.LIFECYCLE_VERSION, 'rc') # e.g., 0.99.0-rc.1
|
||||
run: |
|
||||
cd ${{ env.ARTIFACTS_PATH }}
|
||||
gh release create v${{ env.LIFECYCLE_VERSION }} \
|
||||
|
@ -191,19 +139,19 @@ jobs:
|
|||
--draft \
|
||||
--notes-file ../body.txt \
|
||||
--prerelease \
|
||||
--target $GITHUB_REF_NAME \
|
||||
--target $GITHUB_REF \
|
||||
--title "lifecycle v${{ env.LIFECYCLE_VERSION }}"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Create release
|
||||
if: "!contains(env.LIFECYCLE_VERSION, 'rc') && !contains(env.LIFECYCLE_VERSION, 'pre')"
|
||||
- name: Create Release
|
||||
if: "!contains(env.LIFECYCLE_VERSION, 'rc')"
|
||||
run: |
|
||||
cd ${{ env.ARTIFACTS_PATH }}
|
||||
gh release create v${{ env.LIFECYCLE_VERSION }} \
|
||||
$(ls | sort | paste -sd " " -) \
|
||||
--draft \
|
||||
--notes-file ../body.txt \
|
||||
--target $GITHUB_REF_NAME \
|
||||
--target $GITHUB_REF \
|
||||
--title "lifecycle v${{ env.LIFECYCLE_VERSION }}"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
|
|
@ -8,21 +8,20 @@ on:
|
|||
jobs:
|
||||
retag-lifecycle-images:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
id-token: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Setup go
|
||||
uses: actions/setup-go@v5
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
check-latest: true
|
||||
go-version-file: 'go.mod'
|
||||
go-version: '1.18'
|
||||
- name: Install crane
|
||||
run: |
|
||||
go install github.com/google/go-containerregistry/cmd/crane@latest
|
||||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@v3
|
||||
- uses: azure/docker-login@v2
|
||||
uses: sigstore/cosign-installer@main
|
||||
with:
|
||||
cosign-release: 'v1.2.0'
|
||||
- uses: azure/docker-login@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
@ -32,22 +31,17 @@ jobs:
|
|||
echo "LIFECYCLE_IMAGE_TAG=$(git describe --always --abbrev=7)" >> $GITHUB_ENV
|
||||
- name: Verify lifecycle images
|
||||
run: |
|
||||
LINUX_AMD64_SHA=$(cosign verify --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" --certificate-oidc-issuer https://token.actions.githubusercontent.com buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-x86-64 | jq -r .[0].critical.image.\"docker-manifest-digest\")
|
||||
LINUX_AMD64_SHA=$(cosign verify -key cosign.pub buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-x86-64 | jq -r .[0].critical.image.\"docker-manifest-digest\")
|
||||
echo "LINUX_AMD64_SHA: $LINUX_AMD64_SHA"
|
||||
echo "LINUX_AMD64_SHA=$LINUX_AMD64_SHA" >> $GITHUB_ENV
|
||||
|
||||
LINUX_ARM64_SHA=$(cosign verify --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" --certificate-oidc-issuer https://token.actions.githubusercontent.com buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64 | jq -r .[0].critical.image.\"docker-manifest-digest\")
|
||||
LINUX_ARM64_SHA=$(cosign verify -key cosign.pub buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64 | jq -r .[0].critical.image.\"docker-manifest-digest\")
|
||||
echo "LINUX_ARM64_SHA: $LINUX_ARM64_SHA"
|
||||
echo "LINUX_ARM64_SHA=$LINUX_ARM64_SHA" >> $GITHUB_ENV
|
||||
|
||||
LINUX_PPC64LE_SHA=$(cosign verify --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" --certificate-oidc-issuer https://token.actions.githubusercontent.com buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-ppc64le | jq -r .[0].critical.image.\"docker-manifest-digest\")
|
||||
echo "LINUX_PPC64LE_SHA: $LINUX_PPC64LE_SHA"
|
||||
echo "LINUX_PPC64LE_SHA=$LINUX_PPC64LE_SHA" >> $GITHUB_ENV
|
||||
|
||||
LINUX_S390X_SHA=$(cosign verify --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" --certificate-oidc-issuer https://token.actions.githubusercontent.com buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-s390x | jq -r .[0].critical.image.\"docker-manifest-digest\")
|
||||
echo "LINUX_S390X_SHA: $LINUX_S390X_SHA"
|
||||
echo "LINUX_S390X_SHA=$LINUX_S390X_SHA" >> $GITHUB_ENV
|
||||
|
||||
WINDOWS_AMD64_SHA=$(cosign verify -key cosign.pub buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-windows | jq -r .[0].critical.image.\"docker-manifest-digest\")
|
||||
echo "WINDOWS_AMD64_SHA: $WINDOWS_AMD64_SHA"
|
||||
echo "WINDOWS_AMD64_SHA=$WINDOWS_AMD64_SHA" >> $GITHUB_ENV
|
||||
- name: Download SBOM
|
||||
run: |
|
||||
gh release download --pattern '*-bom.cdx.json' ${{ github.event.release.tag_name }}
|
||||
|
@ -59,70 +53,54 @@ jobs:
|
|||
|
||||
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-x86-64@${{ env.LINUX_AMD64_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-x86-64
|
||||
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-arm64
|
||||
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-ppc64le
|
||||
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-s390x@${{ env.LINUX_S390X_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-s390x
|
||||
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-windows@${{ env.WINDOWS_AMD64_SHA }} ${{ env.LIFECYCLE_VERSION }}-windows
|
||||
|
||||
docker manifest create buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }} \
|
||||
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-x86-64@${{ env.LINUX_AMD64_SHA }} \
|
||||
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} \
|
||||
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} \
|
||||
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-s390x@${{ env.LINUX_S390X_SHA }}
|
||||
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-windows@${{ env.WINDOWS_AMD64_SHA }}
|
||||
|
||||
MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }})
|
||||
echo "MANIFEST_SHA: $MANIFEST_SHA"
|
||||
|
||||
cosign sign -r -y \
|
||||
COSIGN_PASSWORD=${{ secrets.COSIGN_PASSWORD }} cosign sign -r \
|
||||
-key <(echo -n "${{ secrets.COSIGN_PRIVATE_KEY }}" | base64 --decode) \
|
||||
-a tag=${{ env.LIFECYCLE_VERSION }} \
|
||||
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}@${MANIFEST_SHA}
|
||||
cosign verify \
|
||||
--certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/post-release.yml" \
|
||||
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
|
||||
-a tag=${{ env.LIFECYCLE_VERSION }} \
|
||||
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}
|
||||
cosign verify -key cosign.pub -a tag=${{ env.LIFECYCLE_VERSION }} buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}
|
||||
|
||||
cosign attach sbom --sbom ./*-bom.cdx.json --type cyclonedx buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}
|
||||
cosign sign -r -y \
|
||||
-a tag=${{ env.LIFECYCLE_VERSION }} --attachment sbom \
|
||||
cosign attach sbom -sbom ./*-bom.cdx.json -type cyclonedx buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}
|
||||
COSIGN_PASSWORD=${{ secrets.COSIGN_PASSWORD }} cosign sign -r \
|
||||
-key <(echo -n "${{ secrets.COSIGN_PRIVATE_KEY }}" | base64 --decode) \
|
||||
-a tag=${{ env.LIFECYCLE_VERSION }} -attachment sbom \
|
||||
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}@${MANIFEST_SHA}
|
||||
cosign verify \
|
||||
--certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/post-release.yml" \
|
||||
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
|
||||
-a tag=${{ env.LIFECYCLE_VERSION }} --attachment sbom \
|
||||
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}
|
||||
cosign verify -key cosign.pub -a tag=${{ env.LIFECYCLE_VERSION }} -attachment sbom buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}@${MANIFEST_SHA}
|
||||
- name: Retag lifecycle images & create manifest list - latest
|
||||
if: "!contains(env.LIFECYCLE_VERSION, 'rc') && !contains(env.LIFECYCLE_VERSION, 'pre')"
|
||||
if: "!contains(env.LIFECYCLE_VERSION, 'rc')"
|
||||
run: |
|
||||
DOCKER_CLI_EXPERIMENTAL=enabled
|
||||
|
||||
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-x86-64@${{ env.LINUX_AMD64_SHA }} latest-linux-x86-64
|
||||
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} latest-linux-arm64
|
||||
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} latest-linux-ppc64le
|
||||
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-s390x@${{ env.LINUX_S390X_SHA }} latest-linux-s390x
|
||||
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-windows@${{ env.WINDOWS_AMD64_SHA }} latest-windows
|
||||
|
||||
docker manifest create buildpacksio/lifecycle:latest \
|
||||
buildpacksio/lifecycle:latest-linux-x86-64@${{ env.LINUX_AMD64_SHA }} \
|
||||
buildpacksio/lifecycle:latest-linux-arm64@${{ env.LINUX_ARM64_SHA }} \
|
||||
buildpacksio/lifecycle:latest-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} \
|
||||
buildpacksio/lifecycle:latest-linux-s390x@${{ env.LINUX_S390X_SHA }}
|
||||
buildpacksio/lifecycle:latest-windows@${{ env.WINDOWS_AMD64_SHA }}
|
||||
|
||||
MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:latest)
|
||||
echo "MANIFEST_SHA: $MANIFEST_SHA"
|
||||
|
||||
cosign sign -r -y \
|
||||
COSIGN_PASSWORD=${{ secrets.COSIGN_PASSWORD }} cosign sign -r \
|
||||
-key <(echo -n "${{ secrets.COSIGN_PRIVATE_KEY }}" | base64 --decode) \
|
||||
-a tag=latest \
|
||||
buildpacksio/lifecycle:latest@${MANIFEST_SHA}
|
||||
cosign verify \
|
||||
--certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/post-release.yml" \
|
||||
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
|
||||
-a tag=latest \
|
||||
buildpacksio/lifecycle:latest
|
||||
cosign verify -key cosign.pub -a tag=latest buildpacksio/lifecycle:latest
|
||||
|
||||
cosign attach sbom --sbom ./*-bom.cdx.json --type cyclonedx buildpacksio/lifecycle:latest
|
||||
cosign sign -r -y \
|
||||
-a tag=${{ env.LIFECYCLE_VERSION }} --attachment sbom \
|
||||
cosign attach sbom -sbom ./*-bom.cdx.json -type cyclonedx buildpacksio/lifecycle:latest
|
||||
COSIGN_PASSWORD=${{ secrets.COSIGN_PASSWORD }} cosign sign -r \
|
||||
-key <(echo -n "${{ secrets.COSIGN_PRIVATE_KEY }}" | base64 --decode) \
|
||||
-a tag=${{ env.LIFECYCLE_VERSION }} -attachment sbom \
|
||||
buildpacksio/lifecycle:latest@${MANIFEST_SHA}
|
||||
cosign verify \
|
||||
--certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/post-release.yml" \
|
||||
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
|
||||
-a tag=${{ env.LIFECYCLE_VERSION }} --attachment sbom \
|
||||
buildpacksio/lifecycle:latest
|
||||
cosign verify -key cosign.pub -a tag=${{ env.LIFECYCLE_VERSION }} -attachment sbom buildpacksio/lifecycle:latest@${MANIFEST_SHA}
|
||||
|
|
|
@ -1,87 +0,0 @@
|
|||
name: test-s390x
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- 'release/**'
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- 'release/**'
|
||||
|
||||
jobs:
|
||||
test-linux-s390x:
|
||||
if: (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/release*')
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
ZVSI_FP_NAME: bp-floating-ci-${{ github.run_id }}
|
||||
ZVSI_INSTANCE_NAME: bp-zvsi-ci-${{ github.run_id }}
|
||||
ZVSI_ZONE_NAME: ca-tor-1
|
||||
ZVSI_PROFILE_NAME: bz2-4x16
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: install ibmcli and setup ibm login
|
||||
run: |
|
||||
curl -fsSL https://clis.cloud.ibm.com/install/linux | sh
|
||||
ibmcloud login -q --apikey ${{ secrets.IBMCLOUD_API_KEY }} -r ca-tor
|
||||
ibmcloud plugin install vpc-infrastructure
|
||||
- name: Creation of ZVSI
|
||||
id: ZVSI
|
||||
run: |
|
||||
#creation of zvsi
|
||||
ibmcloud is instance-create $ZVSI_INSTANCE_NAME ${{ secrets.ZVSI_VPC }} $ZVSI_ZONE_NAME $ZVSI_PROFILE_NAME ${{ secrets.ZVSI_SUBNET }} --image ${{ secrets.ZVSI_IMAGE }} --keys ${{ secrets.ZVSI_KEY }} --resource-group-id ${{ secrets.ZVSI_RG_ID }} --primary-network-interface "{\"name\":\"eth0\",\"allow_ip_spoofing\":false,\"subnet\": {\"name\":\"${{ secrets.ZVSI_SUBNET }}\"},\"security_groups\":[{\"id\":\"${{ secrets.ZVSI_SG }}\"}]}"
|
||||
#Reserving a floating ip to the ZVSI
|
||||
ibmcloud is floating-ip-reserve $ZVSI_FP_NAME --zone $ZVSI_ZONE_NAME --resource-group-id ${{ secrets.ZVSI_RG_ID }} --in $ZVSI_INSTANCE_NAME
|
||||
#Binding the Floating IP to the ZVSI
|
||||
ibmcloud is floating-ip-update $ZVSI_FP_NAME --nic eth0 --in $ZVSI_INSTANCE_NAME
|
||||
sleep 60
|
||||
#Saving the Floating IP used to log in to the ZVSI
|
||||
ZVSI_HOST=$(ibmcloud is floating-ip $ZVSI_FP_NAME | awk '/Address/{print $2}')
|
||||
echo $ZVSI_HOST
|
||||
echo "IP=${ZVSI_HOST}" >> $GITHUB_OUTPUT
|
||||
- name: Status of ZVSI
|
||||
run: |
|
||||
check=$(ibmcloud is ins| awk '/'$ZVSI_INSTANCE_NAME'/{print $3}')
|
||||
while [[ $check != "running" ]]
|
||||
do
|
||||
check=$(ibmcloud is ins | awk '/'$ZVSI_INSTANCE_NAME'/{print $3}')
|
||||
if [[ $check == 'failed' ]]
|
||||
then
|
||||
echo "Failed to run the ZVSI"
|
||||
break
|
||||
fi
|
||||
done
|
||||
- name: Install dependencies and run all tests on s390x ZVSI
|
||||
uses: appleboy/ssh-action@v1.2.2
|
||||
env:
|
||||
GH_REPOSITORY: ${{ github.server_url }}/${{ github.repository }}
|
||||
GH_REF: ${{ github.ref }}
|
||||
with:
|
||||
host: ${{ steps.ZVSI.outputs.IP }}
|
||||
username: ${{ secrets.ZVSI_SSH_USER }}
|
||||
key: ${{ secrets.ZVSI_PR_KEY }}
|
||||
envs: GH_REPOSITORY,GH_REF
|
||||
command_timeout: 100m
|
||||
script: |
|
||||
apt-get update -y
|
||||
apt-get install -y wget curl git make gcc jq docker.io
|
||||
wget https://go.dev/dl/go1.24.6.linux-s390x.tar.gz
|
||||
rm -rf /usr/local/go && tar -C /usr/local -xzf go1.24.6.linux-s390x.tar.gz
|
||||
export PATH=$PATH:/usr/local/go/bin
|
||||
git clone ${GH_REPOSITORY} lifecycle
|
||||
cd lifecycle && git checkout ${GH_REF}
|
||||
go env
|
||||
export PATH=$PATH:~/go/bin
|
||||
make format || true
|
||||
make test
|
||||
- name: Cleanup ZVSI
|
||||
if: ${{ steps.ZVSI.conclusion == 'success' && always() }}
|
||||
run: |
|
||||
#Delete the created ZVSI
|
||||
ibmcloud is instance-delete $ZVSI_INSTANCE_NAME --force
|
||||
sleep 20
|
||||
#Release the created FP
|
||||
ibmcloud is floating-ip-release $ZVSI_FP_NAME --force
|
|
@@ -5,13 +5,8 @@
*~
.tool-versions
/out
.vscode

acceptance/testdata/*/**/container/cnb/lifecycle/*
acceptance/testdata/*/**/container/docker-config/*

acceptance/testdata/exporter/container/cnb/run.toml
acceptance/testdata/exporter/container/layers/*analyzed.toml
acceptance/testdata/exporter/container/other_layers/*analyzed.toml

acceptance/testdata/restorer/container/layers/*analyzed.toml
@@ -1,5 +1,5 @@
ignore:
  - vulnerability: CVE-2015-5237 # false positive, see https://github.com/anchore/grype/issues/558
  - vulnerability: CVE-2021-22570 # false positive, see https://github.com/anchore/grype/issues/558
  - vulnerability: CVE-2024-41110 # non-impactful as we only use docker as a client
  - vulnerability: GHSA-v23v-6jw2-98fq # non-impactful as we only use docker as a client
  - vulnerability: GHSA-f3fp-gc8g-vw66 # can't update github.com/opencontainers/runc until it is updated in github.com/docker/docker
  - vulnerability: GHSA-v95c-p5hm-xq8f # can't update github.com/opencontainers/runc until it is updated in github.com/docker/docker
@@ -21,7 +21,7 @@
* Windows:
  * `choco install cygwin make -y`
  * `[Environment]::SetEnvironmentVariable("PATH", "C:\tools\cygwin\bin;$ENV:PATH", "MACHINE")`

### Caveats

* The acceptance tests require the docker daemon to be able to communicate with a local containerized insecure registry. On Docker Desktop 3.3.x, this may result in failures such as: `Expected nil: push response: : Get http://localhost:<port>/v2/: dial tcp [::1]:<port>: connect: connection refused`. To fix these failures, it may be necessary to add the following to the Docker Desktop Engine config:
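  The exact snippet is cut off by the hunk boundary above; purely as an illustration (the registry address and port below are placeholders, not taken from this repository), an insecure-registry entry in the Docker Desktop Engine config (`daemon.json`) generally looks like:

  ```json
  {
    "insecure-registries": [
      "localhost:5000"
    ]
  }
  ```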
@@ -36,18 +36,20 @@

The lifecycle release process involves chaining a series of GitHub actions together such that:
* The "build" workflow creates the artifacts
  * .tgz files containing the lifecycle binaries, shasums for the .tgz files, an SBOM, etc.
  * .tgz files containing the lifecycle binaries, shasums for the .tgz files, a cosign public key, an SBOM, etc.
  * OCI images containing the lifecycle binaries, tagged with their commit sha (for more information, see RELEASE.md)
* The "draft-release" workflow finds the artifacts and downloads them, creating the draft release
* The "post-release" workflow re-tags the OCI images that were created during the "build" workflow with the release version

It can be rather cumbersome to test changes to these workflows, as they are heavily intertwined. Thus we recommend forking the buildpacks/lifecycle repository in GitHub and running through the entire release process end-to-end.
For the fork, it is necessary to add the following secrets (one way to set them with the `gh` CLI is sketched after this list):
* COSIGN_PASSWORD (see [cosign](https://github.com/sigstore/cosign#generate-a-keypair))
* COSIGN_PRIVATE_KEY
* DOCKER_PASSWORD (if not using ghcr.io)
* DOCKER_USERNAME (if not using ghcr.io)
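A sketch only (the fork slug is a placeholder, the private key file name assumes a key pair generated with `cosign generate-key-pair`, which writes `cosign.key`/`cosign.pub`, and the secrets can equally be added through the GitHub UI):

```bash
$ gh secret set COSIGN_PASSWORD --repo <your-fork>/lifecycle
$ gh secret set COSIGN_PRIVATE_KEY --repo <your-fork>/lifecycle < cosign.key
$ gh secret set DOCKER_USERNAME --repo <your-fork>/lifecycle
$ gh secret set DOCKER_PASSWORD --repo <your-fork>/lifecycle
```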
The tools/test-fork.sh script can be used to update the source code to reflect the state of the fork.
It can be invoked like so: `./tools/test-fork.sh <registry repo name>`
The tools/test-fork.sh script can be used to update the source code to reflect the state of the fork.
It can be invoked like so: `./tools/test-fork.sh <registry repo name> <path to cosign public key>`

## Tasks
@@ -75,16 +77,6 @@ Formats, vets, and tests the code.
$ make test
```

#### Mocks

We use mock generators like most golang projects to help with our testing. To make new mocks:
```bash
$ make generate
$ make format lint
```

This is because the mock generator will make a larger diff that the formatter will fix.

### Build

Builds binaries to `out/linux/lifecycle/` and `out/windows/lifecycle/`.
IMAGE.md

@@ -7,28 +7,23 @@ This image is maintained by the [Cloud Native Buildpacks project](https://buildp
Supported tags are semver-versioned manifest lists - e.g., `0.12.0` or `0.12.0-rc.1`, pointing to one of the following os/architectures:
* `linux/amd64`
* `linux/arm64`
* `windows/amd64`

# About this image

Images are built in [GitHub actions](https://github.com/buildpacks/lifecycle/actions) and signed with [`cosign`](https://github.com/sigstore/cosign). To verify:
* Locate the public key `lifecycle-v<tag>-cosign.pub` on the [releases page](https://github.com/buildpacks/lifecycle/releases)
* Run:
```
cosign version # must be at least 2.0.0
cosign verify \
  --certificate-identity-regexp "https://github.com/buildpacks/lifecycle/.github/workflows/post-release.yml" \
  --certificate-oidc-issuer https://token.actions.githubusercontent.com \
  buildpacksio/lifecycle:<tag>
cosign verify -key lifecycle-v<tag>-cosign.pub buildpacksio/lifecycle:<tag>
```

A CycloneDX SBOM is "attached" to the image and signed with [`cosign`](https://github.com/sigstore/cosign). To verify:
* Locate the public key `lifecycle-v<tag>-cosign.pub` on the [releases page](https://github.com/buildpacks/lifecycle/releases)
* Run:
```
cosign version # must be at least 2.0.0
cosign verify \
  --certificate-identity-regexp "https://github.com/buildpacks/lifecycle/.github/workflows/post-release.yml" \
  --certificate-oidc-issuer https://token.actions.githubusercontent.com \
  -a tag=<tag> -attachment sbom \
  buildpacksio/lifecycle:<tag>
cosign version # must be at least 1.2.0
cosign verify -key cosign.pub -a tag=<tag> -attachment sbom buildpacksio/lifecycle:<tag>
cosign download sbom buildpacksio/lifecycle:<tag>
```
@@ -41,4 +36,4 @@ With [tekton](https://github.com/tektoncd/catalog/tree/main/task/buildpacks-phas
* Provide as param `LIFECYCLE_IMAGE` in taskrun (see the sketch below)
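As a sketch only (the task name, API version, and any other required params are assumptions; consult the tekton catalog task for its full interface), passing the image as a param could look like:

```yaml
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
  name: buildpacks-phases-run
spec:
  taskRef:
    name: buildpacks-phases
  params:
    - name: LIFECYCLE_IMAGE
      value: buildpacksio/lifecycle:<tag>
```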
***
[Source](https://github.com/buildpacks/lifecycle/blob/main/IMAGE.md) for this page
[Source](https://github.com/buildpacks/lifecycle/blob/main/IMAGE.md) for this page
Makefile
@ -30,6 +30,7 @@ LDFLAGS+=-X 'github.com/buildpacks/lifecycle/cmd.Version=$(LIFECYCLE_VERSION)'
|
|||
GOBUILD:=go build $(GOFLAGS) -ldflags "$(LDFLAGS)"
|
||||
GOTEST=$(GOCMD) test $(GOFLAGS)
|
||||
BUILD_DIR?=$(PWD)$/out
|
||||
WINDOWS_COMPILATION_IMAGE?=golang:1.18-windowsservercore-1809
|
||||
SOURCE_COMPILATION_IMAGE?=lifecycle-img
|
||||
BUILD_CTR?=lifecycle-ctr
|
||||
DOCKER_CMD?=make test
|
||||
|
@ -38,9 +39,11 @@ GOFILES := $(shell $(GOCMD) run tools$/lister$/main.go)
|
|||
|
||||
all: test build package
|
||||
|
||||
GOOS_ARCHS = linux/amd64 linux/arm64 linux/ppc64le linux/s390x darwin/amd64 darwin/arm64
|
||||
build: build-linux-amd64 build-linux-arm64 build-windows-amd64
|
||||
|
||||
build: build-linux-amd64 build-linux-arm64 build-linux-ppc64le build-linux-s390x
|
||||
build-linux-amd64: build-linux-amd64-lifecycle build-linux-amd64-symlinks build-linux-amd64-launcher
|
||||
build-linux-arm64: build-linux-arm64-lifecycle build-linux-arm64-symlinks build-linux-arm64-launcher
|
||||
build-windows-amd64: build-windows-amd64-lifecycle build-windows-amd64-symlinks build-windows-amd64-launcher
|
||||
|
||||
build-image-linux-amd64: build-linux-amd64 package-linux-amd64
|
||||
build-image-linux-amd64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.x86-64.tgz
|
||||
|
@ -52,117 +55,179 @@ build-image-linux-arm64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSIO
|
|||
build-image-linux-arm64:
|
||||
$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch arm64 -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
|
||||
|
||||
build-image-linux-ppc64le: build-linux-ppc64le package-linux-ppc64le
|
||||
build-image-linux-ppc64le: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.ppc64le.tgz
build-image-linux-ppc64le:
	$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch ppc64le -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
build-image-windows-amd64: build-windows-amd64 package-windows-amd64
build-image-windows-amd64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+windows.x86-64.tgz
build-image-windows-amd64:
	$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os windows -arch amd64 -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)

build-image-linux-s390x: build-linux-s390x package-linux-s390x
build-image-linux-s390x: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.s390x.tgz
build-image-linux-s390x:
	$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch s390x -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
build-linux-amd64-lifecycle: $(BUILD_DIR)/linux-amd64/lifecycle/lifecycle

define build_targets
build-$(1)-$(2): build-$(1)-$(2)-lifecycle build-$(1)-$(2)-symlinks build-$(1)-$(2)-launcher
build-linux-arm64-lifecycle: $(BUILD_DIR)/linux-arm64/lifecycle/lifecycle

build-$(1)-$(2)-lifecycle: $(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle
$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle: export GOOS:=linux
$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle: export GOARCH:=amd64
$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle: $(GOFILES)
$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle:
	@echo "> Building lifecycle/lifecycle for $(GOOS)/$(GOARCH)..."
	mkdir -p $(OUT_DIR)
	$(GOENV) $(GOBUILD) -o $(OUT_DIR)/lifecycle -a ./cmd/lifecycle

$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: export GOOS:=$(1)
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: export GOARCH:=$(2)
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: OUT_DIR?=$$(BUILD_DIR)/$$(GOOS)-$$(GOARCH)/lifecycle
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: $$(GOFILES)
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle:
	@echo "> Building lifecycle/lifecycle for $$(GOOS)/$$(GOARCH)..."
	mkdir -p $$(OUT_DIR)
	$$(GOENV) $$(GOBUILD) -o $$(OUT_DIR)/lifecycle -a ./cmd/lifecycle
$(BUILD_DIR)/linux-arm64/lifecycle/lifecycle: export GOOS:=linux
$(BUILD_DIR)/linux-arm64/lifecycle/lifecycle: export GOARCH:=arm64
$(BUILD_DIR)/linux-arm64/lifecycle/lifecycle: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$(BUILD_DIR)/linux-arm64/lifecycle/lifecycle: $(GOFILES)
$(BUILD_DIR)/linux-arm64/lifecycle/lifecycle:
	@echo "> Building lifecycle/lifecycle for $(GOOS)/$(GOARCH)..."
	mkdir -p $(OUT_DIR)
	$(GOENV) $(GOBUILD) -o $(OUT_DIR)/lifecycle -a ./cmd/lifecycle

build-$(1)-$(2)-symlinks: export GOOS:=$(1)
build-$(1)-$(2)-symlinks: export GOARCH:=$(2)
build-$(1)-$(2)-symlinks: OUT_DIR?=$$(BUILD_DIR)/$$(GOOS)-$$(GOARCH)/lifecycle
build-$(1)-$(2)-symlinks:
	@echo "> Creating phase symlinks for $$(GOOS)/$$(GOARCH)..."
	ln -sf lifecycle $$(OUT_DIR)/detector
	ln -sf lifecycle $$(OUT_DIR)/analyzer
	ln -sf lifecycle $$(OUT_DIR)/restorer
	ln -sf lifecycle $$(OUT_DIR)/builder
	ln -sf lifecycle $$(OUT_DIR)/exporter
	ln -sf lifecycle $$(OUT_DIR)/rebaser
	ln -sf lifecycle $$(OUT_DIR)/creator
	ln -sf lifecycle $$(OUT_DIR)/extender
build-linux-amd64-launcher: $(BUILD_DIR)/linux-amd64/lifecycle/launcher

build-$(1)-$(2)-launcher: $$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher
$(BUILD_DIR)/linux-amd64/lifecycle/launcher: export GOOS:=linux
$(BUILD_DIR)/linux-amd64/lifecycle/launcher: export GOARCH:=amd64
$(BUILD_DIR)/linux-amd64/lifecycle/launcher: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$(BUILD_DIR)/linux-amd64/lifecycle/launcher: $(GOFILES)
$(BUILD_DIR)/linux-amd64/lifecycle/launcher:
	@echo "> Building lifecycle/launcher for $(GOOS)/$(GOARCH)..."
	mkdir -p $(OUT_DIR)
	$(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher
	test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 3

$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: export GOOS:=$(1)
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: export GOARCH:=$(2)
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: OUT_DIR?=$$(BUILD_DIR)/$$(GOOS)-$$(GOARCH)/lifecycle
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: $$(GOFILES)
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher:
	@echo "> Building lifecycle/launcher for $$(GOOS)/$$(GOARCH)..."
	mkdir -p $$(OUT_DIR)
	$$(GOENV) $$(GOBUILD) -o $$(OUT_DIR)/launcher -a ./cmd/launcher
	test $$$$(du -m $$(OUT_DIR)/launcher|cut -f 1) -le 3
endef
build-linux-arm64-launcher: $(BUILD_DIR)/linux-arm64/lifecycle/launcher

$(foreach ga,$(GOOS_ARCHS),$(eval $(call build_targets,$(word 1, $(subst /, ,$(ga))),$(word 2, $(subst /, ,$(ga))))))
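# Note: the build_targets template above is instantiated once per "os/arch" pair in
# GOOS_ARCHS via $(foreach ...)/$(eval $(call ...)). The doubled "$$" inside the define
# defers variable expansion until eval time, which is also why the shell substitution
# in the launcher size check appears as $$$$(du ...) there.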
$(BUILD_DIR)/linux-arm64/lifecycle/launcher: export GOOS:=linux
$(BUILD_DIR)/linux-arm64/lifecycle/launcher: export GOARCH:=arm64
$(BUILD_DIR)/linux-arm64/lifecycle/launcher: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$(BUILD_DIR)/linux-arm64/lifecycle/launcher: $(GOFILES)
$(BUILD_DIR)/linux-arm64/lifecycle/launcher:
	@echo "> Building lifecycle/launcher for $(GOOS)/$(GOARCH)..."
	mkdir -p $(OUT_DIR)
	$(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher
	test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 3

generate-sbom: run-syft-linux-amd64 run-syft-linux-arm64 run-syft-linux-ppc64le run-syft-linux-s390x
build-linux-amd64-symlinks: export GOOS:=linux
build-linux-amd64-symlinks: export GOARCH:=amd64
build-linux-amd64-symlinks: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
build-linux-amd64-symlinks:
	@echo "> Creating phase symlinks for $(GOOS)/$(GOARCH)..."
	ln -sf lifecycle $(OUT_DIR)/detector
	ln -sf lifecycle $(OUT_DIR)/analyzer
	ln -sf lifecycle $(OUT_DIR)/restorer
	ln -sf lifecycle $(OUT_DIR)/builder
	ln -sf lifecycle $(OUT_DIR)/exporter
	ln -sf lifecycle $(OUT_DIR)/rebaser
	ln -sf lifecycle $(OUT_DIR)/creator
	ln -sf lifecycle $(OUT_DIR)/extender

run-syft-linux-amd64: install-syft
run-syft-linux-amd64: export GOOS:=linux
run-syft-linux-amd64: export GOARCH:=amd64
run-syft-linux-amd64:
	@echo "> Running syft..."
	syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.cdx.json
	syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.cdx.json
build-linux-arm64-symlinks: export GOOS:=linux
build-linux-arm64-symlinks: export GOARCH:=arm64
build-linux-arm64-symlinks: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
build-linux-arm64-symlinks:
	@echo "> Creating phase symlinks for $(GOOS)/$(GOARCH)..."
	ln -sf lifecycle $(OUT_DIR)/detector
	ln -sf lifecycle $(OUT_DIR)/analyzer
	ln -sf lifecycle $(OUT_DIR)/restorer
	ln -sf lifecycle $(OUT_DIR)/builder
	ln -sf lifecycle $(OUT_DIR)/exporter
	ln -sf lifecycle $(OUT_DIR)/rebaser
	ln -sf lifecycle $(OUT_DIR)/creator
	ln -sf lifecycle $(OUT_DIR)/extender

run-syft-linux-arm64: install-syft
run-syft-linux-arm64: export GOOS:=linux
run-syft-linux-arm64: export GOARCH:=arm64
run-syft-linux-arm64:
	@echo "> Running syft..."
	syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.cdx.json
	syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.cdx.json
build-windows-amd64-lifecycle: $(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe

run-syft-linux-ppc64le: install-syft
run-syft-linux-ppc64le: export GOOS:=linux
run-syft-linux-ppc64le: export GOARCH:=ppc64le
run-syft-linux-ppc64le:
	@echo "> Running syft..."
	syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.cdx.json
	syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.cdx.json
$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe: export GOOS:=windows
$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe: export GOARCH:=amd64
$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle
$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe: $(GOFILES)
$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe:
	@echo "> Building lifecycle/lifecycle for $(GOOS)/$(GOARCH)..."
	$(GOBUILD) -o $(OUT_DIR)$/lifecycle.exe -a .$/cmd$/lifecycle

run-syft-linux-s390x: install-syft
run-syft-linux-s390x: export GOOS:=linux
run-syft-linux-s390x: export GOARCH:=s390x
run-syft-linux-s390x:
	@echo "> Running syft..."
	syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.cdx.json
	syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.cdx.json
build-windows-amd64-launcher: $(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe

install-syft:
	@echo "> Installing syft..."
	curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin
$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: export GOOS:=windows
$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: export GOARCH:=amd64
$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle
$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: $(GOFILES)
$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe:
	@echo "> Building lifecycle/launcher for $(GOOS)/$(GOARCH)..."
	$(GOBUILD) -o $(OUT_DIR)$/launcher.exe -a .$/cmd$/launcher

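# install-go-tool below pins each tool to the module version already recorded in go.mod
# (resolved via `go list -m -f '{{.Version}}'`), so `go install` stays in sync with the module graph.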
define install-go-tool
	@echo "> Installing $(1)..."
	$(GOCMD) install $(1)@$(shell $(GOCMD) list -m -f '{{.Version}}' $(2))
endef
build-windows-amd64-symlinks: export GOOS:=windows
build-windows-amd64-symlinks: export GOARCH:=amd64
build-windows-amd64-symlinks: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle
build-windows-amd64-symlinks:
	@echo "> Creating phase symlinks for Windows..."
ifeq ($(OS),Windows_NT)
	call del $(OUT_DIR)$/detector.exe
	call del $(OUT_DIR)$/analyzer.exe
	call del $(OUT_DIR)$/restorer.exe
	call del $(OUT_DIR)$/builder.exe
	call del $(OUT_DIR)$/exporter.exe
	call del $(OUT_DIR)$/rebaser.exe
	call del $(OUT_DIR)$/creator.exe
	call mklink $(OUT_DIR)$/detector.exe lifecycle.exe
	call mklink $(OUT_DIR)$/analyzer.exe lifecycle.exe
	call mklink $(OUT_DIR)$/restorer.exe lifecycle.exe
	call mklink $(OUT_DIR)$/builder.exe lifecycle.exe
	call mklink $(OUT_DIR)$/exporter.exe lifecycle.exe
	call mklink $(OUT_DIR)$/rebaser.exe lifecycle.exe
	call mklink $(OUT_DIR)$/creator.exe lifecycle.exe
else
	ln -sf lifecycle.exe $(OUT_DIR)$/detector.exe
	ln -sf lifecycle.exe $(OUT_DIR)$/analyzer.exe
	ln -sf lifecycle.exe $(OUT_DIR)$/restorer.exe
	ln -sf lifecycle.exe $(OUT_DIR)$/builder.exe
	ln -sf lifecycle.exe $(OUT_DIR)$/exporter.exe
	ln -sf lifecycle.exe $(OUT_DIR)$/rebaser.exe
	ln -sf lifecycle.exe $(OUT_DIR)$/creator.exe
endif

build-darwin-amd64: build-darwin-amd64-lifecycle build-darwin-amd64-launcher

build-darwin-amd64-lifecycle: $(BUILD_DIR)/darwin-amd64/lifecycle/lifecycle
$(BUILD_DIR)/darwin-amd64/lifecycle/lifecycle: export GOOS:=darwin
$(BUILD_DIR)/darwin-amd64/lifecycle/lifecycle: export GOARCH:=amd64
$(BUILD_DIR)/darwin-amd64/lifecycle/lifecycle: OUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$(BUILD_DIR)/darwin-amd64/lifecycle/lifecycle: $(GOFILES)
$(BUILD_DIR)/darwin-amd64/lifecycle/lifecycle:
	@echo "> Building lifecycle for darwin/amd64..."
	$(GOENV) $(GOBUILD) -o $(OUT_DIR)/lifecycle -a ./cmd/lifecycle
	@echo "> Creating lifecycle symlinks for darwin/amd64..."
	ln -sf lifecycle $(OUT_DIR)/detector
	ln -sf lifecycle $(OUT_DIR)/analyzer
	ln -sf lifecycle $(OUT_DIR)/restorer
	ln -sf lifecycle $(OUT_DIR)/builder
	ln -sf lifecycle $(OUT_DIR)/exporter
	ln -sf lifecycle $(OUT_DIR)/rebaser

build-darwin-amd64-launcher: $(BUILD_DIR)/darwin-amd64/lifecycle/launcher
$(BUILD_DIR)/darwin-amd64/lifecycle/launcher: export GOOS:=darwin
$(BUILD_DIR)/darwin-amd64/lifecycle/launcher: export GOARCH:=amd64
$(BUILD_DIR)/darwin-amd64/lifecycle/launcher: OUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$(BUILD_DIR)/darwin-amd64/lifecycle/launcher: $(GOFILES)
$(BUILD_DIR)/darwin-amd64/lifecycle/launcher:
	@echo "> Building launcher for darwin/amd64..."
	mkdir -p $(OUT_DIR)
	$(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher
	test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 4

install-goimports:
	@echo "> Installing goimports..."
	$(call install-go-tool,golang.org/x/tools/cmd/goimports,golang.org/x/tools)
	$(GOCMD) install golang.org/x/tools/cmd/goimports@v0.1.2

install-yj:
	@echo "> Installing yj..."
	$(call install-go-tool,github.com/sclevine/yj,github.com/sclevine/yj)
	$(GOCMD) install github.com/sclevine/yj@v0.0.0-20210612025309-737bdf40a5d1

install-mockgen:
	@echo "> Installing mockgen..."
	$(call install-go-tool,github.com/golang/mock/mockgen,github.com/golang/mock)
	$(GOCMD) install github.com/golang/mock/mockgen@v1.5.0

install-golangci-lint:
	@echo "> Installing golangci-lint..."
	$(call install-go-tool,github.com/golangci/golangci-lint/v2/cmd/golangci-lint,github.com/golangci/golangci-lint/v2)
	$(GOCMD) install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.49.0

lint: install-golangci-lint
	@echo "> Linting code..."

@ -179,7 +244,7 @@ format: install-goimports

tidy:
	@echo "> Tidying modules..."
	$(GOCMD) mod tidy
	$(GOCMD) mod tidy -compat=1.18

test: unit acceptance

@ -205,7 +270,7 @@ clean:
	@echo "> Cleaning workspace..."
	rm -rf $(BUILD_DIR)

package: generate-sbom package-linux-amd64 package-linux-arm64 package-linux-ppc64le package-linux-s390x
package: package-linux-amd64 package-linux-arm64 package-windows-amd64

package-linux-amd64: GOOS:=linux
package-linux-amd64: GOARCH:=amd64

@ -225,20 +290,26 @@ package-linux-arm64:
	@echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..."
	$(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION)

package-linux-ppc64le: GOOS:=linux
package-linux-ppc64le: GOARCH:=ppc64le
package-linux-ppc64le: INPUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
package-linux-ppc64le: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).ppc64le.tgz
package-linux-ppc64le: PACKAGER=./tools/packager/main.go
package-linux-ppc64le:
package-windows-amd64: GOOS:=windows
package-windows-amd64: GOARCH:=amd64
package-windows-amd64: INPUT_DIR:=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle
package-windows-amd64: ARCHIVE_PATH=$(BUILD_DIR)$/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).x86-64.tgz
package-windows-amd64: PACKAGER=.$/tools$/packager$/main.go
package-windows-amd64:
	@echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..."
	$(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION)

package-linux-s390x: GOOS:=linux
package-linux-s390x: GOARCH:=s390x
package-linux-s390x: INPUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
package-linux-s390x: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).s390x.tgz
package-linux-s390x: PACKAGER=./tools/packager/main.go
package-linux-s390x:
	@echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..."
	$(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION)
# Ensure workdir is clean and build image from .git
docker-build-source-image-windows: $(GOFILES)
docker-build-source-image-windows:
	$(if $(shell git status --short), @echo Uncommitted changes. Refusing to run. && exit 1)
	docker build .git -f tools/Dockerfile.windows --tag $(SOURCE_COMPILATION_IMAGE) --build-arg image_tag=$(WINDOWS_COMPILATION_IMAGE) --cache-from=$(SOURCE_COMPILATION_IMAGE) --isolation=process --compress

docker-run-windows: docker-build-source-image-windows
docker-run-windows:
	@echo "> Running '$(DOCKER_CMD)' in docker windows..."
	@docker volume rm -f lifecycle-out
	docker run -v lifecycle-out:c:/lifecycle/out -e LIFECYCLE_VERSION -e PLATFORM_API -e BUILDPACK_API -v gopathcache:c:/gopath -v '\\.\pipe\docker_engine:\\.\pipe\docker_engine' --isolation=process --interactive --tty --rm $(SOURCE_COMPILATION_IMAGE) $(DOCKER_CMD)
	docker run -v lifecycle-out:c:/lifecycle/out --rm $(SOURCE_COMPILATION_IMAGE) tar -cf- out | tar -xf-
	@docker volume rm -f lifecycle-out

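For readers less familiar with GNU make, the following is a rough, hypothetical Go equivalent of what the `install-go-tool` macro defined earlier in this Makefile does for a single tool: resolve the version of the owning module pinned in go.mod, then install the tool at exactly that version. The function name is illustrative and is not part of the repository.

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// installGoTool mirrors the install-go-tool make macro: it resolves the version of
// `module` recorded in go.mod and installs `tool` at that exact version.
// It must be run from inside a Go module that requires `module`.
func installGoTool(tool, module string) error {
	out, err := exec.Command("go", "list", "-m", "-f", "{{.Version}}", module).Output()
	if err != nil {
		return fmt.Errorf("resolving version of %s: %w", module, err)
	}
	version := strings.TrimSpace(string(out))

	cmd := exec.Command("go", "install", fmt.Sprintf("%s@%s", tool, version))
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	if err := installGoTool("golang.org/x/tools/cmd/goimports", "golang.org/x/tools"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```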
README.md

@ -9,16 +9,18 @@
A reference implementation of the [Cloud Native Buildpacks specification](https://github.com/buildpacks/spec).

## Supported APIs
| Lifecycle Version | Platform APIs | Buildpack APIs |
|-------------------|---------------|----------------|
| 0.20.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12], [0.13][p/0.13], [0.14][p/0.14] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10], [0.11][b/0.11] |
| 0.19.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12], [0.13][p/0.13] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10], [0.11][b/0.11] |
| 0.18.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10] |
| 0.17.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10] |
| 0.16.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9] |
| 0.15.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9] |
| 0.14.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8] |
| 0.13.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7] |
| Lifecycle Version | Platform APIs | Buildpack APIs |
|-------------------|---------------|----------------|
| 0.15.x* | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9] |
| 0.14.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8] |
| 0.13.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7] |
| 0.12.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6] |
| 0.11.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6] |
| 0.10.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5] |
| 0.9.x | [0.3][p/0.3], [0.4][p/0.4] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4] |
| 0.8.x | [0.3][p/0.3] | [0.2][b/0.2] |
| 0.7.x | [0.2][p/0.2] | [0.2][b/0.2] |
| 0.6.x | [0.2][p/0.2] | [0.2][b/0.2] |

[b/0.2]: https://github.com/buildpacks/spec/blob/buildpack/v0.2/buildpack.md
[b/0.3]: https://github.com/buildpacks/spec/tree/buildpack/v0.3/buildpack.md

@ -28,8 +30,6 @@ A reference implementation of the [Cloud Native Buildpacks specification](https:
[b/0.7]: https://github.com/buildpacks/spec/tree/buildpack/v0.7/buildpack.md
[b/0.8]: https://github.com/buildpacks/spec/tree/buildpack/v0.8/buildpack.md
[b/0.9]: https://github.com/buildpacks/spec/tree/buildpack/v0.9/buildpack.md
[b/0.10]: https://github.com/buildpacks/spec/tree/buildpack/v0.10/buildpack.md
[b/0.11]: https://github.com/buildpacks/spec/tree/buildpack/v0.11/buildpack.md
[p/0.2]: https://github.com/buildpacks/spec/blob/platform/v0.2/platform.md
[p/0.3]: https://github.com/buildpacks/spec/blob/platform/v0.3/platform.md
[p/0.4]: https://github.com/buildpacks/spec/blob/platform/v0.4/platform.md

@ -39,10 +39,6 @@ A reference implementation of the [Cloud Native Buildpacks specification](https:
[p/0.8]: https://github.com/buildpacks/spec/blob/platform/v0.8/platform.md
[p/0.9]: https://github.com/buildpacks/spec/blob/platform/v0.9/platform.md
[p/0.10]: https://github.com/buildpacks/spec/blob/platform/v0.10/platform.md
[p/0.11]: https://github.com/buildpacks/spec/blob/platform/v0.11/platform.md
[p/0.12]: https://github.com/buildpacks/spec/blob/platform/v0.12/platform.md
[p/0.13]: https://github.com/buildpacks/spec/blob/platform/v0.13/platform.md
[p/0.14]: https://github.com/buildpacks/spec/blob/platform/v0.14/platform.md

\* denotes unreleased version

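The tables above are what platforms (and the acceptance tests later in this diff) consult to decide which flags and behaviors a given Platform API allows. Below is a minimal sketch of that gating using the `api` package referenced in the tests; the helper name and the printed examples are illustrative only, not code from the repository.

```go
package main

import (
	"fmt"

	"github.com/buildpacks/lifecycle/api"
)

// requiresRunImageFlag mirrors the api.MustParse(platformAPI).AtLeast("0.7") checks
// used throughout the acceptance tests below, where -run-image is only appended for
// Platform API 0.7 and newer. The helper name is hypothetical.
func requiresRunImageFlag(platformAPI string) bool {
	return api.MustParse(platformAPI).AtLeast("0.7")
}

func main() {
	fmt.Println("latest supported Platform API:", api.Platform.Latest().String())
	for _, v := range []string{"0.6", "0.10"} {
		fmt.Printf("Platform API %s expects -run-image: %v\n", v, requiresRunImageFlag(v))
	}
}
```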
RELEASE.md

@ -1,73 +1,22 @@
# Release Finalization
## Release Finalization

## Types of releases

#### New minor
* For newly supported Platform or Buildpack API versions, or breaking changes (e.g., API deprecations).

#### Pre-release aka release candidate
* Ideally we should ship a pre-release (waiting a few days for folks to try it out) before we ship a new minor.
* We typically don't ship pre-releases for patches or backports.

#### New patch
* For go version updates, CVE fixes / dependency bumps, bug fixes, etc.
* Review the latest commits on `main` to determine if any are unacceptable for a patch - if there are commits that should be excluded, branch off the latest tag for the current minor and cherry-pick commits over.

#### Backport
* New patch for an old minor. Typically, to help folks out who haven't yet upgraded from [unsupported APIs](https://github.com/buildpacks/rfcs/blob/main/text/0110-deprecate-apis.md).
* For go version updates, CVE fixes / dependency bumps, bug fixes, etc.
* Branch off the latest tag for the desired minor.

## Release Finalization Steps

### Step 1 - Prepare

Determine the type of release ([new minor](#new-minor), [pre-release](#pre-release-aka-release-candidate), [new patch](#new-patch), or [backport](#backport)) and prepare the branch accordingly.

**To prepare the release branch:**
1. Check open PRs for any dependabot updates that should be merged.
1. Create a release branch in the format `release/0.99.0-rc.1` (for pre-releases) or `release/0.99.0` (for final releases).
   * New commits to this branch will trigger the `build` workflow and produce a lifecycle image: `buildpacksio/lifecycle:<commit sha>`.
To cut a pre-release:
1. If applicable, ensure the README is updated with the latest supported apis (example PR: https://github.com/buildpacks/lifecycle/pull/550).
   * For final releases (not pre-releases), remove the pre-release note (`*`) for the latest apis.
1. Create a release branch in the format `release/0.99.0-rc.1`. New commits to this branch will trigger the `build` workflow and produce a lifecycle image: `buildpacksio/lifecycle:<commit sha>`.
1. When ready to cut the release, manually trigger the `draft-release` workflow: Actions -> draft-release -> Run workflow -> Use workflow from branch: `release/0.99.0-rc.1`. This will create a draft release on GitHub using the artifacts from the `build` workflow run for the latest commit on the release branch.
1. Edit the release notes as necessary.
1. Perform any manual validation of the artifacts.
1. When ready to publish the release, edit the release page and click "Publish release". This will trigger the `post-release` workflow that will re-tag the lifecycle image from `buildpacksio/lifecycle:<commit sha>` to `buildpacksio/lifecycle:0.99.0` but will NOT update the `latest` tag.

**For final releases (not pre-releases):**
To cut a release:
1. Ensure the relevant spec APIs have been released.
1. Ensure the `lifecycle/0.99.0` milestone on the [docs repo](https://github.com/buildpacks/docs/blob/main/RELEASE.md#lump-changes) is complete, such that every new feature in the lifecycle is fully explained in the `release/lifecycle/0.99` branch on the docs repo, and [migration guides](https://github.com/buildpacks/docs/tree/main/content/docs/reference/spec/migration) (if relevant) are included.

### Step 2 - Publish the Release

1. Manually trigger the `draft-release` workflow: Actions -> draft-release -> Run workflow -> Use workflow from branch: `release/<release version>`. This will create a draft release on GitHub using the artifacts from the `build` workflow run for the latest commit on the release branch.
1. Create a release branch in the format `release/0.99.0`. New commits to this branch will trigger the `build` workflow and produce a lifecycle image: `buildpacksio/lifecycle:<commit sha>`.
1. If applicable, ensure the README is updated with the latest supported apis (example PR: https://github.com/buildpacks/lifecycle/pull/550) and remove the pre-release note for the latest apis.
1. When ready to cut the release, manually trigger the `draft-release` workflow: Actions -> draft-release -> Run workflow -> Use workflow from branch: `release/0.99.0`. This will create a draft release on GitHub using the artifacts from the `build` workflow run for the latest commit on the release branch.
1. Edit the release notes as necessary.
1. Perform any manual validation of the artifacts as necessary (usually none).
1. Edit the release page and click "Publish release".
   * This will trigger the `post-release` workflow that will re-tag the lifecycle image from `buildpacksio/lifecycle:<commit sha>` to `buildpacksio/lifecycle:<release version>`.
   * For final releases ONLY, this will also re-tag the lifecycle image from `buildpacksio/lifecycle:<commit sha>` to `buildpacksio/lifecycle:latest`.

### Step 3 - Follow-up

**For pre-releases:**
* Ask the relevant teams to try out the pre-released artifacts.

**For final releases:**
* Update the `main` branch to remove the pre-release note in [README.md](https://github.com/buildpacks/lifecycle/blob/main/README.md) and/or merge `release/0.99.0` into `main`.
* Ask the learning team to merge the `release/lifecycle/0.99` branch into `main` on the docs repo.

## Go version updates

Go version updates should be released as a [new minor](#new-minor) or [new patch](#new-patch) release.

### New Patch

If the go patch is in [actions/go-versions](https://github.com/actions/go-versions/pulls?q=is%3Apr+is%3Aclosed) then CI should pull it in automatically without any action needed.
We simply need to create the release branch and let the pipeline run.

### New Minor

We typically do this when the existing patch version exceeds 6 - e.g., `1.22.6`. This means we have about 6 months to upgrade before the current minor becomes unsupported due to the introduction of the new n+2 minor.

#### Steps
1. Update go.mod
1. Search for the old `major.minor`, there are a few files that need to be updated (example PR: https://github.com/buildpacks/lifecycle/pull/1405/files)
1. Update the linter to a version that supports the current `major.minor`
1. Fix any lint errors as necessary
1. Perform any manual validation of the artifacts.
1. When ready to publish the release, edit the release page and click "Publish release". This will trigger the `post-release` workflow that will re-tag the lifecycle image from `buildpacksio/lifecycle:<commit sha>` to `buildpacksio/lifecycle:0.99.0` and `buildpacksio/lifecycle:latest`.
1. Once released
   - Update the `main` branch to remove the pre-release note in [README.md](https://github.com/buildpacks/lifecycle/blob/main/README.md) and/or merge `release/0.99.0` into `main`.
   - Ask the learning team to merge the `release/lifecycle/0.99` branch into `main` on the docs repo.

@ -141,7 +141,6 @@ func testVersion(t *testing.T, when spec.G, it spec.S) {
		w(tc.description, func() {
			it("only prints the version", func() {
				cmd := lifecycleCmd(tc.command, tc.args...)
				cmd.Env = []string{fmt.Sprintf("CNB_PLATFORM_API=%s", api.Platform.Latest().String())}
				output, err := cmd.CombinedOutput()
				if err != nil {
					t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)

@ -2,22 +2,26 @@ package acceptance

import (
	"fmt"
	"math/rand"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"testing"
	"time"

	"github.com/BurntSushi/toml"
	"github.com/sclevine/spec"
	"github.com/sclevine/spec/report"

	"github.com/buildpacks/lifecycle/api"
	"github.com/buildpacks/lifecycle/cmd"
	"github.com/buildpacks/lifecycle/internal/path"
	"github.com/buildpacks/lifecycle/platform/files"
	"github.com/buildpacks/lifecycle/platform"
	h "github.com/buildpacks/lifecycle/testhelpers"
)

var cacheFixtureDir string

var (
	analyzeImage         string
	analyzeRegAuthConfig string

@ -29,6 +33,8 @@ var (
)
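// The package-level variables above (together with cacheFixtureDir) act as shared
// fixtures: TestAnalyzer below populates them from the phase test harness before the
// per-Platform-API spec suites defined later in this file run.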
func TestAnalyzer(t *testing.T) {
	rand.Seed(time.Now().UTC().UnixNano())

	testImageDockerContext := filepath.Join("testdata", "analyzer")
	analyzeTest = NewPhaseTest(t, "analyzer", testImageDockerContext)
	analyzeTest.Start(t)

@ -36,6 +42,7 @@ func TestAnalyzer(t *testing.T) {

	analyzeImage = analyzeTest.testImageRef
	analyzerPath = analyzeTest.containerBinaryPath
	cacheFixtureDir = filepath.Join("testdata", "cache-dir")
	analyzeRegAuthConfig = analyzeTest.targetRegistry.authConfig
	analyzeRegNetwork = analyzeTest.targetRegistry.network
	analyzeDaemonFixtures = analyzeTest.targetDaemon.fixtures
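Many of the assertions later in this file (for example `assertAnalyzedMetadata` and the `PreviousImage.Reference` checks) revolve around the `analyzed.toml` file the analyzer writes. As a rough sketch of how such a file can be decoded with the `github.com/BurntSushi/toml` dependency imported above, assuming `[image]` and `[run-image]` tables each carrying a `reference` key; the struct below is a hypothetical, trimmed-down stand-in, not the lifecycle's real schema type.

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// analyzedTOML is a hypothetical subset of analyzed.toml used only for this example;
// the lifecycle defines its own richer types for this file.
type analyzedTOML struct {
	PreviousImage struct {
		Reference string `toml:"reference"`
	} `toml:"image"`
	RunImage struct {
		Reference string `toml:"reference"`
	} `toml:"run-image"`
}

func main() {
	var md analyzedTOML
	if _, err := toml.DecodeFile("analyzed.toml", &md); err != nil {
		log.Fatal(err)
	}
	fmt.Println("previous image:", md.PreviousImage.Reference)
	fmt.Println("run image:", md.RunImage.Reference)
}
```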
@ -67,23 +74,6 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
os.RemoveAll(copyDir)
|
||||
})
|
||||
|
||||
when("CNB_PLATFORM_API not provided", func() {
|
||||
it("errors", func() {
|
||||
cmd := exec.Command(
|
||||
"docker", "run", "--rm",
|
||||
"--env", "CNB_PLATFORM_API= ",
|
||||
analyzeImage,
|
||||
ctrPath(analyzerPath),
|
||||
"some-image",
|
||||
) // #nosec G204
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "please set 'CNB_PLATFORM_API'"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("called without an app image", func() {
|
||||
it("errors", func() {
|
||||
cmd := exec.Command(
|
||||
|
@ -100,34 +90,73 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
})
|
||||
})
|
||||
|
||||
when("called with skip layers", func() {
|
||||
it("writes analyzed.toml and does not restore previous image SBOM", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "Platform API < 0.9 does not accept a -skip-layers flag")
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
when("called with group", func() {
|
||||
it("errors", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.7"), "Platform API < 0.7 accepts a -group flag")
|
||||
cmd := exec.Command(
|
||||
"docker", "run", "--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
analyzeImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
)...),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-daemon",
|
||||
"-run-image", analyzeRegFixtures.ReadOnlyRunImage,
|
||||
"-skip-layers",
|
||||
analyzeDaemonFixtures.AppImage,
|
||||
),
|
||||
)
|
||||
assertAnalyzedMetadata(t, filepath.Join(copyDir, "layers", "analyzed.toml"))
|
||||
h.AssertStringDoesNotContain(t, output, "Restoring data for SBOM from previous image")
|
||||
ctrPath(analyzerPath),
|
||||
"-group", "group.toml",
|
||||
"some-image",
|
||||
) // #nosec G204
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "flag provided but not defined: -group"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("called with skip layers", func() {
|
||||
it("errors", func() {
|
||||
h.SkipIf(t,
|
||||
api.MustParse(platformAPI).LessThan("0.7") || api.MustParse(platformAPI).AtLeast("0.9"),
|
||||
"Platform API < 0.7 or Platform API > 0.9 accepts a -skip-layers flag")
|
||||
cmd := exec.Command(
|
||||
"docker", "run", "--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
analyzeImage,
|
||||
ctrPath(analyzerPath),
|
||||
"-skip-layers",
|
||||
"some-image",
|
||||
) // #nosec G204
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "flag provided but not defined: -skip-layers"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("called with cache dir", func() {
|
||||
it("errors", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.7"), "Platform API < 0.7 accepts a -cache-dir flag")
|
||||
cmd := exec.Command(
|
||||
"docker", "run", "--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
analyzeImage,
|
||||
ctrPath(analyzerPath),
|
||||
"-cache-dir", "/cache",
|
||||
"some-image",
|
||||
) // #nosec G204
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "flag provided but not defined: -cache-dir"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("the provided layers directory isn't writeable", func() {
|
||||
it("recursively chowns the directory", func() {
|
||||
analyzeFlags := []string{"-run-image", analyzeRegFixtures.ReadOnlyRunImage}
|
||||
h.SkipIf(t, runtime.GOOS == "windows", "Not relevant on Windows")
|
||||
|
||||
var analyzeFlags []string
|
||||
if api.MustParse(platformAPI).AtLeast("0.7") {
|
||||
analyzeFlags = append(analyzeFlags, []string{"-run-image", analyzeRegFixtures.ReadOnlyRunImage}...)
|
||||
}
|
||||
|
||||
output := h.DockerRun(t,
|
||||
analyzeImage,
|
||||
|
@ -149,11 +178,61 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
})
|
||||
})
|
||||
|
||||
when("called with analyzed", func() {
|
||||
it("uses the provided analyzed.toml path", func() {
|
||||
analyzeFlags := []string{
|
||||
"-analyzed", ctrPath("/some-dir/some-analyzed.toml"),
|
||||
"-run-image", analyzeRegFixtures.ReadOnlyRunImage,
|
||||
when("group path is provided", func() {
|
||||
it("uses the provided group path", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not accept a -group flag")
|
||||
|
||||
h.DockerSeedRunAndCopy(t,
|
||||
containerName,
|
||||
cacheFixtureDir, ctrPath("/cache"),
|
||||
copyDir, ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-cache-dir", ctrPath("/cache"),
|
||||
"-group", ctrPath("/layers/other-group.toml"),
|
||||
"some-image",
|
||||
),
|
||||
)
|
||||
|
||||
h.AssertPathExists(t, filepath.Join(copyDir, "layers", "some-other-buildpack-id"))
|
||||
h.AssertPathDoesNotExist(t, filepath.Join(copyDir, "layers", "some-buildpack-id"))
|
||||
})
|
||||
|
||||
when("group contains unsupported buildpacks", func() {
|
||||
it("errors", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not accept a -group flag")
|
||||
|
||||
cmd := exec.Command(
|
||||
"docker", "run", "--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
analyzeImage,
|
||||
ctrPath(analyzerPath),
|
||||
"-group", ctrPath("/layers/unsupported-group.toml"),
|
||||
"some-image",
|
||||
) // #nosec G204
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
h.AssertNotNil(t, err)
|
||||
failErr, ok := err.(*exec.ExitError)
|
||||
if !ok {
|
||||
t.Fatalf("expected an error of type exec.ExitError")
|
||||
}
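				// When the docker run command exits non-zero, CombinedOutput returns a
				// *exec.ExitError; the type assertion above lets the test read the numeric
				// status, which is asserted below as the platform exit code for buildpack
				// API incompatibility.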
				h.AssertEq(t, failErr.ExitCode(), 12) // platform code for buildpack api incompatibility
				expected := "buildpack API version '0.1' is incompatible with the lifecycle"
				h.AssertStringContains(t, string(output), expected)
			})
		})
	})
|
||||
|
||||
when("analyzed path is provided", func() {
|
||||
it("uses the provided analyzed path", func() {
|
||||
analyzeFlags := []string{"-analyzed", ctrPath("/some-dir/some-analyzed.toml")}
|
||||
if api.MustParse(platformAPI).AtLeast("0.7") {
|
||||
analyzeFlags = append(analyzeFlags, "-run-image", analyzeRegFixtures.ReadOnlyRunImage)
|
||||
}
|
||||
|
||||
var execArgs []string
|
||||
|
@ -177,31 +256,12 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
})
|
||||
})
|
||||
|
||||
when("called with run", func() {
|
||||
it("uses the provided run.toml path", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept -run")
|
||||
cmd := exec.Command(
|
||||
"docker", "run", "--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||
"--network", analyzeRegNetwork,
|
||||
analyzeImage,
|
||||
ctrPath(analyzerPath),
|
||||
"-run", "/cnb/run.toml",
|
||||
analyzeRegFixtures.SomeAppImage,
|
||||
) // #nosec G204
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to find accessible run image"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
it("drops privileges", func() {
|
||||
analyzeArgs := []string{
|
||||
"-analyzed", "/some-dir/some-analyzed.toml",
|
||||
"-run-image", analyzeRegFixtures.ReadOnlyRunImage,
|
||||
h.SkipIf(t, runtime.GOOS == "windows", "Not relevant on Windows")
|
||||
|
||||
analyzeArgs := []string{"-analyzed", "/some-dir/some-analyzed.toml"}
|
||||
if api.MustParse(platformAPI).AtLeast("0.7") {
|
||||
analyzeArgs = append(analyzeArgs, "-run-image", analyzeRegFixtures.ReadOnlyRunImage)
|
||||
}
|
||||
|
||||
output := h.DockerRun(t,
|
||||
|
@ -226,6 +286,8 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
when("run image", func() {
|
||||
when("provided", func() {
|
||||
it("is recorded in analyzed.toml", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.7"), "Platform API < 0.7 does not accept run image")
|
||||
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
|
@ -246,6 +308,8 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
|
||||
when("not provided", func() {
|
||||
it("falls back to CNB_RUN_IMAGE", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.7"), "Platform API < 0.7 does not accept run image")
|
||||
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
|
@ -268,9 +332,9 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
|
||||
when("daemon case", func() {
|
||||
it("writes analyzed.toml", func() {
|
||||
analyzeFlags := []string{
|
||||
"-daemon",
|
||||
"-run-image", "some-run-image",
|
||||
analyzeFlags := []string{"-daemon"}
|
||||
if api.MustParse(platformAPI).AtLeast("0.7") {
|
||||
analyzeFlags = append(analyzeFlags, []string{"-run-image", "some-run-image"}...)
|
||||
}
|
||||
|
||||
var execArgs []string
|
||||
|
@ -293,7 +357,9 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
})
|
||||
|
||||
when("app image exists", func() {
|
||||
it("does not restore app metadata to the layers directory", func() {
|
||||
it("does not restore app metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.7"), "Platform API < 0.7 restores app metadata")
|
||||
|
||||
analyzeFlags := []string{"-daemon", "-run-image", "some-run-image"}
|
||||
|
||||
var execArgs []string
|
||||
|
@ -314,12 +380,248 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
|
||||
assertNoRestoreOfAppMetadata(t, copyDir, output)
|
||||
})
|
||||
|
||||
it("restores app metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not restore app metadata")
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
)...),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-daemon",
|
||||
analyzeDaemonFixtures.AppImage,
|
||||
),
|
||||
)
|
||||
|
||||
assertLogsAndRestoresAppMetadata(t, copyDir, output)
|
||||
})
|
||||
|
||||
when("skip layers is provided", func() {
|
||||
it("writes analyzed.toml and does not write buildpack layer metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not accept a -skip-layers flag")
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
)...),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-daemon",
|
||||
"-skip-layers",
|
||||
analyzeDaemonFixtures.AppImage,
|
||||
),
|
||||
)
|
||||
|
||||
assertAnalyzedMetadata(t, filepath.Join(copyDir, "layers", "analyzed.toml"))
|
||||
assertWritesStoreTomlOnly(t, copyDir, output)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("cache is provided", func() {
|
||||
when("cache image case", func() {
|
||||
when("cache image is in a daemon", func() {
|
||||
it("ignores the cache", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read from the cache")
|
||||
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
)...),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-daemon",
|
||||
"-cache-image", analyzeDaemonFixtures.CacheImage,
|
||||
"some-image",
|
||||
),
|
||||
)
|
||||
|
||||
h.AssertPathDoesNotExist(t, filepath.Join(copyDir, "layers", "some-buildpack-id", "some-layer.sha"))
|
||||
h.AssertPathDoesNotExist(t, filepath.Join(copyDir, "layers", "some-buildpack-id", "some-layer.toml"))
|
||||
})
|
||||
})
|
||||
|
||||
when("cache image is in a registry", func() {
|
||||
when("auth registry", func() {
|
||||
when("registry creds are provided in CNB_REGISTRY_AUTH", func() {
|
||||
it("restores cache metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read from the cache")
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/layers",
|
||||
analyzeImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||
"--network", analyzeRegNetwork,
|
||||
)...),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-daemon",
|
||||
"-cache-image", analyzeRegFixtures.SomeCacheImage,
|
||||
analyzeRegFixtures.SomeAppImage,
|
||||
),
|
||||
)
|
||||
|
||||
assertLogsAndRestoresCacheMetadata(t, copyDir, output)
|
||||
})
|
||||
})
|
||||
|
||||
when("registry creds are provided in the docker config.json", func() {
|
||||
it("restores cache metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read from the cache")
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "DOCKER_CONFIG=/docker-config",
|
||||
"--network", analyzeRegNetwork,
|
||||
),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-cache-image",
|
||||
analyzeRegFixtures.SomeCacheImage,
|
||||
analyzeRegFixtures.SomeAppImage,
|
||||
),
|
||||
)
|
||||
|
||||
assertLogsAndRestoresCacheMetadata(t, copyDir, output)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("no auth registry", func() {
|
||||
it("restores cache metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read from the cache")
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--network", analyzeRegNetwork,
|
||||
)...),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-daemon",
|
||||
"-cache-image",
|
||||
analyzeRegFixtures.ReadOnlyCacheImage,
|
||||
analyzeRegFixtures.ReadOnlyAppImage,
|
||||
),
|
||||
)
|
||||
|
||||
assertLogsAndRestoresCacheMetadata(t, copyDir, output)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("cache directory case", func() {
|
||||
it("restores cache metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read from the cache")
|
||||
output := h.DockerSeedRunAndCopy(t,
|
||||
containerName,
|
||||
cacheFixtureDir, ctrPath("/cache"),
|
||||
copyDir, ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
)...),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-daemon",
|
||||
"-cache-dir", ctrPath("/cache"),
|
||||
"some-image",
|
||||
),
|
||||
)
|
||||
|
||||
assertLogsAndRestoresCacheMetadata(t, copyDir, output)
|
||||
})
|
||||
|
||||
when("the provided cache directory isn't writeable by the CNB user's group", func() {
|
||||
it("recursively chowns the directory", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read from the cache")
|
||||
h.SkipIf(t, runtime.GOOS == "windows", "Not relevant on Windows")
|
||||
|
||||
cacheVolume := h.SeedDockerVolume(t, cacheFixtureDir)
|
||||
defer h.DockerVolumeRemove(t, cacheVolume)
|
||||
|
||||
output := h.DockerRun(t,
|
||||
analyzeImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--volume", cacheVolume+":/cache",
|
||||
)...),
|
||||
h.WithBash(
|
||||
fmt.Sprintf("chown -R 9999:9999 /cache; chmod -R 775 /cache; %s -daemon -cache-dir /cache some-image; ls -alR /cache", analyzerPath),
|
||||
),
|
||||
)
|
||||
|
||||
h.AssertMatch(t, output, "2222 3333 .+ \\.")
|
||||
h.AssertMatch(t, output, "2222 3333 .+ committed")
|
||||
h.AssertMatch(t, output, "2222 3333 .+ staging")
|
||||
})
|
||||
})
|
||||
|
||||
when("the provided cache directory is writeable by the CNB user's group", func() {
|
||||
it("doesn't chown the directory", func() {
|
||||
h.SkipIf(t, runtime.GOOS == "windows", "Not relevant on Windows")
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read from the cache")
|
||||
|
||||
cacheVolume := h.SeedDockerVolume(t, cacheFixtureDir)
|
||||
defer h.DockerVolumeRemove(t, cacheVolume)
|
||||
|
||||
output := h.DockerRun(t,
|
||||
analyzeImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--volume", cacheVolume+":/cache",
|
||||
)...),
|
||||
h.WithBash(
|
||||
fmt.Sprintf("chown -R 9999:3333 /cache; chmod -R 775 /cache; %s -daemon -cache-dir /cache some-image; ls -alR /cache", analyzerPath),
|
||||
),
|
||||
)
|
||||
|
||||
h.AssertMatch(t, output, "9999 3333 .+ \\.")
|
||||
h.AssertMatch(t, output, "9999 3333 .+ committed")
|
||||
h.AssertMatch(t, output, "2222 3333 .+ staging")
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("registry case", func() {
|
||||
it("writes analyzed.toml", func() {
|
||||
analyzeFlags := []string{"-run-image", analyzeRegFixtures.ReadOnlyRunImage}
|
||||
var analyzeFlags []string
|
||||
if api.MustParse(platformAPI).AtLeast("0.7") {
|
||||
analyzeFlags = append(analyzeFlags, []string{"-run-image", analyzeRegFixtures.ReadOnlyRunImage}...)
|
||||
}
|
||||
|
||||
var execArgs []string
|
||||
execArgs = append([]string{ctrPath(analyzerPath)}, analyzeFlags...)
|
||||
|
@ -341,13 +643,139 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
assertAnalyzedMetadata(t, filepath.Join(copyDir, "analyzed.toml"))
|
||||
})
|
||||
|
||||
when("app image exists", func() {
|
||||
when("auth registry", func() {
|
||||
when("registry creds are provided in CNB_REGISTRY_AUTH", func() {
|
||||
it("restores app metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read app layer metadata")
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||
"--network", analyzeRegNetwork,
|
||||
),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
analyzeRegFixtures.SomeAppImage,
|
||||
),
|
||||
)
|
||||
|
||||
assertLogsAndRestoresAppMetadata(t, copyDir, output)
|
||||
})
|
||||
})
|
||||
|
||||
when("registry creds are provided in the docker config.json", func() {
|
||||
it("restores app metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read app layer metadata")
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "DOCKER_CONFIG=/docker-config",
|
||||
"--network", analyzeRegNetwork,
|
||||
),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
analyzeRegFixtures.SomeAppImage,
|
||||
),
|
||||
)
|
||||
|
||||
assertLogsAndRestoresAppMetadata(t, copyDir, output)
|
||||
})
|
||||
})
|
||||
|
||||
when("skip layers is provided", func() {
|
||||
it("writes analyzed.toml and does not write buildpack layer metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not accept a -skip-layers flag")
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||
"--network", analyzeRegNetwork,
|
||||
),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-skip-layers",
|
||||
analyzeRegFixtures.SomeAppImage,
|
||||
),
|
||||
)
|
||||
|
||||
assertAnalyzedMetadata(t, filepath.Join(copyDir, "layers", "analyzed.toml"))
|
||||
assertWritesStoreTomlOnly(t, copyDir, output)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("no auth registry", func() {
|
||||
it("restores app metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read app layer metadata")
|
||||
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--network", analyzeRegNetwork,
|
||||
),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
analyzeRegFixtures.ReadOnlyAppImage,
|
||||
),
|
||||
)
|
||||
|
||||
assertLogsAndRestoresAppMetadata(t, copyDir, output)
|
||||
})
|
||||
|
||||
when("skip layers is provided", func() {
|
||||
it("writes analyzed.toml and does not write buildpack layer metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not accept a -skip-layers flag")
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--network", analyzeRegNetwork,
|
||||
),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-skip-layers",
|
||||
analyzeRegFixtures.ReadOnlyAppImage,
|
||||
),
|
||||
)
|
||||
|
||||
assertAnalyzedMetadata(t, filepath.Join(copyDir, "layers", "analyzed.toml"))
|
||||
assertWritesStoreTomlOnly(t, copyDir, output)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("called with previous image", func() {
|
||||
it.Before(func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.7"), "Platform API < 0.7 does not support -previous-image")
|
||||
})
|
||||
|
||||
when("auth registry", func() {
|
||||
when("the destination image does not exist", func() {
|
||||
it("writes analyzed.toml with previous image identifier", func() {
|
||||
analyzeFlags := []string{
|
||||
"-previous-image", analyzeRegFixtures.ReadWriteAppImage,
|
||||
"-run-image", analyzeRegFixtures.ReadOnlyRunImage,
|
||||
analyzeFlags := []string{"-previous-image", analyzeRegFixtures.ReadWriteAppImage}
|
||||
if api.MustParse(platformAPI).AtLeast("0.7") {
|
||||
analyzeFlags = append(analyzeFlags, []string{"-run-image", analyzeRegFixtures.ReadOnlyRunImage}...)
|
||||
}
|
||||
|
||||
var execArgs []string
|
||||
|
@ -367,15 +795,15 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
h.WithArgs(execArgs...),
|
||||
)
|
||||
analyzedMD := assertAnalyzedMetadata(t, filepath.Join(copyDir, "analyzed.toml"))
|
||||
h.AssertStringContains(t, analyzedMD.PreviousImageRef(), analyzeRegFixtures.ReadWriteAppImage)
|
||||
h.AssertStringContains(t, analyzedMD.PreviousImage.Reference, analyzeRegFixtures.ReadWriteAppImage)
|
||||
})
|
||||
})
|
||||
|
||||
when("the destination image exists", func() {
|
||||
it("writes analyzed.toml with previous image identifier", func() {
|
||||
analyzeFlags := []string{
|
||||
"-previous-image", analyzeRegFixtures.ReadWriteAppImage,
|
||||
"-run-image", analyzeRegFixtures.ReadOnlyRunImage,
|
||||
analyzeFlags := []string{"-previous-image", analyzeRegFixtures.ReadWriteAppImage}
|
||||
if api.MustParse(platformAPI).AtLeast("0.7") {
|
||||
analyzeFlags = append(analyzeFlags, []string{"-run-image", analyzeRegFixtures.ReadOnlyRunImage}...)
|
||||
}
|
||||
|
||||
var execArgs []string
|
||||
|
@ -396,15 +824,117 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
)
|
||||
|
||||
analyzedMD := assertAnalyzedMetadata(t, filepath.Join(copyDir, "analyzed.toml"))
|
||||
h.AssertStringContains(t, analyzedMD.PreviousImageRef(), analyzeRegFixtures.ReadWriteAppImage)
|
||||
h.AssertStringContains(t, analyzedMD.PreviousImage.Reference, analyzeRegFixtures.ReadWriteAppImage)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("cache is provided", func() {
|
||||
when("cache image case", func() {
|
||||
when("auth registry", func() {
|
||||
when("registry creds are provided in CNB_REGISTRY_AUTH", func() {
|
||||
it("restores cache metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read from the cache")
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||
"--network", analyzeRegNetwork,
|
||||
),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-cache-image", analyzeRegFixtures.SomeCacheImage,
|
||||
"some-image",
|
||||
),
|
||||
)
|
||||
|
||||
assertLogsAndRestoresCacheMetadata(t, copyDir, output)
|
||||
})
|
||||
})
|
||||
|
||||
when("registry creds are provided in the docker config.json", func() {
|
||||
it("restores cache metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read from the cache")
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "DOCKER_CONFIG=/docker-config",
|
||||
"--network", analyzeRegNetwork,
|
||||
),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-cache-image",
|
||||
analyzeRegFixtures.SomeCacheImage,
|
||||
analyzeRegFixtures.SomeAppImage,
|
||||
),
|
||||
)
|
||||
|
||||
assertLogsAndRestoresCacheMetadata(t, copyDir, output)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("no auth registry", func() {
|
||||
it("restores cache metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read from the cache")
|
||||
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--network", analyzeRegNetwork,
|
||||
),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-cache-image", analyzeRegFixtures.ReadOnlyCacheImage,
|
||||
analyzeRegFixtures.ReadOnlyAppImage,
|
||||
),
|
||||
)
|
||||
|
||||
assertLogsAndRestoresCacheMetadata(t, copyDir, output)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("cache directory case", func() {
|
||||
it("restores cache metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 does not read from the cache")
|
||||
output := h.DockerSeedRunAndCopy(t,
|
||||
containerName,
|
||||
cacheFixtureDir, ctrPath("/cache"),
|
||||
copyDir, ctrPath("/layers"),
|
||||
analyzeImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
),
|
||||
h.WithArgs(
|
||||
ctrPath(analyzerPath),
|
||||
"-cache-dir", ctrPath("/cache"),
|
||||
"some-image",
|
||||
),
|
||||
)
|
||||
|
||||
assertLogsAndRestoresCacheMetadata(t, copyDir, output)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("called with tag", func() {
|
||||
when("read/write access to registry", func() {
|
||||
it("passes read/write validation and writes analyzed.toml", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.7"), "Platform API < 0.7 does not use tag flag")
|
||||
execArgs := []string{
|
||||
ctrPath(analyzerPath),
|
||||
"-tag", analyzeRegFixtures.ReadWriteOtherAppImage,
|
||||
|
@ -424,12 +954,13 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
h.WithArgs(execArgs...),
|
||||
)
|
||||
analyzedMD := assertAnalyzedMetadata(t, filepath.Join(copyDir, "analyzed.toml"))
|
||||
h.AssertStringContains(t, analyzedMD.PreviousImageRef(), analyzeRegFixtures.ReadWriteAppImage)
|
||||
h.AssertStringContains(t, analyzedMD.PreviousImage.Reference, analyzeRegFixtures.ReadWriteAppImage)
|
||||
})
|
||||
})
|
||||
|
||||
when("no read/write access to registry", func() {
|
||||
it("throws read/write error accessing destination tag", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.7"), "Platform API < 0.7 does not use tag flag")
|
||||
cmd := exec.Command(
|
||||
"docker", "run", "--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
|
@ -444,83 +975,44 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
output, err := cmd.CombinedOutput()
|
||||
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "ensure registry read/write access to " + analyzeRegFixtures.InaccessibleImage
|
||||
expected := "validating registry write access: ensure registry read/write access to " + analyzeRegFixtures.InaccessibleImage
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("layout case", func() {
|
||||
layoutDir := filepath.Join(path.RootDir, "layout-repo")
|
||||
when("experimental mode is enabled", func() {
|
||||
it("writes analyzed.toml", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
|
||||
|
||||
analyzeFlags := []string{
|
||||
"-layout",
|
||||
"-layout-dir", layoutDir,
|
||||
"-run-image", "busybox",
|
||||
}
|
||||
var execArgs []string
|
||||
execArgs = append([]string{ctrPath(analyzerPath)}, analyzeFlags...)
|
||||
execArgs = append(execArgs, "my-app")
|
||||
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
ctrPath("/layers/analyzed.toml"),
|
||||
analyzeImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_EXPERIMENTAL_MODE=warn",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
),
|
||||
h.WithArgs(execArgs...),
|
||||
)
|
||||
|
||||
analyzer := assertAnalyzedMetadata(t, filepath.Join(copyDir, "analyzed.toml"))
|
||||
h.AssertNotNil(t, analyzer.RunImage)
|
||||
analyzedImagePath := filepath.Join(path.RootDir, "layout-repo", "index.docker.io", "library", "busybox", "latest")
|
||||
reference := fmt.Sprintf("%s@%s", analyzedImagePath, "sha256:f75f3d1a317fc82c793d567de94fc8df2bece37acd5f2bd364a0d91a0d1f3dab")
|
||||
h.AssertEq(t, analyzer.RunImage.Reference, reference)
|
||||
})
|
||||
})
|
||||
|
||||
when("experimental mode is not enabled", func() {
|
||||
it("errors", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
|
||||
cmd := exec.Command(
|
||||
"docker", "run", "--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_LAYOUT_DIR="+layoutDir,
|
||||
analyzeImage,
|
||||
ctrPath(analyzerPath),
|
||||
"-layout",
|
||||
"-run-image", "busybox",
|
||||
"some-image",
|
||||
) // #nosec G204
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "experimental features are disabled by CNB_EXPERIMENTAL_MODE=error"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func assertAnalyzedMetadata(t *testing.T, path string) *files.Analyzed {
func assertAnalyzedMetadata(t *testing.T, path string) *platform.AnalyzedMetadata {
contents, err := os.ReadFile(path)
h.AssertNil(t, err)
h.AssertEq(t, len(contents) > 0, true)

analyzedMD, err := files.Handler.ReadAnalyzed(path, cmd.DefaultLogger)
var analyzedMD platform.AnalyzedMetadata
_, err = toml.Decode(string(contents), &analyzedMD)
h.AssertNil(t, err)

return &analyzedMD
}

func assertLogsAndRestoresAppMetadata(t *testing.T, dir, output string) {
layerFilenames := []string{
"launch-layer.sha",
"launch-layer.toml",
"store.toml",
}
for _, filename := range layerFilenames {
h.AssertPathExists(t, filepath.Join(dir, "layers", "some-buildpack-id", filename))
}
layerNames := []string{
"launch-layer",
}
for _, layerName := range layerNames {
h.AssertStringContains(t, output, fmt.Sprintf("Restoring metadata for \"some-buildpack-id:%s\"", layerName))
}
}

func assertNoRestoreOfAppMetadata(t *testing.T, dir, output string) {
layerFilenames := []string{
"launch-build-cache-layer.sha",
@@ -536,6 +1028,28 @@ func assertNoRestoreOfAppMetadata(t *testing.T, dir, output string) {
}
}

func assertLogsAndRestoresCacheMetadata(t *testing.T, dir, output string) {
h.AssertPathExists(t, filepath.Join(dir, "layers", "some-buildpack-id", "some-layer.sha"))
h.AssertPathExists(t, filepath.Join(dir, "layers", "some-buildpack-id", "some-layer.toml"))
h.AssertStringContains(t, output, "Restoring metadata for \"some-buildpack-id:some-layer\" from cache")
}

func assertWritesStoreTomlOnly(t *testing.T, dir, output string) {
h.AssertPathExists(t, filepath.Join(dir, "layers", "some-buildpack-id", "store.toml"))
layerFilenames := []string{
"launch-build-cache-layer.sha",
"launch-build-cache-layer.toml",
"launch-cache-layer.sha",
"launch-cache-layer.toml",
"launch-layer.sha",
"launch-layer.toml",
}
for _, filename := range layerFilenames {
h.AssertPathDoesNotExist(t, filepath.Join(dir, "layers", "some-buildpack-id", filename))
}
h.AssertStringContains(t, output, "Skipping buildpack layer analysis")
}

func flatPrint(arr []string) string {
return strings.Join(arr, " ")
}
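The paired assertAnalyzedMetadata definitions above show the change being diffed: one side reads analyzed.toml through files.Handler.ReadAnalyzed, the other decodes the raw file directly with BurntSushi/toml into platform.AnalyzedMetadata. A standalone sketch of the direct-decode path follows; the struct layout and the "image" key are assumptions for illustration only, not the actual platform.AnalyzedMetadata definition:

// standalone sketch of the direct toml.Decode path; not the lifecycle's real types
package main

import (
	"fmt"
	"os"

	"github.com/BurntSushi/toml"
)

// analyzedMetadata is a placeholder struct; the real type has more fields.
type analyzedMetadata struct {
	PreviousImage struct {
		Reference string `toml:"reference"`
	} `toml:"image"` // "image" is an assumed key name for illustration
}

func main() {
	contents, err := os.ReadFile("analyzed.toml")
	if err != nil {
		panic(err)
	}
	var md analyzedMetadata
	if _, err := toml.Decode(string(contents), &md); err != nil {
		panic(err)
	}
	fmt.Println("previous image reference:", md.PreviousImage.Reference)
}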
@@ -3,16 +3,20 @@ package acceptance
import (
"context"
"fmt"
"math/rand"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
"time"

"github.com/BurntSushi/toml"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"

"github.com/buildpacks/lifecycle/api"
"github.com/buildpacks/lifecycle/platform/files"
"github.com/buildpacks/lifecycle/platform"
h "github.com/buildpacks/lifecycle/testhelpers"
)

@@ -24,6 +28,11 @@ var (
)

func TestBuilder(t *testing.T) {
h.SkipIf(t, runtime.GOOS == "windows", "Builder acceptance tests are not yet supported on Windows")
h.SkipIf(t, runtime.GOARCH != "amd64", "Builder acceptance tests are not yet supported on non-amd64")

rand.Seed(time.Now().UTC().UnixNano())

info, err := h.DockerCli(t).Info(context.TODO())
h.AssertNil(t, err)

@@ -34,8 +43,6 @@ func TestBuilder(t *testing.T) {
builderDaemonArch = info.Architecture
if builderDaemonArch == "x86_64" {
builderDaemonArch = "amd64"
} else if builderDaemonArch == "aarch64" {
builderDaemonArch = "arm64"
}

h.MakeAndCopyLifecycle(t, builderDaemonOS, builderDaemonArch, builderBinaryDir)
@ -125,7 +132,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
// check builder metadata.toml for success test
|
||||
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
|
||||
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.2")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
|
||||
})
|
||||
|
@ -149,7 +156,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
|
||||
// prevent regression of inline table serialization
|
||||
h.AssertStringDoesNotContain(t, contents, "processes =")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.2")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
|
||||
h.AssertEq(t, len(md.Processes), 1)
|
||||
|
@ -158,7 +165,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
h.AssertEq(t, md.Processes[0].Command.Entries[0], "echo world")
|
||||
h.AssertEq(t, len(md.Processes[0].Args), 1)
|
||||
h.AssertEq(t, md.Processes[0].Args[0], "arg1")
|
||||
h.AssertEq(t, md.Processes[0].Direct, true)
|
||||
h.AssertEq(t, md.Processes[0].Direct, false)
|
||||
h.AssertEq(t, md.Processes[0].WorkingDirectory, "")
|
||||
h.AssertEq(t, md.Processes[0].Default, false)
|
||||
})
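The h.AssertStringDoesNotContain(t, contents, "processes =") check above guards against a regression where processes were serialized as an inline TOML array instead of [[processes]] array-of-tables. As a hedged illustration (this is not the lifecycle's actual encoder, just BurntSushi's default behavior), a slice of structs encodes as array-of-tables, which is the shape the assertion expects:

// sketch: shows why "processes =" should not appear in metadata.toml
package main

import (
	"bytes"
	"fmt"

	"github.com/BurntSushi/toml"
)

type process struct {
	Type    string `toml:"type"`
	Command string `toml:"command"`
}

type metadata struct {
	Processes []process `toml:"processes"`
}

func main() {
	var buf bytes.Buffer
	md := metadata{Processes: []process{{Type: "hello", Command: "echo world"}}}
	if err := toml.NewEncoder(&buf).Encode(md); err != nil {
		panic(err)
	}
	// prints a [[processes]] table, not an inline "processes = [...]" array
	fmt.Print(buf.String())
}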
|
||||
|
@ -181,7 +188,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
|
||||
// prevent regression of inline table serialization
|
||||
h.AssertStringDoesNotContain(t, contents, "processes =")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.2")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
|
||||
h.AssertEq(t, len(md.Processes), 1)
|
||||
|
@ -190,7 +197,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
h.AssertEq(t, md.Processes[0].Command.Entries[0], "echo world")
|
||||
h.AssertEq(t, len(md.Processes[0].Args), 1)
|
||||
h.AssertEq(t, md.Processes[0].Args[0], "arg1")
|
||||
h.AssertEq(t, md.Processes[0].Direct, true)
|
||||
h.AssertEq(t, md.Processes[0].Direct, false)
|
||||
h.AssertEq(t, md.Processes[0].WorkingDirectory, "")
|
||||
h.AssertEq(t, md.Processes[0].Default, false)
|
||||
})
|
||||
|
@ -213,10 +220,11 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
// check builder metadata.toml for success test
|
||||
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
|
||||
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.2")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
|
||||
h.AssertStringContains(t, md.Extensions[0].API, "0.10")
|
||||
h.AssertStringContains(t, md.Extensions[0].API, "0.9")
|
||||
h.AssertEq(t, md.Extensions[0].Extension, false) // this shows that `extension = true` is not redundantly printed in group.toml
|
||||
h.AssertStringContains(t, md.Extensions[0].ID, "hello_world")
|
||||
h.AssertStringContains(t, md.Extensions[0].Version, "0.0.1")
|
||||
})
|
||||
|
@ -237,7 +245,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
)
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to read group file: open /layers/group.toml: no such file or directory"
|
||||
expected := "failed to read buildpack group: open /layers/group.toml: no such file or directory"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
@ -274,7 +282,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
)
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to read group file: toml: line 1: expected '.' or '=', but got 'a' instead"
|
||||
expected := "failed to read buildpack group: toml: line 1: expected '.' or '=', but got 'a' instead"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
@ -313,7 +321,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
)
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to read plan file: open /layers/plan.toml: no such file or directory"
|
||||
expected := "failed to parse detect plan: open /layers/plan.toml: no such file or directory"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
@ -334,7 +342,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
// check builder metadata.toml for success test
|
||||
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
|
||||
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.2")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
|
||||
})
|
||||
|
@ -353,7 +361,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
)
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to read plan file: toml: line 1: expected '.' or '=', but got 'a' instead"
|
||||
expected := "failed to parse detect plan: toml: line 1: expected '.' or '=', but got 'a' instead"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
@ -377,7 +385,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
)
|
||||
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers/different_layer_dir_from_env/config/metadata.toml"))
|
||||
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.2")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world_2")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.2")
|
||||
})
|
||||
|
@ -399,7 +407,7 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
)
|
||||
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers/different_layer_dir_from_env/config/metadata.toml"))
|
||||
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].API, "0.2")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world_2")
|
||||
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.2")
|
||||
})
|
||||
|
@ -499,37 +507,16 @@ func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
|||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("It runs", func() {
|
||||
it("sets CNB_TARGET_* vars", func() {
|
||||
command := exec.Command(
|
||||
"docker",
|
||||
"run",
|
||||
"--rm",
|
||||
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
"--env", "CNB_LAYERS_DIR=/layers/03_layer",
|
||||
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan_buildpack_3.toml",
|
||||
builderImage,
|
||||
)
|
||||
output, err := command.CombinedOutput()
|
||||
fmt.Println(string(output))
|
||||
h.AssertNil(t, err)
|
||||
h.AssertStringContains(t, string(output), "CNB_TARGET_ARCH: amd64")
|
||||
h.AssertStringContains(t, string(output), "CNB_TARGET_ARCH_VARIANT: some-variant")
|
||||
h.AssertStringContains(t, string(output), "CNB_TARGET_OS: linux")
|
||||
h.AssertStringContains(t, string(output), "CNB_TARGET_DISTRO_NAME: ubuntu")
|
||||
h.AssertStringContains(t, string(output), "CNB_TARGET_DISTRO_VERSION: some-cute-version")
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func getBuilderMetadata(t *testing.T, path string) (string, *files.BuildMetadata) {
func getBuilderMetadata(t *testing.T, path string) (string, *platform.BuildMetadata) {
t.Helper()
contents, _ := os.ReadFile(path)
h.AssertEq(t, len(contents) > 0, true)

buildMD, err := files.Handler.ReadBuildMetadata(path, api.MustParse(latestPlatformAPI))
var buildMD platform.BuildMetadata
_, err := toml.Decode(string(contents), &buildMD)
h.AssertNil(t, err)

return string(contents), buildMD
return string(contents), &buildMD
}
@ -1,16 +1,17 @@
|
|||
//go:build acceptance
|
||||
// +build acceptance
|
||||
|
||||
package acceptance
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/buildpacks/lifecycle/internal/path"
|
||||
|
||||
"github.com/sclevine/spec"
|
||||
"github.com/sclevine/spec/report"
|
||||
|
||||
|
@ -29,6 +30,10 @@ var (
|
|||
)
|
||||
|
||||
func TestCreator(t *testing.T) {
|
||||
h.SkipIf(t, runtime.GOOS == "windows", "Creator acceptance tests are not yet supported on Windows")
|
||||
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
|
||||
testImageDockerContext := filepath.Join("testdata", "creator")
|
||||
createTest = NewPhaseTest(t, "creator", testImageDockerContext)
|
||||
createTest.Start(t)
|
||||
|
@ -36,6 +41,7 @@ func TestCreator(t *testing.T) {
|
|||
|
||||
createImage = createTest.testImageRef
|
||||
creatorPath = createTest.containerBinaryPath
|
||||
cacheFixtureDir = filepath.Join("testdata", "creator", "cache-dir")
|
||||
createRegAuthConfig = createTest.targetRegistry.authConfig
|
||||
createRegNetwork = createTest.targetRegistry.network
|
||||
createDaemonFixtures = createTest.targetDaemon.fixtures
|
||||
|
@ -50,50 +56,6 @@ func testCreatorFunc(platformAPI string) func(t *testing.T, when spec.G, it spec
|
|||
return func(t *testing.T, when spec.G, it spec.S) {
|
||||
var createdImageName string
|
||||
|
||||
when("called with run", func() {
|
||||
it("uses the provided run.toml path", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept -run")
|
||||
cmd := exec.Command(
|
||||
"docker", "run", "--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
|
||||
"--network", createRegNetwork,
|
||||
createImage,
|
||||
ctrPath(creatorPath),
|
||||
"-run", "/cnb/run.toml",
|
||||
createRegFixtures.SomeAppImage,
|
||||
) // #nosec G204
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to resolve inputs: failed to find accessible run image"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("detected order contains extensions", func() {
|
||||
it("errors", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.10"), "")
|
||||
cmd := exec.Command(
|
||||
"docker", "run", "--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
|
||||
"--network", createRegNetwork,
|
||||
createImage,
|
||||
ctrPath(creatorPath),
|
||||
"-log-level", "debug",
|
||||
"-order", "/cnb/order-with-extensions.toml",
|
||||
"-run-image", createRegFixtures.ReadOnlyRunImage,
|
||||
createRegFixtures.SomeAppImage,
|
||||
) // #nosec G204
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "detected order contains extensions which is not supported by the creator"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("daemon case", func() {
|
||||
it.After(func() {
|
||||
h.DockerImageRemove(t, createdImageName)
|
||||
|
@ -221,9 +183,8 @@ func testCreatorFunc(platformAPI string) func(t *testing.T, when spec.G, it spec
|
|||
|
||||
when("multiple builds", func() {
|
||||
var (
|
||||
createFlags []string
|
||||
createArgs []string
|
||||
duration1, duration2 time.Duration
|
||||
createFlags []string
|
||||
createArgs []string
|
||||
)
|
||||
|
||||
it.Before(func() {
|
||||
|
@ -239,7 +200,6 @@ func testCreatorFunc(platformAPI string) func(t *testing.T, when spec.G, it spec
|
|||
createArgs = append([]string{ctrPath(creatorPath)}, createFlags...)
|
||||
createArgs = append(createArgs, imageName)
|
||||
|
||||
startTime := time.Now()
|
||||
// first build
|
||||
output := h.DockerRunAndCopy(t,
|
||||
container1,
|
||||
|
@ -256,8 +216,6 @@ func testCreatorFunc(platformAPI string) func(t *testing.T, when spec.G, it spec
|
|||
)...),
|
||||
h.WithArgs(createArgs...),
|
||||
)
|
||||
duration1 = time.Now().Sub(startTime)
|
||||
t.Logf("First build duration: %s", duration1)
|
||||
h.AssertStringDoesNotContain(t, output, "restored with content")
|
||||
h.AssertPathExists(t, filepath.Join(dirBuild1, "layers", "sbom", "build", "samples_hello-world", "sbom.cdx.json"))
|
||||
h.AssertPathExists(t, filepath.Join(dirBuild1, "layers", "sbom", "build", "samples_hello-world", "some-build-layer", "sbom.cdx.json"))
|
||||
|
@@ -300,16 +258,15 @@ func testCreatorFunc(platformAPI string) func(t *testing.T, when spec.G, it spec
h.WithArgs(createArgs...),
)
// check that launch cache was used
duration2 = time.Now().Sub(startTime)
t.Logf("Second build duration: %s", duration2)
if duration2+time.Duration(0.1*float64(time.Second)) >= duration1 {
t.Logf("Second build output: %s", output)
t.Fatalf("Expected second build to complete 0.1s faster than first build; first build took %s, second build took %s", duration1, duration2)
duration := time.Now().Sub(startTime)
t.Logf("Build duration: %s", duration)
if duration > 3*time.Second {
t.Fatalf("Expected second build to complete in less than 3 seconds; took %s", duration)
}
h.AssertStringContains(t, output, "some-layer.sbom.cdx.json restored with content: {\"key\": \"some-launch-true-bom-content\"}")
h.AssertStringContains(t, output, "some-cache-layer.sbom.cdx.json restored with content: {\"key\": \"some-cache-true-bom-content\"}")
h.AssertStringContains(t, output, "some-launch-cache-layer.sbom.cdx.json restored with content: {\"key\": \"some-launch-true-cache-true-bom-content\"}")
h.AssertStringContains(t, output, "Reusing layer 'buildpacksio/lifecycle:launch.sbom'")
h.AssertStringContains(t, output, "Reusing layer 'launch.sbom'")
h.AssertPathExists(t, filepath.Join(dirBuild2, "layers", "sbom", "build", "samples_hello-world", "sbom.cdx.json"))
h.AssertPathExists(t, filepath.Join(dirBuild2, "layers", "sbom", "build", "samples_hello-world", "some-build-layer", "sbom.cdx.json"))
t.Log("restores store.toml")
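The hunk above changes how cache effectiveness is judged: instead of requiring the second build to finish inside a fixed 3-second budget, it requires the second (cached) build to be at least 0.1s faster than the first. A self-contained sketch of that comparison, where runBuild is a stand-in for the real docker invocation and the 0.1s margin is taken from the diff:

// sketch: compares the durations of two builds with a fixed margin
package main

import (
	"fmt"
	"time"
)

func runBuild() { time.Sleep(50 * time.Millisecond) } // placeholder for the real build

func main() {
	start := time.Now()
	runBuild()
	duration1 := time.Since(start)

	start = time.Now()
	runBuild()
	duration2 := time.Since(start)

	margin := time.Duration(0.1 * float64(time.Second))
	if duration2+margin >= duration1 {
		fmt.Printf("expected the cached build to be at least %s faster; first %s, second %s\n", margin, duration1, duration2)
		return
	}
	fmt.Println("second (cached) build was measurably faster")
}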
@ -368,78 +325,5 @@ func testCreatorFunc(platformAPI string) func(t *testing.T, when spec.G, it spec
|
|||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("layout case", func() {
|
||||
var (
|
||||
containerName string
|
||||
err error
|
||||
layoutDir string
|
||||
tmpDir string
|
||||
)
|
||||
when("experimental mode is enabled", func() {
|
||||
it.Before(func() {
|
||||
// creates the directory to save all the OCI images on disk
|
||||
tmpDir, err = os.MkdirTemp("", "layout")
|
||||
h.AssertNil(t, err)
|
||||
|
||||
containerName = "test-container-" + h.RandString(10)
|
||||
layoutDir = filepath.Join(path.RootDir, "layout-repo")
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
if h.DockerContainerExists(t, containerName) {
|
||||
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||
}
|
||||
h.DockerImageRemove(t, createdImageName)
|
||||
|
||||
// removes all images created
|
||||
os.RemoveAll(tmpDir)
|
||||
|
||||
})
|
||||
|
||||
it("creates app", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
|
||||
var createFlags []string
|
||||
createFlags = append(createFlags, []string{"-layout", "-layout-dir", layoutDir, "-run-image", "busybox"}...)
|
||||
|
||||
createArgs := append([]string{ctrPath(creatorPath)}, createFlags...)
|
||||
createdImageName = "some-created-image-" + h.RandString(10)
|
||||
createArgs = append(createArgs, createdImageName)
|
||||
|
||||
output := h.DockerRunAndCopy(t, containerName, tmpDir, layoutDir, createImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_EXPERIMENTAL_MODE=warn",
|
||||
),
|
||||
h.WithArgs(createArgs...))
|
||||
|
||||
h.AssertStringContains(t, output, "Saving /layout-repo/index.docker.io/library/"+createdImageName+"/latest")
|
||||
index := h.ReadIndexManifest(t, filepath.Join(tmpDir, layoutDir, "index.docker.io", "library", createdImageName+"/latest"))
|
||||
h.AssertEq(t, len(index.Manifests), 1)
|
||||
})
|
||||
})
|
||||
|
||||
when("experimental mode is not enabled", func() {
|
||||
it("errors", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
|
||||
|
||||
cmd := exec.Command(
|
||||
"docker", "run", "--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
createImage,
|
||||
ctrPath(creatorPath),
|
||||
"-layout",
|
||||
"-layout-dir", layoutDir,
|
||||
"-run-image", "busybox",
|
||||
"some-image",
|
||||
) // #nosec G204
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "experimental features are disabled by CNB_EXPERIMENTAL_MODE=error"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,46 +1,42 @@
|
|||
//go:build acceptance
|
||||
// +build acceptance
|
||||
|
||||
package acceptance
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/sclevine/spec"
|
||||
"github.com/sclevine/spec/report"
|
||||
|
||||
"github.com/buildpacks/lifecycle/api"
|
||||
"github.com/buildpacks/lifecycle/cmd"
|
||||
"github.com/buildpacks/lifecycle/platform/files"
|
||||
"github.com/buildpacks/lifecycle/buildpack"
|
||||
"github.com/buildpacks/lifecycle/platform"
|
||||
h "github.com/buildpacks/lifecycle/testhelpers"
|
||||
)
|
||||
|
||||
var (
|
||||
detectDockerContext = filepath.Join("testdata", "detector")
|
||||
detectorBinaryDir = filepath.Join("testdata", "detector", "container", "cnb", "lifecycle")
|
||||
detectImage = "lifecycle/acceptance/detector"
|
||||
userID = "1234"
|
||||
detectorDaemonOS, detectorDaemonArch string
|
||||
detectDockerContext = filepath.Join("testdata", "detector")
|
||||
detectorBinaryDir = filepath.Join("testdata", "detector", "container", "cnb", "lifecycle")
|
||||
detectImage = "lifecycle/acceptance/detector"
|
||||
userID = "1234"
|
||||
)
|
||||
|
||||
func TestDetector(t *testing.T) {
|
||||
info, err := h.DockerCli(t).Info(context.TODO())
|
||||
h.AssertNil(t, err)
|
||||
h.SkipIf(t, runtime.GOOS == "windows", "Detector acceptance tests are not yet supported on Windows")
|
||||
h.SkipIf(t, runtime.GOARCH != "amd64", "Detector acceptance tests are not yet supported on non-amd64")
|
||||
|
||||
detectorDaemonOS = info.OSType
|
||||
detectorDaemonArch = info.Architecture
|
||||
if detectorDaemonArch == "x86_64" {
|
||||
detectorDaemonArch = "amd64"
|
||||
}
|
||||
if detectorDaemonArch == "aarch64" {
|
||||
detectorDaemonArch = "arm64"
|
||||
}
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
|
||||
h.MakeAndCopyLifecycle(t, detectorDaemonOS, detectorDaemonArch, detectorBinaryDir)
|
||||
h.MakeAndCopyLifecycle(t, "linux", "amd64", detectorBinaryDir)
|
||||
h.DockerBuild(t,
|
||||
detectImage,
|
||||
detectDockerContext,
|
||||
|
@ -48,77 +44,362 @@ func TestDetector(t *testing.T) {
|
|||
)
|
||||
defer h.DockerImageRemove(t, detectImage)
|
||||
|
||||
for _, platformAPI := range api.Platform.Supported {
|
||||
if platformAPI.LessThan("0.12") {
|
||||
continue
|
||||
}
|
||||
|
||||
spec.Run(t, "acceptance-detector/"+platformAPI.String(), testDetectorFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{}))
|
||||
}
|
||||
spec.Run(t, "acceptance-detector", testDetector, spec.Parallel(), spec.Report(report.Terminal{}))
|
||||
}
|
||||
|
||||
func testDetectorFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
|
||||
return func(t *testing.T, when spec.G, it spec.S) {
|
||||
when("called with arguments", func() {
|
||||
it("errors", func() {
|
||||
command := exec.Command(
|
||||
"docker",
|
||||
"run",
|
||||
"--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
func testDetector(t *testing.T, when spec.G, it spec.S) {
|
||||
when("called with arguments", func() {
|
||||
it("errors", func() {
|
||||
command := exec.Command(
|
||||
"docker",
|
||||
"run",
|
||||
"--rm",
|
||||
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
detectImage,
|
||||
"some-arg",
|
||||
)
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to parse arguments: received unexpected arguments"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("running as a root", func() {
|
||||
it("errors", func() {
|
||||
command := exec.Command(
|
||||
"docker",
|
||||
"run",
|
||||
"--rm",
|
||||
"--user",
|
||||
"root",
|
||||
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
detectImage,
|
||||
)
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to detect: refusing to run as root"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("read buildpack order file failed", func() {
|
||||
it("errors", func() {
|
||||
// no order.toml file in the default search locations
|
||||
command := exec.Command(
|
||||
"docker",
|
||||
"run",
|
||||
"--rm",
|
||||
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
detectImage,
|
||||
)
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to initialize detector: reading order"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("no buildpack group passed detection", func() {
|
||||
it("errors and exits with the expected code", func() {
|
||||
command := exec.Command(
|
||||
"docker",
|
||||
"run",
|
||||
"--rm",
|
||||
"--env", "CNB_ORDER_PATH=/cnb/orders/empty_order.toml",
|
||||
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
detectImage,
|
||||
)
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
failErr, ok := err.(*exec.ExitError)
|
||||
if !ok {
|
||||
t.Fatalf("expected an error of type exec.ExitError")
|
||||
}
|
||||
h.AssertEq(t, failErr.ExitCode(), 20) // platform code for cmd.FailedDetect
|
||||
expected := "No buildpack groups passed detection."
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
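The test above asserts on the process exit code (20, the platform code for a failed detect) in addition to the log output. A minimal sketch of the exit-code extraction pattern it relies on, using errors.As in place of the test's type assertion ("some-image" is a placeholder):

// sketch: run a command and inspect the exit code when it fails
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	output, err := exec.Command("docker", "run", "--rm", "some-image").CombinedOutput()
	if err != nil {
		var exitErr *exec.ExitError
		if errors.As(err, &exitErr) {
			// the detector tests expect specific codes here (e.g. 20 for a failed detect)
			fmt.Printf("exited with code %d\n%s", exitErr.ExitCode(), output)
			return
		}
		panic(err)
	}
	fmt.Print(string(output))
}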
when("there is a buildpack group that passes detection", func() {
|
||||
var copyDir, containerName string
|
||||
|
||||
it.Before(func() {
|
||||
containerName = "test-container-" + h.RandString(10)
|
||||
var err error
|
||||
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||
h.AssertNil(t, err)
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
if h.DockerContainerExists(t, containerName) {
|
||||
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||
}
|
||||
os.RemoveAll(copyDir)
|
||||
})
|
||||
|
||||
it("writes group.toml and plan.toml at the default locations", func() {
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/layers",
|
||||
detectImage,
|
||||
h.WithFlags("--user", userID,
|
||||
"--env", "CNB_ORDER_PATH=/cnb/orders/simple_order.toml",
|
||||
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
),
|
||||
h.WithArgs(),
|
||||
)
|
||||
|
||||
// check group.toml
|
||||
foundGroupTOML := filepath.Join(copyDir, "layers", "group.toml")
|
||||
var buildpackGroup buildpack.Group
|
||||
_, err := toml.DecodeFile(foundGroupTOML, &buildpackGroup)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, buildpackGroup.Group[0].ID, "simple_buildpack")
|
||||
h.AssertEq(t, buildpackGroup.Group[0].Version, "simple_buildpack_version")
|
||||
|
||||
// check plan.toml
|
||||
tempPlanToml := filepath.Join(copyDir, "layers", "plan.toml")
|
||||
var buildPlan platform.BuildPlan
|
||||
_, err = toml.DecodeFile(tempPlanToml, &buildPlan)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, buildPlan.Entries[0].Providers[0].ID, "simple_buildpack")
|
||||
h.AssertEq(t, buildPlan.Entries[0].Providers[0].Version, "simple_buildpack_version")
|
||||
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Name, "some_requirement")
|
||||
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Metadata["some_metadata_key"], "some_metadata_val")
|
||||
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Metadata["version"], "some_version")
|
||||
})
|
||||
})
|
||||
|
||||
when("environment variables are provided for buildpack and app directories and for the output files", func() {
|
||||
var copyDir, containerName string
|
||||
|
||||
it.Before(func() {
|
||||
containerName = "test-container-" + h.RandString(10)
|
||||
var err error
|
||||
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||
h.AssertNil(t, err)
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
if h.DockerContainerExists(t, containerName) {
|
||||
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||
}
|
||||
os.RemoveAll(copyDir)
|
||||
})
|
||||
|
||||
it("writes group.toml and plan.toml in the right locations and with the right names", func() {
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/layers",
|
||||
detectImage,
|
||||
h.WithFlags("--user", userID,
|
||||
"--env", "CNB_ORDER_PATH=/cnb/orders/always_detect_order.toml",
|
||||
"--env", "CNB_BUILDPACKS_DIR=/cnb/custom_buildpacks",
|
||||
"--env", "CNB_APP_DIR=/custom_workspace",
|
||||
"--env", "CNB_GROUP_PATH=./custom_group.toml",
|
||||
"--env", "CNB_PLAN_PATH=./custom_plan.toml",
|
||||
"--env", "CNB_PLATFORM_DIR=/custom_platform",
|
||||
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
),
|
||||
h.WithArgs("-log-level=debug"),
|
||||
)
|
||||
|
||||
// check group.toml
|
||||
foundGroupTOML := filepath.Join(copyDir, "layers", "custom_group.toml")
|
||||
var buildpackGroup buildpack.Group
|
||||
_, err := toml.DecodeFile(foundGroupTOML, &buildpackGroup)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, buildpackGroup.Group[0].ID, "always_detect_buildpack")
|
||||
h.AssertEq(t, buildpackGroup.Group[0].Version, "always_detect_buildpack_version")
|
||||
|
||||
// check plan.toml - should be empty since we're using always_detect_order.toml so there is no "actual plan"
|
||||
tempPlanToml := filepath.Join(copyDir, "layers", "custom_plan.toml")
|
||||
planContents, err := os.ReadFile(tempPlanToml)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, len(planContents) == 0, true)
|
||||
|
||||
// check platform directory
|
||||
logs := h.Run(t, exec.Command("docker", "logs", containerName))
|
||||
expectedPlatformPath := "platform_path: /custom_platform"
|
||||
expectedAppDir := "app_dir: /custom_workspace"
|
||||
h.AssertStringContains(t, logs, expectedPlatformPath)
|
||||
h.AssertStringContains(t, logs, expectedAppDir)
|
||||
})
|
||||
})
|
||||
|
||||
when("-order is provided", func() {
|
||||
var copyDir, containerName, expectedOrderTOMLPath string
|
||||
|
||||
it.Before(func() {
|
||||
containerName = "test-container-" + h.RandString(10)
|
||||
var err error
|
||||
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||
h.AssertNil(t, err)
|
||||
|
||||
simpleOrderTOML := filepath.Join("testdata", "detector", "container", "cnb", "orders", "simple_order.toml")
|
||||
expectedOrderTOMLPath, err = filepath.Abs(simpleOrderTOML)
|
||||
h.AssertNil(t, err)
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
if h.DockerContainerExists(t, containerName) {
|
||||
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||
}
|
||||
os.RemoveAll(copyDir)
|
||||
})
|
||||
|
||||
when("the order.toml exists", func() {
|
||||
it("processes the provided order.toml", func() {
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/layers",
|
||||
detectImage,
|
||||
"some-arg",
|
||||
h.WithFlags("--user", userID,
|
||||
"--volume", expectedOrderTOMLPath+":/custom/order.toml",
|
||||
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
),
|
||||
h.WithArgs(
|
||||
"-log-level=debug",
|
||||
"-order=/custom/order.toml",
|
||||
),
|
||||
)
|
||||
|
||||
// check group.toml
|
||||
foundGroupTOML := filepath.Join(copyDir, "layers", "group.toml")
|
||||
var buildpackGroup buildpack.Group
|
||||
_, err := toml.DecodeFile(foundGroupTOML, &buildpackGroup)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, buildpackGroup.Group[0].ID, "simple_buildpack")
|
||||
h.AssertEq(t, buildpackGroup.Group[0].Version, "simple_buildpack_version")
|
||||
})
|
||||
})
|
||||
|
||||
when("the order.toml does not exist", func() {
|
||||
it("errors", func() {
|
||||
command := exec.Command("docker", "run",
|
||||
"--user", userID,
|
||||
"--rm",
|
||||
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
detectImage,
|
||||
"-order=/custom/order.toml")
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to parse arguments: received unexpected arguments"
|
||||
expected := "failed to initialize detector: reading order: failed to read order file: open /custom/order.toml: no such file or directory"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("running as a root", func() {
|
||||
when("the order.toml contains a buildpack using an unsupported api", func() {
|
||||
it("errors", func() {
|
||||
command := exec.Command(
|
||||
"docker",
|
||||
"run",
|
||||
command := exec.Command("docker", "run",
|
||||
"--user", userID,
|
||||
"--rm",
|
||||
"--user",
|
||||
"root",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
detectImage,
|
||||
)
|
||||
"-order=/cnb/orders/bad_api.toml")
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to detect: refusing to run as root"
|
||||
failErr, ok := err.(*exec.ExitError)
|
||||
if !ok {
|
||||
t.Fatalf("expected an error of type exec.ExitError")
|
||||
}
|
||||
h.AssertEq(t, failErr.ExitCode(), 12) // platform code for buildpack api error
|
||||
expected := "buildpack API version '0.1' is incompatible with the lifecycle"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("read buildpack order file failed", func() {
|
||||
it("errors", func() {
|
||||
// no order.toml file in the default search locations
|
||||
command := exec.Command(
|
||||
"docker",
|
||||
"run",
|
||||
"--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
detectImage,
|
||||
)
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to initialize detector: reading order"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
when("-order contains extensions", func() {
|
||||
var containerName, copyDir, orderPath string
|
||||
|
||||
it.Before(func() {
|
||||
containerName = "test-container-" + h.RandString(10)
|
||||
var err error
|
||||
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||
h.AssertNil(t, err)
|
||||
orderPath, err = filepath.Abs(filepath.Join("testdata", "detector", "container", "cnb", "orders", "order_with_ext.toml"))
|
||||
h.AssertNil(t, err)
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
if h.DockerContainerExists(t, containerName) {
|
||||
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||
}
|
||||
os.RemoveAll(copyDir)
|
||||
})
|
||||
|
||||
it("processes the provided order.toml", func() {
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/layers",
|
||||
detectImage,
|
||||
h.WithFlags(
|
||||
"--user", userID,
|
||||
"--volume", orderPath+":/layers/order.toml",
|
||||
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
"--env", "CNB_EXPERIMENTAL_MODE=warn", // required as the default is `error` if unset
|
||||
),
|
||||
h.WithArgs(
|
||||
"-analyzed=/layers/analyzed.toml",
|
||||
"-extensions=/cnb/extensions",
|
||||
"-generated=/layers/generated",
|
||||
"-log-level=debug",
|
||||
),
|
||||
)
|
||||
|
||||
t.Log("runs /bin/detect for buildpacks and extensions")
|
||||
h.AssertStringContains(t, output, "Platform requested experimental feature 'Dockerfiles'")
|
||||
h.AssertStringContains(t, output, "simple_extension: output from /bin/detect")
|
||||
t.Log("writes group.toml")
|
||||
foundGroupTOML := filepath.Join(copyDir, "layers", "group.toml")
|
||||
var buildpackGroup buildpack.Group
|
||||
_, err := toml.DecodeFile(foundGroupTOML, &buildpackGroup)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, buildpackGroup.GroupExtensions[0].ID, "simple_extension")
|
||||
h.AssertEq(t, buildpackGroup.GroupExtensions[0].Version, "simple_extension_version")
|
||||
h.AssertEq(t, buildpackGroup.GroupExtensions[0].Extension, false) // this shows that `extension = true` is not redundantly printed in group.toml
|
||||
h.AssertEq(t, buildpackGroup.Group[0].ID, "buildpack_for_ext")
|
||||
h.AssertEq(t, buildpackGroup.Group[0].Version, "buildpack_for_ext_version")
|
||||
h.AssertEq(t, buildpackGroup.Group[0].Extension, false)
|
||||
t.Log("writes plan.toml")
|
||||
foundPlanTOML := filepath.Join(copyDir, "layers", "plan.toml")
|
||||
var plan platform.BuildPlan
|
||||
_, err = toml.DecodeFile(foundPlanTOML, &plan)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, len(plan.Entries), 0) // this shows that the plan was filtered to remove `requires` provided by extensions
|
||||
|
||||
t.Log("runs /bin/generate for extensions")
|
||||
h.AssertStringContains(t, output, "simple_extension: output from /bin/generate")
|
||||
t.Log("copies the generated dockerfiles to the output directory")
|
||||
dockerfilePath := filepath.Join(copyDir, "layers", "generated", "run", "simple_extension", "Dockerfile")
|
||||
h.AssertPathExists(t, dockerfilePath)
|
||||
contents, err := os.ReadFile(dockerfilePath)
|
||||
h.AssertEq(t, string(contents), "FROM some-run-image-from-extension\n")
|
||||
t.Log("records the new run image in analyzed.toml")
|
||||
foundAnalyzedTOML := filepath.Join(copyDir, "layers", "analyzed.toml")
|
||||
var analyzed platform.AnalyzedMetadata
|
||||
_, err = toml.DecodeFile(foundAnalyzedTOML, &analyzed)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, analyzed.RunImage.Reference, "some-run-image-from-extension")
|
||||
})
|
||||
})
|
||||
|
||||
when("platform api < 0.6", func() {
|
||||
when("no buildpack group passed detection", func() {
|
||||
it("errors and exits with the expected code", func() {
|
||||
command := exec.Command(
|
||||
"docker",
|
||||
"run",
|
||||
"--rm",
|
||||
"--env", "CNB_ORDER_PATH=/cnb/orders/fail_detect_order.toml",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_ORDER_PATH=/cnb/orders/empty_order.toml",
|
||||
"--env", "CNB_PLATFORM_API=0.5",
|
||||
detectImage,
|
||||
)
|
||||
output, err := command.CombinedOutput()
|
||||
|
@ -127,296 +408,10 @@ func testDetectorFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
if !ok {
|
||||
t.Fatalf("expected an error of type exec.ExitError")
|
||||
}
|
||||
h.AssertEq(t, failErr.ExitCode(), 20) // platform code for failed detect
|
||||
|
||||
expected1 := `======== Output: fail_detect_buildpack@some_version ========
|
||||
Opted out of detection
|
||||
======== Results ========
|
||||
fail: fail_detect_buildpack@some_version`
|
||||
h.AssertStringContains(t, string(output), expected1)
|
||||
expected2 := "No buildpack groups passed detection."
|
||||
h.AssertStringContains(t, string(output), expected2)
|
||||
h.AssertEq(t, failErr.ExitCode(), 100) // platform code for cmd.FailedDetect
|
||||
expected := "No buildpack groups passed detection."
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("there is a buildpack group that passes detection", func() {
|
||||
var copyDir, containerName string
|
||||
|
||||
it.Before(func() {
|
||||
containerName = "test-container-" + h.RandString(10)
|
||||
var err error
|
||||
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||
h.AssertNil(t, err)
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
if h.DockerContainerExists(t, containerName) {
|
||||
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||
}
|
||||
os.RemoveAll(copyDir)
|
||||
})
|
||||
|
||||
it("writes group.toml and plan.toml at the default locations", func() {
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/layers",
|
||||
detectImage,
|
||||
h.WithFlags("--user", userID,
|
||||
"--env", "CNB_ORDER_PATH=/cnb/orders/simple_order.toml",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
),
|
||||
h.WithArgs(),
|
||||
)
|
||||
|
||||
// check group.toml
|
||||
foundGroupTOML := filepath.Join(copyDir, "layers", "group.toml")
|
||||
group, err := files.Handler.ReadGroup(foundGroupTOML)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, group.Group[0].ID, "simple_buildpack")
|
||||
h.AssertEq(t, group.Group[0].Version, "simple_buildpack_version")
|
||||
|
||||
// check plan.toml
|
||||
foundPlanTOML := filepath.Join(copyDir, "layers", "plan.toml")
|
||||
buildPlan, err := files.Handler.ReadPlan(foundPlanTOML)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, buildPlan.Entries[0].Providers[0].ID, "simple_buildpack")
|
||||
h.AssertEq(t, buildPlan.Entries[0].Providers[0].Version, "simple_buildpack_version")
|
||||
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Name, "some_requirement")
|
||||
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Metadata["some_metadata_key"], "some_metadata_val")
|
||||
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Metadata["version"], "some_version")
|
||||
|
||||
// check output
|
||||
h.AssertStringContains(t, output, "simple_buildpack simple_buildpack_version")
|
||||
h.AssertStringDoesNotContain(t, output, "======== Results ========") // log output is info level as detect passed
|
||||
})
|
||||
})
|
||||
|
||||
when("environment variables are provided for buildpack and app directories and for the output files", func() {
|
||||
var copyDir, containerName string
|
||||
|
||||
it.Before(func() {
|
||||
containerName = "test-container-" + h.RandString(10)
|
||||
var err error
|
||||
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||
h.AssertNil(t, err)
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
if h.DockerContainerExists(t, containerName) {
|
||||
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||
}
|
||||
os.RemoveAll(copyDir)
|
||||
})
|
||||
|
||||
it("writes group.toml and plan.toml in the right locations and with the right names", func() {
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/layers",
|
||||
detectImage,
|
||||
h.WithFlags("--user", userID,
|
||||
"--env", "CNB_ORDER_PATH=/cnb/orders/always_detect_order.toml",
|
||||
"--env", "CNB_BUILDPACKS_DIR=/cnb/custom_buildpacks",
|
||||
"--env", "CNB_APP_DIR=/custom_workspace",
|
||||
"--env", "CNB_GROUP_PATH=./custom_group.toml",
|
||||
"--env", "CNB_PLAN_PATH=./custom_plan.toml",
|
||||
"--env", "CNB_PLATFORM_DIR=/custom_platform",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
),
|
||||
h.WithArgs("-log-level=debug"),
|
||||
)
|
||||
|
||||
// check group.toml
|
||||
foundGroupTOML := filepath.Join(copyDir, "layers", "custom_group.toml")
|
||||
group, err := files.Handler.ReadGroup(foundGroupTOML)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, group.Group[0].ID, "always_detect_buildpack")
|
||||
h.AssertEq(t, group.Group[0].Version, "always_detect_buildpack_version")
|
||||
|
||||
// check plan.toml - should be empty since we're using always_detect_order.toml so there is no "actual plan"
|
||||
tempPlanToml := filepath.Join(copyDir, "layers", "custom_plan.toml")
|
||||
planContents, err := os.ReadFile(tempPlanToml)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, len(planContents) == 0, true)
|
||||
|
||||
// check platform directory
|
||||
logs := h.Run(t, exec.Command("docker", "logs", containerName))
|
||||
expectedPlatformPath := "platform_path: /custom_platform"
|
||||
expectedAppDir := "app_dir: /custom_workspace"
|
||||
h.AssertStringContains(t, logs, expectedPlatformPath)
|
||||
h.AssertStringContains(t, logs, expectedAppDir)
|
||||
})
|
||||
})
|
||||
|
||||
when("-order is provided", func() {
|
||||
var copyDir, containerName, expectedOrderTOMLPath string
|
||||
|
||||
it.Before(func() {
|
||||
containerName = "test-container-" + h.RandString(10)
|
||||
var err error
|
||||
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||
h.AssertNil(t, err)
|
||||
|
||||
simpleOrderTOML := filepath.Join("testdata", "detector", "container", "cnb", "orders", "simple_order.toml")
|
||||
expectedOrderTOMLPath, err = filepath.Abs(simpleOrderTOML)
|
||||
h.AssertNil(t, err)
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
if h.DockerContainerExists(t, containerName) {
|
||||
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||
}
|
||||
os.RemoveAll(copyDir)
|
||||
})
|
||||
|
||||
when("the order.toml exists", func() {
|
||||
it("processes the provided order.toml", func() {
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/layers",
|
||||
detectImage,
|
||||
h.WithFlags("--user", userID,
|
||||
"--volume", expectedOrderTOMLPath+":/custom/order.toml",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
),
|
||||
h.WithArgs(
|
||||
"-log-level=debug",
|
||||
"-order=/custom/order.toml",
|
||||
),
|
||||
)
|
||||
|
||||
// check group.toml
|
||||
foundGroupTOML := filepath.Join(copyDir, "layers", "group.toml")
|
||||
group, err := files.Handler.ReadGroup(foundGroupTOML)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, group.Group[0].ID, "simple_buildpack")
|
||||
h.AssertEq(t, group.Group[0].Version, "simple_buildpack_version")
|
||||
})
|
||||
})
|
||||
|
||||
when("the order.toml does not exist", func() {
|
||||
it("errors", func() {
|
||||
command := exec.Command("docker", "run",
|
||||
"--user", userID,
|
||||
"--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
detectImage,
|
||||
"-order=/custom/order.toml")
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to initialize detector: reading order: failed to read order file: open /custom/order.toml: no such file or directory"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("the order.toml contains a buildpack using an unsupported api", func() {
|
||||
it("errors", func() {
|
||||
command := exec.Command("docker", "run",
|
||||
"--user", userID,
|
||||
"--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
detectImage,
|
||||
"-order=/cnb/orders/bad_api.toml")
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
failErr, ok := err.(*exec.ExitError)
|
||||
if !ok {
|
||||
t.Fatalf("expected an error of type exec.ExitError")
|
||||
}
|
||||
h.AssertEq(t, failErr.ExitCode(), 12) // platform code for buildpack api error
|
||||
expected := "buildpack API version '0.1' is incompatible with the lifecycle"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("-order contains extensions", func() {
|
||||
var containerName, copyDir, orderPath string
|
||||
|
||||
it.Before(func() {
|
||||
containerName = "test-container-" + h.RandString(10)
|
||||
var err error
|
||||
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||
h.AssertNil(t, err)
|
||||
orderPath, err = filepath.Abs(filepath.Join("testdata", "detector", "container", "cnb", "orders", "order_with_ext.toml"))
|
||||
h.AssertNil(t, err)
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
if h.DockerContainerExists(t, containerName) {
|
||||
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||
}
|
||||
os.RemoveAll(copyDir)
|
||||
})
|
||||
|
||||
it("processes the provided order.toml", func() {
|
||||
experimentalMode := "warn"
|
||||
if api.MustParse(platformAPI).AtLeast("0.13") {
|
||||
experimentalMode = "error"
|
||||
}
|
||||
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/layers",
|
||||
detectImage,
|
||||
h.WithFlags(
|
||||
"--user", userID,
|
||||
"--volume", orderPath+":/layers/order.toml",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode,
|
||||
),
|
||||
h.WithArgs(
|
||||
"-analyzed=/layers/analyzed.toml",
|
||||
"-extensions=/cnb/extensions",
|
||||
"-generated=/layers/generated",
|
||||
"-log-level=debug",
|
||||
"-run=/layers/run.toml", // /cnb/run.toml is the default location of run.toml
|
||||
),
|
||||
)
|
||||
|
||||
t.Log("runs /bin/detect for buildpacks and extensions")
|
||||
if api.MustParse(platformAPI).LessThan("0.13") {
|
||||
h.AssertStringContains(t, output, "Platform requested experimental feature 'Dockerfiles'")
|
||||
}
|
||||
h.AssertStringContains(t, output, "FOO=val-from-build-config")
|
||||
h.AssertStringContains(t, output, "simple_extension: output from /bin/detect")
|
||||
t.Log("writes group.toml")
|
||||
foundGroupTOML := filepath.Join(copyDir, "layers", "group.toml")
|
||||
group, err := files.Handler.ReadGroup(foundGroupTOML)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, group.GroupExtensions[0].ID, "simple_extension")
|
||||
h.AssertEq(t, group.GroupExtensions[0].Version, "simple_extension_version")
|
||||
h.AssertEq(t, group.Group[0].ID, "buildpack_for_ext")
|
||||
h.AssertEq(t, group.Group[0].Version, "buildpack_for_ext_version")
|
||||
h.AssertEq(t, group.Group[0].Extension, false)
|
||||
t.Log("writes plan.toml")
|
||||
foundPlanTOML := filepath.Join(copyDir, "layers", "plan.toml")
|
||||
buildPlan, err := files.Handler.ReadPlan(foundPlanTOML)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, len(buildPlan.Entries), 0) // this shows that the plan was filtered to remove `requires` provided by extensions
|
||||
|
||||
t.Log("runs /bin/generate for extensions")
|
||||
h.AssertStringContains(t, output, "simple_extension: output from /bin/generate")
|
||||
|
||||
var dockerfilePath string
|
||||
if api.MustParse(platformAPI).LessThan("0.13") {
|
||||
t.Log("copies the generated Dockerfiles to the output directory")
|
||||
dockerfilePath = filepath.Join(copyDir, "layers", "generated", "run", "simple_extension", "Dockerfile")
|
||||
} else {
|
||||
dockerfilePath = filepath.Join(copyDir, "layers", "generated", "simple_extension", "run.Dockerfile")
|
||||
}
|
||||
h.AssertPathExists(t, dockerfilePath)
|
||||
contents, err := os.ReadFile(dockerfilePath)
|
||||
h.AssertEq(t, string(contents), "FROM some-run-image-from-extension\n")
|
||||
t.Log("records the new run image in analyzed.toml")
|
||||
foundAnalyzedTOML := filepath.Join(copyDir, "layers", "analyzed.toml")
|
||||
analyzedMD, err := files.Handler.ReadAnalyzed(foundAnalyzedTOML, cmd.DefaultLogger)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, analyzedMD.RunImage.Image, "some-run-image-from-extension")
|
||||
})
|
||||
})
|
||||
}
|
||||
})
|
||||
}

@ -1,35 +1,31 @@
//go:build acceptance
// +build acceptance

package acceptance

import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"math/rand"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
"time"

"github.com/buildpacks/imgutil"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/pkg/errors"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"

"github.com/buildpacks/imgutil"

"github.com/google/go-containerregistry/pkg/authn"

"github.com/buildpacks/lifecycle/api"
"github.com/buildpacks/lifecycle/auth"
"github.com/buildpacks/lifecycle/cache"
"github.com/buildpacks/lifecycle/cmd"
"github.com/buildpacks/lifecycle/internal/fsutil"
"github.com/buildpacks/lifecycle/internal/path"
"github.com/buildpacks/lifecycle/platform/files"
"github.com/buildpacks/lifecycle/internal/encoding"
"github.com/buildpacks/lifecycle/platform"
h "github.com/buildpacks/lifecycle/testhelpers"
)
@ -44,19 +40,25 @@ var (
)

func TestExporter(t *testing.T) {
h.SkipIf(t, runtime.GOOS == "windows", "Exporter acceptance tests are not yet supported on Windows")

rand.Seed(time.Now().UTC().UnixNano())

testImageDockerContext := filepath.Join("testdata", "exporter")
exportTest = NewPhaseTest(t, "exporter", testImageDockerContext)

exportTest.Start(t, updateTOMLFixturesWithTestRegistry)
exportTest.Start(t, updateAnalyzedTOMLFixturesWithRegRepoName)
defer exportTest.Stop(t)

exportImage = exportTest.testImageRef
exporterPath = exportTest.containerBinaryPath
cacheFixtureDir = filepath.Join("testdata", "exporter", "cache-dir")
exportRegAuthConfig = exportTest.targetRegistry.authConfig
exportRegNetwork = exportTest.targetRegistry.network
exportDaemonFixtures = exportTest.targetDaemon.fixtures
exportRegFixtures = exportTest.targetRegistry.fixtures

rand.Seed(time.Now().UTC().UnixNano())

for _, platformAPI := range api.Platform.Supported {
spec.Run(t, "acceptance-exporter/"+platformAPI.String(), testExporterFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{}))
}
@ -64,149 +66,51 @@ func TestExporter(t *testing.T) {
|
|||
|
||||
func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
|
||||
return func(t *testing.T, when spec.G, it spec.S) {
|
||||
var exportedImageName string
|
||||
|
||||
it.After(func() {
|
||||
_, _, _ = h.RunE(exec.Command("docker", "rmi", exportedImageName)) // #nosec G204
|
||||
})
|
||||
|
||||
when("daemon case", func() {
|
||||
var exportedImageName string
|
||||
when("first build", func() {
|
||||
when("app", func() {
|
||||
it("is created", func() {
|
||||
exportFlags := []string{"-daemon"}
|
||||
if api.MustParse(platformAPI).LessThan("0.7") {
|
||||
exportFlags = append(exportFlags, []string{"-run-image", exportRegFixtures.ReadOnlyRunImage}...)
|
||||
}
|
||||
|
||||
it.After(func() {
|
||||
_, _, _ = h.RunE(exec.Command("docker", "rmi", exportedImageName)) // #nosec G204
|
||||
})
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportedImageName = "some-exported-image-" + h.RandString(10)
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
|
||||
it("app is created", func() {
|
||||
exportFlags := []string{"-daemon", "-log-level", "debug"}
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportedImageName = "some-exported-image-" + h.RandString(10)
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
output := h.DockerRun(t,
|
||||
exportImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
|
||||
"--network", exportRegNetwork,
|
||||
)...),
|
||||
h.WithArgs(exportArgs...),
|
||||
)
|
||||
h.AssertStringContains(t, output, "Saving "+exportedImageName)
|
||||
|
||||
output := h.DockerRun(t,
|
||||
exportImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
)...),
|
||||
h.WithArgs(exportArgs...),
|
||||
)
|
||||
h.AssertStringContains(t, output, "Saving "+exportedImageName)
|
||||
|
||||
if api.MustParse(platformAPI).AtLeast("0.11") {
|
||||
extensions := []string{"sbom.cdx.json", "sbom.spdx.json", "sbom.syft.json"}
|
||||
for _, extension := range extensions {
|
||||
h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM lifecycle.%s to %s", extension, filepath.Join(path.RootDir, "layers", "sbom", "build", "buildpacksio_lifecycle", extension)))
|
||||
h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM launcher.%s to %s", extension, filepath.Join(path.RootDir, "layers", "sbom", "launch", "buildpacksio_lifecycle", "launcher", extension)))
|
||||
}
|
||||
} else {
|
||||
h.AssertStringDoesNotContain(t, output, "Copying SBOM")
|
||||
}
|
||||
|
||||
if api.MustParse(platformAPI).AtLeast("0.12") {
|
||||
expectedHistory := []string{
|
||||
"Buildpacks Launcher Config",
|
||||
"Buildpacks Application Launcher",
|
||||
"Application Layer",
|
||||
"Software Bill-of-Materials",
|
||||
"Layer: 'corrupted-layer', Created by buildpack: corrupted_buildpack@corrupted_v1",
|
||||
"Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1",
|
||||
"", // run image layer
|
||||
}
|
||||
assertDaemonImageHasHistory(t, exportedImageName, expectedHistory)
|
||||
} else {
|
||||
assertDaemonImageDoesNotHaveHistory(t, exportedImageName)
|
||||
}
|
||||
|
||||
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
|
||||
})
|
||||
|
||||
when("using extensions", func() {
|
||||
it.Before(func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
|
||||
})
|
||||
|
||||
it("app is created from the extended run image", func() {
|
||||
exportFlags := []string{
|
||||
"-analyzed", "/layers/run-image-extended-analyzed.toml", // though the run image is a registry image, it also exists in the daemon with the same tag
|
||||
"-daemon",
|
||||
"-extended", "/layers/some-extended-dir",
|
||||
"-log-level", "debug",
|
||||
"-run", "/cnb/run.toml", // though the run image is a registry image, it also exists in the daemon with the same tag
|
||||
}
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportedImageName = "some-exported-image-" + h.RandString(10)
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
|
||||
// get run image top layer
|
||||
inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportTest.targetRegistry.fixtures.ReadOnlyRunImage)
|
||||
h.AssertNil(t, err)
|
||||
layers := inspect.RootFS.Layers
|
||||
runImageFixtureTopLayerSHA := layers[len(layers)-1]
|
||||
runImageFixtureSHA := inspect.ID
|
||||
|
||||
experimentalMode := "warn"
|
||||
if api.MustParse(platformAPI).AtLeast("0.13") {
|
||||
experimentalMode = "error"
|
||||
}
|
||||
|
||||
output := h.DockerRun(t,
|
||||
exportImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
)...),
|
||||
h.WithArgs(exportArgs...),
|
||||
)
|
||||
h.AssertStringContains(t, output, "Saving "+exportedImageName)
|
||||
|
||||
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
|
||||
expectedHistory := []string{
|
||||
"Buildpacks Launcher Config",
|
||||
"Buildpacks Application Launcher",
|
||||
"Application Layer",
|
||||
"Software Bill-of-Materials",
|
||||
"Layer: 'corrupted-layer', Created by buildpack: corrupted_buildpack@corrupted_v1",
|
||||
"Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1",
|
||||
"Layer: 'RUN mkdir /some-other-dir && echo some-data > /some-other-dir/some-file && echo some-data > /some-other-file', Created by extension: second-extension",
|
||||
"Layer: 'RUN mkdir /some-dir && echo some-data > /some-dir/some-file && echo some-data > /some-file', Created by extension: first-extension",
|
||||
"", // run image layer
|
||||
}
|
||||
assertDaemonImageHasHistory(t, exportedImageName, expectedHistory)
|
||||
t.Log("bases the exported image on the extended run image")
|
||||
inspect, _, err = h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportedImageName)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, inspect.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<sha>/blobs/sha256/<config>
|
||||
t.Log("Adds extension layers")
|
||||
type testCase struct {
|
||||
expectedDiffID string
|
||||
layerIndex int
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
expectedDiffID: "sha256:fb54d2566824d6630d94db0b008d9a544a94d3547a424f52e2fd282b648c0601", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing and zeroing timestamps
|
||||
layerIndex: 1,
|
||||
},
|
||||
{
|
||||
expectedDiffID: "sha256:1018c7d3584c4f7fa3ef4486d1a6a11b93956b9d8bfe0898a3e0fbd248c984d8", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing and zeroing timestamps
|
||||
layerIndex: 2,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
h.AssertEq(t, inspect.RootFS.Layers[tc.layerIndex], tc.expectedDiffID)
|
||||
}
|
||||
t.Log("sets the layers metadata label according to the new spec")
|
||||
var lmd files.LayersMetadata
|
||||
lmdJSON := inspect.Config.Labels["io.buildpacks.lifecycle.metadata"]
|
||||
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd))
|
||||
h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml
|
||||
h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml
|
||||
h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA)
|
||||
h.AssertEq(t, lmd.RunImage.Reference, strings.TrimPrefix(runImageFixtureSHA, "sha256:"))
|
||||
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("SOURCE_DATE_EPOCH is set", func() {
|
||||
it("app is created with config CreatedAt set to SOURCE_DATE_EPOCH", func() {
|
||||
it("Image CreatedAt is set to SOURCE_DATE_EPOCH", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9")
|
||||
expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC)
|
||||
|
||||
exportFlags := []string{"-daemon"}
|
||||
if api.MustParse(platformAPI).LessThan("0.7") {
|
||||
exportFlags = append(exportFlags, []string{"-run-image", exportRegFixtures.ReadOnlyRunImage}...)
|
||||
}
|
||||
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportedImageName = "some-exported-image-" + h.RandString(10)
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
|
@ -230,93 +134,14 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
})

when("registry case", func() {
|
||||
var exportedImageName string
|
||||
when("first build", func() {
|
||||
when("app", func() {
|
||||
it("is created", func() {
|
||||
var exportFlags []string
|
||||
if api.MustParse(platformAPI).LessThan("0.7") {
|
||||
exportFlags = append(exportFlags, []string{"-run-image", exportRegFixtures.ReadOnlyRunImage}...)
|
||||
}
|
||||
|
||||
it.After(func() {
|
||||
_, _, _ = h.RunE(exec.Command("docker", "rmi", exportedImageName)) // #nosec G204
|
||||
})
|
||||
|
||||
it("app is created", func() {
|
||||
var exportFlags []string
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
|
||||
output := h.DockerRun(t,
|
||||
exportImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
|
||||
"--network", exportRegNetwork,
|
||||
),
|
||||
h.WithArgs(exportArgs...),
|
||||
)
|
||||
h.AssertStringContains(t, output, "Saving "+exportedImageName)
|
||||
|
||||
h.Run(t, exec.Command("docker", "pull", exportedImageName))
|
||||
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
|
||||
})
|
||||
|
||||
when("registry is insecure", func() {
|
||||
it.Before(func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
|
||||
})
|
||||
|
||||
it("uses http protocol", func() {
|
||||
var exportFlags []string
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportedImageName = exportTest.RegRepoName("some-insecure-exported-image-" + h.RandString(10))
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
insecureRegistry := "host.docker.internal/bar"
|
||||
insecureAnalyzed := "/layers/analyzed_insecure.toml"
|
||||
|
||||
_, _, err := h.DockerRunWithError(t,
|
||||
exportImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_INSECURE_REGISTRIES="+insecureRegistry,
|
||||
"--env", "CNB_ANALYZED_PATH="+insecureAnalyzed,
|
||||
"--network", exportRegNetwork,
|
||||
),
|
||||
h.WithArgs(exportArgs...),
|
||||
)
|
||||
h.AssertStringContains(t, err.Error(), "http://host.docker.internal")
|
||||
})
|
||||
})
|
||||
|
||||
when("SOURCE_DATE_EPOCH is set", func() {
|
||||
it("app is created with config CreatedAt set to SOURCE_DATE_EPOCH", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9")
|
||||
expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC)
|
||||
|
||||
var exportFlags []string
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
|
||||
output := h.DockerRun(t,
|
||||
exportImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
|
||||
"--env", "SOURCE_DATE_EPOCH="+fmt.Sprintf("%d", expectedTime.Unix()),
|
||||
"--network", exportRegNetwork,
|
||||
),
|
||||
h.WithArgs(exportArgs...),
|
||||
)
|
||||
h.AssertStringContains(t, output, "Saving "+exportedImageName)
|
||||
|
||||
h.Run(t, exec.Command("docker", "pull", exportedImageName))
|
||||
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, expectedTime)
|
||||
})
|
||||
})
|
||||
|
||||
// FIXME: move this out of the registry block
|
||||
when("cache", func() {
|
||||
when("image case", func() {
|
||||
it("cache is created", func() {
|
||||
cacheImageName := exportTest.RegRepoName("some-cache-image-" + h.RandString(10))
|
||||
exportFlags := []string{"-cache-image", cacheImageName}
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
|
@ -331,16 +156,50 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
// To detect whether the export of cacheImage and exportedImage is successful

h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
h.Run(t, exec.Command("docker", "pull", cacheImageName))
})
})
when("SOURCE_DATE_EPOCH is set", func() {
|
||||
it("Image CreatedAt is set to SOURCE_DATE_EPOCH", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9")
|
||||
expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC)
|
||||
|
||||
when("parallel export is enabled", func() {
|
||||
it("cache is created", func() {
|
||||
var exportFlags []string
|
||||
if api.MustParse(platformAPI).LessThan("0.7") {
|
||||
exportFlags = append(exportFlags, []string{"-run-image", exportRegFixtures.ReadOnlyRunImage}...)
|
||||
}
|
||||
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
|
||||
output := h.DockerRun(t,
|
||||
exportImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
|
||||
"--env", "SOURCE_DATE_EPOCH="+fmt.Sprintf("%d", expectedTime.Unix()),
|
||||
"--network", exportRegNetwork,
|
||||
),
|
||||
h.WithArgs(exportArgs...),
|
||||
)
|
||||
h.AssertStringContains(t, output, "Saving "+exportedImageName)
|
||||
|
||||
h.Run(t, exec.Command("docker", "pull", exportedImageName))
|
||||
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, expectedTime)
|
||||
})
|
||||
})
|
||||
when("cache", func() {
|
||||
when("cache image case", func() {
|
||||
it("is created", func() {
|
||||
cacheImageName := exportTest.RegRepoName("some-cache-image-" + h.RandString(10))
|
||||
exportFlags := []string{"-cache-image", cacheImageName, "-parallel"}
|
||||
exportFlags := []string{"-cache-image", cacheImageName}
|
||||
if api.MustParse(platformAPI).LessThan("0.7") {
|
||||
exportFlags = append(exportFlags, "-run-image", exportRegFixtures.ReadOnlyRunImage)
|
||||
}
|
||||
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
|
@ -358,14 +217,15 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
|
||||
h.Run(t, exec.Command("docker", "pull", exportedImageName))
|
||||
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
|
||||
h.Run(t, exec.Command("docker", "pull", cacheImageName))
|
||||
})
|
||||
})
|
||||
|
||||
when("cache is provided but no data was cached", func() {
|
||||
it("cache is created with an empty layer", func() {
|
||||
it("is created with empty layer", func() {
|
||||
cacheImageName := exportTest.RegRepoName("some-empty-cache-image-" + h.RandString(10))
|
||||
exportFlags := []string{"-cache-image", cacheImageName, "-layers", "/other_layers"}
|
||||
if api.MustParse(platformAPI).LessThan("0.7") {
|
||||
exportFlags = append(exportFlags, "-run-image", exportRegFixtures.ReadOnlyRunImage)
|
||||
}
|
||||
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
|
@ -385,9 +245,7 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
|
||||
// Retrieve the cache image from the ephemeral registry
|
||||
h.Run(t, exec.Command("docker", "pull", cacheImageName))
|
||||
logger := cmd.DefaultLogger
|
||||
|
||||
subject, err := cache.NewImageCacheFromName(cacheImageName, authn.DefaultKeychain, logger, cache.NewImageDeleter(cache.NewImageComparer(), logger, api.MustParse(platformAPI).LessThan("0.13")))
|
||||
subject, err := cache.NewImageCacheFromName(cacheImageName, authn.DefaultKeychain)
|
||||
h.AssertNil(t, err)
|
||||
|
||||
//Assert the cache image was created with an empty layer
|
||||
|
@ -397,275 +255,42 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
})
})
})

when("directory case", func() {
when("original cache was corrupted", func() {
var cacheDir string

it.Before(func() {
var err error
cacheDir, err = os.MkdirTemp("", "cache")
h.AssertNil(t, err)
h.AssertNil(t, os.Chmod(cacheDir, 0777)) // Override umask

cacheFixtureDir := filepath.Join("testdata", "exporter", "cache-dir")
h.AssertNil(t, fsutil.Copy(cacheFixtureDir, cacheDir))
// We have to pre-create the tar files so that their digests do not change due to timestamps
// But, ':' in the filepath on Windows is not allowed
h.AssertNil(t, os.Rename(
filepath.Join(cacheDir, "committed", "sha256_258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar"),
filepath.Join(cacheDir, "committed", "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar"),
))
})

it.After(func() {
|
||||
_ = os.RemoveAll(cacheDir)
|
||||
})
|
||||
|
||||
it("overwrites the original layer", func() {
|
||||
exportFlags := []string{
|
||||
"-cache-dir", "/cache",
|
||||
"-log-level", "debug",
|
||||
}
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
|
||||
output := h.DockerRun(t,
|
||||
exportImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
|
||||
"--network", exportRegNetwork,
|
||||
"--volume", fmt.Sprintf("%s:/cache", cacheDir),
|
||||
),
|
||||
h.WithArgs(exportArgs...),
|
||||
)
|
||||
h.AssertStringContains(t, output, "Skipping reuse for layer corrupted_buildpack:corrupted-layer: expected layer contents to have SHA 'sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59'; found 'sha256:9e0b77ed599eafdab8611f7eeefef084077f91f02f1da0a3870c7ff20a08bee8'")
|
||||
h.AssertStringContains(t, output, "Saving "+exportedImageName)
|
||||
h.Run(t, exec.Command("docker", "pull", exportedImageName))
|
||||
defer h.Run(t, exec.Command("docker", "image", "rm", exportedImageName))
|
||||
// Verify the app has the correct sha for the layer
|
||||
inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportedImageName)
|
||||
h.AssertNil(t, err)
|
||||
var lmd files.LayersMetadata
|
||||
lmdJSON := inspect.Config.Labels["io.buildpacks.lifecycle.metadata"]
|
||||
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd))
|
||||
h.AssertEq(t, lmd.Buildpacks[2].Layers["corrupted-layer"].SHA, "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59")
|
||||
// Verify the cache has correct contents now
|
||||
foundDiffID, err := func() (string, error) {
|
||||
layerPath := filepath.Join(cacheDir, "committed", "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar")
|
||||
layerRC, err := os.Open(layerPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer func() {
|
||||
_ = layerRC.Close()
|
||||
}()
|
||||
hasher := sha256.New()
|
||||
if _, err = io.Copy(hasher, layerRC); err != nil {
|
||||
return "", errors.Wrap(err, "hashing layer")
|
||||
}
|
||||
foundDiffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size())))
|
||||
return foundDiffID, nil
|
||||
}()
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, foundDiffID, "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59")
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("using extensions", func() {
|
||||
it.Before(func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
|
||||
})
|
||||
|
||||
it("app is created from the extended run image", func() {
|
||||
exportFlags := []string{
|
||||
"-analyzed", "/layers/run-image-extended-analyzed.toml",
|
||||
"-extended", "/layers/some-extended-dir",
|
||||
"-log-level", "debug",
|
||||
"-run", "/cnb/run.toml",
|
||||
}
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
|
||||
// get run image SHA & top layer
|
||||
ref, imageAuth, err := auth.ReferenceForRepoName(authn.DefaultKeychain, exportTest.targetRegistry.fixtures.ReadOnlyRunImage)
|
||||
h.AssertNil(t, err)
|
||||
remoteImage, err := remote.Image(ref, remote.WithAuth(imageAuth))
|
||||
h.AssertNil(t, err)
|
||||
layers, err := remoteImage.Layers()
|
||||
h.AssertNil(t, err)
|
||||
runImageFixtureTopLayerSHA, err := layers[len(layers)-1].DiffID()
|
||||
h.AssertNil(t, err)
|
||||
runImageFixtureSHA, err := remoteImage.Digest()
|
||||
h.AssertNil(t, err)
|
||||
|
||||
experimentalMode := "warn"
|
||||
if api.MustParse(platformAPI).AtLeast("0.13") {
|
||||
experimentalMode = "error"
|
||||
}
|
||||
|
||||
output := h.DockerRun(t,
|
||||
exportImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
|
||||
"--network", exportRegNetwork,
|
||||
),
|
||||
h.WithArgs(exportArgs...),
|
||||
)
|
||||
h.AssertStringContains(t, output, "Saving "+exportedImageName)
|
||||
|
||||
h.Run(t, exec.Command("docker", "pull", exportedImageName))
|
||||
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
|
||||
t.Log("bases the exported image on the extended run image")
|
||||
ref, imageAuth, err = auth.ReferenceForRepoName(authn.DefaultKeychain, exportedImageName)
|
||||
h.AssertNil(t, err)
|
||||
remoteImage, err = remote.Image(ref, remote.WithAuth(imageAuth))
|
||||
h.AssertNil(t, err)
|
||||
configFile, err := remoteImage.ConfigFile()
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, configFile.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<sha>/blobs/sha256/<config>
|
||||
t.Log("Adds extension layers")
|
||||
layers, err = remoteImage.Layers()
|
||||
h.AssertNil(t, err)
|
||||
type testCase struct {
|
||||
expectedDigest string
|
||||
layerIndex int
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
expectedDigest: "sha256:08e7ad5ce17cf5e5f70affe68b341a93de86ee2ba074932c3a05b8770f66d772", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing, zeroing timestamps, and re-compressing
|
||||
layerIndex: 1,
|
||||
},
|
||||
{
|
||||
expectedDigest: "sha256:0e74ef444ea437147e3fa0ce2aad371df5380c26b96875ae07b9b67f44cdb2ee", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing, zeroing timestamps, and re-compressing
|
||||
layerIndex: 2,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
layer := layers[tc.layerIndex]
|
||||
digest, err := layer.Digest()
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, digest.String(), tc.expectedDigest)
|
||||
}
|
||||
t.Log("sets the layers metadata label according to the new spec")
|
||||
var lmd files.LayersMetadata
|
||||
lmdJSON := configFile.Config.Labels["io.buildpacks.lifecycle.metadata"]
|
||||
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd))
|
||||
h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml
|
||||
h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml
|
||||
h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA.String())
|
||||
h.AssertEq(t, lmd.RunImage.Reference, fmt.Sprintf("%s@%s", exportTest.targetRegistry.fixtures.ReadOnlyRunImage, runImageFixtureSHA.String()))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("layout case", func() {
|
||||
var (
|
||||
containerName string
|
||||
err error
|
||||
layoutDir string
|
||||
tmpDir string
|
||||
exportedImageName string
|
||||
)
|
||||
|
||||
when("experimental mode is enabled", func() {
|
||||
it.Before(func() {
|
||||
// create the directory to save all OCI images on disk
|
||||
tmpDir, err = os.MkdirTemp("", "layout")
|
||||
h.AssertNil(t, err)
|
||||
|
||||
containerName = "test-container-" + h.RandString(10)
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
if h.DockerContainerExists(t, containerName) {
|
||||
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||
}
|
||||
// removes all images created
|
||||
os.RemoveAll(tmpDir)
|
||||
})
|
||||
|
||||
when("using a custom layout directory", func() {
|
||||
it.Before(func() {
|
||||
exportedImageName = "my-custom-layout-app"
|
||||
layoutDir = filepath.Join(path.RootDir, "my-layout-dir")
|
||||
})
|
||||
|
||||
it("app is created", func() {
|
||||
var exportFlags []string
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
|
||||
exportFlags = append(exportFlags, []string{"-layout", "-layout-dir", layoutDir, "-analyzed", "/layers/layout-analyzed.toml"}...)
|
||||
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||
exportArgs = append(exportArgs, exportedImageName)
|
||||
|
||||
output := h.DockerRunAndCopy(t, containerName, tmpDir, layoutDir, exportImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_EXPERIMENTAL_MODE=warn",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
),
|
||||
h.WithArgs(exportArgs...))
|
||||
|
||||
h.AssertStringContains(t, output, "Saving /my-layout-dir/index.docker.io/library/my-custom-layout-app/latest")
|
||||
|
||||
// assert the image was saved on disk in OCI layout format
|
||||
index := h.ReadIndexManifest(t, filepath.Join(tmpDir, layoutDir, "index.docker.io", "library", exportedImageName, "latest"))
|
||||
h.AssertEq(t, len(index.Manifests), 1)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("experimental mode is not enabled", func() {
|
||||
it.Before(func() {
|
||||
layoutDir = filepath.Join(path.RootDir, "layout-dir")
|
||||
})
|
||||
|
||||
it("errors", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
|
||||
|
||||
cmd := exec.Command(
|
||||
"docker", "run", "--rm",
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
exportImage,
|
||||
ctrPath(exporterPath),
|
||||
"-layout",
|
||||
"-layout-dir", layoutDir,
|
||||
"some-image",
|
||||
) // #nosec G204
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "experimental features are disabled by CNB_EXPERIMENTAL_MODE=error"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func assertDaemonImageDoesNotHaveHistory(t *testing.T, repoName string) {
history, err := h.DockerCli(t).ImageHistory(context.TODO(), repoName)
func assertImageOSAndArch(t *testing.T, imageName string, phaseTest *PhaseTest) {
inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), imageName)
h.AssertNil(t, err)
for _, hs := range history {
h.AssertEq(t, hs.Created, imgutil.NormalizedDateTime.Unix())
h.AssertEq(t, hs.CreatedBy, "")
}
h.AssertEq(t, inspect.Os, phaseTest.targetDaemon.os)
h.AssertEq(t, inspect.Architecture, phaseTest.targetDaemon.arch)
}

func assertDaemonImageHasHistory(t *testing.T, repoName string, expectedHistory []string) {
history, err := h.DockerCli(t).ImageHistory(context.TODO(), repoName)
func assertImageOSAndArchAndCreatedAt(t *testing.T, imageName string, phaseTest *PhaseTest, expectedCreatedAt time.Time) {
inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), imageName)
h.AssertNil(t, err)
h.AssertEq(t, len(history), len(expectedHistory))
for idx, hs := range history {
h.AssertEq(t, hs.Created, imgutil.NormalizedDateTime.Unix())
h.AssertEq(t, hs.CreatedBy, expectedHistory[idx])
}
h.AssertEq(t, inspect.Os, phaseTest.targetDaemon.os)
h.AssertEq(t, inspect.Architecture, phaseTest.targetDaemon.arch)
h.AssertEq(t, inspect.Created, expectedCreatedAt.Format(time.RFC3339))
}

func updateAnalyzedTOMLFixturesWithRegRepoName(t *testing.T, phaseTest *PhaseTest) {
placeHolderPath := filepath.Join("testdata", "exporter", "container", "layers", "analyzed.toml.placeholder")
analyzedMD := assertAnalyzedMetadata(t, placeHolderPath)
analyzedMD.RunImage = &platform.ImageIdentifier{Reference: phaseTest.targetRegistry.fixtures.ReadOnlyRunImage}
encoding.WriteTOML(strings.TrimSuffix(placeHolderPath, ".placeholder"), analyzedMD)

placeHolderPath = filepath.Join("testdata", "exporter", "container", "layers", "some-analyzed.toml.placeholder")
analyzedMD = assertAnalyzedMetadata(t, placeHolderPath)
analyzedMD.PreviousImage = &platform.ImageIdentifier{Reference: phaseTest.targetRegistry.fixtures.SomeAppImage}
analyzedMD.RunImage = &platform.ImageIdentifier{Reference: phaseTest.targetRegistry.fixtures.ReadOnlyRunImage}
encoding.WriteTOML(strings.TrimSuffix(placeHolderPath, ".placeholder"), analyzedMD)

placeHolderPath = filepath.Join("testdata", "exporter", "container", "other_layers", "analyzed.toml.placeholder")
analyzedMD = assertAnalyzedMetadata(t, placeHolderPath)
analyzedMD.RunImage = &platform.ImageIdentifier{Reference: phaseTest.targetRegistry.fixtures.ReadOnlyRunImage}
encoding.WriteTOML(strings.TrimSuffix(placeHolderPath, ".placeholder"), analyzedMD)
}

func calculateEmptyLayerSha(t *testing.T) string {

@ -1,26 +1,29 @@
//go:build acceptance
// +build acceptance

package acceptance

import (
"fmt"
"math/rand"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
"time"

"github.com/buildpacks/imgutil/layout/sparse"
"github.com/google/go-containerregistry/pkg/authn"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"

"github.com/buildpacks/lifecycle/api"
"github.com/buildpacks/lifecycle/auth"
"github.com/buildpacks/lifecycle/cmd"
"github.com/buildpacks/lifecycle/platform/files"
"github.com/buildpacks/lifecycle/internal/encoding"
"github.com/buildpacks/lifecycle/internal/selective"
"github.com/buildpacks/lifecycle/platform"
h "github.com/buildpacks/lifecycle/testhelpers"
)
|
||||
|
||||
|
@ -34,16 +37,11 @@ var (
extendTest *PhaseTest
)

const (
// Log message emitted by kaniko;
// if we provide cache directory as an option, kaniko looks there for the base image as a tarball;
// however the base image is in OCI layout format, so we fail to initialize the base image;
// we manage to provide the base image because we override image.RetrieveRemoteImage,
// but the log message could be confusing to end users, hence we check that it is not printed.
msgErrRetrievingImageFromCache = "Error while retrieving image from cache"
)

func TestExtender(t *testing.T) {
h.SkipIf(t, runtime.GOOS == "windows", "Extender is not supported on Windows")

rand.Seed(time.Now().UTC().UnixNano())

testImageDockerContext := filepath.Join("testdata", "extender")
extendTest = NewPhaseTest(t, "extender", testImageDockerContext)
extendTest.Start(t)

@ -57,76 +55,53 @@ func TestExtender(t *testing.T) {
extendRegFixtures = extendTest.targetRegistry.fixtures

for _, platformAPI := range api.Platform.Supported {
if platformAPI.LessThan("0.10") {
continue
}
spec.Run(t, "acceptance-extender/"+platformAPI.String(), testExtenderFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{}))
}
}
|
||||
|
||||
func testExtenderFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
|
||||
return func(t *testing.T, when spec.G, it spec.S) {
|
||||
var generatedDir = "/layers/generated"
|
||||
|
||||
it.Before(func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.10"), "")
|
||||
if api.MustParse(platformAPI).AtLeast("0.13") {
|
||||
generatedDir = "/layers/generated-with-contexts"
|
||||
}
|
||||
})
|
||||
|
||||
when("kaniko case", func() {
|
||||
var extendedDir, kanikoDir, analyzedPath string
|
||||
var kanikoDir, analyzedPath string
|
||||
|
||||
it.Before(func() {
|
||||
var err error
|
||||
extendedDir, err = os.MkdirTemp("", "lifecycle-acceptance")
|
||||
h.AssertNil(t, err)
|
||||
kanikoDir, err = os.MkdirTemp("", "lifecycle-acceptance")
|
||||
h.AssertNil(t, err)
|
||||
|
||||
// push base image to test registry
|
||||
// push "builder" image to test registry
|
||||
h.Run(t, exec.Command("docker", "tag", extendImage, extendTest.RegRepoName(extendImage)))
|
||||
h.AssertNil(t, h.PushImage(h.DockerCli(t), extendTest.RegRepoName(extendImage), extendTest.targetRegistry.registry.EncodedLabeledAuth()))
|
||||
|
||||
// mimic what the restorer would have done in the previous phase:
|
||||
|
||||
// warm kaniko cache
|
||||
|
||||
// get remote image
|
||||
os.Setenv("DOCKER_CONFIG", extendTest.targetRegistry.dockerConfigDir)
|
||||
ref, auth, err := auth.ReferenceForRepoName(authn.DefaultKeychain, extendTest.RegRepoName(extendImage))
|
||||
h.AssertNil(t, err)
|
||||
remoteImage, err := remote.Image(ref, remote.WithAuth(auth))
|
||||
h.AssertNil(t, err)
|
||||
baseImageHash, err := remoteImage.Digest()
|
||||
buildImageHash, err := remoteImage.Digest()
|
||||
h.AssertNil(t, err)
|
||||
baseImageDigest := baseImageHash.String()
|
||||
buildImageDigest := buildImageHash.String()
|
||||
baseCacheDir := filepath.Join(kanikoDir, "cache", "base")
|
||||
h.AssertNil(t, os.MkdirAll(baseCacheDir, 0755))
|
||||
|
||||
// write sparse image
|
||||
layoutImage, err := sparse.NewImage(filepath.Join(baseCacheDir, baseImageDigest), remoteImage)
|
||||
layoutPath, err := selective.Write(filepath.Join(baseCacheDir, buildImageDigest), empty.Index)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertNil(t, layoutImage.Save())
|
||||
h.AssertNil(t, layoutPath.AppendImage(remoteImage))
|
||||
|
||||
// write image reference in analyzed.toml
|
||||
analyzedMD := files.Analyzed{
|
||||
BuildImage: &files.ImageIdentifier{
|
||||
Reference: fmt.Sprintf("%s@%s", extendTest.RegRepoName(extendImage), baseImageDigest),
|
||||
},
|
||||
RunImage: &files.RunImage{
|
||||
Reference: fmt.Sprintf("%s@%s", extendTest.RegRepoName(extendImage), baseImageDigest),
|
||||
Extend: true,
|
||||
},
|
||||
}
|
||||
// write build image reference in analyzed.toml
|
||||
analyzedMD := platform.AnalyzedMetadata{BuildImage: &platform.ImageIdentifier{Reference: fmt.Sprintf("%s@%s", extendTest.RegRepoName(extendImage), buildImageDigest)}}
|
||||
analyzedPath = h.TempFile(t, "", "analyzed.toml")
|
||||
h.AssertNil(t, files.Handler.WriteAnalyzed(analyzedPath, &analyzedMD, cmd.DefaultLogger))
|
||||
h.AssertNil(t, encoding.WriteTOML(analyzedPath, analyzedMD))
|
||||
})
|
||||
|
||||
it.After(func() {
|
||||
_ = os.RemoveAll(kanikoDir)
|
||||
_ = os.RemoveAll(extendedDir)
|
||||
})
|
||||
|
||||
when("extending the build image", func() {
|
||||
|
@ -134,7 +109,7 @@ func testExtenderFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
extendArgs := []string{
|
||||
ctrPath(extenderPath),
|
||||
"-analyzed", "/layers/analyzed.toml",
|
||||
"-generated", generatedDir,
|
||||
"-generated", "/layers/generated",
|
||||
"-log-level", "debug",
|
||||
"-gid", "1000",
|
||||
"-uid", "1234",
|
||||
|
@ -152,12 +127,12 @@ func testExtenderFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
h.WithFlags(extendFlags...),
|
||||
h.WithArgs(extendArgs...),
|
||||
)
|
||||
h.AssertStringDoesNotContain(t, firstOutput, msgErrRetrievingImageFromCache)
|
||||
h.AssertStringDoesNotContain(t, firstOutput, "Did not find cache key, pulling remote image...")
|
||||
h.AssertStringDoesNotContain(t, firstOutput, "Error while retrieving image from cache: oci")
|
||||
h.AssertStringContains(t, firstOutput, "ca-certificates")
|
||||
h.AssertStringContains(t, firstOutput, "Hello Extensions buildpack\ncurl") // output by buildpack, shows that curl was installed on the build image
|
||||
t.Log("sets environment variables from the extended build image in the build context")
|
||||
h.AssertStringContains(t, firstOutput, "CNB_STACK_ID for buildpack: stack-id-from-ext-tree")
|
||||
h.AssertStringContains(t, firstOutput, "HOME for buildpack: /home/cnb")
|
||||
|
||||
t.Log("cleans the kaniko directory")
|
||||
fis, err := os.ReadDir(kanikoDir)
|
||||
|
@ -170,119 +145,12 @@ func testExtenderFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
h.WithFlags(extendFlags...),
|
||||
h.WithArgs(extendArgs...),
|
||||
)
|
||||
h.AssertStringDoesNotContain(t, secondOutput, msgErrRetrievingImageFromCache)
|
||||
h.AssertStringDoesNotContain(t, secondOutput, "ca-certificates") // shows that first cache layer was used
|
||||
h.AssertStringDoesNotContain(t, secondOutput, "No cached layer found for cmd RUN apt-get update && apt-get install -y tree") // shows that second cache layer was used
|
||||
h.AssertStringContains(t, secondOutput, "Hello Extensions buildpack\ncurl") // output by buildpack, shows that curl is still installed in the unpacked cached layer
|
||||
})
|
||||
})
|
||||
|
||||
when("extending the run image", func() {
|
||||
it.Before(func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not support run image extension")
|
||||
})
|
||||
|
||||
it("succeeds", func() {
|
||||
extendArgs := []string{
|
||||
ctrPath(extenderPath),
|
||||
"-analyzed", "/layers/analyzed.toml",
|
||||
"-extended", "/layers/extended",
|
||||
"-generated", generatedDir,
|
||||
"-kind", "run",
|
||||
"-log-level", "debug",
|
||||
"-gid", "1000",
|
||||
"-uid", "1234",
|
||||
}
|
||||
|
||||
extendFlags := []string{
|
||||
"--env", "CNB_PLATFORM_API=" + platformAPI,
|
||||
"--volume", fmt.Sprintf("%s:/layers/analyzed.toml", analyzedPath),
|
||||
"--volume", fmt.Sprintf("%s:/layers/extended", extendedDir),
|
||||
"--volume", fmt.Sprintf("%s:/kaniko", kanikoDir),
|
||||
}
|
||||
|
||||
t.Log("first build extends the run image by running Dockerfile commands")
|
||||
firstOutput := h.DockerRunWithCombinedOutput(t,
|
||||
extendImage,
|
||||
h.WithFlags(extendFlags...),
|
||||
h.WithArgs(extendArgs...),
|
||||
)
|
||||
h.AssertStringDoesNotContain(t, firstOutput, msgErrRetrievingImageFromCache)
|
||||
h.AssertStringContains(t, firstOutput, "ca-certificates")
|
||||
h.AssertStringContains(t, firstOutput, "No cached layer found for cmd RUN apt-get update && apt-get install -y tree")
|
||||
t.Log("does not run the build phase")
|
||||
h.AssertStringDoesNotContain(t, firstOutput, "Hello Extensions buildpack\ncurl")
|
||||
t.Log("outputs extended image layers to the extended directory")
|
||||
images, err := os.ReadDir(filepath.Join(extendedDir, "run"))
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, len(images), 1) // sha256:<extended image digest>
|
||||
assertExpectedImage(t, filepath.Join(extendedDir, "run", images[0].Name()), platformAPI)
|
||||
t.Log("cleans the kaniko directory")
|
||||
caches, err := os.ReadDir(kanikoDir)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, len(caches), 1) // 1: /kaniko/cache
|
||||
|
||||
t.Log("second build extends the build image by pulling from the cache directory")
|
||||
secondOutput := h.DockerRunWithCombinedOutput(t,
|
||||
extendImage,
|
||||
h.WithFlags(extendFlags...),
|
||||
h.WithArgs(extendArgs...),
|
||||
)
|
||||
h.AssertStringDoesNotContain(t, secondOutput, msgErrRetrievingImageFromCache)
|
||||
h.AssertStringDoesNotContain(t, secondOutput, "ca-certificates") // shows that first cache layer was used
|
||||
h.AssertStringDoesNotContain(t, secondOutput, "No cached layer found for cmd RUN apt-get update && apt-get install -y tree") // shows that second cache layer was used
|
||||
t.Log("does not run the build phase")
|
||||
h.AssertStringDoesNotContain(t, secondOutput, "Hello Extensions buildpack\ncurl")
|
||||
t.Log("outputs extended image layers to the extended directory")
|
||||
images, err = os.ReadDir(filepath.Join(extendedDir, "run"))
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, len(images), 1) // sha256:<first extended image digest>
|
||||
assertExpectedImage(t, filepath.Join(extendedDir, "run", images[0].Name()), platformAPI)
|
||||
t.Log("cleans the kaniko directory")
|
||||
caches, err = os.ReadDir(kanikoDir)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, len(caches), 1) // 1: /kaniko/cache
|
||||
h.AssertStringDoesNotContain(t, secondOutput, "Did not find cache key, pulling remote image...")
|
||||
h.AssertStringDoesNotContain(t, secondOutput, "Error while retrieving image from cache: oci")
|
||||
h.AssertStringDoesNotContain(t, secondOutput, "ca-certificates") // shows that cache layer was used
|
||||
h.AssertStringContains(t, secondOutput, "Hello Extensions buildpack\ncurl") // output by buildpack, shows that curl is still installed in the unpacked cached layer
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func assertExpectedImage(t *testing.T, imagePath, platformAPI string) {
|
||||
image, err := readOCI(imagePath)
|
||||
h.AssertNil(t, err)
|
||||
configFile, err := image.ConfigFile()
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, configFile.Config.Labels["io.buildpacks.rebasable"], "false")
|
||||
layers, err := image.Layers()
|
||||
h.AssertNil(t, err)
|
||||
history := configFile.History
|
||||
h.AssertEq(t, len(history), len(configFile.RootFS.DiffIDs))
|
||||
if api.MustParse(platformAPI).AtLeast("0.13") {
|
||||
h.AssertEq(t, len(layers), 7) // base (3), curl (2), tree (2)
|
||||
h.AssertEq(t, history[3].CreatedBy, "Layer: 'RUN apt-get update && apt-get install -y curl', Created by extension: curl")
|
||||
h.AssertEq(t, history[4].CreatedBy, "Layer: 'COPY run-file /', Created by extension: curl")
|
||||
h.AssertEq(t, history[5].CreatedBy, "Layer: 'RUN apt-get update && apt-get install -y tree', Created by extension: tree")
|
||||
h.AssertEq(t, history[6].CreatedBy, "Layer: 'COPY shared-file /shared-run', Created by extension: tree")
|
||||
} else {
|
||||
h.AssertEq(t, len(layers), 5) // base (3), curl (1), tree (1)
|
||||
h.AssertEq(t, history[3].CreatedBy, "Layer: 'RUN apt-get update && apt-get install -y curl', Created by extension: curl")
|
||||
h.AssertEq(t, history[4].CreatedBy, "Layer: 'RUN apt-get update && apt-get install -y tree', Created by extension: tree")
|
||||
}
|
||||
}
|
||||
|
||||
func readOCI(fromPath string) (v1.Image, error) {
|
||||
layoutPath, err := layout.FromPath(fromPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting layout from path: %w", err)
|
||||
}
|
||||
hash, err := v1.NewHash(filepath.Base(fromPath))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting hash from reference '%s': %w", fromPath, err)
|
||||
}
|
||||
v1Image, err := layoutPath.Image(hash)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting image from hash '%s': %w", hash.String(), err)
|
||||
}
|
||||
return v1Image, nil
|
||||
}

@ -4,6 +4,7 @@ import (
"fmt"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"

@ -24,6 +25,9 @@ func TestLauncher(t *testing.T) {
launchTest = NewPhaseTest(t, "launcher", testImageDockerContext, withoutDaemonFixtures, withoutRegistry)

containerBinaryDir := filepath.Join("testdata", "launcher", "linux", "container", "cnb", "lifecycle")
if launchTest.targetDaemon.os == "windows" {
containerBinaryDir = filepath.Join("testdata", "launcher", "windows", "container", "cnb", "lifecycle")
}
withCustomContainerBinaryDir := func(_ *testing.T, phaseTest *PhaseTest) {
phaseTest.containerBinaryDir = containerBinaryDir
}
@ -37,201 +41,314 @@ func TestLauncher(t *testing.T) {
|
|||
}
|
||||
|
||||
func testLauncher(t *testing.T, when spec.G, it spec.S) {
|
||||
when("exec.d", func() {
|
||||
it("executes the binaries and modifies env before running profiles", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm", //nolint
|
||||
"--env=CNB_PLATFORM_API=0.7",
|
||||
"--entrypoint=exec.d-checker"+exe,
|
||||
"--env=VAR_FROM_EXEC_D=orig-val",
|
||||
launchImage)
|
||||
when("Buildpack API >= 0.5", func() {
|
||||
when("exec.d", func() {
|
||||
it("executes the binaries and modifies env before running profiles", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm",
|
||||
"--env=VAR_FROM_EXEC_D=orig-val",
|
||||
launchImage, "exec.d-checker")
|
||||
|
||||
helper := "helper" + exe
|
||||
execDHelper := ctrPath("/layers", execDBpDir, "some_layer/exec.d", helper)
|
||||
execDCheckerHelper := ctrPath("/layers", execDBpDir, "some_layer/exec.d/exec.d-checker", helper)
|
||||
workDir := ctrPath("/workspace")
|
||||
helper := "helper" + exe
|
||||
execDHelper := ctrPath("/layers", execDBpDir, "some_layer/exec.d", helper)
|
||||
execDCheckerHelper := ctrPath("/layers", execDBpDir, "some_layer/exec.d/exec.d-checker", helper)
|
||||
workDir := ctrPath("/workspace")
|
||||
|
||||
expected := fmt.Sprintf("%s was executed\n", execDHelper)
|
||||
expected += fmt.Sprintf("Exec.d Working Dir: %s\n", workDir)
|
||||
expected += fmt.Sprintf("%s was executed\n", execDCheckerHelper)
|
||||
expected += fmt.Sprintf("Exec.d Working Dir: %s\n", workDir)
|
||||
expected += "sourced bp profile\n"
|
||||
expected += "sourced app profile\n"
|
||||
expected += "VAR_FROM_EXEC_D: orig-val:val-from-exec.d:val-from-exec.d-for-process-type-exec.d-checker"
|
||||
expected := fmt.Sprintf("%s was executed\n", execDHelper)
|
||||
expected += fmt.Sprintf("Exec.d Working Dir: %s\n", workDir)
|
||||
expected += fmt.Sprintf("%s was executed\n", execDCheckerHelper)
|
||||
expected += fmt.Sprintf("Exec.d Working Dir: %s\n", workDir)
|
||||
expected += "sourced bp profile\n"
|
||||
expected += "sourced app profile\n"
|
||||
expected += "VAR_FROM_EXEC_D: orig-val:val-from-exec.d:val-from-exec.d-for-process-type-exec.d-checker"
|
||||
|
||||
assertOutput(t, cmd, expected)
|
||||
assertOutput(t, cmd, expected)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("entrypoint is a process", func() {
|
||||
it("launches that process", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm", //nolint
|
||||
"--entrypoint=web",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
launchImage)
|
||||
assertOutput(t, cmd, "Executing web process-type")
|
||||
})
|
||||
|
||||
when("process contains a period", func() {
|
||||
when("Platform API >= 0.4", func() {
|
||||
when("entrypoint is a process", func() {
|
||||
it("launches that process", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm",
|
||||
"--entrypoint=process.with.period"+exe,
|
||||
"--entrypoint=web",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
launchImage)
|
||||
assertOutput(t, cmd, "Executing process.with.period process-type")
|
||||
assertOutput(t, cmd, "Executing web process-type")
|
||||
})
|
||||
|
||||
when("process contains a period", func() {
|
||||
it("launches that process", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm",
|
||||
"--entrypoint=process.with.period"+exe,
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
launchImage)
|
||||
assertOutput(t, cmd, "Executing process.with.period process-type")
|
||||
})
|
||||
})
|
||||
|
||||
it("appends any args to the process args", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm",
|
||||
"--entrypoint=web",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
launchImage, "with user provided args")
|
||||
if runtime.GOOS == "windows" {
|
||||
assertOutput(t, cmd, `Executing web process-type "with user provided args"`)
|
||||
} else {
|
||||
assertOutput(t, cmd, "Executing web process-type with user provided args")
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
it("appends any args to the process args", func() {
|
||||
cmd := exec.Command( //nolint
|
||||
"docker", "run", "--rm",
|
||||
"--entrypoint=web",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
launchImage, "with user provided args",
|
||||
)
|
||||
assertOutput(t, cmd, "Executing web process-type with user provided args")
|
||||
when("entrypoint is a not a process", func() {
|
||||
it("builds a process from the arguments", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm",
|
||||
"--entrypoint=launcher",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
launchImage, "--", "env")
|
||||
if runtime.GOOS == "windows" {
|
||||
cmd = exec.Command("docker", "run", "--rm",
|
||||
`--entrypoint=launcher`,
|
||||
"--env=CNB_PLATFORM_API=0.4",
|
||||
launchImage, "--", "cmd", "/c", "set",
|
||||
)
|
||||
}
|
||||
|
||||
assertOutput(t, cmd,
|
||||
"SOME_VAR=some-bp-val",
|
||||
"OTHER_VAR=other-bp-val",
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
when("CNB_PROCESS_TYPE is set", func() {
|
||||
it("should warn", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm",
|
||||
"--env=CNB_PROCESS_TYPE=direct-process",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
"--env=CNB_NO_COLOR=true",
|
||||
launchImage,
|
||||
)
|
||||
out, err := cmd.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
h.AssertStringContains(t, string(out), "Warning: CNB_PROCESS_TYPE is not supported in Platform API "+latestPlatformAPI)
|
||||
h.AssertStringContains(t, string(out), `Warning: Run with ENTRYPOINT 'direct-process' to invoke the 'direct-process' process type`)
|
||||
h.AssertStringContains(t, string(out), "ERROR: failed to launch: determine start command: when there is no default process a command is required")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("entrypoint is a not a process", func() {
|
||||
it("builds a process from the arguments", func() {
|
||||
cmd := exec.Command( //nolint
|
||||
"docker", "run", "--rm",
|
||||
"--entrypoint=launcher",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
launchImage, "--",
|
||||
"env",
|
||||
)
|
||||
when("Platform API < 0.4", func() {
|
||||
when("there is no CMD provided", func() {
|
||||
when("CNB_PROCESS_TYPE is NOT set", func() {
|
||||
it("web is the default process-type", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm", launchImage)
|
||||
assertOutput(t, cmd, "Executing web process-type")
|
||||
})
|
||||
})
|
||||
|
||||
assertOutput(t, cmd,
|
||||
"SOME_VAR=some-bp-val",
|
||||
"OTHER_VAR=other-bp-val",
|
||||
)
|
||||
when("CNB_PROCESS_TYPE is set", func() {
|
||||
it("should run the specified CNB_PROCESS_TYPE", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm", "--env", "CNB_PROCESS_TYPE=direct-process", launchImage)
|
||||
if runtime.GOOS == "windows" {
|
||||
assertOutput(t, cmd, "Usage: ping")
|
||||
} else {
|
||||
assertOutput(t, cmd, "Executing direct-process process-type")
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("CNB_PROCESS_TYPE is set", func() {
|
||||
it("should warn", func() {
|
||||
when("process-type provided in CMD", func() {
|
||||
it("launches that process-type", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "direct-process")
|
||||
expected := "Executing direct-process process-type"
|
||||
if runtime.GOOS == "windows" {
|
||||
expected = "Usage: ping"
|
||||
}
|
||||
assertOutput(t, cmd, expected)
|
||||
})
|
||||
|
||||
it("sets env vars from process specific directories", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "worker")
|
||||
expected := "worker-process-val"
|
||||
assertOutput(t, cmd, expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("process is direct=false", func() {
|
||||
when("the process type has no args", func() {
|
||||
it("runs command as script", func() {
|
||||
h.SkipIf(t, runtime.GOOS == "windows", "scripts are unsupported on windows")
|
||||
cmd := exec.Command("docker", "run", "--rm",
|
||||
"--env", "VAR1=val1",
|
||||
"--env", "VAR2=val with space",
|
||||
launchImage, "indirect-process-with-script",
|
||||
)
|
||||
assertOutput(t, cmd, "'val1' 'val with space'")
|
||||
})
|
||||
})
|
||||
|
||||
when("the process type has args", func() {
|
||||
when("buildpack API 0.4", func() {
|
||||
// buildpack API is determined by looking up the API of the process buildpack in metadata.toml
|
||||
|
||||
it("command and args become shell-parsed tokens in a script", func() {
|
||||
var val2 string
|
||||
if runtime.GOOS == "windows" {
|
||||
val2 = `"val with space"` // windows values with spaces must contain quotes
|
||||
} else {
|
||||
val2 = "val with space"
|
||||
}
|
||||
cmd := exec.Command("docker", "run", "--rm",
|
||||
"--env", "VAR1=val1",
|
||||
"--env", "VAR2="+val2,
|
||||
launchImage, "indirect-process-with-args",
|
||||
) // #nosec G204
|
||||
assertOutput(t, cmd, "'val1' 'val with space'")
|
||||
})
|
||||
})
|
||||
|
||||
when("buildpack API < 0.4", func() {
|
||||
// buildpack API is determined by looking up the API of the process buildpack in metadata.toml
|
||||
|
||||
it("args become arguments to bash", func() {
|
||||
h.SkipIf(t, runtime.GOOS == "windows", "scripts are unsupported on windows")
|
||||
cmd := exec.Command("docker", "run", "--rm",
|
||||
launchImage, "legacy-indirect-process-with-args",
|
||||
)
|
||||
assertOutput(t, cmd, "'arg' 'arg with spaces'")
|
||||
})
|
||||
|
||||
it("script must be explicitly written to accept bash args", func() {
|
||||
h.SkipIf(t, runtime.GOOS == "windows", "scripts are unsupported on windows")
|
||||
cmd := exec.Command("docker", "run", "--rm",
|
||||
launchImage, "legacy-indirect-process-with-incorrect-args",
|
||||
)
|
||||
output, err := cmd.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
h.AssertStringContains(t, string(output), "printf: usage: printf [-v var] format [arguments]")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
it("sources scripts from process specific directories", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "profile-checker")
|
||||
expected := "sourced bp profile\nsourced bp profile-checker profile\nsourced app profile\nval-from-profile"
|
||||
assertOutput(t, cmd, expected)
|
||||
})
|
||||
})
|
||||
|
||||
it("respects CNB_APP_DIR and CNB_LAYERS_DIR environment variables", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm",
|
||||
"--env=CNB_PROCESS_TYPE=direct-process",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
"--env=CNB_NO_COLOR=true",
|
||||
launchImage,
|
||||
)
|
||||
out, err := cmd.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
h.AssertStringContains(t, string(out), "Warning: CNB_PROCESS_TYPE is not supported in Platform API "+latestPlatformAPI)
|
||||
h.AssertStringContains(t, string(out), `Warning: Run with ENTRYPOINT 'direct-process' to invoke the 'direct-process' process type`)
|
||||
h.AssertStringContains(t, string(out), "ERROR: failed to launch: determine start command: when there is no default process a command is required")
|
||||
})
|
||||
})
|
||||
|
||||
when("provided CMD is not a process-type", func() {
|
||||
it("sources profiles and executes the command in a shell", func() {
|
||||
cmd := exec.Command( //nolint
|
||||
"docker", "run", "--rm",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
launchImage,
|
||||
"echo", "something",
|
||||
)
|
||||
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsomething")
|
||||
"--env", "CNB_APP_DIR="+ctrPath("/other-app"),
|
||||
"--env", "CNB_LAYERS_DIR=/other-layers",
|
||||
launchImage) // #nosec G204
|
||||
assertOutput(t, cmd, "sourced other app profile\nExecuting other-layers web process-type")
|
||||
})
|
||||
|
||||
it("sets env vars from layers", func() {
|
||||
cmd := exec.Command( //nolint
|
||||
"docker", "run", "--rm",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
launchImage,
|
||||
"echo", "$SOME_VAR", "$OTHER_VAR", "$WORKER_VAR",
|
||||
)
|
||||
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-bp-val other-bp-val worker-no-process-val")
|
||||
when("provided CMD is not a process-type", func() {
|
||||
it("sources profiles and executes the command in a shell", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "echo", "something")
|
||||
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsomething")
|
||||
})
|
||||
|
||||
it("sets env vars from layers", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "echo", "$SOME_VAR", "$OTHER_VAR", "$WORKER_VAR")
|
||||
if runtime.GOOS == "windows" {
|
||||
cmd = exec.Command("docker", "run", "--rm", launchImage, "echo", "%SOME_VAR%", "%OTHER_VAR%", "%WORKER_VAR%")
|
||||
}
|
||||
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-bp-val other-bp-val worker-no-process-val")
|
||||
})
|
||||
|
||||
it("passes through env vars from user, excluding excluded vars", func() {
|
||||
args := []string{"echo", "$SOME_USER_VAR, $CNB_APP_DIR, $OTHER_VAR"}
|
||||
if runtime.GOOS == "windows" {
|
||||
args = []string{"echo", "%SOME_USER_VAR%, %CNB_APP_DIR%, %OTHER_VAR%"}
|
||||
}
|
||||
cmd := exec.Command("docker",
|
||||
append(
|
||||
[]string{
|
||||
"run", "--rm",
|
||||
"--env", "CNB_APP_DIR=" + ctrPath("/workspace"),
|
||||
"--env", "SOME_USER_VAR=some-user-val",
|
||||
"--env", "OTHER_VAR=other-user-val",
|
||||
launchImage,
|
||||
},
|
||||
args...)...,
|
||||
) // #nosec G204
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
// windows values with spaces will contain quotes
|
||||
// empty values on windows preserve variable names instead of interpolating to empty strings
|
||||
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\n\"some-user-val, %CNB_APP_DIR%, other-user-val**other-bp-val\"")
|
||||
} else {
|
||||
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-user-val, , other-user-val**other-bp-val")
|
||||
}
|
||||
})
|
||||
|
||||
it("adds buildpack bin dirs to the path", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "bp-executable")
|
||||
assertOutput(t, cmd, "bp executable")
|
||||
})
|
||||
})
|
||||
|
||||
it("passes through env vars from user, excluding excluded vars", func() {
|
||||
args := []string{"echo", "$SOME_USER_VAR, $CNB_APP_DIR, $OTHER_VAR"}
|
||||
cmd := exec.Command("docker",
|
||||
append(
|
||||
[]string{
|
||||
"run", "--rm",
|
||||
"--env", "CNB_APP_DIR=" + ctrPath("/workspace"),
|
||||
"--env=CNB_PLATFORM_API=" + latestPlatformAPI,
|
||||
when("CMD provided starts with --", func() {
|
||||
it("launches command directly", func() {
|
||||
if runtime.GOOS == "windows" {
|
||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "--", "ping", "/?")
|
||||
assertOutput(t, cmd, "Usage: ping")
|
||||
} else {
|
||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "--", "echo", "something")
|
||||
assertOutput(t, cmd, "something")
|
||||
}
|
||||
})
|
||||
|
||||
it("sets env vars from layers", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "--", "env")
|
||||
if runtime.GOOS == "windows" {
|
||||
cmd = exec.Command("docker", "run", "--rm", launchImage, "--", "cmd", "/c", "set")
|
||||
}
|
||||
|
||||
assertOutput(t, cmd,
|
||||
"SOME_VAR=some-bp-val",
|
||||
"OTHER_VAR=other-bp-val",
|
||||
)
|
||||
})
|
||||
|
||||
it("passes through env vars from user, excluding excluded vars", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm",
|
||||
"--env", "CNB_APP_DIR=/workspace",
|
||||
"--env", "SOME_USER_VAR=some-user-val",
|
||||
launchImage, "--",
|
||||
"env",
|
||||
)
|
||||
if runtime.GOOS == "windows" {
|
||||
cmd = exec.Command("docker", "run", "--rm",
|
||||
"--env", "CNB_APP_DIR=/workspace",
|
||||
"--env", "SOME_USER_VAR=some-user-val",
|
||||
"--env", "OTHER_VAR=other-user-val",
|
||||
launchImage,
|
||||
},
|
||||
args...)...,
|
||||
) // #nosec G204
|
||||
launchImage, "--",
|
||||
"cmd", "/c", "set",
|
||||
)
|
||||
}
|
||||
|
||||
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-user-val, , other-user-val**other-bp-val")
|
||||
})
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
||||
}
|
||||
expected := "SOME_USER_VAR=some-user-val"
|
||||
if !strings.Contains(string(output), expected) {
|
||||
t.Fatalf("failed to execute provided CMD:\n\t got: %s\n\t want: %s", output, expected)
|
||||
}
|
||||
|
||||
it("adds buildpack bin dirs to the path", func() {
|
||||
cmd := exec.Command( //nolint
|
||||
"docker", "run", "--rm",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
launchImage,
|
||||
"bp-executable",
|
||||
)
|
||||
assertOutput(t, cmd, "bp executable")
|
||||
})
|
||||
})
|
||||
if strings.Contains(string(output), "CNB_APP_DIR") {
t.Fatalf("env contained excluded env var CNB_APP_DIR:\n\t got: %s\n", output)
}
|
||||
})
|
||||
|
||||
when("CMD provided starts with --", func() {
|
||||
it("launches command directly", func() {
|
||||
cmd := exec.Command( //nolint
|
||||
"docker", "run", "--rm",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
launchImage, "--",
|
||||
"echo", "something",
|
||||
)
|
||||
assertOutput(t, cmd, "something")
|
||||
})
|
||||
|
||||
it("sets env vars from layers", func() {
|
||||
cmd := exec.Command( //nolint
|
||||
"docker", "run", "--rm",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
launchImage, "--",
|
||||
"env",
|
||||
)
|
||||
|
||||
assertOutput(t, cmd,
|
||||
"SOME_VAR=some-bp-val",
|
||||
"OTHER_VAR=other-bp-val",
|
||||
)
|
||||
})
|
||||
|
||||
it("passes through env vars from user, excluding excluded vars", func() {
|
||||
cmd := exec.Command( //nolint
|
||||
"docker", "run", "--rm",
|
||||
"--env", "CNB_APP_DIR=/workspace",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
"--env", "SOME_USER_VAR=some-user-val",
|
||||
launchImage, "--",
|
||||
"env",
|
||||
)
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
||||
}
|
||||
expected := "SOME_USER_VAR=some-user-val"
|
||||
if !strings.Contains(string(output), expected) {
|
||||
t.Fatalf("failed to execute provided CMD:\n\t got: %s\n\t want: %s", output, expected)
|
||||
}
|
||||
|
||||
if strings.Contains(string(output), "CNB_APP_DIR") {
t.Fatalf("env contained excluded env var CNB_APP_DIR:\n\t got: %s\n", output)
}
|
||||
})
|
||||
|
||||
it("adds buildpack bin dirs to the path before looking up command", func() {
|
||||
cmd := exec.Command( //nolint
|
||||
"docker", "run", "--rm",
|
||||
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||
launchImage, "--",
|
||||
"bp-executable",
|
||||
)
|
||||
assertOutput(t, cmd, "bp executable")
|
||||
it("adds buildpack bin dirs to the path before looking up command", func() {
|
||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "--", "bp-executable")
|
||||
assertOutput(t, cmd, "bp executable")
|
||||
})
|
||||
})
|
||||
})
|
||||
}
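
The comments in the tests above note that the buildpack API for a process is determined by looking up the API of the process's buildpack in metadata.toml. A minimal sketch of that lookup, assuming a metadata.toml with [[processes]] and [[buildpacks]] tables; the struct and field names here are illustrative, not the lifecycle's actual launch types, and the BurntSushi/toml parser is used for decoding:

package sketch

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// metadataTOML mirrors only the fields needed for the lookup; the real type lives in the lifecycle codebase.
type metadataTOML struct {
	Processes []struct {
		Type        string `toml:"type"`
		BuildpackID string `toml:"buildpack-id"`
	} `toml:"processes"`
	Buildpacks []struct {
		ID  string `toml:"id"`
		API string `toml:"api"`
	} `toml:"buildpacks"`
}

// buildpackAPIForProcess returns the API of the buildpack that contributed the given process type.
func buildpackAPIForProcess(path, processType string) (string, error) {
	var md metadataTOML
	if _, err := toml.DecodeFile(path, &md); err != nil {
		return "", err
	}
	for _, p := range md.Processes {
		if p.Type != processType {
			continue
		}
		for _, bp := range md.Buildpacks {
			if bp.ID == p.BuildpackID {
				return bp.API, nil
			}
		}
	}
	return "", fmt.Errorf("no buildpack found for process %q", processType)
}
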
|
||||
|
|
|
@ -2,8 +2,6 @@ package acceptance
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -12,22 +10,15 @@ import (
|
|||
"os/exec"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/image"
|
||||
|
||||
ih "github.com/buildpacks/imgutil/testhelpers"
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/registry"
|
||||
|
||||
"github.com/buildpacks/lifecycle/auth"
|
||||
"github.com/buildpacks/lifecycle/cmd"
|
||||
"github.com/buildpacks/lifecycle/internal/encoding"
|
||||
"github.com/buildpacks/lifecycle/platform"
|
||||
"github.com/buildpacks/lifecycle/platform/files"
|
||||
h "github.com/buildpacks/lifecycle/testhelpers"
|
||||
)
|
||||
|
||||
|
@ -130,29 +121,7 @@ func (p *PhaseTest) Start(t *testing.T, phaseOp ...func(*testing.T, *PhaseTest))
|
|||
}
|
||||
|
||||
h.MakeAndCopyLifecycle(t, p.targetDaemon.os, p.targetDaemon.arch, p.containerBinaryDir)
|
||||
// calculate lifecycle digest
|
||||
hasher := sha256.New()
|
||||
f, err := os.Open(filepath.Join(p.containerBinaryDir, "lifecycle"+exe)) //#nosec G304
|
||||
h.AssertNil(t, err)
|
||||
_, err = io.Copy(hasher, f)
|
||||
h.AssertNil(t, err)
|
||||
t.Logf("Built lifecycle binary with digest: %s", hex.EncodeToString(hasher.Sum(nil)))
|
||||
|
||||
copyFakeSboms(t)
|
||||
h.DockerBuild(
|
||||
t,
|
||||
p.testImageRef,
|
||||
p.testImageDockerContext,
|
||||
h.WithArgs("-f", filepath.Join(p.testImageDockerContext, dockerfileName)),
|
||||
)
|
||||
t.Logf("Using image %s with lifecycle version %s",
|
||||
p.testImageRef,
|
||||
h.DockerRun(
|
||||
t,
|
||||
p.testImageRef,
|
||||
h.WithFlags("--env", "CNB_PLATFORM_API="+latestPlatformAPI, "--entrypoint", ctrPath("/cnb/lifecycle/lifecycle"+exe)),
|
||||
h.WithArgs("-version"),
|
||||
))
|
||||
h.DockerBuild(t, p.testImageRef, p.testImageDockerContext, h.WithArgs("-f", filepath.Join(p.testImageDockerContext, dockerfileName)))
|
||||
}
|
||||
|
||||
func (p *PhaseTest) Stop(t *testing.T) {
|
||||
|
@ -174,7 +143,7 @@ func (d *targetDaemon) createFixtures(t *testing.T) {
|
|||
|
||||
var fixtures daemonImageFixtures
|
||||
|
||||
appMeta := minifyMetadata(t, filepath.Join("testdata", "app_image_metadata.json"), files.LayersMetadata{})
|
||||
appMeta := minifyMetadata(t, filepath.Join("testdata", "app_image_metadata.json"), platform.LayersMetadata{})
|
||||
cacheMeta := minifyMetadata(t, filepath.Join("testdata", "cache_image_metadata.json"), platform.CacheMetadata{})
|
||||
|
||||
fixtures.AppImage = "some-app-image-" + h.RandString(10)
|
||||
|
@ -247,7 +216,7 @@ func (r *targetRegistry) start(t *testing.T) {
|
|||
func (r *targetRegistry) createFixtures(t *testing.T) {
|
||||
var fixtures regImageFixtures
|
||||
|
||||
appMeta := minifyMetadata(t, filepath.Join("testdata", "app_image_metadata.json"), files.LayersMetadata{})
|
||||
appMeta := minifyMetadata(t, filepath.Join("testdata", "app_image_metadata.json"), platform.LayersMetadata{})
|
||||
cacheMeta := minifyMetadata(t, filepath.Join("testdata", "cache_image_metadata.json"), platform.CacheMetadata{})
|
||||
|
||||
// With Permissions
|
||||
|
@@ -394,129 +363,3 @@ func withoutDaemonFixtures(phaseTest *PhaseTest) {
func withoutRegistry(phaseTest *PhaseTest) {
phaseTest.targetRegistry = nil
}

func copyFakeSboms(t *testing.T) {
goos := runtime.GOOS

// Check Target Daemon != runtime.GOOS
if goos == "darwin" {
goos = "linux"
}
buildLifecycleDir, err := filepath.Abs(filepath.Join("..", "out", fmt.Sprintf("%s-%s", goos, runtime.GOARCH), "lifecycle"))
if err != nil {
t.Log("Failed to locate lifecycle directory")
}

extensions := SBOMExtensions()
components := SBOMComponents()

for _, component := range components {
for _, extension := range extensions {
if err := encoding.WriteJSON(filepath.Join(buildLifecycleDir, component+extension), "fake data"); err != nil {
t.Log("Failed to write: " + component + extension)
}
}
}
}

func SBOMExtensions() []string {
return []string{".sbom.cdx.json", ".sbom.spdx.json", ".sbom.syft.json"}
}

func SBOMComponents() []string {
return []string{"lifecycle", "launcher"}
}

func assertImageOSAndArch(t *testing.T, imageName string, phaseTest *PhaseTest) { //nolint - these functions are in fact used, i promise
|
||||
inspect, err := h.DockerCli(t).ImageInspect(context.TODO(), imageName)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, inspect.Os, phaseTest.targetDaemon.os)
|
||||
h.AssertEq(t, inspect.Architecture, phaseTest.targetDaemon.arch)
|
||||
}
|
||||
|
||||
func assertImageOSAndArchAndCreatedAt(t *testing.T, imageName string, phaseTest *PhaseTest, expectedCreatedAt time.Time) { //nolint
|
||||
inspect, err := h.DockerCli(t).ImageInspect(context.TODO(), imageName)
|
||||
|
||||
if err != nil {
list, _ := h.DockerCli(t).ImageList(context.TODO(), image.ListOptions{})
fmt.Println("Error encountered running ImageInspect. imageName: ", imageName)
fmt.Println(err)
for _, value := range list {
fmt.Println("Image Name: ", value)
}

if strings.Contains(err.Error(), "No such image") {
t.Log("Image not found, retrying...")
time.Sleep(1 * time.Second)
inspect, err = h.DockerCli(t).ImageInspect(context.TODO(), imageName)
}
}
|
||||
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, inspect.Os, phaseTest.targetDaemon.os)
|
||||
h.AssertEq(t, inspect.Architecture, phaseTest.targetDaemon.arch)
|
||||
h.AssertEq(t, inspect.Created, expectedCreatedAt.Format(time.RFC3339))
|
||||
}
|
||||
|
||||
func assertRunMetadata(t *testing.T, path string) *files.Run { //nolint
|
||||
contents, err := os.ReadFile(path)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, len(contents) > 0, true)
|
||||
|
||||
runMD, err := files.Handler.ReadRun(path, cmd.DefaultLogger)
|
||||
h.AssertNil(t, err)
|
||||
|
||||
return &runMD
|
||||
}
|
||||
|
||||
func updateTOMLFixturesWithTestRegistry(t *testing.T, phaseTest *PhaseTest) { //nolint
|
||||
analyzedTOMLPlaceholders := []string{
|
||||
filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "analyzed.toml.placeholder"),
|
||||
filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "run-image-extended-analyzed.toml.placeholder"),
|
||||
filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "some-analyzed.toml.placeholder"),
|
||||
filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "some-extend-false-analyzed.toml.placeholder"),
|
||||
filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "some-extend-true-analyzed.toml.placeholder"),
|
||||
filepath.Join(phaseTest.testImageDockerContext, "container", "other_layers", "analyzed.toml.placeholder"),
|
||||
}
|
||||
runTOMLPlaceholders := []string{
|
||||
filepath.Join(phaseTest.testImageDockerContext, "container", "cnb", "run.toml.placeholder"),
|
||||
}
|
||||
layoutPlaceholders := []string{
|
||||
filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "layout-analyzed.toml.placeholder"),
|
||||
}
|
||||
for _, pPath := range analyzedTOMLPlaceholders {
|
||||
if _, err := os.Stat(pPath); os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
analyzedMD := assertAnalyzedMetadata(t, pPath)
|
||||
if analyzedMD.RunImage != nil {
|
||||
analyzedMD.RunImage.Reference = phaseTest.targetRegistry.fixtures.ReadOnlyRunImage // don't override extend
|
||||
if analyzedMD.RunImage.Image == "REPLACE" {
|
||||
analyzedMD.RunImage.Image = phaseTest.targetRegistry.fixtures.ReadOnlyRunImage
|
||||
}
|
||||
}
|
||||
h.AssertNil(t, encoding.WriteTOML(strings.TrimSuffix(pPath, ".placeholder"), analyzedMD))
|
||||
}
|
||||
for _, pPath := range runTOMLPlaceholders {
|
||||
if _, err := os.Stat(pPath); os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
runMD := assertRunMetadata(t, pPath)
|
||||
for idx, image := range runMD.Images {
|
||||
image.Image = phaseTest.targetRegistry.fixtures.ReadOnlyRunImage
|
||||
runMD.Images[idx] = image
|
||||
}
|
||||
h.AssertNil(t, encoding.WriteTOML(strings.TrimSuffix(pPath, ".placeholder"), runMD))
|
||||
}
|
||||
for _, pPath := range layoutPlaceholders {
|
||||
if _, err := os.Stat(pPath); os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
analyzedMD := assertAnalyzedMetadata(t, pPath)
|
||||
if analyzedMD.RunImage != nil {
|
||||
// Values from image acceptance/testdata/exporter/container/layout-repo in OCI layout format
|
||||
analyzedMD.RunImage = &files.RunImage{Reference: "/layout-repo/index.docker.io/library/busybox/latest@sha256:445c45cc89fdeb64b915b77f042e74ab580559b8d0d5ef6950be1c0265834c33"}
|
||||
}
|
||||
h.AssertNil(t, encoding.WriteTOML(strings.TrimSuffix(pPath, ".placeholder"), analyzedMD))
|
||||
}
|
||||
}
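
The loop above reads each *.toml.placeholder fixture, points it at the test registry's run image, and writes it back without the ".placeholder" suffix. A condensed restatement of that pattern as a standalone helper; renderPlaceholder is a hypothetical name, it reads the placeholder directly with files.Handler.ReadAnalyzed (rather than the assertAnalyzedMetadata helper used above), and it assumes it sits inside the lifecycle module so it can import internal/encoding:

package acceptance

import (
	"strings"

	"github.com/buildpacks/lifecycle/cmd"
	"github.com/buildpacks/lifecycle/internal/encoding"
	"github.com/buildpacks/lifecycle/platform/files"
)

// renderPlaceholder rewrites a placeholder analyzed.toml to reference the given run image
// and writes the rendered file next to it, without the ".placeholder" suffix.
func renderPlaceholder(placeholderPath, runImage string) error {
	analyzedMD, err := files.Handler.ReadAnalyzed(placeholderPath, cmd.DefaultLogger)
	if err != nil {
		return err
	}
	if analyzedMD.RunImage != nil {
		analyzedMD.RunImage.Reference = runImage // don't override extend
		if analyzedMD.RunImage.Image == "REPLACE" {
			analyzedMD.RunImage.Image = runImage
		}
	}
	return encoding.WriteTOML(strings.TrimSuffix(placeholderPath, ".placeholder"), analyzedMD)
}
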
|
||||
|
|
|
@ -1,58 +0,0 @@
|
|||
//go:build acceptance
|
||||
|
||||
package acceptance
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/sclevine/spec"
|
||||
"github.com/sclevine/spec/report"
|
||||
|
||||
"github.com/buildpacks/lifecycle/api"
|
||||
h "github.com/buildpacks/lifecycle/testhelpers"
|
||||
)
|
||||
|
||||
var (
|
||||
rebaserTest *PhaseTest
|
||||
rebaserPath string
|
||||
rebaserImage string
|
||||
)
|
||||
|
||||
func TestRebaser(t *testing.T) {
|
||||
testImageDockerContextFolder := filepath.Join("testdata", "rebaser")
|
||||
rebaserTest = NewPhaseTest(t, "rebaser", testImageDockerContextFolder)
|
||||
rebaserTest.Start(t, updateTOMLFixturesWithTestRegistry)
|
||||
defer rebaserTest.Stop(t)
|
||||
|
||||
rebaserImage = rebaserTest.testImageRef
|
||||
rebaserPath = rebaserTest.containerBinaryPath
|
||||
|
||||
for _, platformAPI := range api.Platform.Supported {
|
||||
spec.Run(t, "acceptance-rebaser/"+platformAPI.String(), testRebaser(platformAPI.String()), spec.Sequential(), spec.Report(report.Terminal{}))
|
||||
}
|
||||
}
|
||||
|
||||
func testRebaser(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
|
||||
return func(t *testing.T, when spec.G, it spec.S) {
|
||||
when("called with insecure registry flag", func() {
|
||||
it.Before(func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
|
||||
})
|
||||
it("should do an http request", func() {
|
||||
insecureRegistry := "host.docker.internal"
|
||||
rebaserOutputImageName := insecureRegistry + "/bar"
|
||||
_, _, err := h.DockerRunWithError(t,
|
||||
rebaserImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_INSECURE_REGISTRIES="+insecureRegistry,
|
||||
),
|
||||
h.WithArgs(ctrPath(rebaserPath), rebaserOutputImageName),
|
||||
)
|
||||
|
||||
h.AssertStringContains(t, err.Error(), "http://host.docker.internal")
|
||||
})
|
||||
})
|
||||
}
|
||||
}
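
The rebaser test above only asserts that, with CNB_INSECURE_REGISTRIES set, the failing request goes to http://host.docker.internal. For context, a minimal sketch of one way a client can force plain-HTTP access to a registry with go-containerregistry (the library these tests already import); the helper name and registry host are illustrative, and this is not claimed to be the lifecycle's exact code path:

package sketch

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

// headInsecure resolves an image reference over plain HTTP, roughly the behavior the test asserts
// for registries listed in CNB_INSECURE_REGISTRIES.
func headInsecure(image string) error {
	// name.Insecure makes the registry portion of the reference resolve to an http:// scheme.
	ref, err := name.ParseReference(image, name.Insecure)
	if err != nil {
		return err
	}
	desc, err := remote.Head(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
	if err != nil {
		return err
	}
	fmt.Println("digest:", desc.Digest)
	return nil
}
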
|
|
@ -1,25 +1,25 @@
|
|||
//go:build acceptance
|
||||
// +build acceptance
|
||||
|
||||
package acceptance
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/sclevine/spec"
|
||||
"github.com/sclevine/spec/report"
|
||||
|
||||
"github.com/buildpacks/lifecycle"
|
||||
"github.com/buildpacks/lifecycle/api"
|
||||
"github.com/buildpacks/lifecycle/cmd"
|
||||
"github.com/buildpacks/lifecycle/platform/files"
|
||||
h "github.com/buildpacks/lifecycle/testhelpers"
|
||||
)
|
||||
|
||||
const emptyImageSHA = "03cbce912ef1a8a658f73c660ab9c539d67188622f00b15c4f15b89b884f0e10"
|
||||
|
||||
var (
|
||||
restoreImage string
|
||||
restoreRegAuthConfig string
|
||||
|
@ -31,9 +31,14 @@ var (
|
|||
)
|
||||
|
||||
func TestRestorer(t *testing.T) {
|
||||
h.SkipIf(t, runtime.GOOS == "windows", "Restorer acceptance tests are not yet supported on Windows")
|
||||
h.SkipIf(t, runtime.GOARCH != "amd64", "Restorer acceptance tests are not yet supported on non-amd64")
|
||||
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
|
||||
testImageDockerContext := filepath.Join("testdata", "restorer")
|
||||
restoreTest = NewPhaseTest(t, "restorer", testImageDockerContext)
|
||||
restoreTest.Start(t, updateTOMLFixturesWithTestRegistry)
|
||||
restoreTest.Start(t)
|
||||
defer restoreTest.Stop(t)
|
||||
|
||||
restoreImage = restoreTest.testImageRef
|
||||
|
@ -67,7 +72,7 @@ func testRestorerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
|
||||
when("called with arguments", func() {
|
||||
it("errors", func() {
|
||||
command := exec.Command("docker", "run", "--rm", "--env", "CNB_PLATFORM_API="+platformAPI, restoreImage, "some-arg")
|
||||
command := exec.Command("docker", "run", "--rm", restoreImage, "some-arg")
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "failed to parse arguments: received unexpected Args"
|
||||
|
@ -75,18 +80,41 @@ func testRestorerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
})
|
||||
})
|
||||
|
||||
when("called with -analyzed", func() {
|
||||
it("errors", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 supports -analyzed flag")
|
||||
command := exec.Command("docker", "run", "--rm", restoreImage, "-analyzed some-file-location")
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "flag provided but not defined: -analyzed"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("called with -skip-layers", func() {
|
||||
it("errors", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).AtLeast("0.7"), "Platform API >= 0.7 supports -skip-layers flag")
|
||||
command := exec.Command("docker", "run", "--rm", restoreImage, "-skip-layers true")
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNotNil(t, err)
|
||||
expected := "flag provided but not defined: -skip-layers"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("called without any cache flag", func() {
|
||||
it("outputs it will not restore cache layer data", func() {
|
||||
command := exec.Command("docker", "run", "--rm", "--env", "CNB_PLATFORM_API="+platformAPI, restoreImage)
|
||||
output, err := command.CombinedOutput()
|
||||
h.AssertNil(t, err)
|
||||
expected := "No cached data will be used, no cache specified"
|
||||
expected := "Not restoring cached layer data, no cache flag specified"
|
||||
h.AssertStringContains(t, string(output), expected)
|
||||
})
|
||||
})
|
||||
|
||||
when("analyzed.toml exists with app metadata", func() {
|
||||
it("restores app metadata", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.7"), "Platform API < 0.7 does not restore app metadata")
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
|
@ -101,27 +129,6 @@ func testRestorerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
|
||||
h.AssertStringContains(t, output, "Restoring metadata for \"some-buildpack-id:launch-layer\"")
|
||||
})
|
||||
|
||||
when("restores app metadata using an insecure registry", func() {
|
||||
it.Before(func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
|
||||
})
|
||||
it("does an http request ", func() {
|
||||
insecureRegistry := "host.docker.internal"
|
||||
|
||||
_, _, err := h.DockerRunWithError(t,
|
||||
restoreImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "CNB_INSECURE_REGISTRIES="+insecureRegistry,
|
||||
"--env", "CNB_BUILD_IMAGE="+insecureRegistry+"/bar",
|
||||
)...),
|
||||
)
|
||||
|
||||
h.AssertStringContains(t, err.Error(), "http://host.docker.internal")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("using cache-dir", func() {
|
||||
|
@ -147,7 +154,7 @@ func testRestorerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
})
|
||||
|
||||
it("does not restore cache=true layers not in cache", func() {
|
||||
h.DockerRunAndCopy(t,
|
||||
output := h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/layers",
|
||||
|
@ -159,9 +166,12 @@ func testRestorerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
// check uncached layer is not restored
|
||||
uncachedFile := filepath.Join(copyDir, "layers", "cacher_buildpack", "uncached-layer")
|
||||
h.AssertPathDoesNotExist(t, uncachedFile)
|
||||
|
||||
// check output to confirm why this layer was not restored from cache
|
||||
h.AssertStringContains(t, string(output), "Removing \"cacher_buildpack:layer-not-in-cache\", not in cache")
|
||||
})
|
||||
|
||||
it("does not restore layer data from unused buildpacks", func() {
|
||||
it("does not restore unused buildpack layer data", func() {
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
|
@ -175,27 +185,12 @@ func testRestorerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
unusedBpLayer := filepath.Join(copyDir, "layers", "unused_buildpack")
|
||||
h.AssertPathDoesNotExist(t, unusedBpLayer)
|
||||
})
|
||||
|
||||
it("does not restore corrupted layer data", func() {
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/layers",
|
||||
restoreImage,
|
||||
h.WithFlags("--env", "CNB_PLATFORM_API="+platformAPI),
|
||||
h.WithArgs("-cache-dir", "/cache"),
|
||||
)
|
||||
|
||||
// check corrupted layer is not restored
|
||||
corruptedFile := filepath.Join(copyDir, "layers", "corrupted_buildpack", "corrupted-layer")
|
||||
h.AssertPathDoesNotExist(t, corruptedFile)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
when("restoring builder image metadata for extensions", func() {
|
||||
it("accepts -build-image and saves the metadata to /kaniko/cache", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.10"), "Platform API < 0.10 does not restore builder image metadata")
|
||||
when("using kaniko cache", func() {
|
||||
it("accepts -build-image", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.10"), "Platform API < 0.10 does not use kaniko")
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
|
@ -209,129 +204,14 @@ func testRestorerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
|
|||
h.WithArgs("-build-image", restoreRegFixtures.SomeCacheImage), // some-cache-image simulates a builder image in a registry
|
||||
)
|
||||
t.Log("records builder image digest in analyzed.toml")
|
||||
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "analyzed.toml"), cmd.DefaultLogger)
|
||||
analyzedMD, err := lifecycle.Config.ReadAnalyzed(filepath.Join(copyDir, "layers", "analyzed.toml"))
|
||||
h.AssertNil(t, err)
|
||||
h.AssertStringContains(t, analyzedMD.BuildImage.Reference, restoreRegFixtures.SomeCacheImage+"@sha256:")
|
||||
t.Log("writes builder manifest and config to the kaniko cache")
|
||||
ref, err := name.ParseReference(analyzedMD.BuildImage.Reference)
|
||||
h.AssertNil(t, err)
|
||||
fis, err := os.ReadDir(filepath.Join(copyDir, "kaniko", "cache", "base"))
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, len(fis), 1)
|
||||
h.AssertPathExists(t, filepath.Join(copyDir, "kaniko", "cache", "base", ref.Identifier(), "oci-layout"))
|
||||
})
|
||||
})
|
||||
|
||||
when("restoring run image metadata for extensions", func() {
|
||||
it("saves metadata to /kaniko/cache", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not restore run image metadata")
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/",
|
||||
restoreImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "DOCKER_CONFIG=/docker-config",
|
||||
"--network", restoreRegNetwork,
|
||||
),
|
||||
h.WithArgs(
|
||||
"-analyzed", "/layers/some-extend-true-analyzed.toml",
|
||||
"-log-level", "debug",
|
||||
),
|
||||
)
|
||||
t.Log("updates run image reference in analyzed.toml to include digest and target data")
|
||||
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "some-extend-true-analyzed.toml"), cmd.DefaultLogger)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertStringContains(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:")
|
||||
h.AssertEq(t, analyzedMD.RunImage.Image, restoreRegFixtures.ReadOnlyRunImage)
|
||||
h.AssertEq(t, analyzedMD.RunImage.TargetMetadata.OS, "linux")
|
||||
t.Log("does not return the digest for an empty image")
|
||||
h.AssertStringDoesNotContain(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:"+emptyImageSHA)
|
||||
t.Log("writes run image manifest and config to the kaniko cache")
|
||||
ref, err := name.ParseReference(analyzedMD.RunImage.Reference)
|
||||
h.AssertNil(t, err)
|
||||
fis, err := os.ReadDir(filepath.Join(copyDir, "kaniko", "cache", "base"))
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, len(fis), 1)
|
||||
h.AssertPathExists(t, filepath.Join(copyDir, "kaniko", "cache", "base", ref.Identifier(), "oci-layout"))
|
||||
})
|
||||
})
|
||||
|
||||
when("target data", func() {
|
||||
it("updates run image reference in analyzed.toml to include digest and target data on newer platforms", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.10"), "")
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/",
|
||||
restoreImage,
|
||||
h.WithFlags(
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "DOCKER_CONFIG=/docker-config",
|
||||
"--network", restoreRegNetwork,
|
||||
),
|
||||
h.WithArgs(
|
||||
"-analyzed", "/layers/some-extend-false-analyzed.toml",
|
||||
"-log-level", "debug",
|
||||
),
|
||||
)
|
||||
if api.MustParse(platformAPI).AtLeast("0.12") {
|
||||
t.Log("updates run image reference in analyzed.toml to include digest and target data")
|
||||
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "some-extend-false-analyzed.toml"), cmd.DefaultLogger)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertStringContains(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:")
|
||||
h.AssertEq(t, analyzedMD.RunImage.Image, restoreRegFixtures.ReadOnlyRunImage)
|
||||
h.AssertEq(t, analyzedMD.RunImage.TargetMetadata.OS, "linux")
|
||||
t.Log("does not return the digest for an empty image")
|
||||
h.AssertStringDoesNotContain(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:"+emptyImageSHA)
|
||||
t.Log("does not write run image manifest and config to the kaniko cache")
|
||||
fis, err := os.ReadDir(filepath.Join(copyDir, "kaniko"))
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, len(fis), 1) // .gitkeep
|
||||
} else {
|
||||
t.Log("updates run image reference in analyzed.toml to include digest only")
|
||||
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "some-extend-false-analyzed.toml"), cmd.DefaultLogger)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertStringContains(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:")
|
||||
h.AssertEq(t, analyzedMD.RunImage.Image, restoreRegFixtures.ReadOnlyRunImage)
|
||||
h.AssertNil(t, analyzedMD.RunImage.TargetMetadata)
|
||||
t.Log("does not return the digest for an empty image")
|
||||
h.AssertStringDoesNotContain(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:"+emptyImageSHA)
|
||||
}
|
||||
})
|
||||
|
||||
when("-daemon", func() {
|
||||
it("updates run image reference in analyzed.toml to include digest and target data on newer platforms", func() {
|
||||
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not support -daemon flag")
|
||||
h.DockerRunAndCopy(t,
|
||||
containerName,
|
||||
copyDir,
|
||||
"/",
|
||||
restoreImage,
|
||||
h.WithFlags(append(
|
||||
dockerSocketMount,
|
||||
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||
"--env", "DOCKER_CONFIG=/docker-config",
|
||||
"--network", restoreRegNetwork,
|
||||
)...),
|
||||
h.WithArgs(
|
||||
"-analyzed", "/layers/some-extend-false-analyzed.toml",
|
||||
"-daemon",
|
||||
"-log-level", "debug",
|
||||
),
|
||||
)
|
||||
t.Log("updates run image reference in analyzed.toml to include digest and target data")
|
||||
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "some-extend-false-analyzed.toml"), cmd.DefaultLogger)
|
||||
h.AssertNil(t, err)
|
||||
h.AssertStringDoesNotContain(t, analyzedMD.RunImage.Reference, "@sha256:") // daemon image ID
|
||||
h.AssertEq(t, analyzedMD.RunImage.Image, restoreRegFixtures.ReadOnlyRunImage)
|
||||
h.AssertEq(t, analyzedMD.RunImage.TargetMetadata.OS, "linux")
|
||||
t.Log("does not write run image manifest and config to the kaniko cache")
|
||||
fis, err := os.ReadDir(filepath.Join(copyDir, "kaniko"))
|
||||
h.AssertNil(t, err)
|
||||
h.AssertEq(t, len(fis), 1) // .gitkeep
|
||||
})
|
||||
h.AssertPathExists(t, filepath.Join(copyDir, "kaniko", "cache", "base", fis[0].Name(), "oci-layout"))
|
||||
})
|
||||
})
|
||||
}
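
The restorer assertions above repeatedly check that the run image reference in analyzed.toml was pinned to a digest and that target metadata was recorded. A small illustrative helper in the same spirit, using the files.Handler.ReadAnalyzed call the tests already make; assertDigestPinned is a hypothetical name, shown with the package and imports it would need if it sat next to the helpers above:

package acceptance

import (
	"strings"
	"testing"

	"github.com/buildpacks/lifecycle/cmd"
	"github.com/buildpacks/lifecycle/platform/files"
	h "github.com/buildpacks/lifecycle/testhelpers"
)

// assertDigestPinned fails the test unless analyzed.toml records a digest-pinned run image reference.
func assertDigestPinned(t *testing.T, analyzedPath string) {
	t.Helper()
	analyzedMD, err := files.Handler.ReadAnalyzed(analyzedPath, cmd.DefaultLogger)
	h.AssertNil(t, err)
	if analyzedMD.RunImage == nil || !strings.Contains(analyzedMD.RunImage.Reference, "@sha256:") {
		t.Fatalf("expected digest-pinned run image reference, got: %+v", analyzedMD.RunImage)
	}
}
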
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
FROM ubuntu:bionic
|
||||
ARG cnb_platform_api
|
||||
|
||||
RUN apt-get update && apt-get install -y ca-certificates
|
||||
|
||||
|
@ -10,7 +11,6 @@ ENV CNB_USER_ID=2222
|
|||
|
||||
ENV CNB_GROUP_ID=3333
|
||||
|
||||
ARG cnb_platform_api
|
||||
ENV CNB_PLATFORM_API=${cnb_platform_api}
|
||||
|
||||
RUN chown -R $CNB_USER_ID:$CNB_GROUP_ID /some-dir
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
FROM mcr.microsoft.com/windows/nanoserver:1809
|
||||
USER ContainerAdministrator
|
||||
|
||||
COPY container /
|
||||
|
||||
WORKDIR /layers
|
||||
|
||||
ENV CNB_USER_ID=1
|
||||
|
||||
ENV CNB_GROUP_ID=1
|
||||
|
||||
ENV CNB_PLATFORM_API=${cnb_platform_api}
|
|
@ -1,5 +0,0 @@
|
|||
[[images]]
|
||||
image = "some-run-image-from-run-toml"
|
||||
|
||||
[[images]]
|
||||
image = "some-other-run-image"
|
|
@ -1,4 +1,4 @@
|
|||
[[group]]
|
||||
id = "some-buildpack-id"
|
||||
version = "some-buildpack-version"
|
||||
api = "0.10"
|
||||
api = "0.2"
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
[[group]]
|
||||
id = "some-other-buildpack-id"
|
||||
version = "some-other-buildpack-version"
|
||||
api = "0.10"
|
||||
api = "0.3"
|
||||
|
|
Binary file not shown.
|
@ -1 +0,0 @@
|
|||
{"architecture":"amd64","config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["sh"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"65aaed1d1f89cd3cd5aac9137c4786831e99a845ad823496c6008a22a725c780","container_config":{"Hostname":"65aaed1d1f89","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"sh\"]"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"created":"2022-11-18T01:19:29.442257773Z","docker_version":"20.10.12","history":[{"created":"2022-11-18T01:19:29.321465538Z","created_by":"/bin/sh -c #(nop) ADD file:36d9f497f679d56737ac1379d93f7b6a2e4c814e38e868a5a8e719c4b226ef6e in / "},{"created":"2022-11-18T01:19:29.442257773Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:40cf597a9181e86497f4121c604f9f0ab208950a98ca21db883f26b0a548a2eb"]}}
|
|
@ -1,16 +0,0 @@
|
|||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.docker.container.image.v1+json",
|
||||
"size": 1457,
|
||||
"digest": "sha256:9d5226e6ce3fb6aee2822206a5ef85f38c303d2b37bfc894b419fca2c0501269"
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 772999,
|
||||
"digest": "sha256:405fecb6a2fa4f29683f977e7e3b852bf6f8975a2aba647d234d2371894943da"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
{
|
||||
"schemaVersion": 2,
|
||||
"manifests": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"size": 527,
|
||||
"digest": "sha256:f75f3d1a317fc82c793d567de94fc8df2bece37acd5f2bd364a0d91a0d1f3dab",
|
||||
"platform": {
|
||||
"architecture": "amd64",
|
||||
"os": "linux"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
{
|
||||
"imageLayoutVersion": "1.0.0"
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
[[group]]
|
||||
id = "another-buildpack-id"
|
||||
version = "another-buildpack-version"
|
||||
api = "0.10"
|
||||
api = "0.2"
|
||||
|
|
|
@ -0,0 +1,14 @@
|
|||
FROM mcr.microsoft.com/windows/nanoserver:1809
|
||||
USER ContainerAdministrator
|
||||
|
||||
COPY container /
|
||||
|
||||
ENTRYPOINT ["/cnb/lifecycle/builder"]
|
||||
|
||||
WORKDIR /layers
|
||||
|
||||
ENV CNB_USER_ID=1
|
||||
|
||||
ENV CNB_GROUP_ID=1
|
||||
|
||||
ENV CNB_PLATFORM_API=${cnb_platform_api}
|
|
@ -28,8 +28,9 @@ echo
|
|||
cat > "${layers_dir}/launch.toml" << EOL
|
||||
[[processes]]
|
||||
type = "hello"
|
||||
command = ["echo world"]
|
||||
command = "echo world"
|
||||
args = ["arg1"]
|
||||
direct = false
|
||||
EOL
|
||||
|
||||
echo "---> Done"
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# Buildpack API version
|
||||
api = "0.10"
|
||||
api = "0.2"
|
||||
|
||||
# Buildpack ID and metadata
|
||||
[buildpack]
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# Buildpack API version
|
||||
api = "0.10"
|
||||
api = "0.2"
|
||||
|
||||
# Buildpack ID and metadata
|
||||
[buildpack]
|
||||
|
|
|
@ -1,33 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
set -eo pipefail
|
||||
|
||||
echo "---> Hello World 3 buildpack"
|
||||
|
||||
# INPUT ARGUMENTS
|
||||
platform_dir=$2
|
||||
env_dir=${platform_dir}/env
|
||||
layers_dir=$1
|
||||
plan_path=$3
|
||||
|
||||
# CNB_APP_DIR
|
||||
echo "CNB_APP_DIR: ${PWD}"
|
||||
|
||||
# PLATFORM DIR
|
||||
echo "PLATFORM_DIR: ${platform_dir}"
|
||||
|
||||
# LAYERS
|
||||
echo "LAYERS_DIR: ${layers_dir}"
|
||||
|
||||
# PLAN
|
||||
echo "PLAN_PATH: ${plan_path}"
|
||||
echo "plan contents:"
|
||||
cat ${plan_path}
|
||||
echo
|
||||
|
||||
echo "CNB_TARGET_ARCH:" `printenv CNB_TARGET_ARCH`
|
||||
echo "CNB_TARGET_ARCH_VARIANT:" `printenv CNB_TARGET_ARCH_VARIANT`
|
||||
echo "CNB_TARGET_OS:" `printenv CNB_TARGET_OS`
|
||||
echo "CNB_TARGET_DISTRO_NAME:" `printenv CNB_TARGET_DISTRO_NAME`
|
||||
echo "CNB_TARGET_DISTRO_VERSION:" `printenv CNB_TARGET_DISTRO_VERSION`
|
||||
|
||||
echo "---> Done"
|
|
@ -1,7 +0,0 @@
|
|||
# Buildpack API version
|
||||
api = "0.10"
|
||||
|
||||
# Buildpack ID and metadata
|
||||
[buildpack]
|
||||
id = "hello_world_3"
|
||||
version = "0.0.3"
|
|
@ -1,4 +1,4 @@
|
|||
[[group]]
|
||||
api = "0.10"
|
||||
api = "0.2"
|
||||
id = "hello_world"
|
||||
version = "0.0.1"
|
|
@ -1,4 +1,4 @@
|
|||
[[group]]
|
||||
api = "0.10"
|
||||
api = "0.2"
|
||||
id = "hello_world_2"
|
||||
version = "0.0.2"
|
|
@ -1,9 +1,9 @@
|
|||
[[group]]
|
||||
api = "0.10"
|
||||
api = "0.2"
|
||||
id = "hello_world"
|
||||
version = "0.0.1"
|
||||
|
||||
[[group-extensions]]
|
||||
api = "0.10"
|
||||
api = "0.9"
|
||||
id = "hello_world"
|
||||
version = "0.0.1"
|
||||
|
|
|
@ -1,10 +0,0 @@
|
|||
[[entries]]
|
||||
|
||||
[[entries.providers]]
|
||||
id = "hello_world_3"
|
||||
version = "0.0.3"
|
||||
|
||||
[[entries.requires]]
|
||||
name = "03_plan.toml_requires_subset_content_idk"
|
||||
[entries.requires.metadata]
|
||||
# arbitrary data describing the required dependency
|
|
@ -1,5 +1,5 @@
|
|||
# Buildpack API version
|
||||
api = "0.10"
|
||||
api = "0.2"
|
||||
|
||||
# Buildpack ID and metadata
|
||||
[buildpack]
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
[run-image.target]
|
||||
id = "my id"
|
||||
os = "linux"
|
||||
arch = "amd64"
|
||||
arch-variant = "some-variant"
|
||||
[run-image.target.distro]
|
||||
name = "ubuntu"
|
||||
version = "some-cute-version"
|
||||
|
|
@ -1,4 +0,0 @@
|
|||
[[group]]
|
||||
api = "0.10"
|
||||
id = "hello_world_3"
|
||||
version = "0.0.3"
|
|
@ -1,6 +0,0 @@
|
|||
[[entries]]
|
||||
|
||||
[[entries.providers]]
|
||||
id = "hello_world_3"
|
||||
version = "0.0.3"
|
||||
|
|
@ -1,6 +0,0 @@
|
|||
[run-image]
|
||||
[target]
|
||||
id = "software"
|
||||
os = "linux"
|
||||
arch = "amd64"
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
[[group]]
|
||||
api = "0.10"
|
||||
api = "0.2"
|
||||
id = "hello_world_2"
|
||||
version = "0.0.2"
|
|
@ -13,7 +13,6 @@ RUN groupadd cnb --gid ${cnb_gid} && \
|
|||
|
||||
# chown the directories so the tests do not have to run as root
|
||||
RUN chown -R "${cnb_uid}:${cnb_gid}" "/layers"
|
||||
RUN chown -R "${cnb_uid}:${cnb_gid}" "/layout-repo"
|
||||
|
||||
WORKDIR /layers
|
||||
|
||||
|
|
|
@ -28,15 +28,7 @@ fi
|
|||
|
||||
echo -n "{\"key\": \"some-launch-true-bom-content\"}" > ${layers_dir}/some-layer.sbom.cdx.json
|
||||
|
||||
if test -f ${layers_dir}/some-layer.toml; then
|
||||
# mimic not downloading new content
|
||||
echo "nop"
|
||||
else
|
||||
# mimic downloading new content
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
cat <<EOF > ${layers_dir}/some-layer.toml
|
||||
cat <<EOF > "$layers_dir"/some-layer.toml
|
||||
[types]
|
||||
launch = true
|
||||
EOF
|
||||
|
@ -50,7 +42,7 @@ fi
|
|||
|
||||
echo -n "{\"key\": \"some-cache-true-bom-content\"}" > ${layers_dir}/some-cache-layer.sbom.cdx.json
|
||||
|
||||
cat <<EOF > ${layers_dir}/some-cache-layer.toml
|
||||
cat <<EOF > "$layers_dir"/some-cache-layer.toml
|
||||
[types]
|
||||
cache = true
|
||||
EOF
|
||||
|
@ -64,7 +56,7 @@ fi
|
|||
|
||||
echo -n "{\"key\": \"some-launch-true-cache-true-bom-content\"}" > ${layers_dir}/some-launch-cache-layer.sbom.cdx.json
|
||||
|
||||
cat <<EOF > ${layers_dir}/some-launch-cache-layer.toml
|
||||
cat <<EOF > "$layers_dir"/some-launch-cache-layer.toml
|
||||
[types]
|
||||
launch = true
|
||||
cache = true
|
||||
|
@ -80,7 +72,7 @@ fi
|
|||
|
||||
echo -n "{\"key\": \"some-bom-content\"}" > ${layers_dir}/some-build-layer.sbom.cdx.json
|
||||
|
||||
cat <<EOF > ${layers_dir}/some-build-layer.toml
|
||||
cat <<EOF > "$layers_dir"/some-build-layer.toml
|
||||
[types]
|
||||
build = true
|
||||
EOF
|
||||
|
|
|
@ -1,8 +0,0 @@
|
|||
# Buildpack API version
|
||||
api = "0.9"
|
||||
|
||||
# Extension ID and metadata
|
||||
[extension]
|
||||
id = "samples/hello-world"
|
||||
version = "0.0.1"
|
||||
name = "Hello World Extension"
|
|
@ -1,9 +0,0 @@
|
|||
[[order]]
|
||||
[[order.group]]
|
||||
id = "samples/hello-world"
|
||||
version = "0.0.1"
|
||||
|
||||
[[order-extensions]]
|
||||
[[order-extensions.group]]
|
||||
id = "samples/hello-world"
|
||||
version = "0.0.1"
|
|
@ -1,5 +0,0 @@
|
|||
[[images]]
|
||||
image = "some-run-image-from-run-toml"
|
||||
|
||||
[[images]]
|
||||
image = "some-other-run-image"
|
Binary file not shown.
|
@ -1 +0,0 @@
|
|||
{"architecture":"amd64","config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["sh"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"65aaed1d1f89cd3cd5aac9137c4786831e99a845ad823496c6008a22a725c780","container_config":{"Hostname":"65aaed1d1f89","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"sh\"]"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"created":"2022-11-18T01:19:29.442257773Z","docker_version":"20.10.12","history":[{"created":"2022-11-18T01:19:29.321465538Z","created_by":"/bin/sh -c #(nop) ADD file:36d9f497f679d56737ac1379d93f7b6a2e4c814e38e868a5a8e719c4b226ef6e in / "},{"created":"2022-11-18T01:19:29.442257773Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:40cf597a9181e86497f4121c604f9f0ab208950a98ca21db883f26b0a548a2eb"]}}
|
|
@ -1,16 +0,0 @@
|
|||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.docker.container.image.v1+json",
|
||||
"size": 1457,
|
||||
"digest": "sha256:9d5226e6ce3fb6aee2822206a5ef85f38c303d2b37bfc894b419fca2c0501269"
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 772999,
|
||||
"digest": "sha256:405fecb6a2fa4f29683f977e7e3b852bf6f8975a2aba647d234d2371894943da"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
{
|
||||
"schemaVersion": 2,
|
||||
"manifests": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"size": 527,
|
||||
"digest": "sha256:f75f3d1a317fc82c793d567de94fc8df2bece37acd5f2bd364a0d91a0d1f3dab",
|
||||
"platform": {
|
||||
"architecture": "amd64",
|
||||
"os": "linux"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
{
|
||||
"imageLayoutVersion": "1.0.0"
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
FROM ubuntu:jammy
|
||||
FROM ubuntu:bionic
|
||||
|
||||
ARG cnb_uid=1234
|
||||
ARG cnb_gid=1000
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
val-from-build-config
|
|
@ -1,8 +1,5 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
echo "ENV"
|
||||
env
|
||||
|
||||
plan_path=$2
|
||||
|
||||
cat >> "${plan_path}" <<EOL
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
api = "0.10"
|
||||
|
||||
api = "0.3"
|
||||
[buildpack]
|
||||
id = "buildpack_for_ext"
|
||||
version = "buildpack_for_ext_version"
|
||||
|
|
|
@ -1,5 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
echo "Opted out of detection"
|
||||
|
||||
exit 100
|
|
@ -1,5 +0,0 @@
|
|||
api = "0.9"
|
||||
[buildpack]
|
||||
id = "fail_detect_buildpack"
|
||||
version = "some_version"
|
||||
name = "Fail Detect Buildpack"
|
|
@ -1,12 +1,5 @@
|
|||
api = "0.9"
|
||||
|
||||
api = "0.3"
|
||||
[buildpack]
|
||||
id = "simple_buildpack"
|
||||
version = "simple_buildpack_version"
|
||||
name = "Simple Buildpack"
|
||||
|
||||
[[stacks]]
|
||||
id = "io.buildpacks.stacks.bionic"
|
||||
|
||||
[[stacks]]
|
||||
id = "io.buildpacks.stacks.jammy"
|
||||
id = "simple_buildpack"
|
||||
version = "simple_buildpack_version"
|
||||
name = "Simple Buildpack"
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
api = "0.10"
|
||||
|
||||
api = "0.6"
|
||||
[buildpack]
|
||||
id = "always_detect_buildpack"
|
||||
version = "always_detect_buildpack_version"
|
||||
|
|
|
@ -1,5 +0,0 @@
|
|||
[[order]]
|
||||
|
||||
[[order.group]]
|
||||
id = "fail_detect_buildpack"
|
||||
version = "some_version"
|
|
@ -1,5 +0,0 @@
|
|||
[[images]]
|
||||
image = "some-run-image-from-extension"
|
||||
|
||||
[[images]]
|
||||
image = "some-other-run-image"
|
|
@ -1,2 +0,0 @@
|
|||
[run-image]
|
||||
reference = "some-old-run-image"
|
|
@ -13,13 +13,8 @@ RUN groupadd cnb --gid ${cnb_gid} && \
|
|||
|
||||
# chown the directories so the tests do not have to run as root
|
||||
RUN chown -R "${cnb_uid}:${cnb_gid}" "/layers"
|
||||
RUN chown -R "${cnb_uid}:${cnb_gid}" "/layout-repo"
|
||||
RUN chown -R "${cnb_uid}:${cnb_gid}" "/other_layers"
|
||||
|
||||
# create and chown a custom oci layout directory to export images
|
||||
RUN mkdir /my-layout-dir
|
||||
RUN chown -R "${cnb_uid}:${cnb_gid}" "/my-layout-dir"
|
||||
|
||||
WORKDIR /layers
|
||||
|
||||
USER ${cnb_uid}:${cnb_gid}
|
||||
|
|
|
@ -1,17 +0,0 @@
|
|||
{
|
||||
"buildpacks": [
|
||||
{
|
||||
"key": "corrupted_buildpack",
|
||||
"version": "corrupted_v1",
|
||||
"layers": {
|
||||
"corrupted-layer": {
|
||||
"sha": "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59",
|
||||
"data": null,
|
||||
"build": false,
|
||||
"launch": true,
|
||||
"cache": true
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
Binary file not shown.
|
@ -1,3 +0,0 @@
|
|||
[[images]]
|
||||
image = "REPLACE"
|
||||
mirrors = ["mirror1", "mirror2"]
|
|
@ -1,2 +0,0 @@
|
|||
[run-image]
|
||||
reference = "host.docker.internal/bar"
|
|
@ -1 +1 @@
|
|||
sha256:2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944
|
||||
sha256:b89860e2f9c62e6b5d66d3ce019e18cdabae30273c25150b7f20a82f7a70e494
|
|
@ -1,4 +1,3 @@
|
|||
[types]
|
||||
build = false
|
||||
launch = false
|
||||
cache = true
|
||||
|
|
|
@ -1,2 +0,0 @@
|
|||
[types]
|
||||
launch = true
|
|
@ -1 +0,0 @@
|
|||
launch-data
|
|
@ -1,3 +0,0 @@
|
|||
[types]
|
||||
cache = true
|
||||
launch = true
|
|
@ -1 +0,0 @@
|
|||
digest-not-match-data
|
|
@ -1,14 +1,9 @@
|
|||
[[group]]
|
||||
id = "some-buildpack-id"
|
||||
version = "some-buildpack-version"
|
||||
api = "0.7"
|
||||
api = "0.2"
|
||||
|
||||
[[group]]
|
||||
id = "cacher_buildpack"
|
||||
version = "cacher_v1"
|
||||
api = "0.8"
|
||||
|
||||
[[group]]
|
||||
id = "corrupted_buildpack"
|
||||
version = "corrupted_v1"
|
||||
api = "0.8"
|
||||
api = "0.3"
|
||||
|
|
|
@ -1,3 +0,0 @@
|
|||
[run-image]
|
||||
reference = "REPLACE"
|
||||
name = "REPLACE"
|
|
@ -1,3 +0,0 @@
|
|||
[run-image]
|
||||
reference = "REPLACE"
|
||||
extend = true
|
Binary file not shown.
|
@ -1,47 +0,0 @@
|
|||
{
|
||||
"architecture": "amd64",
|
||||
"created": "0001-01-01T00:00:00Z",
|
||||
"history": [
|
||||
{
|
||||
"author": "some-base-image-author",
|
||||
"created": "2023-03-06T17:34:39.0316521Z",
|
||||
"created_by": "FROM some-base-image"
|
||||
},
|
||||
{
|
||||
"author": "kaniko",
|
||||
"created": "0001-01-01T00:00:00Z",
|
||||
"created_by": "Layer: 'RUN mkdir /some-dir && echo some-data > /some-dir/some-file && echo some-data > /some-file', Created by extension: first-extension"
|
||||
},
|
||||
{
|
||||
"author": "kaniko",
|
||||
"created": "0001-01-01T00:00:00Z",
|
||||
"created_by": "Layer: 'RUN mkdir /some-other-dir && echo some-data > /some-other-dir/some-file && echo some-data > /some-other-file', Created by extension: second-extension"
|
||||
}
|
||||
],
|
||||
"os": "linux",
|
||||
"rootfs": {
|
||||
"type": "layers",
|
||||
"diff_ids": [
|
||||
"sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c",
|
||||
"sha256:d8dea3a780ba766c08bd11800809652ce5e9eba50b7b94ac09cb7f5e98e07f08",
|
||||
"sha256:36f3735021a89a605c3da10b9659f0ec69e7c4c72abc802dc32471f1b080fd78"
|
||||
]
|
||||
},
|
||||
"config": {
|
||||
"Cmd": [
|
||||
"/bin/bash"
|
||||
],
|
||||
"Env": [
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
||||
"CNB_USER_ID=1234",
|
||||
"CNB_GROUP_ID=1000",
|
||||
"CNB_STACK_ID=some-stack-id"
|
||||
],
|
||||
"Labels": {
|
||||
"io.buildpacks.rebasable": "false",
|
||||
"org.opencontainers.image.ref.name": "ubuntu",
|
||||
"org.opencontainers.image.version": "18.04"
|
||||
},
|
||||
"User": "root"
|
||||
}
|
||||
}
|
|
@ -1,26 +0,0 @@
|
|||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.docker.container.image.v1+json",
|
||||
"size": 2771,
|
||||
"digest": "sha256:2dc6ef9f627c01f3f9e4f735c90f0251b5adaf6ad5685c5afb5cf638412fad67"
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 26711153,
|
||||
"digest": "sha256:0064b1b97ec0775813740e8cb92821a6d84fd38eee70bafba9c12d9c37534661"
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 38445484,
|
||||
"digest": "sha256:65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5"
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 146545,
|
||||
"digest": "sha256:0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18"
|
||||
}
|
||||
]
|
||||
}
|
Binary file not shown.
|
@ -1,10 +0,0 @@
|
|||
{
|
||||
"schemaVersion": 2,
|
||||
"manifests": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"size": 1083,
|
||||
"digest": "sha256:40007d6086160bcdf45770ed12d23f0c594013cf0cd5e65ffc67be8f46e0d9c9"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
{
|
||||
"imageLayoutVersion": "1.0.0"
|
||||
}
|
Binary file not shown.
|
@ -1 +0,0 @@
|
|||
{"architecture":"amd64","config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["sh"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"65aaed1d1f89cd3cd5aac9137c4786831e99a845ad823496c6008a22a725c780","container_config":{"Hostname":"65aaed1d1f89","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"sh\"]"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"created":"2022-11-18T01:19:29.442257773Z","docker_version":"20.10.12","history":[{"created":"2022-11-18T01:19:29.321465538Z","created_by":"/bin/sh -c #(nop) ADD file:36d9f497f679d56737ac1379d93f7b6a2e4c814e38e868a5a8e719c4b226ef6e in / "},{"created":"2022-11-18T01:19:29.442257773Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:40cf597a9181e86497f4121c604f9f0ab208950a98ca21db883f26b0a548a2eb"]}}
|
|
@ -1,16 +0,0 @@
|
|||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.docker.container.image.v1+json",
|
||||
"size": 1457,
|
||||
"digest": "sha256:9d5226e6ce3fb6aee2822206a5ef85f38c303d2b37bfc894b419fca2c0501269"
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 772999,
|
||||
"digest": "sha256:405fecb6a2fa4f29683f977e7e3b852bf6f8975a2aba647d234d2371894943da"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
{
|
||||
"schemaVersion": 2,
|
||||
"manifests": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"size": 527,
|
||||
"digest": "sha256:f75f3d1a317fc82c793d567de94fc8df2bece37acd5f2bd364a0d91a0d1f3dab",
|
||||
"platform": {
|
||||
"architecture": "amd64",
|
||||
"os": "linux"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
Some files were not shown because too many files have changed in this diff.