Compare commits

..

1 Commits

Author SHA1 Message Date
Jesse Brown e717efe1c8
Add go-version-file to draft-release workflow (#1331)
We are getting warnings about go-version not being specified. This should fix that.

Signed-off-by: Jesse Brown <jabrown85@gmail.com>
2024-04-05 14:12:52 -05:00
173 changed files with 2779 additions and 2992 deletions

View File

@ -33,9 +33,8 @@ jobs:
TEST_COVERAGE: 1 TEST_COVERAGE: 1
run: make test run: make test
- name: Upload coverage to Codecov - name: Upload coverage to Codecov
uses: codecov/codecov-action@v5 uses: codecov/codecov-action@v3
with: with:
token: ${{ secrets.CODECOV_TOKEN }}
file: ./out/tests/coverage-unit.txt file: ./out/tests/coverage-unit.txt
flags: unit,os_linux flags: unit,os_linux
fail_ci_if_error: true fail_ci_if_error: true
@ -55,10 +54,62 @@ jobs:
run: | run: |
make format || true make format || true
make test make test
test-windows:
runs-on: windows-2019
steps:
- name: Set git to use LF and symlinks
run: |
git config --global core.autocrlf false
git config --global core.eol lf
git config --global core.symlinks true
- uses: actions/checkout@v4
with:
fetch-depth: '0'
- name: Setup go
uses: actions/setup-go@v5
with:
check-latest: true
go-version-file: 'go.mod'
- name: Add runner IP to daemon insecure-registries and firewall
shell: powershell
run: |
# Get IP from default gateway interface
$IPAddress=(Get-NetIPAddress -InterfaceAlias ((Get-NetRoute "0.0.0.0/0").InterfaceAlias) -AddressFamily IPv4)[0].IPAddress
# Allow container-to-host registry traffic (from public interface, to the same interface)
New-NetfirewallRule -DisplayName test-registry -LocalAddress $IPAddress -RemoteAddress $IPAddress
# create or update daemon config to allow host as insecure-registry
$config=@{}
if (Test-Path C:\ProgramData\docker\config\daemon.json) {
$config=(Get-Content C:\ProgramData\docker\config\daemon.json | ConvertFrom-json)
}
$config | Add-Member -Force -Name "insecure-registries" -value @("$IPAddress/32") -MemberType NoteProperty
$config | Add-Member -Force -Name "allow-nondistributable-artifacts" -value @("$IPAddress/32") -MemberType NoteProperty
ConvertTo-json $config | Out-File -Encoding ASCII C:\ProgramData\docker\config\daemon.json
Restart-Service docker
# dump docker info for auditing
docker version
docker info
- name: Test
env:
TEST_COVERAGE: 1
run: |
make test
- name: Prepare Codecov
uses: crazy-max/ghaction-chocolatey@v3
with:
args: install codecov -y
- name: Run Codecov
run: |
codecov.exe -f .\out\tests\coverage-unit.txt -v --flag os_windows
build-and-publish: build-and-publish:
needs: needs:
- test-linux-amd64 - test-linux-amd64
- test-linux-arm64 - test-linux-arm64
- test-windows
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions: permissions:
id-token: write id-token: write
@ -76,14 +127,14 @@ jobs:
- name: Set version - name: Set version
run: | run: |
echo "LIFECYCLE_VERSION=$(go run tools/version/main.go)" | tee -a $GITHUB_ENV version.txt echo "LIFECYCLE_VERSION=$(go run tools/version/main.go)" | tee -a $GITHUB_ENV version.txt
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v3
with: with:
name: version name: version
path: version.txt path: version.txt
- name: Set tag - name: Set tag
run: | run: |
echo "LIFECYCLE_IMAGE_TAG=$(git describe --always --abbrev=7)" >> tag.txt echo "LIFECYCLE_IMAGE_TAG=$(git describe --always --abbrev=7)" >> tag.txt
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v3
with: with:
name: tag name: tag
path: tag.txt path: tag.txt
@ -92,60 +143,68 @@ jobs:
make clean make clean
make build make build
make package make package
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v3
with: with:
name: lifecycle-linux-x86-64 name: lifecycle-linux-x86-64
path: out/lifecycle-v*+linux.x86-64.tgz path: out/lifecycle-v*+linux.x86-64.tgz
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v3
with: with:
name: lifecycle-linux-x86-64-sha256 name: lifecycle-linux-x86-64-sha256
path: out/lifecycle-v*+linux.x86-64.tgz.sha256 path: out/lifecycle-v*+linux.x86-64.tgz.sha256
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v3
with: with:
name: lifecycle-linux-arm64 name: lifecycle-linux-arm64
path: out/lifecycle-v*+linux.arm64.tgz path: out/lifecycle-v*+linux.arm64.tgz
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v3
with: with:
name: lifecycle-linux-arm64-sha256 name: lifecycle-linux-arm64-sha256
path: out/lifecycle-v*+linux.arm64.tgz.sha256 path: out/lifecycle-v*+linux.arm64.tgz.sha256
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v3
with: with:
name: lifecycle-linux-ppc64le name: lifecycle-linux-ppc64le
path: out/lifecycle-v*+linux.ppc64le.tgz path: out/lifecycle-v*+linux.ppc64le.tgz
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v3
with: with:
name: lifecycle-linux-ppc64le-sha256 name: lifecycle-linux-ppc64le-sha256
path: out/lifecycle-v*+linux.ppc64le.tgz.sha256 path: out/lifecycle-v*+linux.ppc64le.tgz.sha256
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v3
with: with:
name: lifecycle-linux-s390x name: lifecycle-linux-s390x
path: out/lifecycle-v*+linux.s390x.tgz path: out/lifecycle-v*+linux.s390x.tgz
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v3
with: with:
name: lifecycle-linux-s390x-sha256 name: lifecycle-linux-s390x-sha256
path: out/lifecycle-v*+linux.s390x.tgz.sha256 path: out/lifecycle-v*+linux.s390x.tgz.sha256
- uses: actions/upload-artifact@v3
with:
name: lifecycle-windows-x86-64
path: out/lifecycle-v*+windows.x86-64.tgz
- uses: actions/upload-artifact@v3
with:
name: lifecycle-windows-x86-64-sha256
path: out/lifecycle-v*+windows.x86-64.tgz.sha256
- name: Generate SBOM JSON - name: Generate SBOM JSON
uses: CycloneDX/gh-gomod-generate-sbom@v2 uses: CycloneDX/gh-gomod-generate-sbom@v2
with: with:
args: mod -licenses -json -output lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json args: mod -licenses -json -output lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json
version: ^v1 version: ^v1
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v3
with: with:
name: lifecycle-bom-cdx name: lifecycle-bom-cdx
path: lifecycle-v*-bom.cdx.json path: lifecycle-v*-bom.cdx.json
- name: Calculate SBOM sha - name: Calculate SBOM sha
run: | run: |
shasum -a 256 lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json > lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json.sha256 shasum -a 256 lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json > lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json.sha256
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v3
with: with:
name: lifecycle-bom-cdx-sha256 name: lifecycle-bom-cdx-sha256
path: lifecycle-v*-bom.cdx.json.sha256 path: lifecycle-v*-bom.cdx.json.sha256
- uses: azure/docker-login@v2 - uses: azure/docker-login@v1
if: github.event_name == 'push' if: github.event_name == 'push'
with: with:
username: ${{ secrets.DOCKER_USERNAME }} username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }} password: ${{ secrets.DOCKER_PASSWORD }}
- uses: actions/download-artifact@v5 - uses: actions/download-artifact@v3
with: with:
name: tag name: tag
- name: Set env - name: Set env
@ -169,11 +228,15 @@ jobs:
LINUX_S390X_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.s390x.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-s390x -arch s390x | awk '{print $NF}') LINUX_S390X_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.s390x.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-s390x -arch s390x | awk '{print $NF}')
echo "LINUX_S390X_SHA: $LINUX_S390X_SHA" echo "LINUX_S390X_SHA: $LINUX_S390X_SHA"
WINDOWS_AMD64_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+windows.x86-64.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-windows -os windows | awk '{print $NF}')
echo "WINDOWS_AMD64_SHA: $WINDOWS_AMD64_SHA"
docker manifest create buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG} \ docker manifest create buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG} \
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-x86-64@${LINUX_AMD64_SHA} \ buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-x86-64@${LINUX_AMD64_SHA} \
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-arm64@${LINUX_ARM64_SHA} \ buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-arm64@${LINUX_ARM64_SHA} \
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-ppc64le@${LINUX_PPC64LE_SHA} \ buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-ppc64le@${LINUX_PPC64LE_SHA} \
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-s390x@${LINUX_S390X_SHA} buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-s390x@${LINUX_S390X_SHA} \
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-windows@${WINDOWS_AMD64_SHA}
MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}) MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG})
echo "MANIFEST_SHA: $MANIFEST_SHA" echo "MANIFEST_SHA: $MANIFEST_SHA"
@ -188,7 +251,7 @@ jobs:
buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG} buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}
- name: Scan image - name: Scan image
if: github.event_name == 'push' if: github.event_name == 'push'
uses: anchore/scan-action@v6 uses: anchore/scan-action@v3
with: with:
image: buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }} image: buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}
pack-acceptance-linux: pack-acceptance-linux:
@ -206,17 +269,17 @@ jobs:
uses: actions/setup-go@v5 uses: actions/setup-go@v5
with: with:
go-version-file: 'pack/go.mod' go-version-file: 'pack/go.mod'
- uses: actions/download-artifact@v5 - uses: actions/download-artifact@v3
with: with:
name: version name: version
- uses: actions/download-artifact@v5 - uses: actions/download-artifact@v3
with: with:
name: tag name: tag
- name: Set env - name: Set env
run: | run: |
cat version.txt >> $GITHUB_ENV cat version.txt >> $GITHUB_ENV
cat tag.txt >> $GITHUB_ENV cat tag.txt >> $GITHUB_ENV
- uses: actions/download-artifact@v5 - uses: actions/download-artifact@v3
with: with:
name: lifecycle-linux-x86-64 name: lifecycle-linux-x86-64
path: pack path: pack
@ -227,3 +290,75 @@ jobs:
LIFECYCLE_PATH="../lifecycle-v${{ env.LIFECYCLE_VERSION }}+linux.x86-64.tgz" \ LIFECYCLE_PATH="../lifecycle-v${{ env.LIFECYCLE_VERSION }}+linux.x86-64.tgz" \
LIFECYCLE_IMAGE="buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}" \ LIFECYCLE_IMAGE="buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}" \
make acceptance make acceptance
pack-acceptance-windows:
if: github.event_name == 'push'
needs: build-and-publish
runs-on: windows-2019
steps:
- name: Set git to use LF and symlinks
run: |
git config --global core.autocrlf false
git config --global core.eol lf
git config --global core.symlinks true
- uses: actions/checkout@v4
with:
repository: 'buildpacks/pack'
path: 'pack'
ref: 'main'
fetch-depth: 0 # fetch all history for all branches and tags
- name: Setup go
uses: actions/setup-go@v5
with:
go-version-file: 'pack/go.mod'
- name: Add runner IP to daemon insecure-registries and firewall
shell: powershell
run: |
# Get IP from default gateway interface
$IPAddress=(Get-NetIPAddress -InterfaceAlias ((Get-NetRoute "0.0.0.0/0").InterfaceAlias) -AddressFamily IPv4)[0].IPAddress
# Allow container-to-host registry traffic (from public interface, to the same interface)
New-NetfirewallRule -DisplayName test-registry -LocalAddress $IPAddress -RemoteAddress $IPAddress
# create or update daemon config to allow host as insecure-registry
$config=@{}
if (Test-Path C:\ProgramData\docker\config\daemon.json) {
$config=(Get-Content C:\ProgramData\docker\config\daemon.json | ConvertFrom-json)
}
$config | Add-Member -Force -Name "insecure-registries" -value @("$IPAddress/32") -MemberType NoteProperty
ConvertTo-json $config | Out-File -Encoding ASCII C:\ProgramData\docker\config\daemon.json
Restart-Service docker
# dump docker info for auditing
docker version
docker info
- name: Modify etc\hosts to include runner IP
shell: powershell
run: |
$IPAddress=(Get-NetIPAddress -InterfaceAlias ((Get-NetRoute "0.0.0.0/0").InterfaceAlias) -AddressFamily IPv4)[0].IPAddress
"# Modified by CNB: https://github.com/buildpacks/ci/tree/main/gh-runners/windows
${IPAddress} host.docker.internal
${IPAddress} gateway.docker.internal
" | Out-File -Filepath C:\Windows\System32\drivers\etc\hosts -Encoding utf8
- uses: actions/download-artifact@v3
with:
name: version
- uses: actions/download-artifact@v3
with:
name: tag
- name: Set env
run: |
cat version.txt >> $env:GITHUB_ENV
cat tag.txt >> $env:GITHUB_ENV
- uses: actions/download-artifact@v3
with:
name: lifecycle-windows-x86-64
path: pack
- name: Run pack acceptance
run: |
cd pack
git checkout $(git describe --abbrev=0 --tags) # check out the latest tag
$env:LIFECYCLE_PATH="..\lifecycle-v${{ env.LIFECYCLE_VERSION }}+windows.x86-64.tgz"
$env:LIFECYCLE_IMAGE="buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}"
make acceptance

View File

@ -86,7 +86,7 @@ jobs:
fi fi
- name: Scan latest release image - name: Scan latest release image
id: scan-image id: scan-image
uses: anchore/scan-action@v6 uses: anchore/scan-action@v3
with: with:
image: buildpacksio/lifecycle:${{ steps.read-versions.outputs.latest-release-version }} image: buildpacksio/lifecycle:${{ steps.read-versions.outputs.latest-release-version }}
fail-build: true fail-build: true

View File

@ -24,7 +24,7 @@ jobs:
exit 1 exit 1
fi fi
echo "LIFECYCLE_VERSION=$version" >> $GITHUB_ENV echo "LIFECYCLE_VERSION=$version" >> $GITHUB_ENV
- name: Determine download urls for linux-x86-64, linux-arm64, linux-ppc64le, linux-s390x - name: Determine download urls for linux-x86-64, linux-arm64, linux-ppc64le, linux-s390x, and windows
id: artifact-urls id: artifact-urls
# FIXME: this script should be updated to work with actions/github-script@v6 # FIXME: this script should be updated to work with actions/github-script@v6
uses: actions/github-script@v3 uses: actions/github-script@v3
@ -82,11 +82,8 @@ jobs:
if (urlList.length === 0) { if (urlList.length === 0) {
throw "no artifacts found" throw "no artifacts found"
} }
if (urlList.length != 10) { if (urlList.length != 12) {
// found too many artifacts throw "there should be exactly 12 artifacts"
// list them and throw
console.log(urlList);
throw "there should be exactly 10 artifacts, found " + urlList.length + " artifacts"
} }
return urlList.join(",") return urlList.join(",")
}) })
@ -191,7 +188,7 @@ jobs:
--draft \ --draft \
--notes-file ../body.txt \ --notes-file ../body.txt \
--prerelease \ --prerelease \
--target $GITHUB_REF_NAME \ --target $GITHUB_REF \
--title "lifecycle v${{ env.LIFECYCLE_VERSION }}" --title "lifecycle v${{ env.LIFECYCLE_VERSION }}"
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@ -203,7 +200,7 @@ jobs:
$(ls | sort | paste -sd " " -) \ $(ls | sort | paste -sd " " -) \
--draft \ --draft \
--notes-file ../body.txt \ --notes-file ../body.txt \
--target $GITHUB_REF_NAME \ --target $GITHUB_REF \
--title "lifecycle v${{ env.LIFECYCLE_VERSION }}" --title "lifecycle v${{ env.LIFECYCLE_VERSION }}"
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -22,7 +22,7 @@ jobs:
go install github.com/google/go-containerregistry/cmd/crane@latest go install github.com/google/go-containerregistry/cmd/crane@latest
- name: Install cosign - name: Install cosign
uses: sigstore/cosign-installer@v3 uses: sigstore/cosign-installer@v3
- uses: azure/docker-login@v2 - uses: azure/docker-login@v1
with: with:
username: ${{ secrets.DOCKER_USERNAME }} username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }} password: ${{ secrets.DOCKER_PASSWORD }}
@ -48,6 +48,9 @@ jobs:
echo "LINUX_S390X_SHA: $LINUX_S390X_SHA" echo "LINUX_S390X_SHA: $LINUX_S390X_SHA"
echo "LINUX_S390X_SHA=$LINUX_S390X_SHA" >> $GITHUB_ENV echo "LINUX_S390X_SHA=$LINUX_S390X_SHA" >> $GITHUB_ENV
WINDOWS_AMD64_SHA=$(cosign verify --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" --certificate-oidc-issuer https://token.actions.githubusercontent.com buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-windows | jq -r .[0].critical.image.\"docker-manifest-digest\")
echo "WINDOWS_AMD64_SHA: $WINDOWS_AMD64_SHA"
echo "WINDOWS_AMD64_SHA=$WINDOWS_AMD64_SHA" >> $GITHUB_ENV
- name: Download SBOM - name: Download SBOM
run: | run: |
gh release download --pattern '*-bom.cdx.json' ${{ github.event.release.tag_name }} gh release download --pattern '*-bom.cdx.json' ${{ github.event.release.tag_name }}
@ -61,12 +64,14 @@ jobs:
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-arm64 crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-arm64
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-ppc64le crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-ppc64le
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-s390x@${{ env.LINUX_S390X_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-s390x crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-s390x@${{ env.LINUX_S390X_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-s390x
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-windows@${{ env.WINDOWS_AMD64_SHA }} ${{ env.LIFECYCLE_VERSION }}-windows
docker manifest create buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }} \ docker manifest create buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }} \
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-x86-64@${{ env.LINUX_AMD64_SHA }} \ buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-x86-64@${{ env.LINUX_AMD64_SHA }} \
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} \ buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} \
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} \ buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} \
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-s390x@${{ env.LINUX_S390X_SHA }} buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-s390x@${{ env.LINUX_S390X_SHA }} \
buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-windows@${{ env.WINDOWS_AMD64_SHA }}
MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}) MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }})
echo "MANIFEST_SHA: $MANIFEST_SHA" echo "MANIFEST_SHA: $MANIFEST_SHA"
@ -98,12 +103,14 @@ jobs:
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} latest-linux-arm64 crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} latest-linux-arm64
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} latest-linux-ppc64le crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} latest-linux-ppc64le
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-s390x@${{ env.LINUX_S390X_SHA }} latest-linux-s390x crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-s390x@${{ env.LINUX_S390X_SHA }} latest-linux-s390x
crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-windows@${{ env.WINDOWS_AMD64_SHA }} latest-windows
docker manifest create buildpacksio/lifecycle:latest \ docker manifest create buildpacksio/lifecycle:latest \
buildpacksio/lifecycle:latest-linux-x86-64@${{ env.LINUX_AMD64_SHA }} \ buildpacksio/lifecycle:latest-linux-x86-64@${{ env.LINUX_AMD64_SHA }} \
buildpacksio/lifecycle:latest-linux-arm64@${{ env.LINUX_ARM64_SHA }} \ buildpacksio/lifecycle:latest-linux-arm64@${{ env.LINUX_ARM64_SHA }} \
buildpacksio/lifecycle:latest-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} \ buildpacksio/lifecycle:latest-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} \
buildpacksio/lifecycle:latest-linux-s390x@${{ env.LINUX_S390X_SHA }} buildpacksio/lifecycle:latest-linux-s390x@${{ env.LINUX_S390X_SHA }} \
buildpacksio/lifecycle:latest-windows@${{ env.WINDOWS_AMD64_SHA }}
MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:latest) MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:latest)
echo "MANIFEST_SHA: $MANIFEST_SHA" echo "MANIFEST_SHA: $MANIFEST_SHA"

View File

@ -32,11 +32,11 @@ jobs:
id: ZVSI id: ZVSI
run: | run: |
#creation of zvsi #creation of zvsi
ibmcloud is instance-create $ZVSI_INSTANCE_NAME ${{ secrets.ZVSI_VPC }} $ZVSI_ZONE_NAME $ZVSI_PROFILE_NAME ${{ secrets.ZVSI_SUBNET }} --image ${{ secrets.ZVSI_IMAGE }} --keys ${{ secrets.ZVSI_KEY }} --resource-group-id ${{ secrets.ZVSI_RG_ID }} --primary-network-interface "{\"name\":\"eth0\",\"allow_ip_spoofing\":false,\"subnet\": {\"name\":\"${{ secrets.ZVSI_SUBNET }}\"},\"security_groups\":[{\"id\":\"${{ secrets.ZVSI_SG }}\"}]}" ibmcloud is instance-create $ZVSI_INSTANCE_NAME ${{ secrets.ZVSI_VPC }} $ZVSI_ZONE_NAME $ZVSI_PROFILE_NAME ${{ secrets.ZVSI_SUBNET }} --image ${{ secrets.ZVSI_IMAGE }} --keys ${{ secrets.ZVSI_KEY }} --resource-group-id ${{ secrets.ZVSI_RG_ID }} --sgs ${{ secrets.ZVSI_SG }}
#Reserving a floating ip to the ZVSI #Reserving a floating ip to the ZVSI
ibmcloud is floating-ip-reserve $ZVSI_FP_NAME --zone $ZVSI_ZONE_NAME --resource-group-id ${{ secrets.ZVSI_RG_ID }} --in $ZVSI_INSTANCE_NAME ibmcloud is floating-ip-reserve $ZVSI_FP_NAME --zone $ZVSI_ZONE_NAME --resource-group-id ${{ secrets.ZVSI_RG_ID }} --in $ZVSI_INSTANCE_NAME
#Binding the Floating ip to the ZVSI #Binding the Floating ip to the ZVSI
ibmcloud is floating-ip-update $ZVSI_FP_NAME --nic eth0 --in $ZVSI_INSTANCE_NAME ibmcloud is floating-ip-update $ZVSI_FP_NAME --nic primary --in $ZVSI_INSTANCE_NAME
sleep 60 sleep 60
#Saving the Floating IP to login ZVSI #Saving the Floating IP to login ZVSI
ZVSI_HOST=$(ibmcloud is floating-ip $ZVSI_FP_NAME | awk '/Address/{print $2}') ZVSI_HOST=$(ibmcloud is floating-ip $ZVSI_FP_NAME | awk '/Address/{print $2}')
@ -55,7 +55,7 @@ jobs:
fi fi
done done
- name: Install dependencies and run all tests on s390x ZVSI - name: Install dependencies and run all tests on s390x ZVSI
uses: appleboy/ssh-action@v1.2.2 uses: appleboy/ssh-action@v1.0.3
env: env:
GH_REPOSITORY: ${{ github.server_url }}/${{ github.repository }} GH_REPOSITORY: ${{ github.server_url }}/${{ github.repository }}
GH_REF: ${{ github.ref }} GH_REF: ${{ github.ref }}
@ -68,8 +68,8 @@ jobs:
script: | script: |
apt-get update -y apt-get update -y
apt-get install -y wget curl git make gcc jq docker.io apt-get install -y wget curl git make gcc jq docker.io
wget https://go.dev/dl/go1.24.6.linux-s390x.tar.gz wget https://go.dev/dl/go1.22.2.linux-s390x.tar.gz
rm -rf /usr/local/go && tar -C /usr/local -xzf go1.24.6.linux-s390x.tar.gz rm -rf /usr/local/go && tar -C /usr/local -xzf go1.22.2.linux-s390x.tar.gz
export PATH=$PATH:/usr/local/go/bin export PATH=$PATH:/usr/local/go/bin
git clone ${GH_REPOSITORY} lifecycle git clone ${GH_REPOSITORY} lifecycle
cd lifecycle && git checkout ${GH_REF} cd lifecycle && git checkout ${GH_REF}

View File

@ -1,5 +1,3 @@
ignore: ignore:
- vulnerability: CVE-2015-5237 # false positive, see https://github.com/anchore/grype/issues/558 - vulnerability: CVE-2015-5237 # false positive, see https://github.com/anchore/grype/issues/558
- vulnerability: CVE-2021-22570 # false positive, see https://github.com/anchore/grype/issues/558 - vulnerability: CVE-2021-22570 # false positive, see https://github.com/anchore/grype/issues/558
- vulnerability: CVE-2024-41110 # non-impactful as we only use docker as a client
- vulnerability: GHSA-v23v-6jw2-98fq # non-impactful as we only use docker as a client

View File

@ -32,6 +32,17 @@
] ]
``` ```
* Some of the Windows acceptance tests use license restricted base images. By default, the docker daemon will not publish layers from these images when pushing to a registry which can result in test failures with error messages such as: `Ignoring image "X" because it was corrupt`. To fix these failures you must [enable pushing nondistributable artifacts](https://docs.docker.com/engine/reference/commandline/dockerd/#allow-push-of-nondistributable-artifacts) to the test registry by adding the following to your Docker Desktop Engine config:
* `%programdata%\docker\config\daemon.json`:
```
{
"allow-nondistributable-artifacts": [
"<my-host-ip>/32"
]
}
```
### Testing GitHub actions on forks ### Testing GitHub actions on forks
The lifecycle release process involves chaining a series of GitHub actions together such that: The lifecycle release process involves chaining a series of GitHub actions together such that:

View File

@ -7,6 +7,7 @@ This image is maintained by the [Cloud Native Buildpacks project](https://buildp
Supported tags are semver-versioned manifest lists - e.g., `0.12.0` or `0.12.0-rc.1`, pointing to one of the following os/architectures: Supported tags are semver-versioned manifest lists - e.g., `0.12.0` or `0.12.0-rc.1`, pointing to one of the following os/architectures:
* `linux/amd64` * `linux/amd64`
* `linux/arm64` * `linux/arm64`
* `windows/amd64`
# About this image # About this image

316
Makefile
View File

@ -30,6 +30,7 @@ LDFLAGS+=-X 'github.com/buildpacks/lifecycle/cmd.Version=$(LIFECYCLE_VERSION)'
GOBUILD:=go build $(GOFLAGS) -ldflags "$(LDFLAGS)" GOBUILD:=go build $(GOFLAGS) -ldflags "$(LDFLAGS)"
GOTEST=$(GOCMD) test $(GOFLAGS) GOTEST=$(GOCMD) test $(GOFLAGS)
BUILD_DIR?=$(PWD)$/out BUILD_DIR?=$(PWD)$/out
WINDOWS_COMPILATION_IMAGE?=golang:1.22-windowsservercore-1809
SOURCE_COMPILATION_IMAGE?=lifecycle-img SOURCE_COMPILATION_IMAGE?=lifecycle-img
BUILD_CTR?=lifecycle-ctr BUILD_CTR?=lifecycle-ctr
DOCKER_CMD?=make test DOCKER_CMD?=make test
@ -38,9 +39,13 @@ GOFILES := $(shell $(GOCMD) run tools$/lister$/main.go)
all: test build package all: test build package
GOOS_ARCHS = linux/amd64 linux/arm64 linux/ppc64le linux/s390x darwin/amd64 darwin/arm64 build: build-linux-amd64 build-linux-arm64 build-windows-amd64 build-linux-ppc64le build-linux-s390x
build: build-linux-amd64 build-linux-arm64 build-linux-ppc64le build-linux-s390x build-linux-amd64: build-linux-amd64-lifecycle build-linux-amd64-symlinks build-linux-amd64-launcher
build-linux-arm64: build-linux-arm64-lifecycle build-linux-arm64-symlinks build-linux-arm64-launcher
build-windows-amd64: build-windows-amd64-lifecycle build-windows-amd64-symlinks build-windows-amd64-launcher
build-linux-ppc64le: build-linux-ppc64le-lifecycle build-linux-ppc64le-symlinks build-linux-ppc64le-launcher
build-linux-s390x: build-linux-s390x-lifecycle build-linux-s390x-symlinks build-linux-s390x-launcher
build-image-linux-amd64: build-linux-amd64 package-linux-amd64 build-image-linux-amd64: build-linux-amd64 package-linux-amd64
build-image-linux-amd64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.x86-64.tgz build-image-linux-amd64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.x86-64.tgz
@ -52,6 +57,11 @@ build-image-linux-arm64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSIO
build-image-linux-arm64: build-image-linux-arm64:
$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch arm64 -tag lifecycle:$(LIFECYCLE_IMAGE_TAG) $(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch arm64 -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
build-image-windows-amd64: build-windows-amd64 package-windows-amd64
build-image-windows-amd64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+windows.x86-64.tgz
build-image-windows-amd64:
$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os windows -arch amd64 -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
build-image-linux-ppc64le: build-linux-ppc64le package-linux-ppc64le build-image-linux-ppc64le: build-linux-ppc64le package-linux-ppc64le
build-image-linux-ppc64le: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.ppc64le.tgz build-image-linux-ppc64le: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.ppc64le.tgz
build-image-linux-ppc64le: build-image-linux-ppc64le:
@ -62,50 +72,240 @@ build-image-linux-s390x: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSIO
build-image-linux-s390x: build-image-linux-s390x:
$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch s390x -tag lifecycle:$(LIFECYCLE_IMAGE_TAG) $(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch s390x -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
define build_targets build-linux-amd64-lifecycle: $(BUILD_DIR)/linux-amd64/lifecycle/lifecycle
build-$(1)-$(2): build-$(1)-$(2)-lifecycle build-$(1)-$(2)-symlinks build-$(1)-$(2)-launcher
build-$(1)-$(2)-lifecycle: $(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle build-linux-arm64-lifecycle: $(BUILD_DIR)/linux-arm64/lifecycle/lifecycle
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: export GOOS:=$(1) build-linux-ppc64le-lifecycle: $(BUILD_DIR)/linux-ppc64le/lifecycle/lifecycle
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: export GOARCH:=$(2)
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: OUT_DIR?=$$(BUILD_DIR)/$$(GOOS)-$$(GOARCH)/lifecycle
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: $$(GOFILES)
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle:
@echo "> Building lifecycle/lifecycle for $$(GOOS)/$$(GOARCH)..."
mkdir -p $$(OUT_DIR)
$$(GOENV) $$(GOBUILD) -o $$(OUT_DIR)/lifecycle -a ./cmd/lifecycle
build-$(1)-$(2)-symlinks: export GOOS:=$(1) build-linux-s390x-lifecycle: $(BUILD_DIR)/linux-s390x/lifecycle/lifecycle
build-$(1)-$(2)-symlinks: export GOARCH:=$(2)
build-$(1)-$(2)-symlinks: OUT_DIR?=$$(BUILD_DIR)/$$(GOOS)-$$(GOARCH)/lifecycle
build-$(1)-$(2)-symlinks:
@echo "> Creating phase symlinks for $$(GOOS)/$$(GOARCH)..."
ln -sf lifecycle $$(OUT_DIR)/detector
ln -sf lifecycle $$(OUT_DIR)/analyzer
ln -sf lifecycle $$(OUT_DIR)/restorer
ln -sf lifecycle $$(OUT_DIR)/builder
ln -sf lifecycle $$(OUT_DIR)/exporter
ln -sf lifecycle $$(OUT_DIR)/rebaser
ln -sf lifecycle $$(OUT_DIR)/creator
ln -sf lifecycle $$(OUT_DIR)/extender
build-$(1)-$(2)-launcher: $$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher $(BUILD_DIR)/linux-amd64/lifecycle/lifecycle: export GOOS:=linux
$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle: export GOARCH:=amd64
$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle: $(GOFILES)
$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle:
@echo "> Building lifecycle/lifecycle for $(GOOS)/$(GOARCH)..."
mkdir -p $(OUT_DIR)
$(GOENV) $(GOBUILD) -o $(OUT_DIR)/lifecycle -a ./cmd/lifecycle
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: export GOOS:=$(1) $(BUILD_DIR)/linux-arm64/lifecycle/lifecycle: export GOOS:=linux
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: export GOARCH:=$(2) $(BUILD_DIR)/linux-arm64/lifecycle/lifecycle: export GOARCH:=arm64
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: OUT_DIR?=$$(BUILD_DIR)/$$(GOOS)-$$(GOARCH)/lifecycle $(BUILD_DIR)/linux-arm64/lifecycle/lifecycle: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: $$(GOFILES) $(BUILD_DIR)/linux-arm64/lifecycle/lifecycle: $(GOFILES)
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: $(BUILD_DIR)/linux-arm64/lifecycle/lifecycle:
@echo "> Building lifecycle/launcher for $$(GOOS)/$$(GOARCH)..." @echo "> Building lifecycle/lifecycle for $(GOOS)/$(GOARCH)..."
mkdir -p $$(OUT_DIR) mkdir -p $(OUT_DIR)
$$(GOENV) $$(GOBUILD) -o $$(OUT_DIR)/launcher -a ./cmd/launcher $(GOENV) $(GOBUILD) -o $(OUT_DIR)/lifecycle -a ./cmd/lifecycle
test $$$$(du -m $$(OUT_DIR)/launcher|cut -f 1) -le 3
endef
$(foreach ga,$(GOOS_ARCHS),$(eval $(call build_targets,$(word 1, $(subst /, ,$(ga))),$(word 2, $(subst /, ,$(ga)))))) $(BUILD_DIR)/linux-ppc64le/lifecycle/lifecycle: export GOOS:=linux
$(BUILD_DIR)/linux-ppc64le/lifecycle/lifecycle: export GOARCH:=ppc64le
$(BUILD_DIR)/linux-ppc64le/lifecycle/lifecycle: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$(BUILD_DIR)/linux-ppc64le/lifecycle/lifecycle: $(GOFILES)
$(BUILD_DIR)/linux-ppc64le/lifecycle/lifecycle:
@echo "> Building lifecycle/lifecycle for $(GOOS)/$(GOARCH)..."
mkdir -p $(OUT_DIR)
$(GOENV) $(GOBUILD) -o $(OUT_DIR)/lifecycle -a ./cmd/lifecycle
generate-sbom: run-syft-linux-amd64 run-syft-linux-arm64 run-syft-linux-ppc64le run-syft-linux-s390x $(BUILD_DIR)/linux-s390x/lifecycle/lifecycle: export GOOS:=linux
$(BUILD_DIR)/linux-s390x/lifecycle/lifecycle: export GOARCH:=s390x
$(BUILD_DIR)/linux-s390x/lifecycle/lifecycle: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$(BUILD_DIR)/linux-s390x/lifecycle/lifecycle: $(GOFILES)
$(BUILD_DIR)/linux-s390x/lifecycle/lifecycle:
@echo "> Building lifecycle/lifecycle for $(GOOS)/$(GOARCH)..."
mkdir -p $(OUT_DIR)
$(GOENV) $(GOBUILD) -o $(OUT_DIR)/lifecycle -a ./cmd/lifecycle
build-linux-amd64-launcher: $(BUILD_DIR)/linux-amd64/lifecycle/launcher
$(BUILD_DIR)/linux-amd64/lifecycle/launcher: export GOOS:=linux
$(BUILD_DIR)/linux-amd64/lifecycle/launcher: export GOARCH:=amd64
$(BUILD_DIR)/linux-amd64/lifecycle/launcher: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$(BUILD_DIR)/linux-amd64/lifecycle/launcher: $(GOFILES)
$(BUILD_DIR)/linux-amd64/lifecycle/launcher:
@echo "> Building lifecycle/launcher for $(GOOS)/$(GOARCH)..."
mkdir -p $(OUT_DIR)
$(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher
test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 3
build-linux-arm64-launcher: $(BUILD_DIR)/linux-arm64/lifecycle/launcher
$(BUILD_DIR)/linux-arm64/lifecycle/launcher: export GOOS:=linux
$(BUILD_DIR)/linux-arm64/lifecycle/launcher: export GOARCH:=arm64
$(BUILD_DIR)/linux-arm64/lifecycle/launcher: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$(BUILD_DIR)/linux-arm64/lifecycle/launcher: $(GOFILES)
$(BUILD_DIR)/linux-arm64/lifecycle/launcher:
@echo "> Building lifecycle/launcher for $(GOOS)/$(GOARCH)..."
mkdir -p $(OUT_DIR)
$(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher
test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 3
build-linux-ppc64le-launcher: $(BUILD_DIR)/linux-ppc64le/lifecycle/launcher
$(BUILD_DIR)/linux-ppc64le/lifecycle/launcher: export GOOS:=linux
$(BUILD_DIR)/linux-ppc64le/lifecycle/launcher: export GOARCH:=ppc64le
$(BUILD_DIR)/linux-ppc64le/lifecycle/launcher: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$(BUILD_DIR)/linux-ppc64le/lifecycle/launcher: $(GOFILES)
$(BUILD_DIR)/linux-ppc64le/lifecycle/launcher:
@echo "> Building lifecycle/launcher for $(GOOS)/$(GOARCH)..."
mkdir -p $(OUT_DIR)
$(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher
test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 3
build-linux-s390x-launcher: $(BUILD_DIR)/linux-s390x/lifecycle/launcher
$(BUILD_DIR)/linux-s390x/lifecycle/launcher: export GOOS:=linux
$(BUILD_DIR)/linux-s390x/lifecycle/launcher: export GOARCH:=s390x
$(BUILD_DIR)/linux-s390x/lifecycle/launcher: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
$(BUILD_DIR)/linux-s390x/lifecycle/launcher: $(GOFILES)
$(BUILD_DIR)/linux-s390x/lifecycle/launcher:
@echo "> Building lifecycle/launcher for $(GOOS)/$(GOARCH)..."
mkdir -p $(OUT_DIR)
$(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher
test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 3
build-linux-amd64-symlinks: export GOOS:=linux
build-linux-amd64-symlinks: export GOARCH:=amd64
build-linux-amd64-symlinks: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
build-linux-amd64-symlinks:
@echo "> Creating phase symlinks for $(GOOS)/$(GOARCH)..."
ln -sf lifecycle $(OUT_DIR)/detector
ln -sf lifecycle $(OUT_DIR)/analyzer
ln -sf lifecycle $(OUT_DIR)/restorer
ln -sf lifecycle $(OUT_DIR)/builder
ln -sf lifecycle $(OUT_DIR)/exporter
ln -sf lifecycle $(OUT_DIR)/rebaser
ln -sf lifecycle $(OUT_DIR)/creator
ln -sf lifecycle $(OUT_DIR)/extender
build-linux-arm64-symlinks: export GOOS:=linux
build-linux-arm64-symlinks: export GOARCH:=arm64
build-linux-arm64-symlinks: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
build-linux-arm64-symlinks:
@echo "> Creating phase symlinks for $(GOOS)/$(GOARCH)..."
ln -sf lifecycle $(OUT_DIR)/detector
ln -sf lifecycle $(OUT_DIR)/analyzer
ln -sf lifecycle $(OUT_DIR)/restorer
ln -sf lifecycle $(OUT_DIR)/builder
ln -sf lifecycle $(OUT_DIR)/exporter
ln -sf lifecycle $(OUT_DIR)/rebaser
ln -sf lifecycle $(OUT_DIR)/creator
ln -sf lifecycle $(OUT_DIR)/extender
build-linux-ppc64le-symlinks: export GOOS:=linux
build-linux-ppc64le-symlinks: export GOARCH:=ppc64le
build-linux-ppc64le-symlinks: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
build-linux-ppc64le-symlinks:
@echo "> Creating phase symlinks for $(GOOS)/$(GOARCH)..."
ln -sf lifecycle $(OUT_DIR)/detector
ln -sf lifecycle $(OUT_DIR)/analyzer
ln -sf lifecycle $(OUT_DIR)/restorer
ln -sf lifecycle $(OUT_DIR)/builder
ln -sf lifecycle $(OUT_DIR)/exporter
ln -sf lifecycle $(OUT_DIR)/rebaser
ln -sf lifecycle $(OUT_DIR)/creator
ln -sf lifecycle $(OUT_DIR)/extender
build-linux-s390x-symlinks: export GOOS:=linux
build-linux-s390x-symlinks: export GOARCH:=s390x
build-linux-s390x-symlinks: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
build-linux-s390x-symlinks:
@echo "> Creating phase symlinks for $(GOOS)/$(GOARCH)..."
ln -sf lifecycle $(OUT_DIR)/detector
ln -sf lifecycle $(OUT_DIR)/analyzer
ln -sf lifecycle $(OUT_DIR)/restorer
ln -sf lifecycle $(OUT_DIR)/builder
ln -sf lifecycle $(OUT_DIR)/exporter
ln -sf lifecycle $(OUT_DIR)/rebaser
ln -sf lifecycle $(OUT_DIR)/creator
ln -sf lifecycle $(OUT_DIR)/extender
build-windows-amd64-lifecycle: $(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe
$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe: export GOOS:=windows
$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe: export GOARCH:=amd64
$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle
$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe: $(GOFILES)
$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe:
@echo "> Building lifecycle/lifecycle for $(GOOS)/$(GOARCH)..."
$(GOBUILD) -o $(OUT_DIR)$/lifecycle.exe -a .$/cmd$/lifecycle
build-windows-amd64-launcher: $(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe
$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: export GOOS:=windows
$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: export GOARCH:=amd64
$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle
$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: $(GOFILES)
$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe:
@echo "> Building lifecycle/launcher for $(GOOS)/$(GOARCH)..."
$(GOBUILD) -o $(OUT_DIR)$/launcher.exe -a .$/cmd$/launcher
build-windows-amd64-symlinks: export GOOS:=windows
build-windows-amd64-symlinks: export GOARCH:=amd64
build-windows-amd64-symlinks: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle
build-windows-amd64-symlinks:
@echo "> Creating phase symlinks for Windows..."
ifeq ($(OS),Windows_NT)
call del $(OUT_DIR)$/detector.exe
call del $(OUT_DIR)$/analyzer.exe
call del $(OUT_DIR)$/restorer.exe
call del $(OUT_DIR)$/builder.exe
call del $(OUT_DIR)$/exporter.exe
call del $(OUT_DIR)$/rebaser.exe
call del $(OUT_DIR)$/creator.exe
call mklink $(OUT_DIR)$/detector.exe lifecycle.exe
call mklink $(OUT_DIR)$/analyzer.exe lifecycle.exe
call mklink $(OUT_DIR)$/restorer.exe lifecycle.exe
call mklink $(OUT_DIR)$/builder.exe lifecycle.exe
call mklink $(OUT_DIR)$/exporter.exe lifecycle.exe
call mklink $(OUT_DIR)$/rebaser.exe lifecycle.exe
call mklink $(OUT_DIR)$/creator.exe lifecycle.exe
else
ln -sf lifecycle.exe $(OUT_DIR)$/detector.exe
ln -sf lifecycle.exe $(OUT_DIR)$/analyzer.exe
ln -sf lifecycle.exe $(OUT_DIR)$/restorer.exe
ln -sf lifecycle.exe $(OUT_DIR)$/builder.exe
ln -sf lifecycle.exe $(OUT_DIR)$/exporter.exe
ln -sf lifecycle.exe $(OUT_DIR)$/rebaser.exe
ln -sf lifecycle.exe $(OUT_DIR)$/creator.exe
endif
## DARWIN ARM64/AMD64
include lifecycle.mk
include launcher.mk
build-darwin-arm64: build-darwin-arm64-lifecycle build-darwin-arm64-launcher
build-darwin-arm64-lifecycle:
$(eval GOARCH := arm64)
$(eval TARGET := darwin-arm64)
$(eval OUT_DIR := $(BUILD_DIR)/$(TARGET)/lifecycle)
$(call build_lifecycle)
build-darwin-arm64-launcher:
$(eval GOARCH := arm64)
$(eval TARGET := darwin-arm64)
$(eval OUT_DIR := $(BUILD_DIR)/$(TARGET)/lifecycle)
$(call build_launcher)
build-darwin-amd64: build-darwin-amd64-lifecycle build-darwin-amd64-launcher
build-darwin-amd64-lifecycle:
$(eval GOARCH := amd64)
$(eval TARGET := darwin-amd64)
$(eval OUT_DIR := $(BUILD_DIR)/$(TARGET)/lifecycle)
$(call build_lifecycle)
build-darwin-amd64-launcher:
$(eval GOARCH := amd64)
$(eval TARGET := darwin-amd64)
$(eval OUT_DIR := $(BUILD_DIR)/$(TARGET)/lifecycle)
$(call build_launcher)
generate-sbom: run-syft-windows run-syft-linux-amd64 run-syft-linux-arm64 run-syft-linux-ppc64le run-syft-linux-s390x
run-syft-windows: install-syft
run-syft-windows: export GOOS:=windows
run-syft-windows: export GOARCH:=amd64
run-syft-windows:
@echo "> Running syft..."
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.exe -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.cdx.json
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.exe -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.cdx.json
run-syft-linux-amd64: install-syft run-syft-linux-amd64: install-syft
run-syft-linux-amd64: export GOOS:=linux run-syft-linux-amd64: export GOOS:=linux
@ -143,26 +343,21 @@ install-syft:
@echo "> Installing syft..." @echo "> Installing syft..."
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin
define install-go-tool
@echo "> Installing $(1)..."
$(GOCMD) install $(1)@$(shell $(GOCMD) list -m -f '{{.Version}}' $(2))
endef
install-goimports: install-goimports:
@echo "> Installing goimports..." @echo "> Installing goimports..."
$(call install-go-tool,golang.org/x/tools/cmd/goimports,golang.org/x/tools) $(GOCMD) install golang.org/x/tools/cmd/goimports@v0.1.2
install-yj: install-yj:
@echo "> Installing yj..." @echo "> Installing yj..."
$(call install-go-tool,github.com/sclevine/yj,github.com/sclevine/yj) $(GOCMD) install github.com/sclevine/yj@v0.0.0-20210612025309-737bdf40a5d1
install-mockgen: install-mockgen:
@echo "> Installing mockgen..." @echo "> Installing mockgen..."
$(call install-go-tool,github.com/golang/mock/mockgen,github.com/golang/mock) $(GOCMD) install github.com/golang/mock/mockgen@v1.5.0
install-golangci-lint: install-golangci-lint:
@echo "> Installing golangci-lint..." @echo "> Installing golangci-lint..."
$(call install-go-tool,github.com/golangci/golangci-lint/v2/cmd/golangci-lint,github.com/golangci/golangci-lint/v2) $(GOCMD) install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.57.2
lint: install-golangci-lint lint: install-golangci-lint
@echo "> Linting code..." @echo "> Linting code..."
@ -205,7 +400,7 @@ clean:
@echo "> Cleaning workspace..." @echo "> Cleaning workspace..."
rm -rf $(BUILD_DIR) rm -rf $(BUILD_DIR)
package: generate-sbom package-linux-amd64 package-linux-arm64 package-linux-ppc64le package-linux-s390x package: generate-sbom package-linux-amd64 package-linux-arm64 package-windows-amd64 package-linux-ppc64le package-linux-s390x
package-linux-amd64: GOOS:=linux package-linux-amd64: GOOS:=linux
package-linux-amd64: GOARCH:=amd64 package-linux-amd64: GOARCH:=amd64
@ -242,3 +437,26 @@ package-linux-s390x: PACKAGER=./tools/packager/main.go
package-linux-s390x: package-linux-s390x:
@echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..." @echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..."
$(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION) $(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION)
package-windows-amd64: GOOS:=windows
package-windows-amd64: GOARCH:=amd64
package-windows-amd64: INPUT_DIR:=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle
package-windows-amd64: ARCHIVE_PATH=$(BUILD_DIR)$/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).x86-64.tgz
package-windows-amd64: PACKAGER=.$/tools$/packager$/main.go
package-windows-amd64:
@echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..."
$(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION)
# Ensure workdir is clean and build image from .git
docker-build-source-image-windows: $(GOFILES)
docker-build-source-image-windows:
$(if $(shell git status --short), @echo Uncommitted changes. Refusing to run. && exit 1)
docker build .git -f tools/Dockerfile.windows --tag $(SOURCE_COMPILATION_IMAGE) --build-arg image_tag=$(WINDOWS_COMPILATION_IMAGE) --cache-from=$(SOURCE_COMPILATION_IMAGE) --isolation=process --compress
docker-run-windows: docker-build-source-image-windows
docker-run-windows:
@echo "> Running '$(DOCKER_CMD)' in docker windows..."
@docker volume rm -f lifecycle-out
docker run -v lifecycle-out:c:/lifecycle/out -e LIFECYCLE_VERSION -e PLATFORM_API -e BUILDPACK_API -v gopathcache:c:/gopath -v '\\.\pipe\docker_engine:\\.\pipe\docker_engine' --isolation=process --interactive --tty --rm $(SOURCE_COMPILATION_IMAGE) $(DOCKER_CMD)
docker run -v lifecycle-out:c:/lifecycle/out --rm $(SOURCE_COMPILATION_IMAGE) tar -cf- out | tar -xf-
@docker volume rm -f lifecycle-out

View File

@ -11,7 +11,6 @@ A reference implementation of the [Cloud Native Buildpacks specification](https:
## Supported APIs ## Supported APIs
| Lifecycle Version | Platform APIs | Buildpack APIs | | Lifecycle Version | Platform APIs | Buildpack APIs |
|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------| |-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------|
| 0.20.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12], [0.13][p/0.13], [0.14][p/0.14] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10], [0.11][b/0.11] |
| 0.19.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12], [0.13][p/0.13] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10], [0.11][b/0.11] | | 0.19.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12], [0.13][p/0.13] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10], [0.11][b/0.11] |
| 0.18.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10] | | 0.18.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10] |
| 0.17.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10] | | 0.17.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10] |
@ -42,7 +41,6 @@ A reference implementation of the [Cloud Native Buildpacks specification](https:
[p/0.11]: https://github.com/buildpacks/spec/blob/platform/v0.11/platform.md [p/0.11]: https://github.com/buildpacks/spec/blob/platform/v0.11/platform.md
[p/0.12]: https://github.com/buildpacks/spec/blob/platform/v0.12/platform.md [p/0.12]: https://github.com/buildpacks/spec/blob/platform/v0.12/platform.md
[p/0.13]: https://github.com/buildpacks/spec/blob/platform/v0.13/platform.md [p/0.13]: https://github.com/buildpacks/spec/blob/platform/v0.13/platform.md
[p/0.14]: https://github.com/buildpacks/spec/blob/platform/v0.14/platform.md
\* denotes unreleased version \* denotes unreleased version

View File

@ -1,73 +1,22 @@
# Release Finalization ## Release Finalization
## Types of releases To cut a pre-release:
#### New minor
* For newly supported Platform or Buildpack API versions, or breaking changes (e.g., API deprecations).
#### Pre-release aka release candidate
* Ideally we should ship a pre-release (waiting a few days for folks to try it out) before we ship a new minor.
* We typically don't ship pre-releases for patches or backports.
#### New patch
* For go version updates, CVE fixes / dependency bumps, bug fixes, etc.
* Review the latest commits on `main` to determine if any are unacceptable for a patch - if there are commits that should be excluded, branch off the latest tag for the current minor and cherry-pick commits over.
#### Backport
* New patch for an old minor. Typically, to help folks out who haven't yet upgraded from [unsupported APIs](https://github.com/buildpacks/rfcs/blob/main/text/0110-deprecate-apis.md).
* For go version updates, CVE fixes / dependency bumps, bug fixes, etc.
* Branch off the latest tag for the desired minor.
## Release Finalization Steps
### Step 1 - Prepare
Determine the type of release ([new minor](#new-minor), [pre-release](#pre-release-aka-release-candidate), [new patch](#new-patch), or [backport](#backport)) and prepare the branch accordingly.
**To prepare the release branch:**
1. Check open PRs for any dependabot updates that should be merged.
1. Create a release branch in the format `release/0.99.0-rc.1` (for pre-releases) or `release/0.99.0` (for final releases).
* New commits to this branch will trigger the `build` workflow and produce a lifecycle image: `buildpacksio/lifecycle:<commit sha>`.
1. If applicable, ensure the README is updated with the latest supported apis (example PR: https://github.com/buildpacks/lifecycle/pull/550). 1. If applicable, ensure the README is updated with the latest supported apis (example PR: https://github.com/buildpacks/lifecycle/pull/550).
* For final releases (not pre-releases), remove the pre-release note (`*`) for the latest apis. 1. Create a release branch in the format `release/0.99.0-rc.1`. New commits to this branch will trigger the `build` workflow and produce a lifecycle image: `buildpacksio/lifecycle:<commit sha>`.
1. When ready to cut the release, manually trigger the `draft-release` workflow: Actions -> draft-release -> Run workflow -> Use workflow from branch: `release/0.99.0-rc.1`. This will create a draft release on GitHub using the artifacts from the `build` workflow run for the latest commit on the release branch.
1. Edit the release notes as necessary.
1. Perform any manual validation of the artifacts.
1. When ready to publish the release, edit the release page and click "Publish release". This will trigger the `post-release` workflow that will re-tag the lifecycle image from `buildpacksio/lifecycle:<commit sha>` to `buildpacksio/lifecycle:0.99.0` but will NOT update the `latest` tag.
**For final releases (not pre-releases):** To cut a release:
1. Ensure the relevant spec APIs have been released. 1. Ensure the relevant spec APIs have been released.
1. Ensure the `lifecycle/0.99.0` milestone on the [docs repo](https://github.com/buildpacks/docs/blob/main/RELEASE.md#lump-changes) is complete, such that every new feature in the lifecycle is fully explained in the `release/lifecycle/0.99` branch on the docs repo, and [migration guides](https://github.com/buildpacks/docs/tree/main/content/docs/reference/spec/migration) (if relevant) are included. 1. Ensure the `lifecycle/0.99.0` milestone on the [docs repo](https://github.com/buildpacks/docs/blob/main/RELEASE.md#lump-changes) is complete, such that every new feature in the lifecycle is fully explained in the `release/lifecycle/0.99` branch on the docs repo, and [migration guides](https://github.com/buildpacks/docs/tree/main/content/docs/reference/spec/migration) (if relevant) are included.
1. Create a release branch in the format `release/0.99.0`. New commits to this branch will trigger the `build` workflow and produce a lifecycle image: `buildpacksio/lifecycle:<commit sha>`.
### Step 2 - Publish the Release 1. If applicable, ensure the README is updated with the latest supported apis (example PR: https://github.com/buildpacks/lifecycle/pull/550) and remove the pre-release note for the latest apis.
1. When ready to cut the release, manually trigger the `draft-release` workflow: Actions -> draft-release -> Run workflow -> Use workflow from branch: `release/0.99.0`. This will create a draft release on GitHub using the artifacts from the `build` workflow run for the latest commit on the release branch.
1. Manually trigger the `draft-release` workflow: Actions -> draft-release -> Run workflow -> Use workflow from branch: `release/<release version>`. This will create a draft release on GitHub using the artifacts from the `build` workflow run for the latest commit on the release branch.
1. Edit the release notes as necessary. 1. Edit the release notes as necessary.
1. Perform any manual validation of the artifacts as necessary (usually none). 1. Perform any manual validation of the artifacts.
1. Edit the release page and click "Publish release". 1. When ready to publish the release, edit the release page and click "Publish release". This will trigger the `post-release` workflow that will re-tag the lifecycle image from `buildpacksio/lifecycle:<commit sha>` to `buildpacksio/lifecycle:0.99.0` and `buildpacksio/lifecycle:latest`.
* This will trigger the `post-release` workflow that will re-tag the lifecycle image from `buildpacksio/lifecycle:<commit sha>` to `buildpacksio/lifecycle:<release version>`. 1. Once released
* For final releases ONLY, this will also re-tag the lifecycle image from `buildpacksio/lifecycle:<commit sha>` to `buildpacksio/lifecycle:latest`. - Update the `main` branch to remove the pre-release note in [README.md](https://github.com/buildpacks/lifecycle/blob/main/README.md) and/or merge `release/0.99.0` into `main`.
- Ask the learning team to merge the `release/lifecycle/0.99` branch into `main` on the docs repo.
### Step 3 - Follow-up
**For pre-releases:**
* Ask the relevant teams to try out the pre-released artifacts.
**For final releases:**
* Update the `main` branch to remove the pre-release note in [README.md](https://github.com/buildpacks/lifecycle/blob/main/README.md) and/or merge `release/0.99.0` into `main`.
* Ask the learning team to merge the `release/lifecycle/0.99` branch into `main` on the docs repo.
## Go version updates
Go version updates should be released as a [new minor](#new-minor) or [new patch](#new-patch) release.
### New Patch
If the go patch is in [actions/go-versions](https://github.com/actions/go-versions/pulls?q=is%3Apr+is%3Aclosed) then CI should pull it in automatically without any action needed.
We simply need to create the release branch and let the pipeline run.
### New Minor
We typically do this when the existing patch version exceeds 6 - e.g., `1.22.6`. This means we have about 6 months to upgrade before the current minor becomes unsupported due to the introduction of the new n+2 minor.
#### Steps
1. Update go.mod
1. Search for the old `major.minor`, there are a few files that need to be updated (example PR: https://github.com/buildpacks/lifecycle/pull/1405/files)
1. Update the linter to a version that supports the current `major.minor`
1. Fix any lint errors as necessary

View File

@ -23,6 +23,7 @@ const (
var ( var (
latestPlatformAPI = api.Platform.Latest().String() latestPlatformAPI = api.Platform.Latest().String()
buildDir string buildDir string
cacheFixtureDir string
) )
func TestVersion(t *testing.T) { func TestVersion(t *testing.T) {

View File

@ -5,6 +5,7 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"strings" "strings"
"testing" "testing"
@ -36,6 +37,7 @@ func TestAnalyzer(t *testing.T) {
analyzeImage = analyzeTest.testImageRef analyzeImage = analyzeTest.testImageRef
analyzerPath = analyzeTest.containerBinaryPath analyzerPath = analyzeTest.containerBinaryPath
cacheFixtureDir = filepath.Join("testdata", "cache-dir")
analyzeRegAuthConfig = analyzeTest.targetRegistry.authConfig analyzeRegAuthConfig = analyzeTest.targetRegistry.authConfig
analyzeRegNetwork = analyzeTest.targetRegistry.network analyzeRegNetwork = analyzeTest.targetRegistry.network
analyzeDaemonFixtures = analyzeTest.targetDaemon.fixtures analyzeDaemonFixtures = analyzeTest.targetDaemon.fixtures
@ -127,6 +129,8 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
when("the provided layers directory isn't writeable", func() { when("the provided layers directory isn't writeable", func() {
it("recursively chowns the directory", func() { it("recursively chowns the directory", func() {
h.SkipIf(t, runtime.GOOS == "windows", "Not relevant on Windows")
analyzeFlags := []string{"-run-image", analyzeRegFixtures.ReadOnlyRunImage} analyzeFlags := []string{"-run-image", analyzeRegFixtures.ReadOnlyRunImage}
output := h.DockerRun(t, output := h.DockerRun(t,
@ -199,6 +203,8 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
}) })
it("drops privileges", func() { it("drops privileges", func() {
h.SkipIf(t, runtime.GOOS == "windows", "Not relevant on Windows")
analyzeArgs := []string{ analyzeArgs := []string{
"-analyzed", "/some-dir/some-analyzed.toml", "-analyzed", "/some-dir/some-analyzed.toml",
"-run-image", analyzeRegFixtures.ReadOnlyRunImage, "-run-image", analyzeRegFixtures.ReadOnlyRunImage,
@ -444,7 +450,7 @@ func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
output, err := cmd.CombinedOutput() output, err := cmd.CombinedOutput()
h.AssertNotNil(t, err) h.AssertNotNil(t, err)
expected := "ensure registry read/write access to " + analyzeRegFixtures.InaccessibleImage expected := "validating registry write access: ensure registry read/write access to " + analyzeRegFixtures.InaccessibleImage
h.AssertStringContains(t, string(output), expected) h.AssertStringContains(t, string(output), expected)
}) })
}) })

View File

@ -6,6 +6,7 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"testing" "testing"
"github.com/sclevine/spec" "github.com/sclevine/spec"
@ -24,6 +25,8 @@ var (
) )
func TestBuilder(t *testing.T) { func TestBuilder(t *testing.T) {
h.SkipIf(t, runtime.GOOS == "windows", "Builder acceptance tests are not yet supported on Windows")
info, err := h.DockerCli(t).Info(context.TODO()) info, err := h.DockerCli(t).Info(context.TODO())
h.AssertNil(t, err) h.AssertNil(t, err)

View File

@ -1,4 +1,5 @@
//go:build acceptance //go:build acceptance
// +build acceptance
package acceptance package acceptance
@ -6,6 +7,7 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"testing" "testing"
"time" "time"
@ -29,6 +31,8 @@ var (
) )
func TestCreator(t *testing.T) { func TestCreator(t *testing.T) {
h.SkipIf(t, runtime.GOOS == "windows", "Creator acceptance tests are not yet supported on Windows")
testImageDockerContext := filepath.Join("testdata", "creator") testImageDockerContext := filepath.Join("testdata", "creator")
createTest = NewPhaseTest(t, "creator", testImageDockerContext) createTest = NewPhaseTest(t, "creator", testImageDockerContext)
createTest.Start(t) createTest.Start(t)
@ -36,6 +40,7 @@ func TestCreator(t *testing.T) {
createImage = createTest.testImageRef createImage = createTest.testImageRef
creatorPath = createTest.containerBinaryPath creatorPath = createTest.containerBinaryPath
cacheFixtureDir = filepath.Join("testdata", "creator", "cache-dir")
createRegAuthConfig = createTest.targetRegistry.authConfig createRegAuthConfig = createTest.targetRegistry.authConfig
createRegNetwork = createTest.targetRegistry.network createRegNetwork = createTest.targetRegistry.network
createDaemonFixtures = createTest.targetDaemon.fixtures createDaemonFixtures = createTest.targetDaemon.fixtures

View File

@ -1,4 +1,5 @@
//go:build acceptance //go:build acceptance
// +build acceptance
package acceptance package acceptance
@ -8,6 +9,7 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"testing" "testing"
"github.com/sclevine/spec" "github.com/sclevine/spec"
@ -28,6 +30,8 @@ var (
) )
func TestDetector(t *testing.T) { func TestDetector(t *testing.T) {
h.SkipIf(t, runtime.GOOS == "windows", "Detector acceptance tests are not yet supported on Windows")
info, err := h.DockerCli(t).Info(context.TODO()) info, err := h.DockerCli(t).Info(context.TODO())
h.AssertNil(t, err) h.AssertNil(t, err)

View File

@ -1,17 +1,16 @@
//go:build acceptance //go:build acceptance
// +build acceptance
package acceptance package acceptance
import ( import (
"context" "context"
"crypto/sha256"
"encoding/hex"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"strings" "strings"
"testing" "testing"
"time" "time"
@ -19,7 +18,6 @@ import (
"github.com/buildpacks/imgutil" "github.com/buildpacks/imgutil"
"github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/v1/remote" "github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/pkg/errors"
"github.com/sclevine/spec" "github.com/sclevine/spec"
"github.com/sclevine/spec/report" "github.com/sclevine/spec/report"
@ -27,7 +25,6 @@ import (
"github.com/buildpacks/lifecycle/auth" "github.com/buildpacks/lifecycle/auth"
"github.com/buildpacks/lifecycle/cache" "github.com/buildpacks/lifecycle/cache"
"github.com/buildpacks/lifecycle/cmd" "github.com/buildpacks/lifecycle/cmd"
"github.com/buildpacks/lifecycle/internal/fsutil"
"github.com/buildpacks/lifecycle/internal/path" "github.com/buildpacks/lifecycle/internal/path"
"github.com/buildpacks/lifecycle/platform/files" "github.com/buildpacks/lifecycle/platform/files"
h "github.com/buildpacks/lifecycle/testhelpers" h "github.com/buildpacks/lifecycle/testhelpers"
@ -44,6 +41,8 @@ var (
) )
func TestExporter(t *testing.T) { func TestExporter(t *testing.T) {
h.SkipIf(t, runtime.GOOS == "windows", "Exporter acceptance tests are not yet supported on Windows")
testImageDockerContext := filepath.Join("testdata", "exporter") testImageDockerContext := filepath.Join("testdata", "exporter")
exportTest = NewPhaseTest(t, "exporter", testImageDockerContext) exportTest = NewPhaseTest(t, "exporter", testImageDockerContext)
@ -52,6 +51,7 @@ func TestExporter(t *testing.T) {
exportImage = exportTest.testImageRef exportImage = exportTest.testImageRef
exporterPath = exportTest.containerBinaryPath exporterPath = exportTest.containerBinaryPath
cacheFixtureDir = filepath.Join("testdata", "exporter", "cache-dir")
exportRegAuthConfig = exportTest.targetRegistry.authConfig exportRegAuthConfig = exportTest.targetRegistry.authConfig
exportRegNetwork = exportTest.targetRegistry.network exportRegNetwork = exportTest.targetRegistry.network
exportDaemonFixtures = exportTest.targetDaemon.fixtures exportDaemonFixtures = exportTest.targetDaemon.fixtures
@ -64,146 +64,147 @@ func TestExporter(t *testing.T) {
func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) { func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
return func(t *testing.T, when spec.G, it spec.S) { return func(t *testing.T, when spec.G, it spec.S) {
var exportedImageName string
it.After(func() {
_, _, _ = h.RunE(exec.Command("docker", "rmi", exportedImageName)) // #nosec G204
})
when("daemon case", func() { when("daemon case", func() {
var exportedImageName string when("first build", func() {
when("app", func() {
it("is created", func() {
exportFlags := []string{"-daemon", "-log-level", "debug"}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = "some-exported-image-" + h.RandString(10)
exportArgs = append(exportArgs, exportedImageName)
it.After(func() { output := h.DockerRun(t,
_, _, _ = h.RunE(exec.Command("docker", "rmi", exportedImageName)) // #nosec G204 exportImage,
}) h.WithFlags(append(
dockerSocketMount,
"--env", "CNB_PLATFORM_API="+platformAPI,
)...),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
it("app is created", func() { if api.MustParse(platformAPI).AtLeast("0.11") {
exportFlags := []string{"-daemon", "-log-level", "debug"} extensions := []string{"sbom.cdx.json", "sbom.spdx.json", "sbom.syft.json"}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) for _, extension := range extensions {
exportedImageName = "some-exported-image-" + h.RandString(10) h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM lifecycle.%s to %s", extension, filepath.Join(path.RootDir, "layers", "sbom", "build", "buildpacksio_lifecycle", extension)))
exportArgs = append(exportArgs, exportedImageName) h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM launcher.%s to %s", extension, filepath.Join(path.RootDir, "layers", "sbom", "launch", "buildpacksio_lifecycle", "launcher", extension)))
}
} else {
h.AssertStringDoesNotContain(t, output, "Copying SBOM")
}
output := h.DockerRun(t, if api.MustParse(platformAPI).AtLeast("0.12") {
exportImage, expectedHistory := []string{
h.WithFlags(append( "Buildpacks Launcher Config",
dockerSocketMount, "Buildpacks Application Launcher",
"--env", "CNB_PLATFORM_API="+platformAPI, "Application Layer",
)...), "Software Bill-of-Materials",
h.WithArgs(exportArgs...), "Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1",
) "", // run image layer
h.AssertStringContains(t, output, "Saving "+exportedImageName) }
assertDaemonImageHasHistory(t, exportedImageName, expectedHistory)
} else {
assertDaemonImageDoesNotHaveHistory(t, exportedImageName)
}
if api.MustParse(platformAPI).AtLeast("0.11") { assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
extensions := []string{"sbom.cdx.json", "sbom.spdx.json", "sbom.syft.json"} })
for _, extension := range extensions {
h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM lifecycle.%s to %s", extension, filepath.Join(path.RootDir, "layers", "sbom", "build", "buildpacksio_lifecycle", extension)))
h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM launcher.%s to %s", extension, filepath.Join(path.RootDir, "layers", "sbom", "launch", "buildpacksio_lifecycle", "launcher", extension)))
}
} else {
h.AssertStringDoesNotContain(t, output, "Copying SBOM")
}
if api.MustParse(platformAPI).AtLeast("0.12") {
expectedHistory := []string{
"Buildpacks Launcher Config",
"Buildpacks Application Launcher",
"Application Layer",
"Software Bill-of-Materials",
"Layer: 'corrupted-layer', Created by buildpack: corrupted_buildpack@corrupted_v1",
"Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1",
"", // run image layer
}
assertDaemonImageHasHistory(t, exportedImageName, expectedHistory)
} else {
assertDaemonImageDoesNotHaveHistory(t, exportedImageName)
}
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
})
when("using extensions", func() {
it.Before(func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
}) })
it("app is created from the extended run image", func() { when("using extensions", func() {
exportFlags := []string{ it.Before(func() {
"-analyzed", "/layers/run-image-extended-analyzed.toml", // though the run image is a registry image, it also exists in the daemon with the same tag h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
"-daemon", })
"-extended", "/layers/some-extended-dir",
"-log-level", "debug",
"-run", "/cnb/run.toml", // though the run image is a registry image, it also exists in the daemon with the same tag
}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = "some-exported-image-" + h.RandString(10)
exportArgs = append(exportArgs, exportedImageName)
// get run image top layer it("is created from the extended run image", func() {
inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportTest.targetRegistry.fixtures.ReadOnlyRunImage) exportFlags := []string{
h.AssertNil(t, err) "-analyzed", "/layers/run-image-extended-analyzed.toml", // though the run image is a registry image, it also exists in the daemon with the same tag
layers := inspect.RootFS.Layers "-daemon",
runImageFixtureTopLayerSHA := layers[len(layers)-1] "-extended", "/layers/some-extended-dir",
runImageFixtureSHA := inspect.ID "-log-level", "debug",
"-run", "/cnb/run.toml", // though the run image is a registry image, it also exists in the daemon with the same tag
}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = "some-exported-image-" + h.RandString(10)
exportArgs = append(exportArgs, exportedImageName)
experimentalMode := "warn" // get run image top layer
if api.MustParse(platformAPI).AtLeast("0.13") { inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportTest.targetRegistry.fixtures.ReadOnlyRunImage)
experimentalMode = "error" h.AssertNil(t, err)
} layers := inspect.RootFS.Layers
runImageFixtureTopLayerSHA := layers[len(layers)-1]
runImageFixtureSHA := inspect.ID
output := h.DockerRun(t, experimentalMode := "warn"
exportImage, if api.MustParse(platformAPI).AtLeast("0.13") {
h.WithFlags(append( experimentalMode = "error"
dockerSocketMount, }
"--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode,
"--env", "CNB_PLATFORM_API="+platformAPI,
)...),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) output := h.DockerRun(t,
expectedHistory := []string{ exportImage,
"Buildpacks Launcher Config", h.WithFlags(append(
"Buildpacks Application Launcher", dockerSocketMount,
"Application Layer", "--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode,
"Software Bill-of-Materials", "--env", "CNB_PLATFORM_API="+platformAPI,
"Layer: 'corrupted-layer', Created by buildpack: corrupted_buildpack@corrupted_v1", )...),
"Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1", h.WithArgs(exportArgs...),
"Layer: 'RUN mkdir /some-other-dir && echo some-data > /some-other-dir/some-file && echo some-data > /some-other-file', Created by extension: second-extension", )
"Layer: 'RUN mkdir /some-dir && echo some-data > /some-dir/some-file && echo some-data > /some-file', Created by extension: first-extension", h.AssertStringContains(t, output, "Saving "+exportedImageName)
"", // run image layer
} assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
assertDaemonImageHasHistory(t, exportedImageName, expectedHistory) expectedHistory := []string{
t.Log("bases the exported image on the extended run image") "Buildpacks Launcher Config",
inspect, _, err = h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportedImageName) "Buildpacks Application Launcher",
h.AssertNil(t, err) "Application Layer",
h.AssertEq(t, inspect.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<sha>/blobs/sha256/<config> "Software Bill-of-Materials",
t.Log("Adds extension layers") "Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1",
type testCase struct { "Layer: 'RUN mkdir /some-other-dir && echo some-data > /some-other-dir/some-file && echo some-data > /some-other-file', Created by extension: second-extension",
expectedDiffID string "Layer: 'RUN mkdir /some-dir && echo some-data > /some-dir/some-file && echo some-data > /some-file', Created by extension: first-extension",
layerIndex int "", // run image layer
} }
testCases := []testCase{ assertDaemonImageHasHistory(t, exportedImageName, expectedHistory)
{ t.Log("bases the exported image on the extended run image")
expectedDiffID: "sha256:fb54d2566824d6630d94db0b008d9a544a94d3547a424f52e2fd282b648c0601", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing and zeroing timestamps inspect, _, err = h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportedImageName)
layerIndex: 1, h.AssertNil(t, err)
}, h.AssertEq(t, inspect.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<sha>/blobs/sha256/<config>
{ t.Log("Adds extension layers")
expectedDiffID: "sha256:1018c7d3584c4f7fa3ef4486d1a6a11b93956b9d8bfe0898a3e0fbd248c984d8", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing and zeroing timestamps type testCase struct {
layerIndex: 2, expectedDiffID string
}, layerIndex int
} }
for _, tc := range testCases { testCases := []testCase{
h.AssertEq(t, inspect.RootFS.Layers[tc.layerIndex], tc.expectedDiffID) {
} expectedDiffID: "sha256:fb54d2566824d6630d94db0b008d9a544a94d3547a424f52e2fd282b648c0601", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing and zeroing timestamps
t.Log("sets the layers metadata label according to the new spec") layerIndex: 1,
var lmd files.LayersMetadata },
lmdJSON := inspect.Config.Labels["io.buildpacks.lifecycle.metadata"] {
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd)) expectedDiffID: "sha256:1018c7d3584c4f7fa3ef4486d1a6a11b93956b9d8bfe0898a3e0fbd248c984d8", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing and zeroing timestamps
h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml layerIndex: 2,
h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml },
h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA) }
h.AssertEq(t, lmd.RunImage.Reference, strings.TrimPrefix(runImageFixtureSHA, "sha256:")) for _, tc := range testCases {
h.AssertEq(t, inspect.RootFS.Layers[tc.layerIndex], tc.expectedDiffID)
}
t.Log("sets the layers metadata label according to the new spec")
var lmd files.LayersMetadata
lmdJSON := inspect.Config.Labels["io.buildpacks.lifecycle.metadata"]
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd))
h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml
h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml
h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA)
h.AssertEq(t, lmd.RunImage.Reference, strings.TrimPrefix(runImageFixtureSHA, "sha256:"))
})
}) })
}) })
when("SOURCE_DATE_EPOCH is set", func() { when("SOURCE_DATE_EPOCH is set", func() {
it("app is created with config CreatedAt set to SOURCE_DATE_EPOCH", func() { it("Image CreatedAt is set to SOURCE_DATE_EPOCH", func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9") h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9")
expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC) expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC)
exportFlags := []string{"-daemon"} exportFlags := []string{"-daemon"}
@ -230,93 +231,10 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
}) })
when("registry case", func() { when("registry case", func() {
var exportedImageName string when("first build", func() {
when("app", func() {
it.After(func() { it("is created", func() {
_, _, _ = h.RunE(exec.Command("docker", "rmi", exportedImageName)) // #nosec G204 var exportFlags []string
})
it("app is created", func() {
var exportFlags []string
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
})
when("registry is insecure", func() {
it.Before(func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
})
it("uses http protocol", func() {
var exportFlags []string
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-insecure-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
insecureRegistry := "host.docker.internal/bar"
insecureAnalyzed := "/layers/analyzed_insecure.toml"
_, _, err := h.DockerRunWithError(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_INSECURE_REGISTRIES="+insecureRegistry,
"--env", "CNB_ANALYZED_PATH="+insecureAnalyzed,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, err.Error(), "http://host.docker.internal")
})
})
when("SOURCE_DATE_EPOCH is set", func() {
it("app is created with config CreatedAt set to SOURCE_DATE_EPOCH", func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9")
expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC)
var exportFlags []string
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--env", "SOURCE_DATE_EPOCH="+fmt.Sprintf("%d", expectedTime.Unix()),
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, expectedTime)
})
})
// FIXME: move this out of the registry block
when("cache", func() {
when("image case", func() {
it("cache is created", func() {
cacheImageName := exportTest.RegRepoName("some-cache-image-" + h.RandString(10))
exportFlags := []string{"-cache-image", cacheImageName}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10)) exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName) exportArgs = append(exportArgs, exportedImageName)
@ -331,14 +249,92 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
h.WithArgs(exportArgs...), h.WithArgs(exportArgs...),
) )
h.AssertStringContains(t, output, "Saving "+exportedImageName) h.AssertStringContains(t, output, "Saving "+exportedImageName)
// To detect whether the export of cacheImage and exportedImage is successful
h.Run(t, exec.Command("docker", "pull", exportedImageName)) h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
h.Run(t, exec.Command("docker", "pull", cacheImageName)) })
})
when("app using insecure registry", func() {
it.Before(func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
}) })
when("parallel export is enabled", func() { it("does an http request", func() {
it("cache is created", func() { var exportFlags []string
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-insecure-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
insecureRegistry := "host.docker.internal/bar"
insecureAnalyzed := "/layers/analyzed_insecure.toml"
_, _, err := h.DockerRunWithError(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_INSECURE_REGISTRIES="+insecureRegistry,
"--env", "CNB_ANALYZED_PATH="+insecureAnalyzed,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, err.Error(), "http://host.docker.internal")
})
})
when("SOURCE_DATE_EPOCH is set", func() {
it("Image CreatedAt is set to SOURCE_DATE_EPOCH", func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9")
expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC)
var exportFlags []string
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--env", "SOURCE_DATE_EPOCH="+fmt.Sprintf("%d", expectedTime.Unix()),
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, expectedTime)
})
})
when("cache", func() {
when("cache image case", func() {
it("is created", func() {
cacheImageName := exportTest.RegRepoName("some-cache-image-" + h.RandString(10))
exportFlags := []string{"-cache-image", cacheImageName}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
// To detect whether the export of cacheImage and exportedImage is successful
h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
h.Run(t, exec.Command("docker", "pull", cacheImageName))
})
it("is created with parallel export enabled", func() {
cacheImageName := exportTest.RegRepoName("some-cache-image-" + h.RandString(10)) cacheImageName := exportTest.RegRepoName("some-cache-image-" + h.RandString(10))
exportFlags := []string{"-cache-image", cacheImageName, "-parallel"} exportFlags := []string{"-cache-image", cacheImageName, "-parallel"}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
@ -360,10 +356,8 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
h.Run(t, exec.Command("docker", "pull", cacheImageName)) h.Run(t, exec.Command("docker", "pull", cacheImageName))
}) })
})
when("cache is provided but no data was cached", func() { it("is created with empty layer", func() {
it("cache is created with an empty layer", func() {
cacheImageName := exportTest.RegRepoName("some-empty-cache-image-" + h.RandString(10)) cacheImageName := exportTest.RegRepoName("some-empty-cache-image-" + h.RandString(10))
exportFlags := []string{"-cache-image", cacheImageName, "-layers", "/other_layers"} exportFlags := []string{"-cache-image", cacheImageName, "-layers", "/other_layers"}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
@ -398,186 +392,108 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
}) })
}) })
when("directory case", func() { when("using extensions", func() {
when("original cache was corrupted", func() { it.Before(func() {
var cacheDir string h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
it.Before(func() {
var err error
cacheDir, err = os.MkdirTemp("", "cache")
h.AssertNil(t, err)
h.AssertNil(t, os.Chmod(cacheDir, 0777)) // Override umask
cacheFixtureDir := filepath.Join("testdata", "exporter", "cache-dir")
h.AssertNil(t, fsutil.Copy(cacheFixtureDir, cacheDir))
// We have to pre-create the tar files so that their digests do not change due to timestamps
// But, ':' in the filepath on Windows is not allowed
h.AssertNil(t, os.Rename(
filepath.Join(cacheDir, "committed", "sha256_258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar"),
filepath.Join(cacheDir, "committed", "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar"),
))
})
it.After(func() {
_ = os.RemoveAll(cacheDir)
})
it("overwrites the original layer", func() {
exportFlags := []string{
"-cache-dir", "/cache",
"-log-level", "debug",
}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
"--volume", fmt.Sprintf("%s:/cache", cacheDir),
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Skipping reuse for layer corrupted_buildpack:corrupted-layer: expected layer contents to have SHA 'sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59'; found 'sha256:9e0b77ed599eafdab8611f7eeefef084077f91f02f1da0a3870c7ff20a08bee8'")
h.AssertStringContains(t, output, "Saving "+exportedImageName)
h.Run(t, exec.Command("docker", "pull", exportedImageName))
defer h.Run(t, exec.Command("docker", "image", "rm", exportedImageName))
// Verify the app has the correct sha for the layer
inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportedImageName)
h.AssertNil(t, err)
var lmd files.LayersMetadata
lmdJSON := inspect.Config.Labels["io.buildpacks.lifecycle.metadata"]
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd))
h.AssertEq(t, lmd.Buildpacks[2].Layers["corrupted-layer"].SHA, "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59")
// Verify the cache has correct contents now
foundDiffID, err := func() (string, error) {
layerPath := filepath.Join(cacheDir, "committed", "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar")
layerRC, err := os.Open(layerPath)
if err != nil {
return "", err
}
defer func() {
_ = layerRC.Close()
}()
hasher := sha256.New()
if _, err = io.Copy(hasher, layerRC); err != nil {
return "", errors.Wrap(err, "hashing layer")
}
foundDiffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size())))
return foundDiffID, nil
}()
h.AssertNil(t, err)
h.AssertEq(t, foundDiffID, "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59")
})
}) })
})
})
when("using extensions", func() { it("is created from the extended run image", func() {
it.Before(func() { exportFlags := []string{
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "") "-analyzed", "/layers/run-image-extended-analyzed.toml",
}) "-extended", "/layers/some-extended-dir",
"-log-level", "debug",
"-run", "/cnb/run.toml",
}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
it("app is created from the extended run image", func() { // get run image SHA & top layer
exportFlags := []string{ ref, imageAuth, err := auth.ReferenceForRepoName(authn.DefaultKeychain, exportTest.targetRegistry.fixtures.ReadOnlyRunImage)
"-analyzed", "/layers/run-image-extended-analyzed.toml",
"-extended", "/layers/some-extended-dir",
"-log-level", "debug",
"-run", "/cnb/run.toml",
}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)
// get run image SHA & top layer
ref, imageAuth, err := auth.ReferenceForRepoName(authn.DefaultKeychain, exportTest.targetRegistry.fixtures.ReadOnlyRunImage)
h.AssertNil(t, err)
remoteImage, err := remote.Image(ref, remote.WithAuth(imageAuth))
h.AssertNil(t, err)
layers, err := remoteImage.Layers()
h.AssertNil(t, err)
runImageFixtureTopLayerSHA, err := layers[len(layers)-1].DiffID()
h.AssertNil(t, err)
runImageFixtureSHA, err := remoteImage.Digest()
h.AssertNil(t, err)
experimentalMode := "warn"
if api.MustParse(platformAPI).AtLeast("0.13") {
experimentalMode = "error"
}
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode,
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
t.Log("bases the exported image on the extended run image")
ref, imageAuth, err = auth.ReferenceForRepoName(authn.DefaultKeychain, exportedImageName)
h.AssertNil(t, err)
remoteImage, err = remote.Image(ref, remote.WithAuth(imageAuth))
h.AssertNil(t, err)
configFile, err := remoteImage.ConfigFile()
h.AssertNil(t, err)
h.AssertEq(t, configFile.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<sha>/blobs/sha256/<config>
t.Log("Adds extension layers")
layers, err = remoteImage.Layers()
h.AssertNil(t, err)
type testCase struct {
expectedDigest string
layerIndex int
}
testCases := []testCase{
{
expectedDigest: "sha256:08e7ad5ce17cf5e5f70affe68b341a93de86ee2ba074932c3a05b8770f66d772", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing, zeroing timestamps, and re-compressing
layerIndex: 1,
},
{
expectedDigest: "sha256:0e74ef444ea437147e3fa0ce2aad371df5380c26b96875ae07b9b67f44cdb2ee", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing, zeroing timestamps, and re-compressing
layerIndex: 2,
},
}
for _, tc := range testCases {
layer := layers[tc.layerIndex]
digest, err := layer.Digest()
h.AssertNil(t, err) h.AssertNil(t, err)
h.AssertEq(t, digest.String(), tc.expectedDigest) remoteImage, err := remote.Image(ref, remote.WithAuth(imageAuth))
} h.AssertNil(t, err)
t.Log("sets the layers metadata label according to the new spec") layers, err := remoteImage.Layers()
var lmd files.LayersMetadata h.AssertNil(t, err)
lmdJSON := configFile.Config.Labels["io.buildpacks.lifecycle.metadata"] runImageFixtureTopLayerSHA, err := layers[len(layers)-1].DiffID()
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd)) h.AssertNil(t, err)
h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml runImageFixtureSHA, err := remoteImage.Digest()
h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml h.AssertNil(t, err)
h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA.String())
h.AssertEq(t, lmd.RunImage.Reference, fmt.Sprintf("%s@%s", exportTest.targetRegistry.fixtures.ReadOnlyRunImage, runImageFixtureSHA.String())) experimentalMode := "warn"
if api.MustParse(platformAPI).AtLeast("0.13") {
experimentalMode = "error"
}
output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode,
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
t.Log("bases the exported image on the extended run image")
ref, imageAuth, err = auth.ReferenceForRepoName(authn.DefaultKeychain, exportedImageName)
h.AssertNil(t, err)
remoteImage, err = remote.Image(ref, remote.WithAuth(imageAuth))
h.AssertNil(t, err)
configFile, err := remoteImage.ConfigFile()
h.AssertNil(t, err)
h.AssertEq(t, configFile.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<sha>/blobs/sha256/<config>
t.Log("Adds extension layers")
layers, err = remoteImage.Layers()
h.AssertNil(t, err)
type testCase struct {
expectedDigest string
layerIndex int
}
testCases := []testCase{
{
expectedDigest: "sha256:08e7ad5ce17cf5e5f70affe68b341a93de86ee2ba074932c3a05b8770f66d772", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing, zeroing timestamps, and re-compressing
layerIndex: 1,
},
{
expectedDigest: "sha256:0e74ef444ea437147e3fa0ce2aad371df5380c26b96875ae07b9b67f44cdb2ee", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing, zeroing timestamps, and re-compressing
layerIndex: 2,
},
}
for _, tc := range testCases {
layer := layers[tc.layerIndex]
digest, err := layer.Digest()
h.AssertNil(t, err)
h.AssertEq(t, digest.String(), tc.expectedDigest)
}
t.Log("sets the layers metadata label according to the new spec")
var lmd files.LayersMetadata
lmdJSON := configFile.Config.Labels["io.buildpacks.lifecycle.metadata"]
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd))
h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml
h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml
h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA.String())
h.AssertEq(t, lmd.RunImage.Reference, fmt.Sprintf("%s@%s", exportTest.targetRegistry.fixtures.ReadOnlyRunImage, runImageFixtureSHA.String()))
})
}) })
}) })
}) })
when("layout case", func() { when("layout case", func() {
var ( var (
containerName string containerName string
err error err error
layoutDir string layoutDir string
tmpDir string tmpDir string
exportedImageName string
) )
when("experimental mode is enabled", func() { when("experimental mode is enabled", func() {
it.Before(func() { it.Before(func() {
// create the directory to save all OCI images on disk // creates the directory to save all the OCI images on disk
tmpDir, err = os.MkdirTemp("", "layout") tmpDir, err = os.MkdirTemp("", "layout")
h.AssertNil(t, err) h.AssertNil(t, err)
@ -592,31 +508,35 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
os.RemoveAll(tmpDir) os.RemoveAll(tmpDir)
}) })
when("using a custom layout directory", func() { when("custom layout directory", func() {
it.Before(func() { when("first build", func() {
exportedImageName = "my-custom-layout-app" when("app", func() {
layoutDir = filepath.Join(path.RootDir, "my-layout-dir") it.Before(func() {
}) exportedImageName = "my-custom-layout-app"
layoutDir = filepath.Join(path.RootDir, "my-layout-dir")
})
it("app is created", func() { it("is created", func() {
var exportFlags []string var exportFlags []string
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag") h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
exportFlags = append(exportFlags, []string{"-layout", "-layout-dir", layoutDir, "-analyzed", "/layers/layout-analyzed.toml"}...) exportFlags = append(exportFlags, []string{"-layout", "-layout-dir", layoutDir, "-analyzed", "/layers/layout-analyzed.toml"}...)
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportArgs = append(exportArgs, exportedImageName) exportArgs = append(exportArgs, exportedImageName)
output := h.DockerRunAndCopy(t, containerName, tmpDir, layoutDir, exportImage, output := h.DockerRunAndCopy(t, containerName, tmpDir, layoutDir, exportImage,
h.WithFlags( h.WithFlags(
"--env", "CNB_EXPERIMENTAL_MODE=warn", "--env", "CNB_EXPERIMENTAL_MODE=warn",
"--env", "CNB_PLATFORM_API="+platformAPI, "--env", "CNB_PLATFORM_API="+platformAPI,
), ),
h.WithArgs(exportArgs...)) h.WithArgs(exportArgs...))
h.AssertStringContains(t, output, "Saving /my-layout-dir/index.docker.io/library/my-custom-layout-app/latest") h.AssertStringContains(t, output, "Saving /my-layout-dir/index.docker.io/library/my-custom-layout-app/latest")
// assert the image was saved on disk in OCI layout format // assert the image was saved on disk in OCI layout format
index := h.ReadIndexManifest(t, filepath.Join(tmpDir, layoutDir, "index.docker.io", "library", exportedImageName, "latest")) index := h.ReadIndexManifest(t, filepath.Join(tmpDir, layoutDir, "index.docker.io", "library", exportedImageName, "latest"))
h.AssertEq(t, len(index.Manifests), 1) h.AssertEq(t, len(index.Manifests), 1)
})
})
}) })
}) })
}) })

View File

@ -1,4 +1,5 @@
//go:build acceptance //go:build acceptance
// +build acceptance
package acceptance package acceptance
@ -7,6 +8,7 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"testing" "testing"
"github.com/buildpacks/imgutil/layout/sparse" "github.com/buildpacks/imgutil/layout/sparse"
@ -44,6 +46,8 @@ const (
) )
func TestExtender(t *testing.T) { func TestExtender(t *testing.T) {
h.SkipIf(t, runtime.GOOS == "windows", "Extender is not supported on Windows")
testImageDockerContext := filepath.Join("testdata", "extender") testImageDockerContext := filepath.Join("testdata", "extender")
extendTest = NewPhaseTest(t, "extender", testImageDockerContext) extendTest = NewPhaseTest(t, "extender", testImageDockerContext)
extendTest.Start(t) extendTest.Start(t)

View File

@ -4,6 +4,7 @@ import (
"fmt" "fmt"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"strings" "strings"
"testing" "testing"
@ -24,6 +25,9 @@ func TestLauncher(t *testing.T) {
launchTest = NewPhaseTest(t, "launcher", testImageDockerContext, withoutDaemonFixtures, withoutRegistry) launchTest = NewPhaseTest(t, "launcher", testImageDockerContext, withoutDaemonFixtures, withoutRegistry)
containerBinaryDir := filepath.Join("testdata", "launcher", "linux", "container", "cnb", "lifecycle") containerBinaryDir := filepath.Join("testdata", "launcher", "linux", "container", "cnb", "lifecycle")
if launchTest.targetDaemon.os == "windows" {
containerBinaryDir = filepath.Join("testdata", "launcher", "windows", "container", "cnb", "lifecycle")
}
withCustomContainerBinaryDir := func(_ *testing.T, phaseTest *PhaseTest) { withCustomContainerBinaryDir := func(_ *testing.T, phaseTest *PhaseTest) {
phaseTest.containerBinaryDir = containerBinaryDir phaseTest.containerBinaryDir = containerBinaryDir
} }
@ -88,7 +92,11 @@ func testLauncher(t *testing.T, when spec.G, it spec.S) {
"--env=CNB_PLATFORM_API="+latestPlatformAPI, "--env=CNB_PLATFORM_API="+latestPlatformAPI,
launchImage, "with user provided args", launchImage, "with user provided args",
) )
assertOutput(t, cmd, "Executing web process-type with user provided args") if runtime.GOOS == "windows" {
assertOutput(t, cmd, `Executing web process-type "with user provided args"`)
} else {
assertOutput(t, cmd, "Executing web process-type with user provided args")
}
}) })
}) })
@ -101,6 +109,15 @@ func testLauncher(t *testing.T, when spec.G, it spec.S) {
launchImage, "--", launchImage, "--",
"env", "env",
) )
if runtime.GOOS == "windows" {
cmd = exec.Command( //nolint
"docker", "run", "--rm",
`--entrypoint=launcher`,
"--env=CNB_PLATFORM_API=0.7",
launchImage, "--",
"cmd", "/c", "set",
)
}
assertOutput(t, cmd, assertOutput(t, cmd,
"SOME_VAR=some-bp-val", "SOME_VAR=some-bp-val",
@ -143,11 +160,22 @@ func testLauncher(t *testing.T, when spec.G, it spec.S) {
launchImage, launchImage,
"echo", "$SOME_VAR", "$OTHER_VAR", "$WORKER_VAR", "echo", "$SOME_VAR", "$OTHER_VAR", "$WORKER_VAR",
) )
if runtime.GOOS == "windows" {
cmd = exec.Command( //nolint
"docker", "run", "--rm",
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
launchImage,
"echo", "%SOME_VAR%", "%OTHER_VAR%", "%WORKER_VAR%",
)
}
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-bp-val other-bp-val worker-no-process-val") assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-bp-val other-bp-val worker-no-process-val")
}) })
it("passes through env vars from user, excluding excluded vars", func() { it("passes through env vars from user, excluding excluded vars", func() {
args := []string{"echo", "$SOME_USER_VAR, $CNB_APP_DIR, $OTHER_VAR"} args := []string{"echo", "$SOME_USER_VAR, $CNB_APP_DIR, $OTHER_VAR"}
if runtime.GOOS == "windows" {
args = []string{"echo", "%SOME_USER_VAR%, %CNB_APP_DIR%, %OTHER_VAR%"}
}
cmd := exec.Command("docker", cmd := exec.Command("docker",
append( append(
[]string{ []string{
@ -161,7 +189,13 @@ func testLauncher(t *testing.T, when spec.G, it spec.S) {
args...)..., args...)...,
) // #nosec G204 ) // #nosec G204
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-user-val, , other-user-val**other-bp-val") if runtime.GOOS == "windows" {
// windows values with spaces will contain quotes
// empty values on windows preserve variable names instead of interpolating to empty strings
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\n\"some-user-val, %CNB_APP_DIR%, other-user-val**other-bp-val\"")
} else {
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-user-val, , other-user-val**other-bp-val")
}
}) })
it("adds buildpack bin dirs to the path", func() { it("adds buildpack bin dirs to the path", func() {
@ -177,13 +211,23 @@ func testLauncher(t *testing.T, when spec.G, it spec.S) {
when("CMD provided starts with --", func() { when("CMD provided starts with --", func() {
it("launches command directly", func() { it("launches command directly", func() {
cmd := exec.Command( //nolint if runtime.GOOS == "windows" {
"docker", "run", "--rm", cmd := exec.Command( //nolint
"--env=CNB_PLATFORM_API="+latestPlatformAPI, "docker", "run", "--rm",
launchImage, "--", "--env=CNB_PLATFORM_API="+latestPlatformAPI,
"echo", "something", launchImage, "--",
) "ping", "/?",
assertOutput(t, cmd, "something") )
assertOutput(t, cmd, "Usage: ping")
} else {
cmd := exec.Command( //nolint
"docker", "run", "--rm",
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
launchImage, "--",
"echo", "something",
)
assertOutput(t, cmd, "something")
}
}) })
it("sets env vars from layers", func() { it("sets env vars from layers", func() {
@ -193,6 +237,14 @@ func testLauncher(t *testing.T, when spec.G, it spec.S) {
launchImage, "--", launchImage, "--",
"env", "env",
) )
if runtime.GOOS == "windows" {
cmd = exec.Command( //nolint
"docker", "run", "--rm",
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
launchImage, "--",
"cmd", "/c", "set",
)
}
assertOutput(t, cmd, assertOutput(t, cmd,
"SOME_VAR=some-bp-val", "SOME_VAR=some-bp-val",
@ -209,6 +261,16 @@ func testLauncher(t *testing.T, when spec.G, it spec.S) {
launchImage, "--", launchImage, "--",
"env", "env",
) )
if runtime.GOOS == "windows" {
cmd = exec.Command( //nolint
"docker", "run", "--rm",
"--env", "CNB_APP_DIR=/workspace",
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
"--env", "SOME_USER_VAR=some-user-val",
launchImage, "--",
"cmd", "/c", "set",
)
}
output, err := cmd.CombinedOutput() output, err := cmd.CombinedOutput()
if err != nil { if err != nil {

View File

@ -17,8 +17,6 @@ import (
"testing" "testing"
"time" "time"
"github.com/docker/docker/api/types/image"
ih "github.com/buildpacks/imgutil/testhelpers" ih "github.com/buildpacks/imgutil/testhelpers"
"github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/registry" "github.com/google/go-containerregistry/pkg/registry"
@ -428,30 +426,14 @@ func SBOMComponents() []string {
} }
func assertImageOSAndArch(t *testing.T, imageName string, phaseTest *PhaseTest) { //nolint - these functions are in fact used, i promise func assertImageOSAndArch(t *testing.T, imageName string, phaseTest *PhaseTest) { //nolint - these functions are in fact used, i promise
inspect, err := h.DockerCli(t).ImageInspect(context.TODO(), imageName) inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), imageName)
h.AssertNil(t, err) h.AssertNil(t, err)
h.AssertEq(t, inspect.Os, phaseTest.targetDaemon.os) h.AssertEq(t, inspect.Os, phaseTest.targetDaemon.os)
h.AssertEq(t, inspect.Architecture, phaseTest.targetDaemon.arch) h.AssertEq(t, inspect.Architecture, phaseTest.targetDaemon.arch)
} }
func assertImageOSAndArchAndCreatedAt(t *testing.T, imageName string, phaseTest *PhaseTest, expectedCreatedAt time.Time) { //nolint func assertImageOSAndArchAndCreatedAt(t *testing.T, imageName string, phaseTest *PhaseTest, expectedCreatedAt time.Time) { //nolint
inspect, err := h.DockerCli(t).ImageInspect(context.TODO(), imageName) inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), imageName)
if err != nil {
list, _ := h.DockerCli(t).ImageList(context.TODO(), image.ListOptions{})
fmt.Println("Error encountered running ImageInspectWithRaw. imageName: ", imageName)
fmt.Println(err)
for _, value := range list {
fmt.Println("Image Name: ", value)
}
if strings.Contains(err.Error(), "No such image") {
t.Log("Image not found, retrying...")
time.Sleep(1 * time.Second)
inspect, err = h.DockerCli(t).ImageInspect(context.TODO(), imageName)
}
}
h.AssertNil(t, err) h.AssertNil(t, err)
h.AssertEq(t, inspect.Os, phaseTest.targetDaemon.os) h.AssertEq(t, inspect.Os, phaseTest.targetDaemon.os)
h.AssertEq(t, inspect.Architecture, phaseTest.targetDaemon.arch) h.AssertEq(t, inspect.Architecture, phaseTest.targetDaemon.arch)

View File

@ -1,4 +1,5 @@
//go:build acceptance //go:build acceptance
// +build acceptance
package acceptance package acceptance

View File

@ -1,4 +1,5 @@
//go:build acceptance //go:build acceptance
// +build acceptance
package acceptance package acceptance
@ -6,6 +7,7 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"testing" "testing"
"github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/name"
@ -31,6 +33,8 @@ var (
) )
func TestRestorer(t *testing.T) { func TestRestorer(t *testing.T) {
h.SkipIf(t, runtime.GOOS == "windows", "Restorer acceptance tests are not yet supported on Windows")
testImageDockerContext := filepath.Join("testdata", "restorer") testImageDockerContext := filepath.Join("testdata", "restorer")
restoreTest = NewPhaseTest(t, "restorer", testImageDockerContext) restoreTest = NewPhaseTest(t, "restorer", testImageDockerContext)
restoreTest.Start(t, updateTOMLFixturesWithTestRegistry) restoreTest.Start(t, updateTOMLFixturesWithTestRegistry)
@ -161,7 +165,7 @@ func testRestorerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
h.AssertPathDoesNotExist(t, uncachedFile) h.AssertPathDoesNotExist(t, uncachedFile)
}) })
it("does not restore layer data from unused buildpacks", func() { it("does not restore unused buildpack layer data", func() {
h.DockerRunAndCopy(t, h.DockerRunAndCopy(t,
containerName, containerName,
copyDir, copyDir,
@ -175,21 +179,6 @@ func testRestorerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe
unusedBpLayer := filepath.Join(copyDir, "layers", "unused_buildpack") unusedBpLayer := filepath.Join(copyDir, "layers", "unused_buildpack")
h.AssertPathDoesNotExist(t, unusedBpLayer) h.AssertPathDoesNotExist(t, unusedBpLayer)
}) })
it("does not restore corrupted layer data", func() {
h.DockerRunAndCopy(t,
containerName,
copyDir,
"/layers",
restoreImage,
h.WithFlags("--env", "CNB_PLATFORM_API="+platformAPI),
h.WithArgs("-cache-dir", "/cache"),
)
// check corrupted layer is not restored
corruptedFile := filepath.Join(copyDir, "layers", "corrupted_buildpack", "corrupted-layer")
h.AssertPathDoesNotExist(t, corruptedFile)
})
}) })
}) })

View File

@ -0,0 +1,12 @@
FROM mcr.microsoft.com/windows/nanoserver:1809
USER ContainerAdministrator
COPY container /
WORKDIR /layers
ENV CNB_USER_ID=1
ENV CNB_GROUP_ID=1
ENV CNB_PLATFORM_API=${cnb_platform_api}

View File

@ -0,0 +1,14 @@
FROM mcr.microsoft.com/windows/nanoserver:1809
USER ContainerAdministrator
COPY container /
ENTRYPOINT ["/cnb/lifecycle/builder"]
WORKDIR /layers
ENV CNB_USER_ID=1
ENV CNB_GROUP_ID=1
ENV CNB_PLATFORM_API=${cnb_platform_api}

View File

@ -1,4 +1,4 @@
FROM ubuntu:jammy FROM ubuntu:bionic
ARG cnb_uid=1234 ARG cnb_uid=1234
ARG cnb_gid=1000 ARG cnb_gid=1000

View File

@ -1,12 +1,6 @@
api = "0.9" api = "0.10"
[buildpack] [buildpack]
id = "simple_buildpack" id = "simple_buildpack"
version = "simple_buildpack_version" version = "simple_buildpack_version"
name = "Simple Buildpack" name = "Simple Buildpack"
[[stacks]]
id = "io.buildpacks.stacks.bionic"
[[stacks]]
id = "io.buildpacks.stacks.jammy"

View File

@ -1,17 +0,0 @@
{
"buildpacks": [
{
"key": "corrupted_buildpack",
"version": "corrupted_v1",
"layers": {
"corrupted-layer": {
"sha": "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59",
"data": null,
"build": false,
"launch": true,
"cache": true
}
}
}
]
}

View File

@ -1 +1 @@
sha256:2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944 sha256:b89860e2f9c62e6b5d66d3ce019e18cdabae30273c25150b7f20a82f7a70e494

View File

@ -1,3 +0,0 @@
[types]
cache = true
launch = true

View File

@ -1 +0,0 @@
digest-not-match-data

View File

@ -7,8 +7,3 @@
id = "cacher_buildpack" id = "cacher_buildpack"
version = "cacher_v1" version = "cacher_v1"
api = "0.8" api = "0.8"
[[group]]
id = "corrupted_buildpack"
version = "corrupted_v1"
api = "0.8"

View File

@ -1,4 +1,4 @@
FROM golang:1.24.6 as builder FROM golang:1.22 as builder
COPY exec.d/ /go/src/exec.d COPY exec.d/ /go/src/exec.d
RUN GO111MODULE=off go build -o helper ./src/exec.d RUN GO111MODULE=off go build -o helper ./src/exec.d

View File

@ -0,0 +1,16 @@
FROM golang:1.22-nanoserver-1809
COPY exec.d/ /go/src/exec.d
WORKDIR /go/src
ENV GO111MODULE=off
RUN go build -o helper.exe exec.d
COPY windows/container /
RUN mkdir c:\layers\0.9_buildpack\some_layer\exec.d\exec.d-checker
RUN copy helper.exe c:\layers\0.9_buildpack\some_layer\exec.d\helper.exe
RUN copy helper.exe c:\layers\0.9_buildpack\some_layer\exec.d\exec.d-checker\helper.exe
ENV PATH="c:\cnb\process;c:\cnb\lifecycle;C:\Windows\system32;C:\Windows;"
ENTRYPOINT ["c:\\cnb\\lifecycle\\launcher"]

View File

@ -1,4 +1,5 @@
//go:build unix //go:build linux || darwin
// +build linux darwin
package main package main

View File

@ -0,0 +1,10 @@
FROM mcr.microsoft.com/windows/nanoserver:1809
USER ContainerAdministrator
COPY container /
ENV CNB_USER_ID=1
ENV CNB_GROUP_ID=1
ENV CNB_PLATFORM_API=${cnb_platform_api}

View File

@ -8,10 +8,10 @@ ENV CNB_GROUP_ID=${cnb_gid}
COPY ./container/ / COPY ./container/ /
# We have to pre-create the tar files so that their digests do not change due to timestamps # turn /to_cache/<buildpack> directories into cache tarballs
# But, ':' in the filepath on Windows is not allowed # these are referenced by sha in /cache/committed/io.buildpacks.lifecycle.cache.metadata
RUN mv /cache/committed/sha256_2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944.tar /cache/committed/sha256:2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944.tar RUN tar cvf /cache/committed/sha256:b89860e2f9c62e6b5d66d3ce019e18cdabae30273c25150b7f20a82f7a70e494.tar -C /to_cache/cacher_buildpack layers
RUN mv /cache/committed/sha256_430338f576c11e5236669f9c843599d96afe28784cffcb2d46ddb07beb00df78.tar /cache/committed/sha256:430338f576c11e5236669f9c843599d96afe28784cffcb2d46ddb07beb00df78.tar RUN tar cvf /cache/committed/sha256:58bafa1e79c8e44151141c95086beb37ca85b69578fc890bce33bb4c6c8e851f.tar -C /to_cache/unused_buildpack layers
ENTRYPOINT ["/cnb/lifecycle/restorer"] ENTRYPOINT ["/cnb/lifecycle/restorer"]

View File

@ -1,43 +1 @@
{ {"buildpacks":[{"key":"cacher_buildpack","version":"cacher_v1","layers":{"cached-layer":{"sha":"sha256:b89860e2f9c62e6b5d66d3ce019e18cdabae30273c25150b7f20a82f7a70e494","data":null,"build":false,"launch":false,"cache":true}}},{"key":"unused_buildpack","version":"v1","layers":{"cached-layer":{"sha":"sha256:58bafa1e79c8e44151141c95086beb37ca85b69578fc890bce33bb4c6c8e851f","data":null,"build":false,"launch":false,"cache":true}}}]}
"buildpacks": [
{
"key": "cacher_buildpack",
"version": "cacher_v1",
"layers": {
"cached-layer": {
"sha": "sha256:2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944",
"data": null,
"build": false,
"launch": false,
"cache": true
}
}
},
{
"key": "corrupted_buildpack",
"version": "corrupted_v1",
"layers": {
"corrupted-layer": {
"sha": "sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c",
"data": null,
"build": false,
"launch": false,
"cache": true
}
}
},
{
"key": "unused_buildpack",
"version": "v1",
"layers": {
"cached-layer": {
"sha": "sha256:430338f576c11e5236669f9c843599d96afe28784cffcb2d46ddb07beb00df78",
"data": null,
"build": false,
"launch": false,
"cache": true
}
}
}
]
}

View File

@ -1 +1 @@
sha256:2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944 sha256:b89860e2f9c62e6b5d66d3ce019e18cdabae30273c25150b7f20a82f7a70e494

View File

@ -8,11 +8,6 @@
version = "cacher_v1" version = "cacher_v1"
api = "0.10" api = "0.10"
[[group]]
id = "corrupted_buildpack"
version = "corrupted_v1"
api = "0.11"
[[group-extensions]] [[group-extensions]]
id = "some-extension-id" id = "some-extension-id"
version = "v1" version = "v1"

View File

@ -1,4 +1,5 @@
//go:build unix //go:build linux || darwin
// +build linux darwin
package acceptance package acceptance

View File

@ -0,0 +1,30 @@
package acceptance
import (
"path"
"path/filepath"
"strings"
)
const (
containerBaseImage = "mcr.microsoft.com/windows/nanoserver:1809"
containerBaseImageFull = "mcr.microsoft.com/windows/nanoserver:1809"
dockerfileName = "Dockerfile.windows"
exe = ".exe"
execDBpDir = "0.9_buildpack"
)
var dockerSocketMount = []string{
"--mount", `type=npipe,source=\\.\pipe\docker_engine,target=\\.\pipe\docker_engine`,
"--user", "ContainerAdministrator",
}
// ctrPath equivalent to path.Join but converts to Windows slashes and drive prefix when needed
func ctrPath(unixPathParts ...string) string {
unixPath := path.Join(unixPathParts...)
windowsPath := filepath.FromSlash(unixPath)
if strings.HasPrefix(windowsPath, `\`) {
return "c:" + windowsPath
}
return windowsPath
}

View File

@ -11,7 +11,7 @@ var (
// Platform is a pair of lists of Platform API versions: // Platform is a pair of lists of Platform API versions:
// 1. All supported versions (including deprecated versions) // 1. All supported versions (including deprecated versions)
// 2. The versions that are deprecated // 2. The versions that are deprecated
Platform = newApisMustParse([]string{"0.7", "0.8", "0.9", "0.10", "0.11", "0.12", "0.13", "0.14"}, []string{}) Platform = newApisMustParse([]string{"0.7", "0.8", "0.9", "0.10", "0.11", "0.12", "0.13"}, []string{})
// Buildpack is a pair of lists of Buildpack API versions: // Buildpack is a pair of lists of Buildpack API versions:
// 1. All supported versions (including deprecated versions) // 1. All supported versions (including deprecated versions)
// 2. The versions that are deprecated // 2. The versions that are deprecated

View File

@ -4,6 +4,7 @@ import (
"archive/tar" "archive/tar"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"testing" "testing"
"github.com/sclevine/spec" "github.com/sclevine/spec"
@ -30,16 +31,31 @@ func testExtract(t *testing.T, when spec.G, it spec.S) {
it.Before(func() { it.Before(func() {
tr, tmpDir = newFakeTarReader(t) tr, tmpDir = newFakeTarReader(t)
pathModes = []archive.PathMode{ // Golang for Windows only implements owner permissions
{"root", os.ModeDir + 0755}, if runtime.GOOS == "windows" {
{"root/readonly", os.ModeDir + 0500}, pathModes = []archive.PathMode{
{"root/readonly/readonlysub", os.ModeDir + 0500}, {`root`, os.ModeDir + 0777},
{"root/readonly/readonlysub/somefile", 0444}, {`root\readonly`, os.ModeDir + 0555},
{"root/standarddir", os.ModeDir + 0755}, {`root\readonly\readonlysub`, os.ModeDir + 0555},
{"root/standarddir/somefile", 0644}, {`root\readonly\readonlysub\somefile`, 0444},
{"root/nonexistdirnotintar", os.ModeDir + os.FileMode(int(os.ModePerm)&^originalUmask)}, //nolint {`root\standarddir`, os.ModeDir + 0777},
{"root/symlinkdir", os.ModeSymlink + 0777}, // symlink permissions are not preserved from archive {`root\standarddir\somefile`, 0666},
{"root/symlinkfile", os.ModeSymlink + 0777}, // symlink permissions are not preserved from archive {`root\nonexistdirnotintar`, os.ModeDir + 0777},
{`root\symlinkdir`, os.ModeSymlink + 0666},
{`root\symlinkfile`, os.ModeSymlink + 0666},
}
} else {
pathModes = []archive.PathMode{
{"root", os.ModeDir + 0755},
{"root/readonly", os.ModeDir + 0500},
{"root/readonly/readonlysub", os.ModeDir + 0500},
{"root/readonly/readonlysub/somefile", 0444},
{"root/standarddir", os.ModeDir + 0755},
{"root/standarddir/somefile", 0644},
{"root/nonexistdirnotintar", os.ModeDir + os.FileMode(int(os.ModePerm)&^originalUmask)},
{"root/symlinkdir", os.ModeSymlink + 0777}, // symlink permissions are not preserved from archive
{"root/symlinkfile", os.ModeSymlink + 0777}, // symlink permissions are not preserved from archive
}
} }
}) })
@ -94,7 +110,12 @@ func testExtract(t *testing.T, when spec.G, it spec.S) {
fileInfo, err := os.Stat(filepath.Join(tmpDir, "root")) fileInfo, err := os.Stat(filepath.Join(tmpDir, "root"))
h.AssertNil(t, err) h.AssertNil(t, err)
h.AssertEq(t, fileInfo.Mode(), os.ModeDir+0744) if runtime.GOOS != "windows" {
h.AssertEq(t, fileInfo.Mode(), os.ModeDir+0744)
} else {
// Golang for Windows only implements owner permissions
h.AssertEq(t, fileInfo.Mode(), os.ModeDir+0777)
}
}) })
}) })
} }

View File

@ -3,6 +3,7 @@ package archive_test
import ( import (
"archive/tar" "archive/tar"
"io" "io"
"runtime"
"testing" "testing"
"github.com/sclevine/spec" "github.com/sclevine/spec"
@ -32,7 +33,11 @@ func testNormalizingTarReader(t *testing.T, when spec.G, it spec.S) {
it("converts path separators", func() { it("converts path separators", func() {
hdr, err := ntr.Next() hdr, err := ntr.Next()
h.AssertNil(t, err) h.AssertNil(t, err)
h.AssertEq(t, hdr.Name, `/some/path`) if runtime.GOOS == "windows" {
h.AssertEq(t, hdr.Name, `\some\path`)
} else {
h.AssertEq(t, hdr.Name, `/some/path`)
}
}) })
when("#Strip", func() { when("#Strip", func() {
@ -40,7 +45,11 @@ func testNormalizingTarReader(t *testing.T, when spec.G, it spec.S) {
ntr.Strip("/some") ntr.Strip("/some")
hdr, err := ntr.Next() hdr, err := ntr.Next()
h.AssertNil(t, err) h.AssertNil(t, err)
h.AssertEq(t, hdr.Name, `/path`) if runtime.GOOS == "windows" {
h.AssertEq(t, hdr.Name, `\path`)
} else {
h.AssertEq(t, hdr.Name, `/path`)
}
}) })
}) })
@ -49,7 +58,11 @@ func testNormalizingTarReader(t *testing.T, when spec.G, it spec.S) {
ntr.PrependDir("/super-dir") ntr.PrependDir("/super-dir")
hdr, err := ntr.Next() hdr, err := ntr.Next()
h.AssertNil(t, err) h.AssertNil(t, err)
h.AssertEq(t, hdr.Name, `/super-dir/some/path`) if runtime.GOOS == "windows" {
h.AssertEq(t, hdr.Name, `\super-dir\some\path`)
} else {
h.AssertEq(t, hdr.Name, `/super-dir/some/path`)
}
}) })
}) })
@ -60,7 +73,11 @@ func testNormalizingTarReader(t *testing.T, when spec.G, it spec.S) {
ntr.ExcludePaths([]string{"excluded-dir"}) ntr.ExcludePaths([]string{"excluded-dir"})
hdr, err := ntr.Next() hdr, err := ntr.Next()
h.AssertNil(t, err) h.AssertNil(t, err)
h.AssertEq(t, hdr.Name, `/some/path`) if runtime.GOOS == "windows" {
h.AssertEq(t, hdr.Name, `\some\path`)
} else {
h.AssertEq(t, hdr.Name, `/some/path`)
}
}) })
}) })
}) })

View File

@ -1,4 +1,5 @@
//go:build unix //go:build linux || darwin
// +build linux darwin
package archive package archive

View File

@ -2,6 +2,7 @@ package archive_test
import ( import (
"archive/tar" "archive/tar"
"runtime"
"testing" "testing"
"time" "time"
@ -37,6 +38,21 @@ func testNormalizingTarWriter(t *testing.T, when spec.G, it spec.S) {
h.AssertEq(t, ftw.getLastHeader().Gname, "") h.AssertEq(t, ftw.getLastHeader().Gname, "")
}) })
when("windows", func() {
it.Before(func() {
if runtime.GOOS != "windows" {
t.Skip("windows specific test")
}
})
it("converts path separators", func() {
h.AssertNil(t, ntw.WriteHeader(&tar.Header{
Name: `c:\some\file\path`,
}))
h.AssertEq(t, ftw.getLastHeader().Name, "/some/file/path")
})
})
when("#WithUID", func() { when("#WithUID", func() {
it("sets the uid", func() { it("sets the uid", func() {
ntw.WithUID(999) ntw.WithUID(999)

View File

@ -38,15 +38,13 @@ func DefaultKeychain(images ...string) (authn.Keychain, error) {
return nil, err return nil, err
} }
keychains := []authn.Keychain{ keychains := []authn.Keychain{envKeychain, authn.DefaultKeychain}
envKeychain,
NewResolvedKeychain(authn.DefaultKeychain, images...),
}
if vendorKeychainEnabled("amazon") { if vendorKeychainEnabled("amazon") {
keychains = append(keychains, NewResolvedKeychain(amazonKeychain, images...)) keychains = append(keychains, amazonKeychain)
} }
if vendorKeychainEnabled("azure") { if vendorKeychainEnabled("azure") {
keychains = append(keychains, NewResolvedKeychain(azureKeychain, images...)) keychains = append(keychains, azureKeychain)
} }
return authn.NewMultiKeychain( return authn.NewMultiKeychain(

View File

@ -8,7 +8,6 @@ import (
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
"github.com/buildpacks/lifecycle/api"
"github.com/buildpacks/lifecycle/internal/encoding" "github.com/buildpacks/lifecycle/internal/encoding"
) )
@ -18,7 +17,7 @@ type BpDescriptor struct {
Order Order `toml:"order"` Order Order `toml:"order"`
WithRootDir string `toml:"-"` WithRootDir string `toml:"-"`
Targets []TargetMetadata `toml:"targets"` Targets []TargetMetadata `toml:"targets"`
Stacks []StackMetadata `toml:"stacks"` // just for backwards compat so we can check if it's the bionic stack, which we translate to a target Stacks []StackMetadata `tome:"stacks"` // just for backwards compat so we can check if it's the bionic stack, which we translate to a target
} }
@ -70,9 +69,7 @@ func ReadBpDescriptor(path string) (*BpDescriptor, error) {
if len(descriptor.Targets) == 0 { if len(descriptor.Targets) == 0 {
for _, stack := range descriptor.Stacks { for _, stack := range descriptor.Stacks {
if stack.ID == "io.buildpacks.stacks.bionic" { if stack.ID == "io.buildpacks.stacks.bionic" {
if api.MustParse(descriptor.API()).AtLeast("0.10") || len(descriptor.Stacks) == 1 { descriptor.Targets = append(descriptor.Targets, TargetMetadata{OS: "linux", Arch: "amd64", Distros: []OSDistro{{Name: "ubuntu", Version: "18.04"}}})
descriptor.Targets = append(descriptor.Targets, TargetMetadata{OS: "linux", Arch: "amd64", Distros: []OSDistro{{Name: "ubuntu", Version: "18.04"}}})
}
} else if stack.ID == "*" { } else if stack.ID == "*" {
descriptor.Targets = append(descriptor.Targets, TargetMetadata{}) // matches any descriptor.Targets = append(descriptor.Targets, TargetMetadata{}) // matches any
} }
@ -87,8 +84,11 @@ func ReadBpDescriptor(path string) (*BpDescriptor, error) {
return &BpDescriptor{}, err return &BpDescriptor{}, err
} }
for i := 0; i < len(binFiles); i++ { for i := 0; i < len(binFiles); i++ {
bf := binFiles[len(binFiles)-i-1] bf := binFiles[len(binFiles)-i-1] // we're iterating backwards b/c os.ReadDir sorts "build.exe" after "build" but we want to preferentially detect windows first.
fname := bf.Name() fname := bf.Name()
if fname == "build.exe" || fname == "build.bat" {
descriptor.Targets = append(descriptor.Targets, TargetMetadata{OS: "windows"})
}
if fname == "build" { if fname == "build" {
descriptor.Targets = append(descriptor.Targets, TargetMetadata{OS: "linux"}) descriptor.Targets = append(descriptor.Targets, TargetMetadata{OS: "linux"})
} }

View File

@ -81,140 +81,44 @@ func testBpDescriptor(t *testing.T, when spec.G, it spec.S) {
h.AssertEq(t, descriptor.Targets[0].Distros[0].Version, "V8.4-2L3") h.AssertEq(t, descriptor.Targets[0].Distros[0].Version, "V8.4-2L3")
}) })
when("translating stacks to targets", func() { it("does translate one special stack value into target values for older apis", func() {
when("older buildpacks", func() { path := filepath.Join("testdata", "buildpack", "by-id", "B", "v1", "buildpack.toml")
when("there is only bionic", func() { descriptor, err := buildpack.ReadBpDescriptor(path)
it("creates a target", func() { h.AssertNil(t, err)
path := filepath.Join("testdata", "buildpack", "by-id", "B", "v1", "buildpack.toml") // common sanity checks
descriptor, err := buildpack.ReadBpDescriptor(path) h.AssertEq(t, descriptor.WithAPI, "0.7")
h.AssertNil(t, err) h.AssertEq(t, descriptor.Buildpack.ID, "B")
// common sanity checks h.AssertEq(t, descriptor.Buildpack.Name, "Buildpack B")
h.AssertEq(t, descriptor.WithAPI, "0.7") h.AssertEq(t, descriptor.Buildpack.Version, "v1")
h.AssertEq(t, descriptor.Buildpack.ID, "B") h.AssertEq(t, descriptor.Buildpack.Homepage, "Buildpack B Homepage")
h.AssertEq(t, descriptor.Buildpack.Name, "Buildpack B") h.AssertEq(t, descriptor.Buildpack.SBOM, []string{"application/vnd.cyclonedx+json"})
h.AssertEq(t, descriptor.Buildpack.Version, "v1") // specific behaviors for this test
h.AssertEq(t, descriptor.Buildpack.Homepage, "Buildpack B Homepage") h.AssertEq(t, descriptor.Stacks[0].ID, "io.buildpacks.stacks.bionic")
h.AssertEq(t, descriptor.Buildpack.SBOM, []string{"application/vnd.cyclonedx+json"}) h.AssertEq(t, len(descriptor.Targets), 1)
// specific behaviors for this test h.AssertEq(t, descriptor.Targets[0].Arch, "amd64")
h.AssertEq(t, descriptor.Stacks[0].ID, "io.buildpacks.stacks.bionic") h.AssertEq(t, descriptor.Targets[0].OS, "linux")
h.AssertEq(t, len(descriptor.Targets), 1) h.AssertEq(t, descriptor.Targets[0].Distros[0].Name, "ubuntu")
h.AssertEq(t, descriptor.Targets[0].Arch, "amd64") h.AssertEq(t, descriptor.Targets[0].Distros[0].Version, "18.04")
h.AssertEq(t, descriptor.Targets[0].OS, "linux") })
h.AssertEq(t, descriptor.Targets[0].Distros[0].Name, "ubuntu")
h.AssertEq(t, descriptor.Targets[0].Distros[0].Version, "18.04")
})
})
when("there are multiple stacks", func() { it("translates one special stack value into target values", func() {
it("does NOT create a target", func() { path := filepath.Join("testdata", "buildpack", "by-id", "B", "v2", "buildpack.toml")
path := filepath.Join("testdata", "buildpack", "by-id", "B", "v1.2", "buildpack.toml") descriptor, err := buildpack.ReadBpDescriptor(path)
descriptor, err := buildpack.ReadBpDescriptor(path) h.AssertNil(t, err)
h.AssertNil(t, err) // common sanity checks
// common sanity checks h.AssertEq(t, descriptor.WithAPI, "0.12")
h.AssertEq(t, descriptor.WithAPI, "0.7") h.AssertEq(t, descriptor.Buildpack.ID, "B")
h.AssertEq(t, descriptor.Buildpack.ID, "B") h.AssertEq(t, descriptor.Buildpack.Name, "Buildpack B")
h.AssertEq(t, descriptor.Buildpack.Name, "Buildpack B") h.AssertEq(t, descriptor.Buildpack.Version, "v1")
h.AssertEq(t, descriptor.Buildpack.Version, "v1.2") h.AssertEq(t, descriptor.Buildpack.Homepage, "Buildpack B Homepage")
h.AssertEq(t, descriptor.Buildpack.Homepage, "Buildpack B Homepage") h.AssertEq(t, descriptor.Buildpack.SBOM, []string{"application/vnd.cyclonedx+json"})
h.AssertEq(t, descriptor.Buildpack.SBOM, []string{"application/vnd.cyclonedx+json"}) // specific behaviors for this test
// specific behaviors for this test h.AssertEq(t, descriptor.Stacks[0].ID, "io.buildpacks.stacks.bionic")
h.AssertEq(t, descriptor.Stacks[0].ID, "io.buildpacks.stacks.bionic") h.AssertEq(t, len(descriptor.Targets), 1)
h.AssertEq(t, len(descriptor.Targets), 0) h.AssertEq(t, descriptor.Targets[0].Arch, "amd64")
}) h.AssertEq(t, descriptor.Targets[0].OS, "linux")
}) h.AssertEq(t, descriptor.Targets[0].Distros[0].Name, "ubuntu")
h.AssertEq(t, descriptor.Targets[0].Distros[0].Version, "18.04")
when("there is a wildcard stack", func() {
it("creates a wildcard target", func() {
path := filepath.Join("testdata", "buildpack", "by-id", "B", "v1.star", "buildpack.toml")
descriptor, err := buildpack.ReadBpDescriptor(path)
h.AssertNil(t, err)
// common sanity checks
h.AssertEq(t, descriptor.WithAPI, "0.7")
h.AssertEq(t, descriptor.Buildpack.ID, "B")
h.AssertEq(t, descriptor.Buildpack.Name, "Buildpack B")
h.AssertEq(t, descriptor.Buildpack.Version, "v1.star")
h.AssertEq(t, descriptor.Buildpack.Homepage, "Buildpack B Homepage")
h.AssertEq(t, descriptor.Buildpack.SBOM, []string{"application/vnd.cyclonedx+json"})
// specific behaviors for this test
h.AssertEq(t, descriptor.Stacks[0].ID, "*")
h.AssertEq(t, len(descriptor.Targets), 1)
// a target that is completely empty will always match whatever is the base image target
h.AssertEq(t, descriptor.Targets[0].Arch, "")
h.AssertEq(t, descriptor.Targets[0].OS, "")
h.AssertEq(t, descriptor.Targets[0].ArchVariant, "")
h.AssertEq(t, len(descriptor.Targets[0].Distros), 0)
})
})
})
when("newer buildpacks", func() {
when("there is only bionic", func() {
it("creates a target", func() {
path := filepath.Join("testdata", "buildpack", "by-id", "B", "v2", "buildpack.toml")
descriptor, err := buildpack.ReadBpDescriptor(path)
h.AssertNil(t, err)
// common sanity checks
h.AssertEq(t, descriptor.WithAPI, "0.12")
h.AssertEq(t, descriptor.Buildpack.ID, "B")
h.AssertEq(t, descriptor.Buildpack.Name, "Buildpack B")
h.AssertEq(t, descriptor.Buildpack.Version, "v2")
h.AssertEq(t, descriptor.Buildpack.Homepage, "Buildpack B Homepage")
h.AssertEq(t, descriptor.Buildpack.SBOM, []string{"application/vnd.cyclonedx+json"})
// specific behaviors for this test
h.AssertEq(t, descriptor.Stacks[0].ID, "io.buildpacks.stacks.bionic")
h.AssertEq(t, len(descriptor.Targets), 1)
h.AssertEq(t, descriptor.Targets[0].Arch, "amd64")
h.AssertEq(t, descriptor.Targets[0].OS, "linux")
h.AssertEq(t, descriptor.Targets[0].Distros[0].Name, "ubuntu")
h.AssertEq(t, descriptor.Targets[0].Distros[0].Version, "18.04")
})
})
when("there are multiple stacks", func() {
it("creates a target", func() {
path := filepath.Join("testdata", "buildpack", "by-id", "B", "v2.2", "buildpack.toml")
descriptor, err := buildpack.ReadBpDescriptor(path)
h.AssertNil(t, err)
// common sanity checks
h.AssertEq(t, descriptor.WithAPI, "0.12")
h.AssertEq(t, descriptor.Buildpack.ID, "B")
h.AssertEq(t, descriptor.Buildpack.Name, "Buildpack B")
h.AssertEq(t, descriptor.Buildpack.Version, "v2.2")
h.AssertEq(t, descriptor.Buildpack.Homepage, "Buildpack B Homepage")
h.AssertEq(t, descriptor.Buildpack.SBOM, []string{"application/vnd.cyclonedx+json"})
// specific behaviors for this test
h.AssertEq(t, descriptor.Stacks[0].ID, "io.buildpacks.stacks.bionic")
h.AssertEq(t, len(descriptor.Targets), 1)
h.AssertEq(t, descriptor.Targets[0].Arch, "amd64")
h.AssertEq(t, descriptor.Targets[0].OS, "linux")
h.AssertEq(t, descriptor.Targets[0].Distros[0].Name, "ubuntu")
h.AssertEq(t, descriptor.Targets[0].Distros[0].Version, "18.04")
})
})
when("there is a wildcard stack", func() {
it("creates a wildcard target", func() {
path := filepath.Join("testdata", "buildpack", "by-id", "B", "v2.star", "buildpack.toml")
descriptor, err := buildpack.ReadBpDescriptor(path)
h.AssertNil(t, err)
// common sanity checks
h.AssertEq(t, descriptor.WithAPI, "0.12")
h.AssertEq(t, descriptor.Buildpack.ID, "B")
h.AssertEq(t, descriptor.Buildpack.Name, "Buildpack B")
h.AssertEq(t, descriptor.Buildpack.Version, "v2.star")
h.AssertEq(t, descriptor.Buildpack.Homepage, "Buildpack B Homepage")
h.AssertEq(t, descriptor.Buildpack.SBOM, []string{"application/vnd.cyclonedx+json"})
// specific behaviors for this test
h.AssertEq(t, descriptor.Stacks[0].ID, "*")
h.AssertEq(t, len(descriptor.Targets), 1)
// a target that is completely empty will always match whatever is the base image target
h.AssertEq(t, descriptor.Targets[0].Arch, "")
h.AssertEq(t, descriptor.Targets[0].OS, "")
h.AssertEq(t, descriptor.Targets[0].ArchVariant, "")
h.AssertEq(t, len(descriptor.Targets[0].Distros), 0)
})
})
})
}) })
it("does not translate non-special stack values", func() { it("does not translate non-special stack values", func() {
@ -252,5 +156,24 @@ func testBpDescriptor(t *testing.T, when spec.G, it spec.S) {
h.AssertEq(t, descriptor.Targets[0].OS, "linux") h.AssertEq(t, descriptor.Targets[0].OS, "linux")
h.AssertEq(t, len(descriptor.Targets[0].Distros), 0) h.AssertEq(t, len(descriptor.Targets[0].Distros), 0)
}) })
it("detects windows/* if batch files are present and ignores linux", func() {
path := filepath.Join("testdata", "buildpack", "by-id", "A", "v1", "buildpack.toml")
descriptor, err := buildpack.ReadBpDescriptor(path)
h.AssertNil(t, err)
// common sanity assertions
h.AssertEq(t, descriptor.WithAPI, "0.7")
h.AssertEq(t, descriptor.Buildpack.ID, "A")
h.AssertEq(t, descriptor.Buildpack.Name, "Buildpack A")
h.AssertEq(t, descriptor.Buildpack.Version, "v1")
h.AssertEq(t, descriptor.Buildpack.Homepage, "Buildpack A Homepage")
h.AssertEq(t, descriptor.Buildpack.SBOM, []string{"application/vnd.cyclonedx+json"})
// specific behaviors for this test
h.AssertEq(t, len(descriptor.Targets), 2)
h.AssertEq(t, descriptor.Targets[0].Arch, "")
h.AssertEq(t, descriptor.Targets[0].OS, "windows")
h.AssertEq(t, descriptor.Targets[1].Arch, "")
h.AssertEq(t, descriptor.Targets[1].OS, "linux")
})
}) })
} }

View File

@ -14,6 +14,7 @@ import (
"github.com/buildpacks/lifecycle/api" "github.com/buildpacks/lifecycle/api"
"github.com/buildpacks/lifecycle/env" "github.com/buildpacks/lifecycle/env"
"github.com/buildpacks/lifecycle/internal/encoding" "github.com/buildpacks/lifecycle/internal/encoding"
"github.com/buildpacks/lifecycle/internal/fsutil"
"github.com/buildpacks/lifecycle/launch" "github.com/buildpacks/lifecycle/launch"
"github.com/buildpacks/lifecycle/layers" "github.com/buildpacks/lifecycle/layers"
"github.com/buildpacks/lifecycle/log" "github.com/buildpacks/lifecycle/log"
@ -93,7 +94,7 @@ func (e *DefaultBuildExecutor) Build(d BpDescriptor, inputs BuildInputs, logger
} }
logger.Debug("Updating environment") logger.Debug("Updating environment")
if err := d.setupEnv(bpLayersDir, createdLayers, inputs.Env); err != nil { if err := d.setupEnv(createdLayers, inputs.Env); err != nil {
return BuildOutputs{}, err return BuildOutputs{}, err
} }
@ -132,7 +133,7 @@ func runBuildCmd(d BpDescriptor, bpLayersDir, planPath string, inputs BuildInput
) // #nosec G204 ) // #nosec G204
cmd.Dir = inputs.AppDir cmd.Dir = inputs.AppDir
cmd.Stdout = inputs.Out cmd.Stdout = inputs.Out
cmd.Stderr = inputs.Out cmd.Stderr = inputs.Err
var err error var err error
if d.Buildpack.ClearEnv { if d.Buildpack.ClearEnv {
@ -161,72 +162,68 @@ func runBuildCmd(d BpDescriptor, bpLayersDir, planPath string, inputs BuildInput
return nil return nil
} }
func (d BpDescriptor) processLayers(bpLayersDir string, logger log.Logger) (map[string]LayerMetadataFile, error) { func (d BpDescriptor) processLayers(layersDir string, logger log.Logger) (map[string]LayerMetadataFile, error) {
bpLayers := make(map[string]LayerMetadataFile) return eachLayer(layersDir, d.WithAPI, func(path, buildpackAPI string) (LayerMetadataFile, error) {
if err := eachLayer(bpLayersDir, func(layerPath string) error { layerMetadataFile, err := DecodeLayerMetadataFile(path+".toml", buildpackAPI, logger)
layerFile, err := DecodeLayerMetadataFile(layerPath+".toml", d.WithAPI, logger)
if err != nil { if err != nil {
return fmt.Errorf("failed to decode layer metadata file: %w", err) return LayerMetadataFile{}, err
} }
if err = renameLayerDirIfNeeded(layerFile, layerPath); err != nil { if err := renameLayerDirIfNeeded(layerMetadataFile, path); err != nil {
return fmt.Errorf("failed to rename layer directory: %w", err) return LayerMetadataFile{}, err
} }
bpLayers[layerPath] = layerFile return layerMetadataFile, nil
return nil })
}); err != nil {
return nil, fmt.Errorf("failed to process buildpack layer: %w", err)
}
return bpLayers, nil
} }
func eachLayer(bpLayersDir string, fn func(layerPath string) error) error { func eachLayer(bpLayersDir, buildpackAPI string, fn func(path, api string) (LayerMetadataFile, error)) (map[string]LayerMetadataFile, error) {
files, err := os.ReadDir(bpLayersDir) files, err := os.ReadDir(bpLayersDir)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return nil return map[string]LayerMetadataFile{}, nil
} else if err != nil { } else if err != nil {
return err return map[string]LayerMetadataFile{}, err
} }
bpLayers := map[string]LayerMetadataFile{}
for _, f := range files { for _, f := range files {
if f.IsDir() || !strings.HasSuffix(f.Name(), ".toml") { if f.IsDir() || !strings.HasSuffix(f.Name(), ".toml") {
continue continue
} }
path := filepath.Join(bpLayersDir, strings.TrimSuffix(f.Name(), ".toml")) path := filepath.Join(bpLayersDir, strings.TrimSuffix(f.Name(), ".toml"))
if err = fn(path); err != nil { layerMetadataFile, err := fn(path, buildpackAPI)
return err if err != nil {
return map[string]LayerMetadataFile{}, err
} }
bpLayers[path] = layerMetadataFile
} }
return nil return bpLayers, nil
} }
func renameLayerDirIfNeeded(layerMetadataFile LayerMetadataFile, layerDir string) error { func renameLayerDirIfNeeded(layerMetadataFile LayerMetadataFile, layerDir string) error {
// rename <layers>/<layer> to <layers>/<layer>.ignore if all the types flags are set to false // rename <layers>/<layer> to <layers>/<layer>.ignore if all the types flags are set to false
if !layerMetadataFile.Launch && !layerMetadataFile.Cache && !layerMetadataFile.Build { if !layerMetadataFile.Launch && !layerMetadataFile.Cache && !layerMetadataFile.Build {
if err := os.Rename(layerDir, layerDir+".ignore"); err != nil && !os.IsNotExist(err) { if err := fsutil.RenameWithWindowsFallback(layerDir, layerDir+".ignore"); err != nil && !os.IsNotExist(err) {
return err return err
} }
} }
return nil return nil
} }
func (d BpDescriptor) setupEnv(bpLayersDir string, createdLayers map[string]LayerMetadataFile, buildEnv BuildEnv) error { func (d BpDescriptor) setupEnv(createdLayers map[string]LayerMetadataFile, buildEnv BuildEnv) error {
bpAPI := api.MustParse(d.WithAPI) bpAPI := api.MustParse(d.WithAPI)
return eachLayer(bpLayersDir, func(layerPath string) error { for path, layerMetadataFile := range createdLayers {
var err error
layerMetadataFile, ok := createdLayers[layerPath]
if !ok {
return fmt.Errorf("failed to find layer metadata for %s", layerPath)
}
if !layerMetadataFile.Build { if !layerMetadataFile.Build {
return nil continue
} }
if err = buildEnv.AddRootDir(layerPath); err != nil { if err := buildEnv.AddRootDir(path); err != nil {
return err return err
} }
if err = buildEnv.AddEnvDir(filepath.Join(layerPath, "env"), env.DefaultActionType(bpAPI)); err != nil { if err := buildEnv.AddEnvDir(filepath.Join(path, "env"), env.DefaultActionType(bpAPI)); err != nil {
return err return err
} }
return buildEnv.AddEnvDir(filepath.Join(layerPath, "env.build"), env.DefaultActionType(bpAPI)) if err := buildEnv.AddEnvDir(filepath.Join(path, "env.build"), env.DefaultActionType(bpAPI)); err != nil {
}) return err
}
}
return nil
} }
func (d BpDescriptor) readOutputFilesBp(bpLayersDir, bpPlanPath string, bpPlanIn Plan, bpLayers map[string]LayerMetadataFile, logger log.Logger) (BuildOutputs, error) { func (d BpDescriptor) readOutputFilesBp(bpLayersDir, bpPlanPath string, bpPlanIn Plan, bpLayers map[string]LayerMetadataFile, logger log.Logger) (BuildOutputs, error) {

View File

@ -291,9 +291,12 @@ func testBuild(t *testing.T, when spec.G, it spec.S) {
if _, err := executor.Build(descriptor, inputs, logger); err != nil { if _, err := executor.Build(descriptor, inputs, logger); err != nil {
t.Fatalf("Unexpected error:\n%s\n", err) t.Fatalf("Unexpected error:\n%s\n", err)
} }
if s := cmp.Diff(h.CleanEndings(stdout.String()), "build out: A@v1\nbuild err: A@v1\n"); s != "" { if s := cmp.Diff(h.CleanEndings(stdout.String()), "build out: A@v1\n"); s != "" {
t.Fatalf("Unexpected stdout:\n%s\n", s) t.Fatalf("Unexpected stdout:\n%s\n", s)
} }
if s := cmp.Diff(h.CleanEndings(stderr.String()), "build err: A@v1\n"); s != "" {
t.Fatalf("Unexpected stderr:\n%s\n", s)
}
}) })
when("modifying the env fails", func() { when("modifying the env fails", func() {
@ -938,7 +941,7 @@ func testBuild(t *testing.T, when spec.G, it spec.S) {
filepath.Join(appDir, "launch-A-v1.toml"), filepath.Join(appDir, "launch-A-v1.toml"),
) )
_, err := executor.Build(descriptor, inputs, logger) _, err := executor.Build(descriptor, inputs, logger)
h.AssertError(t, err, "toml: line 2 (last key \"processes.command\"): incompatible types: TOML value has type []any; destination has type string") h.AssertError(t, err, "toml: line 2 (last key \"processes.command\"): incompatible types: TOML value has type []interface {}; destination has type string")
}) })
}) })
}) })

View File

@ -4,6 +4,7 @@ import (
"errors" "errors"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"testing" "testing"
"github.com/apex/log" "github.com/apex/log"
@ -294,6 +295,8 @@ func testDetect(t *testing.T, when spec.G, it spec.S) {
) )
it.Before(func() { it.Before(func() {
h.SkipIf(t, runtime.GOOS == "windows", "Image extensions are not supported for Windows builds")
descriptorPath = filepath.Join("testdata", "extension", "by-id", "A", "v1", "extension.toml") descriptorPath = filepath.Join("testdata", "extension", "by-id", "A", "v1", "extension.toml")
var err error var err error
descriptor, err = buildpack.ReadExtDescriptor(descriptorPath) descriptor, err = buildpack.ReadExtDescriptor(descriptorPath)

View File

@ -8,7 +8,6 @@ import (
"strings" "strings"
"github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/linter"
"github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/buildpacks/lifecycle/log" "github.com/buildpacks/lifecycle/log"
@ -75,7 +74,7 @@ func parseDockerfile(dockerfile string) ([]instructions.Stage, []instructions.Ar
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
stages, metaArgs, err := instructions.Parse(p.AST, &linter.Linter{}) stages, metaArgs, err := instructions.Parse(p.AST)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }

View File

@ -43,10 +43,14 @@ func (d *ExtDescriptor) inferTargets() error {
if err != nil { if err != nil {
return err return err
} }
var linuxDetected bool var windowsDetected, linuxDetected bool
for i := 0; i < len(binFiles); i++ { // detect and generate files are optional for i := 0; i < len(binFiles); i++ { // detect and generate files are optional
bf := binFiles[len(binFiles)-i-1] bf := binFiles[len(binFiles)-i-1] // we're iterating backwards b/c os.ReadDir sorts "foo.exe" after "foo" but we want to preferentially detect windows first.
fname := bf.Name() fname := bf.Name()
if !windowsDetected && (fname == "detect.exe" || fname == "detect.bat" || fname == "generate.exe" || fname == "generate.bat") {
d.Targets = append(d.Targets, TargetMetadata{OS: "windows"})
windowsDetected = true
}
if !linuxDetected && (fname == "detect" || fname == "generate") { if !linuxDetected && (fname == "detect" || fname == "generate") {
d.Targets = append(d.Targets, TargetMetadata{OS: "linux"}) d.Targets = append(d.Targets, TargetMetadata{OS: "linux"})
linuxDetected = true linuxDetected = true

View File

@ -39,5 +39,13 @@ func testExtDescriptor(t *testing.T, when spec.G, it spec.S) {
h.AssertEq(t, descriptor.Targets[0].OS, "") h.AssertEq(t, descriptor.Targets[0].OS, "")
h.AssertEq(t, descriptor.Targets[0].Arch, "") h.AssertEq(t, descriptor.Targets[0].Arch, "")
}) })
it("slices, it dices, it even does windows", func() {
path := filepath.Join("testdata", "extension", "by-id", "D", "v1", "extension.toml")
descriptor, err := buildpack.ReadExtDescriptor(path)
h.AssertNil(t, err)
h.AssertEq(t, len(descriptor.Targets), 1)
h.AssertEq(t, descriptor.Targets[0].OS, "windows")
h.AssertEq(t, descriptor.Targets[0].Arch, "")
})
}) })
} }

View File

@ -88,7 +88,7 @@ func runGenerateCmd(d ExtDescriptor, extOutputDir, planPath string, inputs Gener
) // #nosec G204 ) // #nosec G204
cmd.Dir = inputs.AppDir cmd.Dir = inputs.AppDir
cmd.Stdout = inputs.Out cmd.Stdout = inputs.Out
cmd.Stderr = inputs.Out cmd.Stderr = inputs.Err
var err error var err error
if d.Extension.ClearEnv { if d.Extension.ClearEnv {

View File

@ -5,6 +5,7 @@ import (
"errors" "errors"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"strings" "strings"
"testing" "testing"
@ -23,7 +24,9 @@ import (
) )
func TestGenerate(t *testing.T) { func TestGenerate(t *testing.T) {
spec.Run(t, "unit-generate", testGenerate, spec.Report(report.Terminal{})) if runtime.GOOS != "windows" {
spec.Run(t, "unit-generate", testGenerate, spec.Report(report.Terminal{}))
}
} }
func testGenerate(t *testing.T, when spec.G, it spec.S) { func testGenerate(t *testing.T, when spec.G, it spec.S) {
@ -248,9 +251,12 @@ func testGenerate(t *testing.T, when spec.G, it spec.S) {
if _, err := executor.Generate(descriptor, inputs, logger); err != nil { if _, err := executor.Generate(descriptor, inputs, logger); err != nil {
t.Fatalf("Unexpected error:\n%s\n", err) t.Fatalf("Unexpected error:\n%s\n", err)
} }
if s := cmp.Diff(h.CleanEndings(stdout.String()), "build out: A@v1\nbuild err: A@v1\n"); s != "" { if s := cmp.Diff(h.CleanEndings(stdout.String()), "build out: A@v1\n"); s != "" {
t.Fatalf("Unexpected stdout:\n%s\n", s) t.Fatalf("Unexpected stdout:\n%s\n", s)
} }
if s := cmp.Diff(h.CleanEndings(stderr.String()), "build err: A@v1\n"); s != "" {
t.Fatalf("Unexpected stderr:\n%s\n", s)
}
}) })
it("errors when the command fails", func() { it("errors when the command fails", func() {

View File

@ -1,14 +0,0 @@
api = "0.7"
[buildpack]
id = "B"
name = "Buildpack B"
version = "v1.2"
homepage = "Buildpack B Homepage"
sbom-formats = ["application/vnd.cyclonedx+json"]
[[stacks]]
id = "io.buildpacks.stacks.bionic"
[[stacks]]
id = "io.buildpacks.stacks.jammy"

View File

@ -1,11 +0,0 @@
api = "0.7"
[buildpack]
id = "B"
name = "Buildpack B"
version = "v1.star"
homepage = "Buildpack B Homepage"
sbom-formats = ["application/vnd.cyclonedx+json"]
[[stacks]]
id = "*"

View File

@ -1,14 +0,0 @@
api = "0.12"
[buildpack]
id = "B"
name = "Buildpack B"
version = "v2.2"
homepage = "Buildpack B Homepage"
sbom-formats = ["application/vnd.cyclonedx+json"]
[[stacks]]
id = "io.buildpacks.stacks.bionic"
[[stacks]]
id = "io.buildpacks.stacks.jammy"

View File

@ -1,11 +0,0 @@
api = "0.12"
[buildpack]
id = "B"
name = "Buildpack B"
version = "v2.star"
homepage = "Buildpack B Homepage"
sbom-formats = ["application/vnd.cyclonedx+json"]
[[stacks]]
id = "*"

View File

@ -3,7 +3,7 @@ api = "0.12"
[buildpack] [buildpack]
id = "B" id = "B"
name = "Buildpack B" name = "Buildpack B"
version = "v2" version = "v1"
homepage = "Buildpack B Homepage" homepage = "Buildpack B Homepage"
sbom-formats = ["application/vnd.cyclonedx+json"] sbom-formats = ["application/vnd.cyclonedx+json"]

View File

@ -4,6 +4,8 @@ import (
"io" "io"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"strings"
"testing" "testing"
"github.com/buildpacks/imgutil" "github.com/buildpacks/imgutil"
@ -11,8 +13,6 @@ import (
"github.com/sclevine/spec" "github.com/sclevine/spec"
"github.com/sclevine/spec/report" "github.com/sclevine/spec/report"
"github.com/buildpacks/lifecycle/cmd"
"github.com/buildpacks/lifecycle/cache" "github.com/buildpacks/lifecycle/cache"
h "github.com/buildpacks/lifecycle/testhelpers" h "github.com/buildpacks/lifecycle/testhelpers"
) )
@ -37,7 +37,7 @@ func testCachingImage(t *testing.T, when spec.G, it spec.S) {
fakeImage = fakes.NewImage("some-image", "", nil) fakeImage = fakes.NewImage("some-image", "", nil)
tmpDir, err = os.MkdirTemp("", "") tmpDir, err = os.MkdirTemp("", "")
h.AssertNil(t, err) h.AssertNil(t, err)
volumeCache, err = cache.NewVolumeCache(tmpDir, cmd.DefaultLogger) volumeCache, err = cache.NewVolumeCache(tmpDir)
h.AssertNil(t, err) h.AssertNil(t, err)
subject = cache.NewCachingImage(fakeImage, volumeCache) subject = cache.NewCachingImage(fakeImage, volumeCache)
layerPath, layerSHA, layerData = h.RandomLayer(t, tmpDir) layerPath, layerSHA, layerData = h.RandomLayer(t, tmpDir)
@ -68,6 +68,9 @@ func testCachingImage(t *testing.T, when spec.G, it spec.S) {
h.AssertNil(t, err) h.AssertNil(t, err)
defer from.Close() defer from.Close()
if runtime.GOOS == "windows" {
layerSHA = strings.TrimPrefix(layerSHA, "sha256:")
}
to, err := os.Create(filepath.Join(tmpDir, "committed", layerSHA+".tar")) to, err := os.Create(filepath.Join(tmpDir, "committed", layerSHA+".tar"))
h.AssertNil(t, err) h.AssertNil(t, err)
defer to.Close() defer to.Close()

22
cache/common.go vendored
View File

@ -5,25 +5,3 @@ import (
) )
var errCacheCommitted = errors.New("cache cannot be modified after commit") var errCacheCommitted = errors.New("cache cannot be modified after commit")
// ReadErr is an error type for filesystem read errors.
type ReadErr struct {
msg string
}
// NewReadErr creates a new ReadErr.
func NewReadErr(msg string) ReadErr {
return ReadErr{msg: msg}
}
// Error returns the error message.
func (e ReadErr) Error() string {
return e.msg
}
// IsReadErr checks if an error is a ReadErr.
func IsReadErr(err error) (bool, *ReadErr) {
var e ReadErr
isReadErr := errors.As(err, &e)
return isReadErr, &e
}

39
cache/image_cache.go vendored
View File

@ -99,44 +99,15 @@ func (c *ImageCache) AddLayerFile(tarPath string, diffID string) error {
return c.newImage.AddLayerWithDiffID(tarPath, diffID) return c.newImage.AddLayerWithDiffID(tarPath, diffID)
} }
// isLayerNotFound checks if the error is a layer not found error
//
// FIXME: we should not have to rely on trapping ErrUnexpectedEOF.
// If a blob is not present in the registry, we should get imgutil.ErrLayerNotFound,
// but we do not and instead get io.ErrUnexpectedEOF
func isLayerNotFound(err error) bool {
var e imgutil.ErrLayerNotFound
return errors.As(err, &e) || errors.Is(err, io.ErrUnexpectedEOF)
}
func (c *ImageCache) ReuseLayer(diffID string) error { func (c *ImageCache) ReuseLayer(diffID string) error {
if c.committed { if c.committed {
return errCacheCommitted return errCacheCommitted
} }
err := c.newImage.ReuseLayer(diffID) return c.newImage.ReuseLayer(diffID)
if err != nil {
// FIXME: this path is not currently executed.
// If a blob is not present in the registry, we should get imgutil.ErrLayerNotFound.
// We should then skip attempting to reuse the layer.
// However, we do not get imgutil.ErrLayerNotFound when the blob is not present.
if isLayerNotFound(err) {
return NewReadErr(fmt.Sprintf("failed to find cache layer with SHA '%s'", diffID))
}
return fmt.Errorf("failed to reuse cache layer with SHA '%s'", diffID)
}
return nil
} }
// RetrieveLayer retrieves a layer from the cache
func (c *ImageCache) RetrieveLayer(diffID string) (io.ReadCloser, error) { func (c *ImageCache) RetrieveLayer(diffID string) (io.ReadCloser, error) {
closer, err := c.origImage.GetLayer(diffID) return c.origImage.GetLayer(diffID)
if err != nil {
if isLayerNotFound(err) {
return nil, NewReadErr(fmt.Sprintf("failed to find cache layer with SHA '%s'", diffID))
}
return nil, fmt.Errorf("failed to get cache layer with SHA '%s'", diffID)
}
return closer, nil
} }
func (c *ImageCache) Commit() error { func (c *ImageCache) Commit() error {
@ -158,9 +129,3 @@ func (c *ImageCache) Commit() error {
return nil return nil
} }
// VerifyLayer returns an error if the layer contents do not match the provided sha.
func (c *ImageCache) VerifyLayer(_ string) error {
// we assume the registry is verifying digests for us
return nil
}

View File

@ -146,7 +146,7 @@ func testImageCache(t *testing.T, when spec.G, it spec.S) {
when("layer does not exist", func() { when("layer does not exist", func() {
it("returns an error", func() { it("returns an error", func() {
_, err := subject.RetrieveLayer("some_nonexistent_sha") _, err := subject.RetrieveLayer("some_nonexistent_sha")
h.AssertError(t, err, "failed to get cache layer with SHA 'some_nonexistent_sha'") h.AssertError(t, err, "failed to get layer with sha 'some_nonexistent_sha'")
}) })
}) })
}) })
@ -236,7 +236,7 @@ func testImageCache(t *testing.T, when spec.G, it spec.S) {
h.AssertNil(t, subject.AddLayerFile(testLayerTarPath, testLayerSHA)) h.AssertNil(t, subject.AddLayerFile(testLayerTarPath, testLayerSHA))
_, err := subject.RetrieveLayer(testLayerSHA) _, err := subject.RetrieveLayer(testLayerSHA)
h.AssertError(t, err, fmt.Sprintf("failed to get cache layer with SHA '%s'", testLayerSHA)) h.AssertError(t, err, fmt.Sprintf("failed to get layer with sha '%s'", testLayerSHA))
}) })
}) })
}) })

View File

@ -9,7 +9,7 @@ import (
// ImageComparer provides a way to compare images // ImageComparer provides a way to compare images
type ImageComparer interface { type ImageComparer interface {
ImagesEq(origImage imgutil.Image, newImage imgutil.Image) (bool, error) ImagesEq(orig imgutil.Image, new imgutil.Image) (bool, error)
} }
// ImageComparerImpl implements the ImageComparer interface // ImageComparerImpl implements the ImageComparer interface

View File

@ -12,7 +12,6 @@ import (
// ImageDeleter defines the methods available to delete and compare cached images // ImageDeleter defines the methods available to delete and compare cached images
type ImageDeleter interface { type ImageDeleter interface {
DeleteOrigImageIfDifferentFromNewImage(origImage, newImage imgutil.Image) DeleteOrigImageIfDifferentFromNewImage(origImage, newImage imgutil.Image)
DeleteImage(image imgutil.Image)
} }
// ImageDeleterImpl is a component to manage cache image deletion // ImageDeleterImpl is a component to manage cache image deletion
@ -36,13 +35,13 @@ func (c *ImageDeleterImpl) DeleteOrigImageIfDifferentFromNewImage(origImage, new
} }
if !same { if !same {
c.DeleteImage(origImage) c.deleteImage(origImage)
} }
} }
} }
// DeleteImage deletes an image // deleteImage deletes an image
func (c *ImageDeleterImpl) DeleteImage(image imgutil.Image) { func (c *ImageDeleterImpl) deleteImage(image imgutil.Image) {
if c.deletionEnabled { if c.deletionEnabled {
if err := image.Delete(); err != nil { if err := image.Delete(); err != nil {
c.logger.Warnf("Unable to delete cache image: %v", err.Error()) c.logger.Warnf("Unable to delete cache image: %v", err.Error())

72
cache/volume_cache.go vendored
View File

@ -1,17 +1,15 @@
package cache package cache
import ( import (
"crypto/sha256"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"strings"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/buildpacks/lifecycle/log"
"github.com/buildpacks/lifecycle/internal/fsutil" "github.com/buildpacks/lifecycle/internal/fsutil"
"github.com/buildpacks/lifecycle/platform" "github.com/buildpacks/lifecycle/platform"
) )
@ -22,11 +20,9 @@ type VolumeCache struct {
backupDir string backupDir string
stagingDir string stagingDir string
committedDir string committedDir string
logger log.Logger
} }
// NewVolumeCache creates a new VolumeCache func NewVolumeCache(dir string) (*VolumeCache, error) {
func NewVolumeCache(dir string, logger log.Logger) (*VolumeCache, error) {
if _, err := os.Stat(dir); err != nil { if _, err := os.Stat(dir); err != nil {
return nil, err return nil, err
} }
@ -36,7 +32,6 @@ func NewVolumeCache(dir string, logger log.Logger) (*VolumeCache, error) {
backupDir: filepath.Join(dir, "committed-backup"), backupDir: filepath.Join(dir, "committed-backup"),
stagingDir: filepath.Join(dir, "staging"), stagingDir: filepath.Join(dir, "staging"),
committedDir: filepath.Join(dir, "committed"), committedDir: filepath.Join(dir, "committed"),
logger: logger,
} }
if err := c.setupStagingDir(); err != nil { if err := c.setupStagingDir(); err != nil {
@ -138,17 +133,7 @@ func (c *VolumeCache) ReuseLayer(diffID string) error {
if c.committed { if c.committed {
return errCacheCommitted return errCacheCommitted
} }
committedPath := diffIDPath(c.committedDir, diffID) if err := os.Link(diffIDPath(c.committedDir, diffID), diffIDPath(c.stagingDir, diffID)); err != nil && !os.IsExist(err) {
stagingPath := diffIDPath(c.stagingDir, diffID)
if _, err := os.Stat(committedPath); err != nil {
if err = handleFileError(err, diffID); errors.Is(err, ReadErr{}) {
return err
}
return fmt.Errorf("failed to re-use cache layer with SHA '%s': %w", diffID, err)
}
if err := os.Link(committedPath, stagingPath); err != nil && !os.IsExist(err) {
return errors.Wrapf(err, "reusing layer (%s)", diffID) return errors.Wrapf(err, "reusing layer (%s)", diffID)
} }
return nil return nil
@ -161,10 +146,7 @@ func (c *VolumeCache) RetrieveLayer(diffID string) (io.ReadCloser, error) {
} }
file, err := os.Open(path) file, err := os.Open(path)
if err != nil { if err != nil {
if err = handleFileError(err, diffID); errors.Is(err, ReadErr{}) { return nil, errors.Wrapf(err, "opening layer with SHA '%s'", diffID)
return nil, err
}
return nil, fmt.Errorf("failed to get cache layer with SHA '%s'", diffID)
} }
return file, nil return file, nil
} }
@ -182,8 +164,8 @@ func (c *VolumeCache) HasLayer(diffID string) (bool, error) {
func (c *VolumeCache) RetrieveLayerFile(diffID string) (string, error) { func (c *VolumeCache) RetrieveLayerFile(diffID string) (string, error) {
path := diffIDPath(c.committedDir, diffID) path := diffIDPath(c.committedDir, diffID)
if _, err := os.Stat(path); err != nil { if _, err := os.Stat(path); err != nil {
if err = handleFileError(err, diffID); errors.Is(err, ReadErr{}) { if os.IsNotExist(err) {
return "", err return "", errors.Wrapf(err, "layer with SHA '%s' not found", diffID)
} }
return "", errors.Wrapf(err, "retrieving layer with SHA '%s'", diffID) return "", errors.Wrapf(err, "retrieving layer with SHA '%s'", diffID)
} }
@ -195,13 +177,13 @@ func (c *VolumeCache) Commit() error {
return errCacheCommitted return errCacheCommitted
} }
c.committed = true c.committed = true
if err := os.Rename(c.committedDir, c.backupDir); err != nil { if err := fsutil.RenameWithWindowsFallback(c.committedDir, c.backupDir); err != nil {
return errors.Wrap(err, "backing up cache") return errors.Wrap(err, "backing up cache")
} }
defer os.RemoveAll(c.backupDir) defer os.RemoveAll(c.backupDir)
if err1 := os.Rename(c.stagingDir, c.committedDir); err1 != nil { if err1 := fsutil.RenameWithWindowsFallback(c.stagingDir, c.committedDir); err1 != nil {
if err2 := os.Rename(c.backupDir, c.committedDir); err2 != nil { if err2 := fsutil.RenameWithWindowsFallback(c.backupDir, c.committedDir); err2 != nil {
return errors.Wrap(err2, "rolling back cache") return errors.Wrap(err2, "rolling back cache")
} }
return errors.Wrap(err1, "committing cache") return errors.Wrap(err1, "committing cache")
@ -211,6 +193,10 @@ func (c *VolumeCache) Commit() error {
} }
func diffIDPath(basePath, diffID string) string { func diffIDPath(basePath, diffID string) string {
if runtime.GOOS == "windows" {
// Avoid colons in Windows file paths
diffID = strings.TrimPrefix(diffID, "sha256:")
}
return filepath.Join(basePath, diffID+".tar") return filepath.Join(basePath, diffID+".tar")
} }
@ -220,33 +206,3 @@ func (c *VolumeCache) setupStagingDir() error {
} }
return os.MkdirAll(c.stagingDir, 0777) return os.MkdirAll(c.stagingDir, 0777)
} }
// VerifyLayer returns an error if the layer contents do not match the provided sha.
func (c *VolumeCache) VerifyLayer(diffID string) error {
layerRC, err := c.RetrieveLayer(diffID)
if err != nil {
return err
}
defer func() {
_ = layerRC.Close()
}()
hasher := sha256.New()
if _, err := io.Copy(hasher, layerRC); err != nil {
return errors.Wrap(err, "hashing layer")
}
foundDiffID := fmt.Sprintf("sha256:%x", hasher.Sum(nil))
if diffID != foundDiffID {
return NewReadErr(fmt.Sprintf("expected layer contents to have SHA '%s'; found '%s'", diffID, foundDiffID))
}
return err
}
func handleFileError(err error, diffID string) error {
if os.IsNotExist(err) {
return NewReadErr(fmt.Sprintf("failed to find cache layer with SHA '%s'", diffID))
}
if os.IsPermission(err) {
return NewReadErr(fmt.Sprintf("failed to read cache layer with SHA '%s' due to insufficient permissions", diffID))
}
return err
}

View File

@ -10,9 +10,6 @@ import (
"github.com/sclevine/spec" "github.com/sclevine/spec"
"github.com/sclevine/spec/report" "github.com/sclevine/spec/report"
"github.com/buildpacks/lifecycle/cmd"
"github.com/buildpacks/lifecycle/log"
"github.com/buildpacks/lifecycle/buildpack" "github.com/buildpacks/lifecycle/buildpack"
"github.com/buildpacks/lifecycle/cache" "github.com/buildpacks/lifecycle/cache"
"github.com/buildpacks/lifecycle/platform" "github.com/buildpacks/lifecycle/platform"
@ -31,7 +28,6 @@ func testVolumeCache(t *testing.T, when spec.G, it spec.S) {
backupDir string backupDir string
stagingDir string stagingDir string
committedDir string committedDir string
testLogger log.Logger
) )
it.Before(func() { it.Before(func() {
@ -46,7 +42,6 @@ func testVolumeCache(t *testing.T, when spec.G, it spec.S) {
backupDir = filepath.Join(volumeDir, "committed-backup") backupDir = filepath.Join(volumeDir, "committed-backup")
stagingDir = filepath.Join(volumeDir, "staging") stagingDir = filepath.Join(volumeDir, "staging")
committedDir = filepath.Join(volumeDir, "committed") committedDir = filepath.Join(volumeDir, "committed")
testLogger = cmd.DefaultLogger
}) })
it.After(func() { it.After(func() {
@ -55,7 +50,7 @@ func testVolumeCache(t *testing.T, when spec.G, it spec.S) {
when("#NewVolumeCache", func() { when("#NewVolumeCache", func() {
it("returns an error when the volume path does not exist", func() { it("returns an error when the volume path does not exist", func() {
_, err := cache.NewVolumeCache(filepath.Join(tmpDir, "does_not_exist"), testLogger) _, err := cache.NewVolumeCache(filepath.Join(tmpDir, "does_not_exist"))
if err == nil { if err == nil {
t.Fatal("expected NewVolumeCache to fail because volume path does not exist") t.Fatal("expected NewVolumeCache to fail because volume path does not exist")
} }
@ -71,7 +66,7 @@ func testVolumeCache(t *testing.T, when spec.G, it spec.S) {
it("clears staging", func() { it("clears staging", func() {
var err error var err error
subject, err = cache.NewVolumeCache(volumeDir, testLogger) subject, err = cache.NewVolumeCache(volumeDir)
h.AssertNil(t, err) h.AssertNil(t, err)
_, err = os.Stat(filepath.Join(stagingDir, "some-layer.tar")) _, err = os.Stat(filepath.Join(stagingDir, "some-layer.tar"))
@ -85,7 +80,7 @@ func testVolumeCache(t *testing.T, when spec.G, it spec.S) {
it("creates staging dir", func() { it("creates staging dir", func() {
var err error var err error
subject, err = cache.NewVolumeCache(volumeDir, testLogger) subject, err = cache.NewVolumeCache(volumeDir)
h.AssertNil(t, err) h.AssertNil(t, err)
_, err = os.Stat(stagingDir) _, err = os.Stat(stagingDir)
@ -97,7 +92,7 @@ func testVolumeCache(t *testing.T, when spec.G, it spec.S) {
it("creates committed dir", func() { it("creates committed dir", func() {
var err error var err error
subject, err = cache.NewVolumeCache(volumeDir, testLogger) subject, err = cache.NewVolumeCache(volumeDir)
h.AssertNil(t, err) h.AssertNil(t, err)
_, err = os.Stat(committedDir) _, err = os.Stat(committedDir)
@ -114,7 +109,7 @@ func testVolumeCache(t *testing.T, when spec.G, it spec.S) {
it("clears the backup dir", func() { it("clears the backup dir", func() {
var err error var err error
subject, err = cache.NewVolumeCache(volumeDir, testLogger) subject, err = cache.NewVolumeCache(volumeDir)
h.AssertNil(t, err) h.AssertNil(t, err)
_, err = os.Stat(filepath.Join(backupDir, "some-layer.tar")) _, err = os.Stat(filepath.Join(backupDir, "some-layer.tar"))
@ -129,7 +124,7 @@ func testVolumeCache(t *testing.T, when spec.G, it spec.S) {
it.Before(func() { it.Before(func() {
var err error var err error
subject, err = cache.NewVolumeCache(volumeDir, testLogger) subject, err = cache.NewVolumeCache(volumeDir)
h.AssertNil(t, err) h.AssertNil(t, err)
}) })
@ -211,7 +206,7 @@ func testVolumeCache(t *testing.T, when spec.G, it spec.S) {
when("layer does not exist", func() { when("layer does not exist", func() {
it("returns an error", func() { it("returns an error", func() {
_, err := subject.RetrieveLayer("some_nonexistent_sha") _, err := subject.RetrieveLayer("some_nonexistent_sha")
h.AssertError(t, err, "failed to find cache layer with SHA 'some_nonexistent_sha'") h.AssertError(t, err, "layer with SHA 'some_nonexistent_sha' not found")
}) })
}) })
}) })
@ -235,7 +230,7 @@ func testVolumeCache(t *testing.T, when spec.G, it spec.S) {
when("layer does not exist", func() { when("layer does not exist", func() {
it("returns an error", func() { it("returns an error", func() {
_, err := subject.RetrieveLayerFile("some_nonexistent_sha") _, err := subject.RetrieveLayerFile("some_nonexistent_sha")
h.AssertError(t, err, "failed to find cache layer with SHA 'some_nonexistent_sha'") h.AssertError(t, err, "layer with SHA 'some_nonexistent_sha' not found")
}) })
}) })
}) })
@ -345,7 +340,7 @@ func testVolumeCache(t *testing.T, when spec.G, it spec.S) {
h.AssertNil(t, subject.AddLayerFile(tarPath, "some_sha")) h.AssertNil(t, subject.AddLayerFile(tarPath, "some_sha"))
_, err := subject.RetrieveLayer("some_sha") _, err := subject.RetrieveLayer("some_sha")
h.AssertError(t, err, "failed to find cache layer with SHA 'some_sha'") h.AssertError(t, err, "layer with SHA 'some_sha' not found")
}) })
}) })
@ -420,7 +415,7 @@ func testVolumeCache(t *testing.T, when spec.G, it spec.S) {
h.AssertNil(t, subject.AddLayer(layerReader, layerSha)) h.AssertNil(t, subject.AddLayer(layerReader, layerSha))
_, err := subject.RetrieveLayer(layerSha) _, err := subject.RetrieveLayer(layerSha)
h.AssertError(t, err, fmt.Sprintf("failed to find cache layer with SHA '%s'", layerSha)) h.AssertError(t, err, fmt.Sprintf("layer with SHA '%s' not found", layerSha))
}) })
}) })
@ -512,21 +507,6 @@ func testVolumeCache(t *testing.T, when spec.G, it spec.S) {
h.AssertEq(t, string(bytes), "existing data") h.AssertEq(t, string(bytes), "existing data")
}) })
}) })
when("the layer does not exist", func() {
it("fails with a read error", func() {
err := subject.ReuseLayer("some_nonexistent_sha")
isReadErr, _ := cache.IsReadErr(err)
h.AssertEq(t, isReadErr, true)
err = subject.Commit()
h.AssertNil(t, err)
_, err = subject.RetrieveLayer("some_sha")
isReadErr, _ = cache.IsReadErr(err)
h.AssertEq(t, isReadErr, true)
})
})
}) })
when("attempting to commit more than once", func() { when("attempting to commit more than once", func() {

View File

@ -60,6 +60,6 @@ func Exit(err error) {
} }
func ExitWithVersion() { func ExitWithVersion() {
DefaultLogger.Info(buildVersion()) DefaultLogger.Infof(buildVersion())
os.Exit(0) os.Exit(0)
} }

View File

@ -19,8 +19,8 @@ import (
type analyzeCmd struct { type analyzeCmd struct {
*platform.Platform *platform.Platform
docker client.APIClient // construct if necessary before dropping privileges docker client.CommonAPIClient // construct if necessary before dropping privileges
keychain authn.Keychain // construct if necessary before dropping privileges keychain authn.Keychain // construct if necessary before dropping privileges
} }
// DefineFlags defines the flags that are considered valid and reads their values (if provided). // DefineFlags defines the flags that are considered valid and reads their values (if provided).

View File

@ -61,9 +61,6 @@ func Run(c Command, withPhaseName string, asSubcommand bool) {
// We print a warning here, so we should disable color if needed and set the log level before exercising this logic. // We print a warning here, so we should disable color if needed and set the log level before exercising this logic.
for _, arg := range flagSet.Args() { for _, arg := range flagSet.Args() {
if len(arg) == 0 {
continue
}
if arg[0:1] == "-" { if arg[0:1] == "-" {
cmd.DefaultLogger.Warnf("Warning: unconsumed flag-like positional arg: \n\t%s\n\t This will not be interpreted as a flag.\n\t Did you mean to put this before the first positional argument?", arg) cmd.DefaultLogger.Warnf("Warning: unconsumed flag-like positional arg: \n\t%s\n\t This will not be interpreted as a flag.\n\t Did you mean to put this before the first positional argument?", arg)
} }

View File

@ -23,8 +23,8 @@ import (
type createCmd struct { type createCmd struct {
*platform.Platform *platform.Platform
docker client.APIClient // construct if necessary before dropping privileges docker client.CommonAPIClient // construct if necessary before dropping privileges
keychain authn.Keychain // construct if necessary before dropping privileges keychain authn.Keychain // construct if necessary before dropping privileges
} }
// DefineFlags defines the flags that are considered valid and reads their values (if provided). // DefineFlags defines the flags that are considered valid and reads their values (if provided).

View File

@ -12,15 +12,15 @@ import (
"github.com/buildpacks/imgutil/layout" "github.com/buildpacks/imgutil/layout"
"github.com/buildpacks/imgutil/local" "github.com/buildpacks/imgutil/local"
"github.com/buildpacks/imgutil/remote" "github.com/buildpacks/imgutil/remote"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1" v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
"github.com/buildpacks/lifecycle/log"
"github.com/buildpacks/lifecycle/auth" "github.com/buildpacks/lifecycle/auth"
"github.com/buildpacks/lifecycle/buildpack" "github.com/buildpacks/lifecycle/buildpack"
"github.com/buildpacks/lifecycle/cache" "github.com/buildpacks/lifecycle/cache"
@ -202,7 +202,7 @@ func (e *exportCmd) export(group buildpack.Group, cacheStore phase.Cache, analyz
case e.UseLayout: case e.UseLayout:
appImage, runImageID, err = e.initLayoutAppImage(analyzedMD) appImage, runImageID, err = e.initLayoutAppImage(analyzedMD)
case e.UseDaemon: case e.UseDaemon:
appImage, runImageID, err = e.initDaemonAppImage(analyzedMD, cmd.DefaultLogger) appImage, runImageID, err = e.initDaemonAppImage(analyzedMD)
default: default:
appImage, runImageID, err = e.initRemoteAppImage(analyzedMD) appImage, runImageID, err = e.initRemoteAppImage(analyzedMD)
} }
@ -260,8 +260,8 @@ func (e *exportCmd) export(group buildpack.Group, cacheStore phase.Cache, analyz
return nil return nil
} }
func (e *exportCmd) initDaemonAppImage(analyzedMD files.Analyzed, logger log.Logger) (imgutil.Image, string, error) { func (e *exportCmd) initDaemonAppImage(analyzedMD files.Analyzed) (imgutil.Image, string, error) {
var opts = []imgutil.ImageOption{ var opts = []local.ImageOption{
local.FromBaseImage(e.RunImageRef), local.FromBaseImage(e.RunImageRef),
} }
if e.supportsRunImageExtension() { if e.supportsRunImageExtension() {
@ -270,7 +270,7 @@ func (e *exportCmd) initDaemonAppImage(analyzedMD files.Analyzed, logger log.Log
return nil, "", cmd.FailErr(err, "get extended image config") return nil, "", cmd.FailErr(err, "get extended image config")
} }
if extendedConfig != nil { if extendedConfig != nil {
opts = append(opts, local.WithConfig(extendedConfig)) opts = append(opts, local.WithConfig(toContainerConfig(extendedConfig)))
} }
} }
@ -303,7 +303,7 @@ func (e *exportCmd) initDaemonAppImage(analyzedMD files.Analyzed, logger log.Log
} }
if e.LaunchCacheDir != "" { if e.LaunchCacheDir != "" {
volumeCache, err := cache.NewVolumeCache(e.LaunchCacheDir, logger) volumeCache, err := cache.NewVolumeCache(e.LaunchCacheDir)
if err != nil { if err != nil {
return nil, "", cmd.FailErr(err, "create launch cache") return nil, "", cmd.FailErr(err, "create launch cache")
} }
@ -312,8 +312,59 @@ func (e *exportCmd) initDaemonAppImage(analyzedMD files.Analyzed, logger log.Log
return appImage, runImageID.String(), nil return appImage, runImageID.String(), nil
} }
func toContainerConfig(v1C *v1.Config) *container.Config {
return &container.Config{
ArgsEscaped: v1C.ArgsEscaped,
AttachStderr: v1C.AttachStderr,
AttachStdin: v1C.AttachStdin,
AttachStdout: v1C.AttachStdout,
Cmd: v1C.Cmd,
Domainname: v1C.Domainname,
Entrypoint: v1C.Entrypoint,
Env: v1C.Env,
ExposedPorts: toNATPortSet(v1C.ExposedPorts),
Healthcheck: toHealthConfig(v1C.Healthcheck),
Hostname: v1C.Hostname,
Image: v1C.Image,
Labels: v1C.Labels,
MacAddress: v1C.MacAddress,
NetworkDisabled: v1C.NetworkDisabled,
OnBuild: v1C.OnBuild,
OpenStdin: v1C.OpenStdin,
Shell: v1C.Shell,
StdinOnce: v1C.StdinOnce,
StopSignal: v1C.StopSignal,
StopTimeout: nil,
Tty: v1C.Tty,
User: v1C.User,
Volumes: v1C.Volumes,
WorkingDir: v1C.WorkingDir,
}
}
func toHealthConfig(v1H *v1.HealthConfig) *container.HealthConfig {
if v1H == nil {
return &container.HealthConfig{}
}
return &container.HealthConfig{
Interval: v1H.Interval,
Retries: v1H.Retries,
StartPeriod: v1H.StartPeriod,
Test: v1H.Test,
Timeout: v1H.Timeout,
}
}
func toNATPortSet(v1Ps map[string]struct{}) nat.PortSet {
portSet := make(map[nat.Port]struct{})
for k, v := range v1Ps {
portSet[nat.Port(k)] = v
}
return portSet
}
func (e *exportCmd) initRemoteAppImage(analyzedMD files.Analyzed) (imgutil.Image, string, error) { func (e *exportCmd) initRemoteAppImage(analyzedMD files.Analyzed) (imgutil.Image, string, error) {
var appOpts = []imgutil.ImageOption{ var opts = []remote.ImageOption{
remote.FromBaseImage(e.RunImageRef), remote.FromBaseImage(e.RunImageRef),
} }
@ -324,57 +375,43 @@ func (e *exportCmd) initRemoteAppImage(analyzedMD files.Analyzed) (imgutil.Image
} }
if extendedConfig != nil { if extendedConfig != nil {
cmd.DefaultLogger.Debugf("Using config from extensions...") cmd.DefaultLogger.Debugf("Using config from extensions...")
appOpts = append(appOpts, remote.WithConfig(extendedConfig)) opts = append(opts, remote.WithConfig(extendedConfig))
} }
} }
if e.supportsHistory() { if e.supportsHistory() {
appOpts = append(appOpts, remote.WithHistory()) opts = append(opts, remote.WithHistory())
} }
appOpts = append(appOpts, image.GetInsecureOptions(e.InsecureRegistries)...) opts = append(opts, image.GetInsecureOptions(e.InsecureRegistries)...)
if analyzedMD.PreviousImageRef() != "" { if analyzedMD.PreviousImageRef() != "" {
cmd.DefaultLogger.Infof("Reusing layers from image '%s'", analyzedMD.PreviousImageRef()) cmd.DefaultLogger.Infof("Reusing layers from image '%s'", analyzedMD.PreviousImageRef())
appOpts = append(appOpts, remote.WithPreviousImage(analyzedMD.PreviousImageRef())) opts = append(opts, remote.WithPreviousImage(analyzedMD.PreviousImageRef()))
} }
if !e.customSourceDateEpoch().IsZero() { if !e.customSourceDateEpoch().IsZero() {
appOpts = append(appOpts, remote.WithCreatedAt(e.customSourceDateEpoch())) opts = append(opts, remote.WithCreatedAt(e.customSourceDateEpoch()))
} }
appImage, err := remote.NewImage( appImage, err := remote.NewImage(
e.OutputImageRef, e.OutputImageRef,
e.keychain, e.keychain,
appOpts..., opts...,
) )
if err != nil { if err != nil {
return nil, "", cmd.FailErr(err, "create new app image") return nil, "", cmd.FailErr(err, "create new app image")
} }
runImageID, err := func() (string, error) { runImage, err := remote.NewImage(e.RunImageRef, e.keychain, remote.FromBaseImage(e.RunImageRef))
runImage, err := remote.NewImage(
e.RunImageRef,
e.keychain,
append(
image.GetInsecureOptions(e.InsecureRegistries),
remote.FromBaseImage(e.RunImageRef),
)...,
)
if err != nil {
return "", fmt.Errorf("failed to access run image: %w", err)
}
runImageID, err := runImage.Identifier()
if err != nil {
return "", fmt.Errorf("failed to get run image identifier: %w", err)
}
return runImageID.String(), nil
}()
if err != nil { if err != nil {
return nil, "", cmd.FailErr(err, "get run image ID") return nil, "", cmd.FailErr(err, "access run image")
} }
runImageID, err := runImage.Identifier()
return appImage, runImageID, nil if err != nil {
return nil, "", cmd.FailErr(err, "get run image reference")
}
return appImage, runImageID.String(), nil
} }
func (e *exportCmd) initLayoutAppImage(analyzedMD files.Analyzed) (imgutil.Image, string, error) { func (e *exportCmd) initLayoutAppImage(analyzedMD files.Analyzed) (imgutil.Image, string, error) {
@ -383,7 +420,7 @@ func (e *exportCmd) initLayoutAppImage(analyzedMD files.Analyzed) (imgutil.Image
return nil, "", cmd.FailErr(err, "parsing run image reference") return nil, "", cmd.FailErr(err, "parsing run image reference")
} }
var opts = []imgutil.ImageOption{ var opts = []layout.ImageOption{
layout.FromBaseImagePath(runImageIdentifier.Path), layout.FromBaseImagePath(runImageIdentifier.Path),
} }

View File

@ -96,14 +96,14 @@ func (ch *DefaultCacheHandler) InitCache(cacheImageRef string, cacheDir string,
cacheStore phase.Cache cacheStore phase.Cache
err error err error
) )
logger := cmd.DefaultLogger
if cacheImageRef != "" { if cacheImageRef != "" {
logger := cmd.DefaultLogger
cacheStore, err = cache.NewImageCacheFromName(cacheImageRef, ch.keychain, logger, cache.NewImageDeleter(cache.NewImageComparer(), logger, deletionEnabled)) cacheStore, err = cache.NewImageCacheFromName(cacheImageRef, ch.keychain, logger, cache.NewImageDeleter(cache.NewImageComparer(), logger, deletionEnabled))
if err != nil { if err != nil {
return nil, errors.Wrap(err, "creating image cache") return nil, errors.Wrap(err, "creating image cache")
} }
} else if cacheDir != "" { } else if cacheDir != "" {
cacheStore, err = cache.NewVolumeCache(cacheDir, logger) cacheStore, err = cache.NewVolumeCache(cacheDir)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "creating volume cache") return nil, errors.Wrap(err, "creating volume cache")
} }
@ -118,14 +118,14 @@ func initCache(cacheImageTag, cacheDir string, keychain authn.Keychain, deletion
cacheStore phase.Cache cacheStore phase.Cache
err error err error
) )
logger := cmd.DefaultLogger
if cacheImageTag != "" { if cacheImageTag != "" {
logger := cmd.DefaultLogger
cacheStore, err = cache.NewImageCacheFromName(cacheImageTag, keychain, logger, cache.NewImageDeleter(cache.NewImageComparer(), logger, deletionEnabled)) cacheStore, err = cache.NewImageCacheFromName(cacheImageTag, keychain, logger, cache.NewImageDeleter(cache.NewImageComparer(), logger, deletionEnabled))
if err != nil { if err != nil {
return nil, cmd.FailErr(err, "create image cache") return nil, cmd.FailErr(err, "create image cache")
} }
} else if cacheDir != "" { } else if cacheDir != "" {
cacheStore, err = cache.NewVolumeCache(cacheDir, logger) cacheStore, err = cache.NewVolumeCache(cacheDir)
if err != nil { if err != nil {
return nil, cmd.FailErr(err, "create volume cache") return nil, cmd.FailErr(err, "create volume cache")
} }

View File

@ -113,7 +113,7 @@ func (r *rebaseCmd) Exec() error {
local.FromBaseImage(r.RunImageRef), local.FromBaseImage(r.RunImageRef),
) )
} else { } else {
var opts []imgutil.ImageOption var opts []remote.ImageOption
opts = append(opts, append(image.GetInsecureOptions(r.InsecureRegistries), remote.FromBaseImage(r.RunImageRef))...) opts = append(opts, append(image.GetInsecureOptions(r.InsecureRegistries), remote.FromBaseImage(r.RunImageRef))...)
newBaseImage, err = remote.NewImage( newBaseImage, err = remote.NewImage(
@ -168,7 +168,7 @@ func (r *rebaseCmd) setAppImage() error {
return err return err
} }
var opts = []imgutil.ImageOption{ var opts = []remote.ImageOption{
remote.FromBaseImage(targetImageRef), remote.FromBaseImage(targetImageRef),
} }
@ -196,7 +196,7 @@ func (r *rebaseCmd) setAppImage() error {
} }
// we find the best mirror for the run image as this point // we find the best mirror for the run image as this point
r.RunImageRef, err = platform.BestRunImageMirrorFor(registry, runImage, r.AccessChecker()) r.RunImageRef, err = platform.BestRunImageMirrorFor(registry, runImage, r.LifecycleInputs.AccessChecker())
if err != nil { if err != nil {
return err return err
} }

View File

@ -31,8 +31,8 @@ const kanikoDir = "/kaniko"
type restoreCmd struct { type restoreCmd struct {
*platform.Platform *platform.Platform
docker client.APIClient // construct if necessary before dropping privileges docker client.CommonAPIClient // construct if necessary before dropping privileges
keychain authn.Keychain // construct if necessary before dropping privileges keychain authn.Keychain // construct if necessary before dropping privileges
} }
// DefineFlags defines the flags that are considered valid and reads their values (if provided). // DefineFlags defines the flags that are considered valid and reads their values (if provided).
@ -50,9 +50,6 @@ func (r *restoreCmd) DefineFlags() {
cli.FlagBuildImage(&r.BuildImageRef) cli.FlagBuildImage(&r.BuildImageRef)
} }
cli.FlagAnalyzedPath(&r.AnalyzedPath) cli.FlagAnalyzedPath(&r.AnalyzedPath)
if r.PlatformAPI.AtLeast("0.14") {
cli.FlagRunPath(&r.RunPath)
}
cli.FlagCacheDir(&r.CacheDir) cli.FlagCacheDir(&r.CacheDir)
cli.FlagCacheImage(&r.CacheImageRef) cli.FlagCacheImage(&r.CacheImageRef)
cli.FlagGID(&r.GID) cli.FlagGID(&r.GID)
@ -119,27 +116,18 @@ func (r *restoreCmd) Exec() error {
return cmd.FailErr(err, "get digest reference for builder image") return cmd.FailErr(err, "get digest reference for builder image")
} }
analyzedMD.BuildImage = &files.ImageIdentifier{Reference: digestRef.String()} analyzedMD.BuildImage = &files.ImageIdentifier{Reference: digestRef.String()}
cmd.DefaultLogger.Debug("Adding build image info to analyzed metadata: ") cmd.DefaultLogger.Debugf("Adding build image info to analyzed metadata: ")
cmd.DefaultLogger.Debug(encoding.ToJSONMaybe(analyzedMD.BuildImage)) cmd.DefaultLogger.Debugf(encoding.ToJSONMaybe(analyzedMD.BuildImage))
} }
var ( var (
runImage imgutil.Image runImage imgutil.Image
) )
runImageName := analyzedMD.RunImageImage() // FIXME: if we have a digest reference available in `Reference` (e.g., in the non-daemon case) we should use it runImageName := analyzedMD.RunImageImage() // FIXME: if we have a digest reference available in `Reference` (e.g., in the non-daemon case) we should use it
accessibleRunImage, err := r.runImageAccessCheck(runImageName)
if err != nil {
return err
}
if runImageName != accessibleRunImage {
analyzedMD.RunImage.Image = accessibleRunImage
analyzedMD.RunImage.Reference = accessibleRunImage
}
if r.supportsRunImageExtension() && needsPulling(analyzedMD.RunImage) { if r.supportsRunImageExtension() && needsPulling(analyzedMD.RunImage) {
cmd.DefaultLogger.Debugf("Pulling run image metadata for %s...", accessibleRunImage) cmd.DefaultLogger.Debugf("Pulling run image metadata for %s...", runImageName)
runImage, err = r.pullSparse(accessibleRunImage) runImage, err = r.pullSparse(runImageName)
if err != nil { if err != nil {
return cmd.FailErr(err, fmt.Sprintf("pull run image %s", accessibleRunImage)) return cmd.FailErr(err, fmt.Sprintf("pull run image %s", runImageName))
} }
// update analyzed metadata, even if we only needed to pull the image metadata, because // update analyzed metadata, even if we only needed to pull the image metadata, because
// the extender needs a digest reference in analyzed.toml, // the extender needs a digest reference in analyzed.toml,
@ -150,9 +138,9 @@ func (r *restoreCmd) Exec() error {
} else if r.needsUpdating(analyzedMD.RunImage, group) { } else if r.needsUpdating(analyzedMD.RunImage, group) {
cmd.DefaultLogger.Debugf("Updating run image info in analyzed metadata...") cmd.DefaultLogger.Debugf("Updating run image info in analyzed metadata...")
h := image.NewHandler(r.docker, r.keychain, r.LayoutDir, r.UseLayout, r.InsecureRegistries) h := image.NewHandler(r.docker, r.keychain, r.LayoutDir, r.UseLayout, r.InsecureRegistries)
runImage, err = h.InitImage(accessibleRunImage) runImage, err = h.InitImage(runImageName)
if err != nil || !runImage.Found() { if err != nil || !runImage.Found() {
return cmd.FailErr(err, fmt.Sprintf("get run image %s", accessibleRunImage)) return cmd.FailErr(err, fmt.Sprintf("get run image %s", runImageName))
} }
if err = r.updateAnalyzedMD(&analyzedMD, runImage); err != nil { if err = r.updateAnalyzedMD(&analyzedMD, runImage); err != nil {
return cmd.FailErr(err, "update analyzed metadata") return cmd.FailErr(err, "update analyzed metadata")
@ -187,12 +175,12 @@ func (r *restoreCmd) updateAnalyzedMD(analyzedMD *files.Analyzed, runImage imgut
return cmd.FailErr(err, "read target data from run image") return cmd.FailErr(err, "read target data from run image")
} }
} }
cmd.DefaultLogger.Debug("Run image info in analyzed metadata was: ") cmd.DefaultLogger.Debugf("Run image info in analyzed metadata was: ")
cmd.DefaultLogger.Debug(encoding.ToJSONMaybe(analyzedMD.RunImage)) cmd.DefaultLogger.Debugf(encoding.ToJSONMaybe(analyzedMD.RunImage))
analyzedMD.RunImage.Reference = digestRef.String() analyzedMD.RunImage.Reference = digestRef.String()
analyzedMD.RunImage.TargetMetadata = targetData analyzedMD.RunImage.TargetMetadata = targetData
cmd.DefaultLogger.Debug("Run image info in analyzed metadata is: ") cmd.DefaultLogger.Debugf("Run image info in analyzed metadata is: ")
cmd.DefaultLogger.Debug(encoding.ToJSONMaybe(analyzedMD.RunImage)) cmd.DefaultLogger.Debugf(encoding.ToJSONMaybe(analyzedMD.RunImage))
return nil return nil
} }
@ -204,23 +192,6 @@ func needsPulling(runImage *files.RunImage) bool {
return runImage.Extend return runImage.Extend
} }
func (r *restoreCmd) runImageAccessCheck(runImageName string) (string, error) {
if r.PlatformAPI.LessThan("0.14") {
return runImageName, nil
}
runToml, err := files.Handler.ReadRun(r.RunPath, cmd.DefaultLogger)
if err != nil {
return "", err
}
if !runToml.Contains(runImageName) {
return runImageName, nil
}
return platform.BestRunImageMirrorFor("", runToml.FindByRef(runImageName), r.AccessChecker())
}
func (r *restoreCmd) needsUpdating(runImage *files.RunImage, group buildpack.Group) bool { func (r *restoreCmd) needsUpdating(runImage *files.RunImage, group buildpack.Group) bool {
if r.PlatformAPI.LessThan("0.10") { if r.PlatformAPI.LessThan("0.10") {
return false return false
@ -260,7 +231,7 @@ func (r *restoreCmd) pullSparse(imageRef string) (imgutil.Image, error) {
return nil, fmt.Errorf("failed to create cache directory: %w", err) return nil, fmt.Errorf("failed to create cache directory: %w", err)
} }
var opts []imgutil.ImageOption var opts []remote.ImageOption
opts = append(opts, append(image.GetInsecureOptions(r.InsecureRegistries), remote.FromBaseImage(imageRef))...) opts = append(opts, append(image.GetInsecureOptions(r.InsecureRegistries), remote.FromBaseImage(imageRef))...)
// get remote image // get remote image
@ -305,7 +276,7 @@ func (r *restoreCmd) restore(layerMetadata files.LayersMetadata, group buildpack
Buildpacks: group.Group, Buildpacks: group.Group,
Logger: cmd.DefaultLogger, Logger: cmd.DefaultLogger,
PlatformAPI: r.PlatformAPI, PlatformAPI: r.PlatformAPI,
LayerMetadataRestorer: layer.NewDefaultMetadataRestorer(r.LayersDir, r.SkipLayers, cmd.DefaultLogger, r.PlatformAPI), LayerMetadataRestorer: layer.NewDefaultMetadataRestorer(r.LayersDir, r.SkipLayers, cmd.DefaultLogger),
LayersMetadata: layerMetadata, LayersMetadata: layerMetadata,
SBOMRestorer: layer.NewSBOMRestorer(layer.SBOMRestorerOpts{ SBOMRestorer: layer.NewSBOMRestorer(layer.SBOMRestorerOpts{
LayersDir: r.LayersDir, LayersDir: r.LayersDir,

3
env/build.go vendored
View File

@ -1,6 +1,7 @@
package env package env
import ( import (
"runtime"
"strings" "strings"
) )
@ -17,7 +18,7 @@ var BuildEnvIncludelist = []string{
"no_proxy", "no_proxy",
} }
var ignoreEnvVarCase = false var ignoreEnvVarCase = runtime.GOOS == "windows"
// NewBuildEnv returns a build-time Env from the given environment. // NewBuildEnv returns a build-time Env from the given environment.
// //

31
env/build_test.go vendored
View File

@ -1,6 +1,7 @@
package env_test package env_test
import ( import (
"runtime"
"sort" "sort"
"testing" "testing"
@ -10,6 +11,7 @@ import (
"github.com/sclevine/spec/report" "github.com/sclevine/spec/report"
"github.com/buildpacks/lifecycle/env" "github.com/buildpacks/lifecycle/env"
h "github.com/buildpacks/lifecycle/testhelpers"
) )
func TestBuildEnv(t *testing.T) { func TestBuildEnv(t *testing.T) {
@ -62,9 +64,15 @@ func testBuildEnv(t *testing.T, when spec.G, it spec.S) {
"NO_PROXY=some-no-proxy", "NO_PROXY=some-no-proxy",
"PATH=some-path", "PATH=some-path",
"PKG_CONFIG_PATH=some-pkg-config-path", "PKG_CONFIG_PATH=some-pkg-config-path",
"http_proxy=some-http-proxy", }
"https_proxy=some-https-proxy", // Environment variables in Windows are case insensitive, and are added by the lifecycle in uppercase.
"no_proxy=some-no-proxy", if runtime.GOOS != "windows" {
expectedVars = append(
expectedVars,
"http_proxy=some-http-proxy",
"https_proxy=some-https-proxy",
"no_proxy=some-no-proxy",
)
} }
if s := cmp.Diff(out, expectedVars); s != "" { if s := cmp.Diff(out, expectedVars); s != "" {
t.Fatalf("Unexpected env\n%s\n", s) t.Fatalf("Unexpected env\n%s\n", s)
@ -88,5 +96,22 @@ func testBuildEnv(t *testing.T, when spec.G, it spec.S) {
t.Fatalf("Unexpected root dir map\n%s\n", s) t.Fatalf("Unexpected root dir map\n%s\n", s)
} }
}) })
when("building in Windows", func() {
it.Before(func() {
if runtime.GOOS != "windows" {
t.Skip("This test only applies to Windows builds")
}
})
it("ignores case when initializing", func() {
benv := env.NewBuildEnv([]string{
"Path=some-path",
})
out := benv.List()
h.AssertEq(t, len(out), 1)
h.AssertEq(t, out[0], "PATH=some-path")
})
})
}) })
} }

3
env/env_test.go vendored
View File

@ -4,6 +4,7 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"sort" "sort"
"strings" "strings"
"testing" "testing"
@ -41,7 +42,7 @@ func testEnv(t *testing.T, when spec.G, it spec.S) {
"LIBRARY_PATH", "LIBRARY_PATH",
}, },
}, },
Vars: env.NewVars(map[string]string{}, false), Vars: env.NewVars(map[string]string{}, runtime.GOOS == "windows"),
} }
}) })

19
env/launch_test.go vendored
View File

@ -2,6 +2,7 @@ package env_test
import ( import (
"os" "os"
"runtime"
"strings" "strings"
"testing" "testing"
@ -10,6 +11,7 @@ import (
"github.com/sclevine/spec/report" "github.com/sclevine/spec/report"
"github.com/buildpacks/lifecycle/env" "github.com/buildpacks/lifecycle/env"
h "github.com/buildpacks/lifecycle/testhelpers"
) )
func TestLaunchEnv(t *testing.T) { func TestLaunchEnv(t *testing.T) {
@ -67,5 +69,22 @@ func testLaunchEnv(t *testing.T, when spec.G, it spec.S) {
t.Fatalf("Unexpected root dir map\n%s\n", s) t.Fatalf("Unexpected root dir map\n%s\n", s)
} }
}) })
when("launching in Windows", func() {
it.Before(func() {
if runtime.GOOS != "windows" {
t.Skip("This test only applies to Windows launches")
}
})
it("ignores case when initializing", func() {
benv := env.NewLaunchEnv([]string{
"Path=some-path",
}, "", "")
out := benv.List()
h.AssertEq(t, len(out), 1)
h.AssertEq(t, out[0], "PATH=some-path")
})
})
}) })
} }

394
go.mod
View File

@ -1,326 +1,144 @@
module github.com/buildpacks/lifecycle module github.com/buildpacks/lifecycle
require ( require (
github.com/BurntSushi/toml v1.5.0 github.com/BurntSushi/toml v1.3.2
github.com/GoogleContainerTools/kaniko v1.21.1
github.com/apex/log v1.9.0 github.com/apex/log v1.9.0
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.10.1 github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231213181459-b0fcec718dc6
github.com/buildpacks/imgutil v0.0.0-20250814164739-4b1c8875ba7e github.com/buildpacks/imgutil v0.0.0-20240206215312-f8d38e1de03d
github.com/chainguard-dev/kaniko v1.25.1
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589
github.com/containerd/containerd v1.7.28 github.com/containerd/containerd v1.7.14
github.com/docker/docker v28.3.3+incompatible github.com/docker/docker v25.0.5+incompatible
github.com/docker/go-connections v0.5.0
github.com/golang/mock v1.6.0 github.com/golang/mock v1.6.0
github.com/google/go-cmp v0.7.0 github.com/google/go-cmp v0.6.0
github.com/google/go-containerregistry v0.20.6 github.com/google/go-containerregistry v0.19.1
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/heroku/color v0.0.6 github.com/heroku/color v0.0.6
github.com/moby/buildkit v0.23.2 github.com/moby/buildkit v0.13.1
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/sclevine/spec v1.4.0 github.com/sclevine/spec v1.4.0
golang.org/x/sync v0.16.0 golang.org/x/sync v0.6.0
golang.org/x/sys v0.35.0 golang.org/x/sys v0.18.0
) )
require ( require (
4d63.com/gocheckcompilerdirectives v1.3.0 // indirect cloud.google.com/go/compute v1.24.0 // indirect
4d63.com/gochecknoglobals v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/compute/metadata v0.7.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/4meepo/tagalign v1.4.2 // indirect github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect
github.com/Abirdcfly/dupword v0.1.3 // indirect
github.com/Antonboom/errname v1.1.0 // indirect
github.com/Antonboom/nilnil v1.1.0 // indirect
github.com/Antonboom/testifylint v1.6.1 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.30 // indirect github.com/Azure/go-autorest/autorest v0.11.29 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.1 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.2 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/Crocmagnon/fatcontext v0.7.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect github.com/Microsoft/hcsshim v0.11.4 // indirect
github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 // indirect
github.com/Masterminds/semver/v3 v3.3.1 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.13.0 // indirect
github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect
github.com/agext/levenshtein v1.2.3 // indirect github.com/agext/levenshtein v1.2.3 // indirect
github.com/alecthomas/chroma/v2 v2.16.0 // indirect github.com/aws/aws-sdk-go-v2 v1.25.2 // indirect
github.com/alecthomas/go-check-sumtype v0.3.1 // indirect github.com/aws/aws-sdk-go-v2/config v1.27.4 // indirect
github.com/alexkohler/nakedret/v2 v2.0.6 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.4 // indirect
github.com/alexkohler/prealloc v1.0.0 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 // indirect
github.com/alingse/asasalint v0.0.11 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 // indirect
github.com/alingse/nilnesserr v0.2.0 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 // indirect
github.com/ashanbrown/forbidigo v1.6.0 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
github.com/ashanbrown/makezero v1.2.0 // indirect github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 // indirect
github.com/aws/aws-sdk-go-v2 v1.37.0 // indirect github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.5 // indirect
github.com/aws/aws-sdk-go-v2/config v1.30.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.18.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.0 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect github.com/aws/smithy-go v1.20.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.26.0 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.35.0 // indirect
github.com/aws/smithy-go v1.22.5 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/bkielbasa/cyclop v1.2.3 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/blizzy78/varnamelen v0.8.0 // indirect github.com/cilium/ebpf v0.12.3 // indirect
github.com/bombsimon/wsl/v4 v4.7.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect
github.com/breml/bidichk v0.3.3 // indirect github.com/containerd/cgroups/v3 v3.0.2 // indirect
github.com/breml/errchkjson v0.4.1 // indirect github.com/containerd/continuity v0.4.3 // indirect
github.com/butuzov/ireturn v0.4.0 // indirect github.com/containerd/fifo v1.1.0 // indirect
github.com/butuzov/mirror v1.3.0 // indirect
github.com/catenacyber/perfsprint v0.9.1 // indirect
github.com/ccojocar/zxcvbn-go v1.0.2 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/charithe/durationcheck v0.0.10 // indirect
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
github.com/charmbracelet/lipgloss v1.1.0 // indirect
github.com/charmbracelet/x/ansi v0.8.0 // indirect
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
github.com/charmbracelet/x/term v0.2.1 // indirect
github.com/chavacava/garif v0.1.0 // indirect
github.com/ckaznocha/intrange v0.3.1 // indirect
github.com/containerd/cgroups/v3 v3.0.5 // indirect
github.com/containerd/containerd/api v1.9.0 // indirect
github.com/containerd/containerd/v2 v2.1.3 // indirect
github.com/containerd/continuity v0.4.5 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v1.0.0-rc.1 // indirect github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect github.com/containerd/ttrpc v1.2.3 // indirect
github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/curioswitch/go-reassign v0.3.0 // indirect
github.com/daixiang0/gci v0.13.6 // indirect
github.com/dave/dst v0.27.3 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/denis-tingaikin/go-header v0.5.0 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/distribution/reference v0.6.0 // indirect github.com/distribution/reference v0.5.0 // indirect
github.com/dlclark/regexp2 v1.11.5 // indirect github.com/docker/cli v25.0.3+incompatible // indirect
github.com/docker/cli v28.2.2+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/docker/docker-credential-helpers v0.8.0 // indirect
github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/ePirat/docker-credential-gitlabci v1.0.0 // indirect github.com/ePirat/docker-credential-gitlabci v1.0.0 // indirect
github.com/ettle/strcase v0.2.0 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/fatih/structtag v1.2.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/firefart/nonamedreturns v1.0.6 // indirect github.com/go-logr/logr v1.4.1 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fzipp/gocyclo v0.6.0 // indirect
github.com/ghostiam/protogetter v0.3.15 // indirect
github.com/go-critic/go-critic v0.13.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/go-toolsmith/astcopy v1.1.0 // indirect
github.com/go-toolsmith/astequal v1.2.0 // indirect
github.com/go-toolsmith/astfmt v1.1.0 // indirect
github.com/go-toolsmith/astp v1.1.0 // indirect
github.com/go-toolsmith/strparse v1.1.0 // indirect
github.com/go-toolsmith/typep v1.1.0 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gofrs/flock v0.12.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect github.com/golang/protobuf v1.5.3 // indirect
github.com/golangci/go-printf-func-name v0.1.0 // indirect github.com/gorilla/mux v1.8.0 // indirect
github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/golangci/golangci-lint/v2 v2.1.2 // indirect github.com/hashicorp/go-memdb v1.3.4 // indirect
github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/golangci/misspell v0.6.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/golangci/plugin-module-register v0.1.1 // indirect
github.com/golangci/revgrep v0.8.0 // indirect
github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e // indirect
github.com/gordonklaus/ineffassign v0.1.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
github.com/gostaticanalysis/comment v1.5.0 // indirect
github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect
github.com/gostaticanalysis/nilerr v0.1.1 // indirect
github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hexops/gotextdiff v1.0.3 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jgautheron/goconst v1.8.1 // indirect
github.com/jingyugao/rowserrcheck v1.1.1 // indirect
github.com/jjti/go-spancheck v0.6.4 // indirect
github.com/julz/importas v0.2.0 // indirect
github.com/karamaru-alpha/copyloopvar v1.2.1 // indirect
github.com/karrick/godirwalk v1.17.0 // indirect github.com/karrick/godirwalk v1.17.0 // indirect
github.com/kisielk/errcheck v1.9.0 // indirect github.com/klauspost/compress v1.17.4 // indirect
github.com/kkHAIKE/contextcheck v1.1.6 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/kulti/thelper v0.6.3 // indirect
github.com/kunwardeep/paralleltest v1.0.14 // indirect
github.com/lasiar/canonicalheader v1.1.2 // indirect
github.com/ldez/exptostd v0.4.3 // indirect
github.com/ldez/gomoddirectives v0.6.1 // indirect
github.com/ldez/grignotin v0.9.0 // indirect
github.com/ldez/tagliatelle v0.7.1 // indirect
github.com/ldez/usetesting v0.4.3 // indirect
github.com/leonklingele/grouper v1.1.2 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/macabu/inamedparam v0.2.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/manuelarte/funcorder v0.2.1 // indirect
github.com/maratori/testableexamples v1.0.0 // indirect
github.com/maratori/testpackage v1.1.1 // indirect
github.com/matoous/godox v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mgechev/revive v1.9.0 // indirect github.com/minio/highwayhash v1.0.2 // indirect
github.com/minio/highwayhash v1.0.3 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/go-archive v0.1.0 // indirect
github.com/moby/locker v1.0.1 // indirect github.com/moby/locker v1.0.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect github.com/moby/swarmkit/v2 v2.0.0-20230911190601-f082dd7a0cee // indirect
github.com/moby/sys/mount v0.3.4 // indirect github.com/moby/sys/mount v0.3.3 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect github.com/moby/sys/mountinfo v0.7.1 // indirect
github.com/moby/sys/reexec v0.1.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect github.com/moby/sys/signal v0.7.0 // indirect
github.com/moby/sys/signal v0.7.1 // indirect github.com/moby/sys/symlink v0.2.0 // indirect
github.com/moby/sys/symlink v0.3.0 // indirect github.com/moby/sys/user v0.1.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect github.com/moby/term v0.5.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/moby/term v0.5.2 // indirect
github.com/moricho/tparallel v0.3.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect github.com/morikuni/aec v1.0.0 // indirect
github.com/muesli/termenv v0.16.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nakabonne/nestif v0.3.1 // indirect
github.com/nishanths/exhaustive v0.12.0 // indirect
github.com/nishanths/predeclared v0.2.2 // indirect
github.com/nunnatsa/ginkgolinter v0.19.1 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/otiai10/copy v1.14.1 // indirect github.com/opencontainers/selinux v1.11.0 // indirect
github.com/otiai10/mint v1.6.3 // indirect github.com/otiai10/copy v1.14.0 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect github.com/prometheus/client_golang v1.17.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/prometheus/client_model v0.5.0 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/prometheus/common v0.44.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/procfs v0.12.0 // indirect
github.com/polyfloyd/go-errorlint v1.8.0 // indirect
github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.64.0 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/quasilyte/go-ruleguard v0.4.4 // indirect
github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect
github.com/quasilyte/gogrep v0.5.0 // indirect
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
github.com/raeperd/recvcheck v0.2.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/ryancurrah/gomodguard v1.4.1 // indirect
github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect
github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect
github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
github.com/sashamelentyev/usestdlibvars v1.28.0 // indirect
github.com/sclevine/yj v0.0.0-20210612025309-737bdf40a5d1 // indirect
github.com/securego/gosec/v2 v2.22.3 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sivchari/containedctx v1.0.3 // indirect github.com/spf13/afero v1.11.0 // indirect
github.com/sonatard/noctx v0.1.0 // indirect github.com/vbatts/tar-split v0.11.5 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect go.etcd.io/etcd/raft/v3 v3.5.9 // indirect
github.com/sourcegraph/go-diff v0.7.0 // indirect
github.com/spf13/afero v1.14.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/cobra v1.9.1 // indirect
github.com/spf13/pflag v1.0.7 // indirect
github.com/spf13/viper v1.18.2 // indirect
github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/stretchr/testify v1.10.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tdakkota/asciicheck v0.4.1 // indirect
github.com/tetafro/godot v1.5.0 // indirect
github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 // indirect
github.com/timonwong/loggercheck v0.11.0 // indirect
github.com/tomarrell/wrapcheck/v2 v2.11.0 // indirect
github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 // indirect
github.com/ultraware/funlen v0.2.0 // indirect
github.com/ultraware/whitespace v0.2.0 // indirect
github.com/uudashr/gocognit v1.2.0 // indirect
github.com/uudashr/iface v1.3.1 // indirect
github.com/vbatts/tar-split v0.12.1 // indirect
github.com/xen0n/gosmopolitan v1.3.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
github.com/yagipy/maintidx v1.0.0 // indirect
github.com/yeya24/promlinter v0.3.0 // indirect
github.com/ykadowak/zerologlint v0.1.5 // indirect
gitlab.com/bosi/decorder v0.4.2 // indirect
go-simpler.org/musttag v0.13.0 // indirect
go-simpler.org/sloglint v0.11.0 // indirect
go.opencensus.io v0.24.0 // indirect go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect go.opentelemetry.io/otel v1.23.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect go.opentelemetry.io/otel/metric v1.23.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect go.opentelemetry.io/otel/trace v1.23.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect golang.org/x/crypto v0.19.0 // indirect
go.uber.org/atomic v1.9.0 // indirect golang.org/x/exp v0.0.0-20231219160207-73b9e39aefca // indirect
go.uber.org/automaxprocs v1.6.0 // indirect golang.org/x/mod v0.14.0 // indirect
go.uber.org/multierr v1.11.0 // indirect golang.org/x/net v0.21.0 // indirect
go.uber.org/zap v1.24.0 // indirect golang.org/x/oauth2 v0.17.0 // indirect
golang.org/x/crypto v0.40.0 // indirect golang.org/x/text v0.14.0 // indirect
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect golang.org/x/time v0.5.0 // indirect
golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect golang.org/x/tools v0.17.0 // indirect
golang.org/x/mod v0.26.0 // indirect google.golang.org/appengine v1.6.8 // indirect
golang.org/x/oauth2 v0.30.0 // indirect google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
golang.org/x/text v0.27.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect
golang.org/x/time v0.12.0 // indirect google.golang.org/grpc v1.61.1 // indirect
golang.org/x/tools v0.34.0 // indirect google.golang.org/protobuf v1.33.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250721164621-a45f3dfb1074 // indirect
google.golang.org/grpc v1.74.2 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
honnef.co/go/tools v0.6.1 // indirect
mvdan.cc/gofumpt v0.8.0 // indirect
mvdan.cc/unparam v0.0.0-20250301125049-0df0534333a4 // indirect
) )
tool golang.org/x/tools/cmd/goimports go 1.22
tool github.com/sclevine/yj
tool github.com/golang/mock/mockgen
tool github.com/golangci/golangci-lint/v2/cmd/golangci-lint
go 1.24.6

968
go.sum

File diff suppressed because it is too large Load Diff

View File

@ -1,51 +1,44 @@
version: "2"
run: run:
timeout: 6m timeout: 6m
linters: linters:
default: none disable-all: true
enable: enable:
- bodyclose - bodyclose
- copyloopvar
- dogsled - dogsled
- errcheck - errcheck
- exportloopref
- gocritic - gocritic
- goimports
- gosec - gosec
- gosimple
- govet - govet
- ineffassign - ineffassign
- intrange
- misspell - misspell
- nakedret - nakedret
- revive - revive
- staticcheck - staticcheck
- stylecheck
- typecheck
- unconvert - unconvert
- unused - unused
- whitespace - whitespace
settings:
govet:
enable: linters-settings:
- fieldalignment goimports:
exclusions: local-prefixes: github.com/buildpacks/lifecycle
generated: lax govet:
rules: enable:
- linters: - fieldalignment
- govet
text: pointer bytes could be
paths:
- third_party$
- builtin$
- examples$
issues: issues:
exclude-use-default: false
new-from-rev: 91593cf91797ca0a98ffa31842107a9d916da37b new-from-rev: 91593cf91797ca0a98ffa31842107a9d916da37b
formatters: exclude-rules:
enable: # Ignore this minor optimization.
- goimports # See https://github.com/golang/go/issues/44877#issuecomment-794565908
settings: - linters:
goimports: - govet
local-prefixes: text: "pointer bytes could be"
- github.com/buildpacks/lifecycle
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$

View File

@ -22,7 +22,7 @@ type Handler interface {
// - WHEN a docker client is provided then it returns a LocalHandler // - WHEN a docker client is provided then it returns a LocalHandler
// - WHEN an auth.Keychain is provided then it returns a RemoteHandler // - WHEN an auth.Keychain is provided then it returns a RemoteHandler
// - Otherwise nil is returned // - Otherwise nil is returned
func NewHandler(docker client.APIClient, keychain authn.Keychain, layoutDir string, useLayout bool, insecureRegistries []string) Handler { func NewHandler(docker client.CommonAPIClient, keychain authn.Keychain, layoutDir string, useLayout bool, insecureRegistries []string) Handler {
if layoutDir != "" && useLayout { if layoutDir != "" && useLayout {
return &LayoutHandler{ return &LayoutHandler{
layoutDir: layoutDir, layoutDir: layoutDir,

View File

@ -23,7 +23,7 @@ func testHandler(t *testing.T, when spec.G, it spec.S) {
var ( var (
mockController *gomock.Controller mockController *gomock.Controller
mockKeychain *testmockauth.MockKeychain mockKeychain *testmockauth.MockKeychain
dockerClient client.APIClient dockerClient client.CommonAPIClient
) )
it.Before(func() { it.Before(func() {

View File

@ -18,7 +18,7 @@ func TestLocalImageHandler(t *testing.T) {
func testLocalImageHandler(t *testing.T, when spec.G, it spec.S) { func testLocalImageHandler(t *testing.T, when spec.G, it spec.S) {
var ( var (
imageHandler image.Handler imageHandler image.Handler
dockerClient client.APIClient dockerClient client.CommonAPIClient
) )
when("Local handler", func() { when("Local handler", func() {

View File

@ -1,11 +1,11 @@
package image package image
import ( import (
"fmt"
"github.com/buildpacks/imgutil"
"github.com/buildpacks/imgutil/remote" "github.com/buildpacks/imgutil/remote"
"github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/authn"
"github.com/pkg/errors"
"github.com/buildpacks/lifecycle/cmd"
) )
// RegistryHandler takes care of the registry settings and checks // RegistryHandler takes care of the registry settings and checks
@ -56,15 +56,15 @@ TODO: This is a temporary solution in order to get insecure registries in other
TODO: Ideally we should fix the `imgutil.options` struct visibility in order to mock and test the `remote.WithRegistrySetting` TODO: Ideally we should fix the `imgutil.options` struct visibility in order to mock and test the `remote.WithRegistrySetting`
TODO: function correctly and use the RegistryHandler everywhere it is needed. TODO: function correctly and use the RegistryHandler everywhere it is needed.
*/ */
func GetInsecureOptions(insecureRegistries []string) []imgutil.ImageOption { func GetInsecureOptions(insecureRegistries []string) []remote.ImageOption {
var opts []imgutil.ImageOption var opts []remote.ImageOption
for _, insecureRegistry := range insecureRegistries { for _, insecureRegistry := range insecureRegistries {
opts = append(opts, remote.WithRegistrySetting(insecureRegistry, true)) opts = append(opts, remote.WithRegistrySetting(insecureRegistry, true))
} }
return opts return opts
} }
func verifyReadAccess(imageRef string, keychain authn.Keychain, opts []imgutil.ImageOption) error { func verifyReadAccess(imageRef string, keychain authn.Keychain, opts []remote.ImageOption) error {
if imageRef == "" { if imageRef == "" {
return nil return nil
} }
@ -72,13 +72,13 @@ func verifyReadAccess(imageRef string, keychain authn.Keychain, opts []imgutil.I
img, _ := remote.NewImage(imageRef, keychain, opts...) img, _ := remote.NewImage(imageRef, keychain, opts...)
canRead, err := img.CheckReadAccess() canRead, err := img.CheckReadAccess()
if !canRead { if !canRead {
return fmt.Errorf("failed to ensure registry read access to %s: %w", imageRef, err) cmd.DefaultLogger.Debugf("Error checking read access: %s", err)
return errors.Errorf("ensure registry read access to %s", imageRef)
} }
return nil return nil
} }
func verifyReadWriteAccess(imageRef string, keychain authn.Keychain, opts []imgutil.ImageOption) error { func verifyReadWriteAccess(imageRef string, keychain authn.Keychain, opts []remote.ImageOption) error {
if imageRef == "" { if imageRef == "" {
return nil return nil
} }
@ -86,7 +86,8 @@ func verifyReadWriteAccess(imageRef string, keychain authn.Keychain, opts []imgu
img, _ := remote.NewImage(imageRef, keychain, opts...) img, _ := remote.NewImage(imageRef, keychain, opts...)
canReadWrite, err := img.CheckReadWriteAccess() canReadWrite, err := img.CheckReadWriteAccess()
if !canReadWrite { if !canReadWrite {
return fmt.Errorf("failed to ensure registry read/write access to %s: %w", imageRef, err) cmd.DefaultLogger.Debugf("Error checking read/write access: %s", err)
return errors.Errorf("ensure registry read/write access to %s", imageRef)
} }
return nil return nil
} }

View File

@ -18,7 +18,7 @@ func (h *RemoteHandler) InitImage(imageRef string) (imgutil.Image, error) {
return nil, nil return nil, nil
} }
options := []imgutil.ImageOption{ options := []remote.ImageOption{
remote.FromBaseImage(imageRef), remote.FromBaseImage(imageRef),
} }

View File

@ -6,7 +6,7 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/chainguard-dev/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/pkg/config"
"github.com/containerd/containerd/platforms" "github.com/containerd/containerd/platforms"
"github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1" v1 "github.com/google/go-containerregistry/pkg/v1"

View File

@ -1,4 +1,4 @@
//go:build unix && !linux //go:build darwin
package kaniko package kaniko

View File

@ -7,11 +7,11 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/chainguard-dev/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/pkg/config"
"github.com/chainguard-dev/kaniko/pkg/executor" "github.com/GoogleContainerTools/kaniko/pkg/executor"
"github.com/chainguard-dev/kaniko/pkg/image" "github.com/GoogleContainerTools/kaniko/pkg/image"
"github.com/chainguard-dev/kaniko/pkg/util" "github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/chainguard-dev/kaniko/pkg/util/proc" "github.com/GoogleContainerTools/kaniko/pkg/util/proc"
v1 "github.com/google/go-containerregistry/pkg/v1" v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/mutate" "github.com/google/go-containerregistry/pkg/v1/mutate"
@ -39,7 +39,7 @@ func (a *DockerfileApplier) Apply(dockerfile extend.Dockerfile, toBaseImage v1.I
opts := createOptions(baseImageRef, dockerfile, withBuildOptions) opts := createOptions(baseImageRef, dockerfile, withBuildOptions)
// update ignore paths; kaniko does this here: // update ignore paths; kaniko does this here:
// https://github.com/chainguard-dev/kaniko/blob/v1.9.2/cmd/executor/cmd/root.go#L124 // https://github.com/GoogleContainerTools/kaniko/blob/v1.9.2/cmd/executor/cmd/root.go#L124
if opts.IgnoreVarRun { if opts.IgnoreVarRun {
// from kaniko: // from kaniko:
// /var/run is a special case. It's common to mount in /var/run/docker.sock // /var/run is a special case. It's common to mount in /var/run/docker.sock
@ -59,7 +59,7 @@ func (a *DockerfileApplier) Apply(dockerfile extend.Dockerfile, toBaseImage v1.I
} }
// change to root directory; kaniko does this here: // change to root directory; kaniko does this here:
// https://github.com/chainguard-dev/kaniko/blob/v1.9.2/cmd/executor/cmd/root.go#L160 // https://github.com/GoogleContainerTools/kaniko/blob/v1.9.2/cmd/executor/cmd/root.go#L160
if err = os.Chdir("/"); err != nil { if err = os.Chdir("/"); err != nil {
return nil, err return nil, err
} }

View File

@ -0,0 +1,14 @@
//go:build windows
package kaniko
import (
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/buildpacks/lifecycle/internal/extend"
"github.com/buildpacks/lifecycle/log"
)
func (a *DockerfileApplier) Apply(dockerfile extend.Dockerfile, toBaseImage v1.Image, withBuildOptions extend.Options, logger log.Logger) (v1.Image, error) {
return nil, nil
}

View File

@ -3,9 +3,6 @@ package fsutil
import ( import (
"os" "os"
"strings" "strings"
"sync"
"github.com/buildpacks/lifecycle/log"
) )
type OSInfo struct { type OSInfo struct {
@ -17,18 +14,12 @@ type Detector interface {
HasSystemdFile() bool HasSystemdFile() bool
ReadSystemdFile() (string, error) ReadSystemdFile() (string, error)
GetInfo(osReleaseContents string) OSInfo GetInfo(osReleaseContents string) OSInfo
StoredInfo() *OSInfo
InfoOnce(logger log.Logger)
} }
// DefaultDetector implements Detector type Detect struct {
type DefaultDetector struct {
once sync.Once
info *OSInfo
} }
// HasSystemdFile returns true if /etc/os-release exists with contents func (d *Detect) HasSystemdFile() bool {
func (d *DefaultDetector) HasSystemdFile() bool {
finfo, err := os.Stat("/etc/os-release") finfo, err := os.Stat("/etc/os-release")
if err != nil { if err != nil {
return false return false
@ -36,14 +27,12 @@ func (d *DefaultDetector) HasSystemdFile() bool {
return !finfo.IsDir() && finfo.Size() > 0 return !finfo.IsDir() && finfo.Size() > 0
} }
// ReadSystemdFile returns the contents of /etc/os-release func (d *Detect) ReadSystemdFile() (string, error) {
func (d *DefaultDetector) ReadSystemdFile() (string, error) {
bs, err := os.ReadFile("/etc/os-release") bs, err := os.ReadFile("/etc/os-release")
return string(bs), err return string(bs), err
} }
// GetInfo returns the OS distribution name and version from the contents of /etc/os-release func (d *Detect) GetInfo(osReleaseContents string) OSInfo {
func (d *DefaultDetector) GetInfo(osReleaseContents string) OSInfo {
ret := OSInfo{} ret := OSInfo{}
lines := strings.Split(osReleaseContents, "\n") lines := strings.Split(osReleaseContents, "\n")
for _, line := range lines { for _, line := range lines {
@ -62,18 +51,5 @@ func (d *DefaultDetector) GetInfo(osReleaseContents string) OSInfo {
break break
} }
} }
d.info = &ret // store for future use
return ret return ret
} }
// StoredInfo returns any OSInfo found during the last call to GetInfo
func (d *DefaultDetector) StoredInfo() *OSInfo {
return d.info
}
// InfoOnce logs an info message to the provided logger, but only once in the lifetime of the receiving DefaultDetector.
func (d *DefaultDetector) InfoOnce(logger log.Logger) {
d.once.Do(func() {
logger.Info("target distro name/version labels not found, reading /etc/os-release file")
})
}

View File

@ -1,4 +1,5 @@
//go:build linux //go:build linux
// +build linux
package fsutil_test package fsutil_test
@ -19,10 +20,10 @@ func TestDetectorUnix(t *testing.T) {
func testDetectorUnix(t *testing.T, when spec.G, it spec.S) { func testDetectorUnix(t *testing.T, when spec.G, it spec.S) {
when("we should have a file", func() { when("we should have a file", func() {
it("returns true correctly", func() { it("returns true correctly", func() {
h.AssertEq(t, (&fsutil.DefaultDetector{}).HasSystemdFile(), true) h.AssertEq(t, (&fsutil.Detect{}).HasSystemdFile(), true)
}) })
it("returns the file contents", func() { it("returns the file contents", func() {
s, err := (&fsutil.DefaultDetector{}).ReadSystemdFile() s, err := (&fsutil.Detect{}).ReadSystemdFile()
h.AssertNil(t, err) h.AssertNil(t, err)
h.AssertStringContains(t, s, "NAME") h.AssertStringContains(t, s, "NAME")
}) })

Some files were not shown because too many files have changed in this diff Show More