Compare commits
1238 Commits
Commits in this comparison (1238 total): 0f224448e5 through ce1a9c4525.
@@ -0,0 +1 @@
+out
@@ -2,12 +2,14 @@
 name: Bug
 about: Bug report
 title: ''
-labels: status/triage, type/bug
+labels: type/bug, status/triage
 assignees: ''

 ---

 ### Summary
-<!--- Please provide a general summary of the issue. -->
+<!-- Please provide a general summary of the issue. -->


 ---
@@ -15,17 +17,20 @@ assignees: ''
 ### Reproduction

 ##### Steps
-<!--- What steps should be taken to reproduce the issue? -->
+<!-- What steps should be taken to reproduce the issue? -->

 1.
 2.
 3.

 ##### Current behavior
-<!--- What happened? Logs, etc. could go here. -->
+<!-- What happened? Logs, etc. could go here. -->


-##### Expected
-<!--- What did you expect to happen? -->
+##### Expected behavior
+<!-- What did you expect to happen? -->


 ---
@@ -33,10 +38,15 @@ assignees: ''
 ### Context

 ##### lifecycle version
-<!--- If you can find this, it helps us pin down the issue. For example, run `pack inspect-builder BUILDER` which should report the lifecycle version in question. -->
+<!-- If you can find this, it helps us pin down the issue. For example, run `pack builder inspect <builder name>` which should report the lifecycle version in question. -->

 ##### platform version(s)
-<!--- For example run `pack report` and `docker info` and copy output here. -->
+<!-- For example run `pack report` and `docker info` and copy output here, redacting any sensitive information. -->

 ##### anything else?
-<!--- Tekton task version, kpack version, etc. -->
+<!-- Add any other context that may help (e.g., Tekton task version, kpack version, etc.). -->
@@ -0,0 +1,26 @@
+---
+name: Chore
+about: Suggest a chore that will help contributors and doesn't affect end users
+title: ''
+labels: type/chore, status/triage
+assignees: ''
+
+---
+
+### Summary
+<!-- Please describe why this chore matters, who will enjoy it and how. -->
+
+---
+
+### Proposal
+<!-- How do you think the chore should be implemented? -->
+
+---
+
+### Context
+<!-- Add any other context that may help. -->
@@ -0,0 +1,33 @@
+---
+name: Feature request
+about: Suggest a new feature or an improvement to existing functionality
+title: ''
+labels: type/enhancement, status/triage
+assignees: ''
+
+---
+
+### Summary
+<!-- Please describe the feature and why it matters. -->
+
+---
+
+### Proposal
+<!-- How do you think the feature should be implemented? -->
+
+---
+
+### Related
+<!-- If this feature addresses an RFC, please provide the RFC number below. -->
+
+RFC #___
+
+---
+
+### Context
+<!-- Add any other context that may help. -->
@@ -0,0 +1,16 @@
+version: 2
+updates:
+  - package-ecosystem: gomod
+    directory: "/"
+    schedule:
+      interval: weekly
+    groups:
+      # Group all minor/patch go dependencies into a single PR.
+      go-dependencies:
+        update-types:
+          - "minor"
+          - "patch"
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: weekly
@@ -0,0 +1,25 @@
+<!-- 🎉🎉🎉 Thank you for the PR!!! 🎉🎉🎉 -->
+
+### Summary
+<!-- Please describe your changes at a high level. -->
+
+#### Release notes
+<!-- Please provide 1-2 sentences for release notes. -->
+<!-- Example: When using platform API `0.7` or greater, the `creator` logs the expected phase header for the analyze phase -->
+
+---
+
+### Related
+<!-- If this PR addresses an issue, please provide the issue number below. -->
+
+Resolves #___
+
+---
+
+### Context
+<!-- Add any other context that may help reviewers (e.g., code that requires special attention, etc.). -->
@@ -3,38 +3,227 @@ name: build
 on:
   push:
     branches:
-      - master
+      - main
+      - 'release/**'
   pull_request:
     branches:
-      - master
+      - main
+      - 'release/**'

 jobs:
-  test-and-build:
+  test-linux-amd64:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
-      - name: Set up go
-        uses: actions/setup-go@v2-beta
+      - uses: actions/checkout@v4
         with:
-          go-version: '1.14'
-      - name: Set up go env
-        run: |
-          echo "::set-env name=GOPATH::$(go env GOPATH)"
-          echo "::add-path::$(go env GOPATH)/bin"
-        shell: bash
+          fetch-depth: '0'
+      - name: Setup go
+        uses: actions/setup-go@v5
+        with:
+          check-latest: true
+          go-version-file: 'go.mod'
       - name: Install jq
         run: |
           mkdir -p deps/bin
           curl -s -L -o deps/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64
           chmod +x deps/bin/jq
-          echo "::add-path::${PWD}/deps/bin"
+          echo "${PWD}/deps/bin" >> $GITHUB_PATH
       - name: Test
+        env:
+          TEST_COVERAGE: 1
         run: make test
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v5
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          file: ./out/tests/coverage-unit.txt
+          flags: unit,os_linux
+          fail_ci_if_error: true
+          verbose: true
+  test-linux-arm64:
+    runs-on: linux-arm64
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: '0'
+      - name: Setup go
+        uses: actions/setup-go@v5
+        with:
+          check-latest: true
+          go-version-file: 'go.mod'
+      - name: Test
+        run: |
+          make format || true
+          make test
+  build-and-publish:
+    needs:
+      - test-linux-amd64
+      - test-linux-arm64
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0 # fetch all history for all branches and tags
+      - name: Setup go
+        uses: actions/setup-go@v5
+        with:
+          check-latest: true
+          go-version-file: 'go.mod'
+      - name: Install Cosign
+        uses: sigstore/cosign-installer@v3
+      - name: Set version
+        run: |
+          echo "LIFECYCLE_VERSION=$(go run tools/version/main.go)" | tee -a $GITHUB_ENV version.txt
+      - uses: actions/upload-artifact@v4
+        with:
+          name: version
+          path: version.txt
+      - name: Set tag
+        run: |
+          echo "LIFECYCLE_IMAGE_TAG=$(git describe --always --abbrev=7)" >> tag.txt
+      - uses: actions/upload-artifact@v4
+        with:
+          name: tag
+          path: tag.txt
       - name: Build
         run: |
+          make clean
           make build
           make package
-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v4
         with:
           name: lifecycle-linux-x86-64
           path: out/lifecycle-v*+linux.x86-64.tgz
+      - uses: actions/upload-artifact@v4
+        with:
+          name: lifecycle-linux-x86-64-sha256
+          path: out/lifecycle-v*+linux.x86-64.tgz.sha256
+      - uses: actions/upload-artifact@v4
+        with:
+          name: lifecycle-linux-arm64
+          path: out/lifecycle-v*+linux.arm64.tgz
+      - uses: actions/upload-artifact@v4
+        with:
+          name: lifecycle-linux-arm64-sha256
+          path: out/lifecycle-v*+linux.arm64.tgz.sha256
+      - uses: actions/upload-artifact@v4
+        with:
+          name: lifecycle-linux-ppc64le
+          path: out/lifecycle-v*+linux.ppc64le.tgz
+      - uses: actions/upload-artifact@v4
+        with:
+          name: lifecycle-linux-ppc64le-sha256
+          path: out/lifecycle-v*+linux.ppc64le.tgz.sha256
+      - uses: actions/upload-artifact@v4
+        with:
+          name: lifecycle-linux-s390x
+          path: out/lifecycle-v*+linux.s390x.tgz
+      - uses: actions/upload-artifact@v4
+        with:
+          name: lifecycle-linux-s390x-sha256
+          path: out/lifecycle-v*+linux.s390x.tgz.sha256
+      - name: Generate SBOM JSON
+        uses: CycloneDX/gh-gomod-generate-sbom@v2
+        with:
+          args: mod -licenses -json -output lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json
+          version: ^v1
+      - uses: actions/upload-artifact@v4
+        with:
+          name: lifecycle-bom-cdx
+          path: lifecycle-v*-bom.cdx.json
+      - name: Calculate SBOM sha
+        run: |
+          shasum -a 256 lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json > lifecycle-v${{ env.LIFECYCLE_VERSION }}-bom.cdx.json.sha256
+      - uses: actions/upload-artifact@v4
+        with:
+          name: lifecycle-bom-cdx-sha256
+          path: lifecycle-v*-bom.cdx.json.sha256
+      - uses: azure/docker-login@v2
+        if: github.event_name == 'push'
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+      - uses: actions/download-artifact@v5
+        with:
+          name: tag
+      - name: Set env
+        run: |
+          cat tag.txt >> $GITHUB_ENV
+      - name: Publish images
+        if: github.event_name == 'push'
+        run: |
+          DOCKER_CLI_EXPERIMENTAL=enabled
+          LIFECYCLE_IMAGE_TAG=$(git describe --always --abbrev=7)
+
+          LINUX_AMD64_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.x86-64.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-x86-64 | awk '{print $NF}')
+          echo "LINUX_AMD64_SHA: $LINUX_AMD64_SHA"
+
+          LINUX_ARM64_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.arm64.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-arm64 -arch arm64 | awk '{print $NF}')
+          echo "LINUX_ARM64_SHA: $LINUX_ARM64_SHA"
+
+          LINUX_PPC64LE_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.ppc64le.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-ppc64le -arch ppc64le | awk '{print $NF}')
+          echo "LINUX_PPC64LE_SHA: LINUX_PPC64LE_SHA"
+
+          LINUX_S390X_SHA=$(go run ./tools/image/main.go -lifecyclePath ./out/lifecycle-v*+linux.s390x.tgz -tag buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-s390x -arch s390x | awk '{print $NF}')
+          echo "LINUX_S390X_SHA: $LINUX_S390X_SHA"
+
+          docker manifest create buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG} \
+            buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-x86-64@${LINUX_AMD64_SHA} \
+            buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-arm64@${LINUX_ARM64_SHA} \
+            buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-ppc64le@${LINUX_PPC64LE_SHA} \
+            buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}-linux-s390x@${LINUX_S390X_SHA}
+
+          MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG})
+          echo "MANIFEST_SHA: $MANIFEST_SHA"
+
+          cosign sign -r -y \
+            -a tag=${LIFECYCLE_IMAGE_TAG} \
+            buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}@${MANIFEST_SHA}
+          cosign verify \
+            --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" \
+            --certificate-oidc-issuer https://token.actions.githubusercontent.com \
+            -a tag=${LIFECYCLE_IMAGE_TAG} \
+            buildpacksio/lifecycle:${LIFECYCLE_IMAGE_TAG}
+      - name: Scan image
+        if: github.event_name == 'push'
+        uses: anchore/scan-action@v6
+        with:
+          image: buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}
+  pack-acceptance-linux:
+    if: github.event_name == 'push'
+    needs: build-and-publish
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          repository: 'buildpacks/pack'
+          path: 'pack'
+          ref: 'main'
+          fetch-depth: 0 # fetch all history for all branches and tags
+      - name: Setup go
+        uses: actions/setup-go@v5
+        with:
+          go-version-file: 'pack/go.mod'
+      - uses: actions/download-artifact@v5
+        with:
+          name: version
+      - uses: actions/download-artifact@v5
+        with:
+          name: tag
+      - name: Set env
+        run: |
+          cat version.txt >> $GITHUB_ENV
+          cat tag.txt >> $GITHUB_ENV
+      - uses: actions/download-artifact@v5
+        with:
+          name: lifecycle-linux-x86-64
+          path: pack
+      - name: Run pack acceptance
+        run: |
+          cd pack
+          git checkout $(git describe --abbrev=0 --tags) # check out the latest tag
+          LIFECYCLE_PATH="../lifecycle-v${{ env.LIFECYCLE_VERSION }}+linux.x86-64.tgz" \
+          LIFECYCLE_IMAGE="buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}" \
+          make acceptance
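The publish step above signs the multi-arch manifest with keyless cosign and then verifies it in place. The same verification can be run locally against any published tag; a minimal sketch, assuming cosign v2 is installed and with the tag left as a placeholder to substitute:

    # Verify the keyless signature on a published lifecycle image (tag is an example placeholder)
    cosign verify \
      --certificate-identity-regexp "https://github.com/buildpacks/lifecycle/.github/workflows/build.yml" \
      --certificate-oidc-issuer https://token.actions.githubusercontent.com \
      buildpacksio/lifecycle:<tag>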
@@ -0,0 +1,127 @@
+name: check-latest-release
+
+on:
+  schedule:
+    - cron: 0 2 * * 1,4
+  workflow_dispatch: {}
+
+jobs:
+  check-release:
+    runs-on:
+      - ubuntu-latest
+    permissions:
+      issues: write
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          check-latest: true
+          go-version-file: 'go.mod'
+      - name: Get previous release tag
+        id: get-previous-release-tag
+        uses: actions/github-script@v6
+        with:
+          github-token: ${{secrets.GITHUB_TOKEN}}
+          result-encoding: string
+          script: |
+            return github.rest.repos.getLatestRelease({
+              owner: "buildpacks",
+              repo: "lifecycle",
+            }).then(result => {
+              return result.data.tag_name
+            })
+      - name: Read go and release versions
+        id: read-versions
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          #!/usr/bin/env bash
+
+          set -euo pipefail
+
+          LATEST_GO_VERSION=$(go version | cut -d ' ' -f 3)
+
+          LATEST_RELEASE_VERSION=${{ steps.get-previous-release-tag.outputs.result }}
+
+          wget https://github.com/buildpacks/lifecycle/releases/download/$LATEST_RELEASE_VERSION/lifecycle-$LATEST_RELEASE_VERSION+linux.x86-64.tgz -O lifecycle.tgz
+          tar xzf lifecycle.tgz
+          LATEST_RELEASE_GO_VERSION=$(go version ./lifecycle/lifecycle | cut -d ' ' -f 2)
+
+          echo "latest-go-version=${LATEST_GO_VERSION}" >> "$GITHUB_OUTPUT"
+          echo "latest-release-go-version=${LATEST_RELEASE_GO_VERSION}" >> "$GITHUB_OUTPUT"
+
+          LATEST_RELEASE_VERSION=$(echo $LATEST_RELEASE_VERSION | cut -d \v -f 2)
+          echo "latest-release-version=${LATEST_RELEASE_VERSION}" >> "$GITHUB_OUTPUT"
+      - name: Create issue if needed
+        if: ${{ steps.read-versions.outputs.latest-go-version != steps.read-versions.outputs.latest-release-go-version }}
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          #!/usr/bin/env bash
+
+          set -euo pipefail
+
+          title="Upgrade lifecycle to ${{ steps.read-versions.outputs.latest-go-version }}"
+          label=${{ steps.read-versions.outputs.latest-go-version }}
+
+          # Create label to use for exact search
+          gh label create "$label" || true
+
+          search_output=$(gh issue list --search "$title" --label "$label")
+
+          body="Latest lifecycle release v${{ steps.read-versions.outputs.latest-release-version }} is built with Go version ${{ steps.read-versions.outputs.latest-release-go-version }}; newer version ${{ steps.read-versions.outputs.latest-go-version }} is available."
+
+          if [ -z "${search_output// }" ]
+          then
+            echo "No issues matched search; creating new issue..."
+            gh issue create \
+              --label "type/bug" \
+              --label "status/triage" \
+              --label "$label" \
+              --title "$title" \
+              --body "$body"
+          else
+            echo "Found matching issues:"
+            echo $search_output
+          fi
+      - name: Scan latest release image
+        id: scan-image
+        uses: anchore/scan-action@v6
+        with:
+          image: buildpacksio/lifecycle:${{ steps.read-versions.outputs.latest-release-version }}
+          fail-build: true
+          severity-cutoff: medium
+          output-format: json
+      - name: Create issue if needed
+        if: failure() && steps.scan-image.outcome == 'failure'
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          #!/usr/bin/env bash
+
+          set -euo pipefail
+
+          title="CVE(s) found in v${{ steps.read-versions.outputs.latest-release-version }}"
+          label=cve
+
+          # Create label to use for exact search
+          gh label create "$label" || true
+
+          search_output=$(gh issue list --search "$title" --label "$label")
+
+          GITHUB_WORKFLOW_URL=https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID
+          body="Latest lifecycle release v${{ steps.read-versions.outputs.latest-release-version }} triggered CVE(s) from Grype. For further details, see: $GITHUB_WORKFLOW_URL json: $(cat ${{ steps.scan-image.outputs.json }} | jq '.matches[] | .vulnerability | {id, severity, description}' )"
+
+          if [ -z "${search_output// }" ]
+          then
+            echo "No issues matched search; creating new issue..."
+            gh issue create \
+              --label "type/bug" \
+              --label "status/triage" \
+              --label "$label" \
+              --title "$title" \
+              --body "$body"
+          else
+            echo "Found matching issues:"
+            echo $search_output
+          fi
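The version comparison above works because `go version <binary>` reports the toolchain a Go binary was built with. A minimal standalone sketch of the same check, where the release tag shown is only an example:

    # Download a released lifecycle archive and inspect the Go toolchain it was built with
    wget -q https://github.com/buildpacks/lifecycle/releases/download/v0.20.0/lifecycle-v0.20.0+linux.x86-64.tgz -O lifecycle.tgz
    tar xzf lifecycle.tgz
    go version                         # toolchain on this machine
    go version ./lifecycle/lifecycle   # toolchain the release binary was built with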
@@ -0,0 +1,209 @@
+name: draft-release
+
+on:
+  workflow_dispatch:
+
+jobs:
+  draft-release:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install jq
+        run: |
+          mkdir -p deps/bin
+          curl -s -L -o deps/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64
+          chmod +x deps/bin/jq
+          echo "${PWD}/deps/bin" >> $GITHUB_PATH
+      - name: Derive lifecycle version from branch name
+        run: |
+          [[ $GITHUB_REF =~ ^refs\/heads\/release/(.*)$ ]] && version=${BASH_REMATCH[1]}
+          if [[ -z "${version}" ]]; then
+            echo "lifecycle version not detected."
+            exit 1
+          fi
+          echo "LIFECYCLE_VERSION=$version" >> $GITHUB_ENV
+      - name: Determine download urls for linux-x86-64, linux-arm64, linux-ppc64le, linux-s390x
+        id: artifact-urls
+        # FIXME: this script should be updated to work with actions/github-script@v6
+        uses: actions/github-script@v3
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            return github.actions
+              .listRepoWorkflows({
+                owner: "${{ github.repository_owner }}",
+                repo: "lifecycle",
+              })
+              .then(workflows_result => {
+                let workflows = workflows_result.data.workflows
+                  .filter(a => a.name === "build" && a.state === "active")
+                  .map(a => a.id);
+                if (workflows.length === 0) {
+                  throw "no active workflows found with name build"
+                }
+                return workflows[0]
+              })
+              .then(workflow_id => {
+                return github.actions.listWorkflowRunsForRepo({
+                  owner: "${{ github.repository_owner }}",
+                  repo: "lifecycle",
+                  workflow_id: workflow_id,
+                  branch: "release/${{ env.LIFECYCLE_VERSION }}",
+                  event: "push"
+                })
+              })
+              .then(workflow_runs_result => {
+                let workflow_runs = workflow_runs_result.data.workflow_runs
+                  .filter(run => run.conclusion === "success")
+                  .filter(run => run.head_sha === "${{ github.sha }}");
+                if (workflow_runs.length === 0) {
+                  throw "no successful workflow runs found for commit"
+                }
+                return workflow_runs[0].id
+              })
+              .then(workflow_runid => {
+                return github.actions.listWorkflowRunArtifacts({
+                  owner: "${{ github.repository_owner }}",
+                  repo: "lifecycle",
+                  run_id: workflow_runid
+                })
+              })
+              .then(artifacts_result => {
+                let tuples = artifacts_result.data.artifacts
+                  .map(artifact => [artifact.name, artifact.archive_download_url]);
+                let urlList = new Array();
+                tuples.forEach(function(tuple) {
+                  if (tuple[0].includes("lifecycle-")) {
+                    urlList.push(tuple[1]);
+                  }
+                })
+                if (urlList.length === 0) {
+                  throw "no artifacts found"
+                }
+                if (urlList.length != 10) {
+                  // found too many artifacts
+                  // list them and throw
+                  console.log(urlList);
+                  throw "there should be exactly 10 artifacts, found " + urlList.length + " artifacts"
+                }
+                return urlList.join(",")
+              })
+      - name: Download artifacts
+        run: |
+          mkdir artifacts
+          echo "ARTIFACTS_PATH=$PWD/artifacts" >> $GITHUB_ENV
+
+          urls=$(echo '${{ steps.artifact-urls.outputs.result }}' | jq -r . )
+
+          for url in $(echo $urls | tr "," "\n"); do
+            curl -sL -w 'RESP_CODE:%{response_code}\n' \
+              --header 'Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
+              -o tmp-artifact.zip $url
+            unzip -d artifacts tmp-artifact.zip
+            rm tmp-artifact.zip
+          done
+      - name: Combine checksums
+        run: |
+          cd ${{ env.ARTIFACTS_PATH }}
+          cat *.sha256 | sort > lifecycle-v${{ env.LIFECYCLE_VERSION }}-checksums.txt
+          rm *.sha256
+      - name: Set pre-release kind
+        if: "contains(env.LIFECYCLE_VERSION, 'rc') || contains(env.LIFECYCLE_VERSION, 'pre')" # e.g., 0.99.0-rc.1
+        run: |
+          echo "RELEASE_KIND=pre-release" >> $GITHUB_ENV
+      - name: Set release kind
+        if: "!contains(env.LIFECYCLE_VERSION, 'rc') && !contains(env.LIFECYCLE_VERSION, 'pre')"
+        run: |
+          echo "RELEASE_KIND=release" >> $GITHUB_ENV
+      - name: Get previous release tag
+        id: get-previous-release-tag
+        uses: actions/github-script@v6
+        with:
+          github-token: ${{secrets.GITHUB_TOKEN}}
+          result-encoding: string
+          script: |
+            return github.rest.repos.getLatestRelease({
+              owner: "buildpacks",
+              repo: "lifecycle",
+            }).then(result => {
+              return result.data.tag_name
+            })
+      - name: Setup go
+        uses: actions/setup-go@v5
+        with:
+          check-latest: true
+          go-version-file: 'go.mod'
+      - name: Get go version
+        id: get-go-version
+        run: |
+          mkdir tmp
+          tar xzvf ${{ env.ARTIFACTS_PATH }}/lifecycle-v${{ env.LIFECYCLE_VERSION }}+linux.x86-64.tgz -C tmp/
+          echo "GO_VERSION=$(go version tmp/lifecycle/lifecycle | cut -d ' ' -f 2 | sed -e 's/^go//')" >> $GITHUB_ENV
+      - name: Set release body text
+        run: |
+          cat << EOF > body.txt
+          # lifecycle v${{ env.LIFECYCLE_VERSION }}
+
+          Welcome to v${{ env.LIFECYCLE_VERSION }}, a ${{ env.RELEASE_KIND }} of the Cloud Native Buildpacks Lifecycle.
+
+          ## Prerequisites
+
+          The lifecycle runs as a normal user in a series of unprivileged containers. To export images and cache image layers, it requires access to a Docker (compatible) daemon **or** an OCI registry.
+
+          ## Install
+
+          Extract the .tgz file and copy the lifecycle binaries into a [build image](https://github.com/buildpacks/spec/blob/main/platform.md#build-image). The build image can then be orchestrated by a platform implementation such as the [pack CLI](https://github.com/buildpack/pack) or [tekton](https://github.com/tektoncd/catalog/tree/main/task/buildpacks).
+
+          ## Lifecycle Image
+
+          An OCI image containing the lifecycle binaries is available at buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}.
+
+          ## Features
+
+          * TODO
+          * Updates go to version ${{ env.GO_VERSION }}
+
+          ## Bugfixes
+
+          * TODO
+
+          ## Chores
+
+          * TODO
+
+          **Full Changelog**: https://github.com/buildpacks/lifecycle/compare/${{ steps.get-previous-release-tag.outputs.result }}...release/${{ env.LIFECYCLE_VERSION }}
+
+          ## Contributors
+
+          We'd like to acknowledge that this release wouldn't be as good without the help of the following amazing contributors:
+
+          TODO
+
+          EOF
+      - name: Create pre-release
+        if: "contains(env.LIFECYCLE_VERSION, 'rc') || contains(env.LIFECYCLE_VERSION, 'pre')" # e.g., 0.99.0-rc.1
+        run: |
+          cd ${{ env.ARTIFACTS_PATH }}
+          gh release create v${{ env.LIFECYCLE_VERSION }} \
+            $(ls | sort | paste -sd " " -) \
+            --draft \
+            --notes-file ../body.txt \
+            --prerelease \
+            --target $GITHUB_REF_NAME \
+            --title "lifecycle v${{ env.LIFECYCLE_VERSION }}"
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Create release
+        if: "!contains(env.LIFECYCLE_VERSION, 'rc') && !contains(env.LIFECYCLE_VERSION, 'pre')"
+        run: |
+          cd ${{ env.ARTIFACTS_PATH }}
+          gh release create v${{ env.LIFECYCLE_VERSION }} \
+            $(ls | sort | paste -sd " " -) \
+            --draft \
+            --notes-file ../body.txt \
+            --target $GITHUB_REF_NAME \
+            --title "lifecycle v${{ env.LIFECYCLE_VERSION }}"
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
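The "Derive lifecycle version from branch name" step above relies on a bash regex over $GITHUB_REF; a quick illustration of what it yields, using the 0.99.0-rc.1 example from the workflow's own comment:

    GITHUB_REF=refs/heads/release/0.99.0-rc.1
    [[ $GITHUB_REF =~ ^refs\/heads\/release/(.*)$ ]] && version=${BASH_REMATCH[1]}
    echo "$version"   # prints 0.99.0-rc.1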
@@ -1,27 +0,0 @@
-name: lifecycle-image
-
-on:
-  release:
-    types:
-      - published
-
-jobs:
-  build-lifecycle-image:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@master
-      - name: Get metadata
-        id: get_metadata
-        run: |
-          echo ::set-output name=LIFECYCLE_VERSION::`echo ${{ github.event.release.tag_name }} | cut -d "v" -f2`
-          echo ::set-output name=BUILDPACK_API::`cat Makefile | grep BUILDPACK_API?= | cut -d "=" -f2`
-          echo ::set-output name=PLATFORM_API::`cat Makefile | grep PLATFORM_API?= | cut -d "=" -f2`
-      - name: Publish to Registry
-        uses: elgohr/Publish-Docker-Github-Action@master
-        with:
-          name: buildpacksio/lifecycle
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-          dockerfile: .github/workflows/lifecycle-image/Dockerfile
-          buildargs: LIFECYCLE_URI=https://github.com/buildpacks/lifecycle/releases/download/v${{ steps.get_metadata.outputs.LIFECYCLE_VERSION }}/lifecycle-v${{ steps.get_metadata.outputs.LIFECYCLE_VERSION }}+linux.x86-64.tgz,LIFECYCLE_VERSION=${{ steps.get_metadata.outputs.LIFECYCLE_VERSION }},BUILDPACK_API=${{ steps.get_metadata.outputs.BUILDPACK_API }},PLATFORM_API=${{ steps.get_metadata.outputs.PLATFORM_API }}
-          tags: "latest,${{ steps.get_metadata.outputs.LIFECYCLE_VERSION }}"
@@ -1,24 +0,0 @@
-# Stage 1
-FROM ubuntu:bionic AS download-and-extract-lifecycle
-
-ARG LIFECYCLE_URI
-
-WORKDIR /tmp
-
-RUN apt-get update && apt-get install -y wget
-
-RUN wget $LIFECYCLE_URI -O lifecycle.tgz && tar xzvf lifecycle.tgz
-
-
-# Stage 2
-FROM gcr.io/distroless/static
-
-ARG LIFECYCLE_VERSION
-ARG BUILDPACK_API
-ARG PLATFORM_API
-
-COPY --from=download-and-extract-lifecycle /tmp/lifecycle /cnb/lifecycle
-
-LABEL io.buildpacks.builder.metadata="{\"lifecycle\":{\"version\":\"$LIFECYCLE_VERSION\",\"api\":{\"buildpack\":\"$BUILDPACK_API\",\"platform\":\"$PLATFORM_API\"}}}"
-
-WORKDIR /layers
@@ -0,0 +1,128 @@
+name: post-release
+
+on:
+  release:
+    types:
+      - published # trigger for releases and pre-releases
+
+jobs:
+  retag-lifecycle-images:
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write
+    steps:
+      - uses: actions/checkout@v4
+      - name: Setup go
+        uses: actions/setup-go@v5
+        with:
+          check-latest: true
+          go-version-file: 'go.mod'
+      - name: Install crane
+        run: |
+          go install github.com/google/go-containerregistry/cmd/crane@latest
+      - name: Install cosign
+        uses: sigstore/cosign-installer@v3
+      - uses: azure/docker-login@v2
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+      - name: Set env
+        run: |
+          echo "LIFECYCLE_VERSION=$(echo ${{ github.event.release.tag_name }} | cut -d "v" -f2)" >> $GITHUB_ENV
+          echo "LIFECYCLE_IMAGE_TAG=$(git describe --always --abbrev=7)" >> $GITHUB_ENV
+      - name: Verify lifecycle images
+        run: |
+          LINUX_AMD64_SHA=$(cosign verify --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" --certificate-oidc-issuer https://token.actions.githubusercontent.com buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-x86-64 | jq -r .[0].critical.image.\"docker-manifest-digest\")
+          echo "LINUX_AMD64_SHA: $LINUX_AMD64_SHA"
+          echo "LINUX_AMD64_SHA=$LINUX_AMD64_SHA" >> $GITHUB_ENV
+
+          LINUX_ARM64_SHA=$(cosign verify --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" --certificate-oidc-issuer https://token.actions.githubusercontent.com buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64 | jq -r .[0].critical.image.\"docker-manifest-digest\")
+          echo "LINUX_ARM64_SHA: $LINUX_ARM64_SHA"
+          echo "LINUX_ARM64_SHA=$LINUX_ARM64_SHA" >> $GITHUB_ENV
+
+          LINUX_PPC64LE_SHA=$(cosign verify --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" --certificate-oidc-issuer https://token.actions.githubusercontent.com buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-ppc64le | jq -r .[0].critical.image.\"docker-manifest-digest\")
+          echo "LINUX_PPC64LE_SHA: $LINUX_PPC64LE_SHA"
+          echo "LINUX_PPC64LE_SHA=$LINUX_PPC64LE_SHA" >> $GITHUB_ENV
+
+          LINUX_S390X_SHA=$(cosign verify --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/build.yml" --certificate-oidc-issuer https://token.actions.githubusercontent.com buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-s390x | jq -r .[0].critical.image.\"docker-manifest-digest\")
+          echo "LINUX_S390X_SHA: $LINUX_S390X_SHA"
+          echo "LINUX_S390X_SHA=$LINUX_S390X_SHA" >> $GITHUB_ENV
+
+      - name: Download SBOM
+        run: |
+          gh release download --pattern '*-bom.cdx.json' ${{ github.event.release.tag_name }}
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Retag lifecycle images & create manifest list - semver
+        run: |
+          DOCKER_CLI_EXPERIMENTAL=enabled
+
+          crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-x86-64@${{ env.LINUX_AMD64_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-x86-64
+          crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-arm64
+          crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-ppc64le
+          crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-s390x@${{ env.LINUX_S390X_SHA }} ${{ env.LIFECYCLE_VERSION }}-linux-s390x
+
+          docker manifest create buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }} \
+            buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-x86-64@${{ env.LINUX_AMD64_SHA }} \
+            buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} \
+            buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} \
+            buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}-linux-s390x@${{ env.LINUX_S390X_SHA }}
+
+          MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }})
+          echo "MANIFEST_SHA: $MANIFEST_SHA"
+
+          cosign sign -r -y \
+            -a tag=${{ env.LIFECYCLE_VERSION }} \
+            buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}@${MANIFEST_SHA}
+          cosign verify \
+            --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/post-release.yml" \
+            --certificate-oidc-issuer https://token.actions.githubusercontent.com \
+            -a tag=${{ env.LIFECYCLE_VERSION }} \
+            buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}
+
+          cosign attach sbom --sbom ./*-bom.cdx.json --type cyclonedx buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}
+          cosign sign -r -y \
+            -a tag=${{ env.LIFECYCLE_VERSION }} --attachment sbom \
+            buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}@${MANIFEST_SHA}
+          cosign verify \
+            --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/post-release.yml" \
+            --certificate-oidc-issuer https://token.actions.githubusercontent.com \
+            -a tag=${{ env.LIFECYCLE_VERSION }} --attachment sbom \
+            buildpacksio/lifecycle:${{ env.LIFECYCLE_VERSION }}
+      - name: Retag lifecycle images & create manifest list - latest
+        if: "!contains(env.LIFECYCLE_VERSION, 'rc') && !contains(env.LIFECYCLE_VERSION, 'pre')"
+        run: |
+          DOCKER_CLI_EXPERIMENTAL=enabled
+
+          crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-x86-64@${{ env.LINUX_AMD64_SHA }} latest-linux-x86-64
+          crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-arm64@${{ env.LINUX_ARM64_SHA }} latest-linux-arm64
+          crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} latest-linux-ppc64le
+          crane tag buildpacksio/lifecycle:${{ env.LIFECYCLE_IMAGE_TAG }}-linux-s390x@${{ env.LINUX_S390X_SHA }} latest-linux-s390x
+
+          docker manifest create buildpacksio/lifecycle:latest \
+            buildpacksio/lifecycle:latest-linux-x86-64@${{ env.LINUX_AMD64_SHA }} \
+            buildpacksio/lifecycle:latest-linux-arm64@${{ env.LINUX_ARM64_SHA }} \
+            buildpacksio/lifecycle:latest-linux-ppc64le@${{ env.LINUX_PPC64LE_SHA }} \
+            buildpacksio/lifecycle:latest-linux-s390x@${{ env.LINUX_S390X_SHA }}
+
+          MANIFEST_SHA=$(docker manifest push buildpacksio/lifecycle:latest)
+          echo "MANIFEST_SHA: $MANIFEST_SHA"
+
+          cosign sign -r -y \
+            -a tag=latest \
+            buildpacksio/lifecycle:latest@${MANIFEST_SHA}
+          cosign verify \
+            --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/post-release.yml" \
+            --certificate-oidc-issuer https://token.actions.githubusercontent.com \
+            -a tag=latest \
+            buildpacksio/lifecycle:latest
+
+          cosign attach sbom --sbom ./*-bom.cdx.json --type cyclonedx buildpacksio/lifecycle:latest
+          cosign sign -r -y \
+            -a tag=${{ env.LIFECYCLE_VERSION }} --attachment sbom \
+            buildpacksio/lifecycle:latest@${MANIFEST_SHA}
+          cosign verify \
+            --certificate-identity-regexp "https://github.com/${{ github.repository_owner }}/lifecycle/.github/workflows/post-release.yml" \
+            --certificate-oidc-issuer https://token.actions.githubusercontent.com
|
||||||
|
-a tag=${{ env.LIFECYCLE_VERSION }} --attachment sbom \
|
||||||
|
buildpacksio/lifecycle:latest
|
|
@ -0,0 +1,87 @@
|
||||||
|
name: test-s390x
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
- 'release/**'
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
- 'release/**'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test-linux-s390x:
|
||||||
|
if: (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/'))
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
env:
|
||||||
|
ZVSI_FP_NAME: bp-floating-ci-${{ github.run_id }}
|
||||||
|
ZVSI_INSTANCE_NAME: bp-zvsi-ci-${{ github.run_id }}
|
||||||
|
ZVSI_ZONE_NAME: ca-tor-1
|
||||||
|
ZVSI_PROFILE_NAME: bz2-4x16
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Install ibmcloud CLI and set up IBM Cloud login
|
||||||
|
run: |
|
||||||
|
curl -fsSL https://clis.cloud.ibm.com/install/linux | sh
|
||||||
|
ibmcloud login -q --apikey ${{ secrets.IBMCLOUD_API_KEY }} -r ca-tor
|
||||||
|
ibmcloud plugin install vpc-infrastructure
|
||||||
|
- name: Creation of ZVSI
|
||||||
|
id: ZVSI
|
||||||
|
run: |
|
||||||
|
# Create the ZVSI
|
||||||
|
ibmcloud is instance-create $ZVSI_INSTANCE_NAME ${{ secrets.ZVSI_VPC }} $ZVSI_ZONE_NAME $ZVSI_PROFILE_NAME ${{ secrets.ZVSI_SUBNET }} --image ${{ secrets.ZVSI_IMAGE }} --keys ${{ secrets.ZVSI_KEY }} --resource-group-id ${{ secrets.ZVSI_RG_ID }} --primary-network-interface "{\"name\":\"eth0\",\"allow_ip_spoofing\":false,\"subnet\": {\"name\":\"${{ secrets.ZVSI_SUBNET }}\"},\"security_groups\":[{\"id\":\"${{ secrets.ZVSI_SG }}\"}]}"
|
||||||
|
# Reserve a floating IP for the ZVSI
|
||||||
|
ibmcloud is floating-ip-reserve $ZVSI_FP_NAME --zone $ZVSI_ZONE_NAME --resource-group-id ${{ secrets.ZVSI_RG_ID }} --in $ZVSI_INSTANCE_NAME
|
||||||
|
# Bind the floating IP to the ZVSI
|
||||||
|
ibmcloud is floating-ip-update $ZVSI_FP_NAME --nic eth0 --in $ZVSI_INSTANCE_NAME
|
||||||
|
sleep 60
|
||||||
|
# Save the floating IP used to log in to the ZVSI
|
||||||
|
ZVSI_HOST=$(ibmcloud is floating-ip $ZVSI_FP_NAME | awk '/Address/{print $2}')
|
||||||
|
echo $ZVSI_HOST
|
||||||
|
echo "IP=${ZVSI_HOST}" >> $GITHUB_OUTPUT
|
||||||
|
- name: Status of ZVSI
|
||||||
|
run: |
|
||||||
|
check=$(ibmcloud is ins | awk '/'$ZVSI_INSTANCE_NAME'/{print $3}')
while [[ $check != "running" ]]
do
# poll periodically rather than busy-looping against the API
sleep 10
check=$(ibmcloud is ins | awk '/'$ZVSI_INSTANCE_NAME'/{print $3}')
if [[ $check == 'failed' ]]
then
echo "Failed to run the ZVSI"
break
fi
done
|
||||||
|
- name: Install dependencies and run all tests on s390x ZVSI
|
||||||
|
uses: appleboy/ssh-action@v1.2.2
|
||||||
|
env:
|
||||||
|
GH_REPOSITORY: ${{ github.server_url }}/${{ github.repository }}
|
||||||
|
GH_REF: ${{ github.ref }}
|
||||||
|
with:
|
||||||
|
host: ${{ steps.ZVSI.outputs.IP }}
|
||||||
|
username: ${{ secrets.ZVSI_SSH_USER }}
|
||||||
|
key: ${{ secrets.ZVSI_PR_KEY }}
|
||||||
|
envs: GH_REPOSITORY,GH_REF
|
||||||
|
command_timeout: 100m
|
||||||
|
script: |
|
||||||
|
apt-get update -y
|
||||||
|
apt-get install -y wget curl git make gcc jq docker.io
|
||||||
|
wget https://go.dev/dl/go1.24.6.linux-s390x.tar.gz
|
||||||
|
rm -rf /usr/local/go && tar -C /usr/local -xzf go1.24.6.linux-s390x.tar.gz
|
||||||
|
export PATH=$PATH:/usr/local/go/bin
|
||||||
|
git clone ${GH_REPOSITORY} lifecycle
|
||||||
|
cd lifecycle && git checkout ${GH_REF}
|
||||||
|
go env
|
||||||
|
export PATH=$PATH:~/go/bin
|
||||||
|
make format || true
|
||||||
|
make test
|
||||||
|
- name: Cleanup ZVSI
|
||||||
|
if: ${{ steps.ZVSI.conclusion == 'success' && always() }}
|
||||||
|
run: |
|
||||||
|
#Delete the created ZVSI
|
||||||
|
ibmcloud is instance-delete $ZVSI_INSTANCE_NAME --force
|
||||||
|
sleep 20
|
||||||
|
# Release the created floating IP
|
||||||
|
ibmcloud is floating-ip-release $ZVSI_FP_NAME --force
|
|
@ -3,6 +3,15 @@
|
||||||
*.coverprofile
|
*.coverprofile
|
||||||
*.test
|
*.test
|
||||||
*~
|
*~
|
||||||
|
.tool-versions
|
||||||
/out
|
/out
|
||||||
|
.vscode
|
||||||
|
|
||||||
acceptance/testdata/*/container/cnb/lifecycle/*
|
acceptance/testdata/*/**/container/cnb/lifecycle/*
|
||||||
|
acceptance/testdata/*/**/container/docker-config/*
|
||||||
|
|
||||||
|
acceptance/testdata/exporter/container/cnb/run.toml
|
||||||
|
acceptance/testdata/exporter/container/layers/*analyzed.toml
|
||||||
|
acceptance/testdata/exporter/container/other_layers/*analyzed.toml
|
||||||
|
|
||||||
|
acceptance/testdata/restorer/container/layers/*analyzed.toml
|
||||||
|
|
|
@ -0,0 +1,19 @@
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
# allow socket to be writable by all
|
||||||
|
# (necessary for acceptance tests / calls from within container)
|
||||||
|
- init: chmod ugo+w /var/run/docker.sock
|
||||||
|
# build linux to install dependencies
|
||||||
|
- init: make tidy build-linux
|
||||||
|
github:
|
||||||
|
prebuilds:
|
||||||
|
master: true
|
||||||
|
branches: true
|
||||||
|
pullRequests: true
|
||||||
|
pullRequestsFromForks: true
|
||||||
|
addCheck: true
|
||||||
|
|
||||||
|
vscode:
|
||||||
|
extensions:
|
||||||
|
- golang.go
|
||||||
|
- ms-azuretools.vscode-docker
|
|
@ -0,0 +1,5 @@
|
||||||
|
ignore:
|
||||||
|
- vulnerability: CVE-2015-5237 # false positive, see https://github.com/anchore/grype/issues/558
|
||||||
|
- vulnerability: CVE-2021-22570 # false positive, see https://github.com/anchore/grype/issues/558
|
||||||
|
- vulnerability: CVE-2024-41110 # non-impactful as we only use docker as a client
|
||||||
|
- vulnerability: GHSA-v23v-6jw2-98fq # non-impactful as we only use docker as a client
|
|
@ -0,0 +1,6 @@
|
||||||
|
{
|
||||||
|
"go.testTimeout": "10m",
|
||||||
|
"go.testFlags": [
|
||||||
|
"-v"
|
||||||
|
]
|
||||||
|
}
|
|
@ -0,0 +1,42 @@
|
||||||
|
## Policies
|
||||||
|
|
||||||
|
This repository adheres to the following project policies:
|
||||||
|
|
||||||
|
- [Code of Conduct][code-of-conduct] - How we should act with each other.
|
||||||
|
- [Contributing][contributing] - General contributing standards.
|
||||||
|
- [Security][security] - Reporting security concerns.
|
||||||
|
- [Support][support] - Getting support.
|
||||||
|
|
||||||
|
## Contributing to this repository
|
||||||
|
|
||||||
|
### Welcome
|
||||||
|
|
||||||
|
We welcome contributions to this repository! To get a sense of what the team is currently focusing on, check out our [milestones](https://github.com/buildpacks/lifecycle/milestones). Issues labeled [good first issue](https://github.com/buildpacks/lifecycle/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) and issues in our [docs repo](https://github.com/buildpacks/docs/issues?q=is%3Aissue+is%3Aopen+label%3Ateam%2Fimplementations) are great places to get started, but you are welcome to work on any issue that interests you. For issues requiring a greater degree of coordination, such as those labeled `status/needs-discussion` or that are part of larger epics, please reach out in the #implementation channel in [Slack](https://slack.buildpacks.io/).
|
||||||
|
|
||||||
|
### Development
|
||||||
|
|
||||||
|
Aside from the policies above, you may find [DEVELOPMENT.md](DEVELOPMENT.md) helpful in developing in this repository.
|
||||||
|
|
||||||
|
### Background
|
||||||
|
|
||||||
|
Here are some topics that might be helpful in further understanding the lifecycle:
|
||||||
|
|
||||||
|
* Cloud Native Buildpacks platform api spec
|
||||||
|
* Example platforms: [pack CLI](https://github.com/buildpack/pack), [Tekton](https://github.com/tektoncd/catalog/blob/master/task/buildpacks/0.1/README.md)
|
||||||
|
* Cloud Native Buildpacks buildpack api spec
|
||||||
|
* Example buildpack providers: [Google](https://github.com/GoogleCloudPlatform/buildpacks), [Heroku](https://www.heroku.com/), [Paketo](https://paketo.io/)
|
||||||
|
* The Open Container Initiative (OCI) and [OCI image spec](https://github.com/opencontainers/image-spec)
|
||||||
|
* Questions to deepen understanding:
|
||||||
|
* What are the different [lifecycle phases](https://buildpacks.io/docs/concepts/components/lifecycle/)? What is the purpose of each phase?
|
||||||
|
* What is a [builder](https://buildpacks.io/docs/concepts/components/builder/)? Is it required to run the lifecycle?
|
||||||
|
* What is the [untrusted builder workflow](https://medium.com/buildpacks/faster-more-secure-builds-with-pack-0-11-0-4d0c633ca619)? Why do we have this flow?
|
||||||
|
* What is the [launcher](https://github.com/buildpacks/spec/blob/main/platform.md#launch)? Why do we have a launcher?
|
||||||
|
* What does a [buildpack](https://buildpacks.io/docs/concepts/components/buildpack/) do? Where does it write data? How does it communicate with the lifecycle?
|
||||||
|
* What does a [platform](https://buildpacks.io/docs/concepts/components/platform/) do? What things does it know about that the lifecycle does not? How does it communicate with the lifecycle?
|
||||||
|
* What is a [stack](https://buildpacks.io/docs/concepts/components/stack/)? Who produces stacks? Why is the stack concept important for the lifecycle?
|
||||||
|
|
||||||
|
[code-of-conduct]: https://github.com/buildpacks/.github/blob/main/CODE_OF_CONDUCT.md
|
||||||
|
[contributing]: https://github.com/buildpacks/.github/blob/main/CONTRIBUTING.md
|
||||||
|
[security]: https://github.com/buildpacks/.github/blob/main/SECURITY.md
|
||||||
|
[support]: https://github.com/buildpacks/.github/blob/main/SUPPORT.md
|
||||||
|
[pull-request-process]: https://github.com/buildpacks/.github/blob/main/CONTRIBUTIONS.md#pull-request-process
|
|
@ -0,0 +1,105 @@
|
||||||
|
# Development
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
* [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
|
||||||
|
* macOS: _(built-in)_
|
||||||
|
* Windows:
|
||||||
|
* `choco install git -y`
|
||||||
|
* `git config --global core.autocrlf false`
|
||||||
|
* [Go](https://golang.org/doc/install)
|
||||||
|
* macOS: `brew install go`
|
||||||
|
* Windows: `choco install golang -y`
|
||||||
|
* [Docker](https://www.docker.com/products/docker-desktop)
|
||||||
|
* [jq](https://stedolan.github.io/jq/) and [yj](https://github.com/sclevine/yj) utilities
|
||||||
|
* macOS: `brew install jq yj`
|
||||||
|
* Windows:
|
||||||
|
* `choco install jq -y`
|
||||||
|
* `go install github.com/sclevine/yj@latest`
|
||||||
|
* Make (and build tools)
|
||||||
|
* macOS: `xcode-select --install`
|
||||||
|
* Windows:
|
||||||
|
* `choco install cygwin make -y`
|
||||||
|
* `[Environment]::SetEnvironmentVariable("PATH", "C:\tools\cygwin\bin;$ENV:PATH", "MACHINE")`
|
||||||
|
|
||||||
|
### Caveats
|
||||||
|
|
||||||
|
* The acceptance tests require the docker daemon to be able to communicate with a local containerized insecure registry. On Docker Desktop 3.3.x, this may result in failures such as: `Expected nil: push response: : Get http://localhost:<port>/v2/: dial tcp [::1]:<port>: connect: connection refused`. To fix these failures, it may be necessary to add the following to the Docker Desktop Engine config:
|
||||||
|
* macOS: Docker > Preferences > Docker Engine:
|
||||||
|
```
|
||||||
|
"insecure-registries": [
|
||||||
|
"<my-host-ip>/32"
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Testing GitHub actions on forks
|
||||||
|
|
||||||
|
The lifecycle release process involves chaining a series of GitHub actions together such that:
|
||||||
|
* The "build" workflow creates the artifacts
|
||||||
|
* .tgz files containing the lifecycle binaries, shasums for the .tgz files, an SBOM, etc.
|
||||||
|
* OCI images containing the lifecycle binaries, tagged with their commit sha (for more information, see RELEASE.md)
|
||||||
|
* The "draft-release" workflow finds the artifacts and downloads them, creating the draft release
|
||||||
|
* The "post-release" workflow re-tags the OCI images that were created during the "build" workflow with the release version
|
||||||
|
|
||||||
|
It can be rather cumbersome to test changes to these workflows, as they are heavily intertwined. Thus we recommend forking the buildpacks/lifecycle repository in GitHub and running through the entire release process end-to-end.
|
||||||
|
For the fork, it is necessary to add the following secrets:
|
||||||
|
* DOCKER_PASSWORD (if not using ghcr.io)
|
||||||
|
* DOCKER_USERNAME (if not using ghcr.io)
|
||||||
|
|
||||||
|
The tools/test-fork.sh script can be used to update the source code to reflect the state of the fork.
|
||||||
|
It can be invoked like so: `./tools/test-fork.sh <registry repo name>`
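For example, a hypothetical invocation against a personal Docker Hub repository (the repository name is illustrative, and the fork must already have the secrets described above):

```bash
./tools/test-fork.sh index.docker.io/some-user/lifecycle
```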
|
||||||
|
|
||||||
|
## Tasks
|
||||||
|
|
||||||
|
To test, build, and package binaries into an archive, simply run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ make all
|
||||||
|
```
|
||||||
|
This will create archives at `out/lifecycle-<LIFECYCLE_VERSION>+linux.x86-64.tgz` and `out/lifecycle-<LIFECYCLE_VERSION>+windows.x86-64.tgz`.
|
||||||
|
|
||||||
|
`LIFECYCLE_VERSION` defaults to the value returned by `git describe --tags` if not on a release branch (for more information about the release process, see [RELEASE](RELEASE.md)). It can be changed by prepending `LIFECYCLE_VERSION=<some version>` to the
|
||||||
|
`make` command. For example:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ LIFECYCLE_VERSION=1.2.3 make all
|
||||||
|
```
|
||||||
|
|
||||||
|
Steps can also be run individually as shown below.
|
||||||
|
|
||||||
|
### Test
|
||||||
|
|
||||||
|
Formats, vets, and tests the code.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ make test
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Mocks
|
||||||
|
|
||||||
|
Like most Go projects, we use mock generators to help with our testing. To generate new mocks:
|
||||||
|
```bash
|
||||||
|
$ make generate
|
||||||
|
$ make format lint
|
||||||
|
```
|
||||||
|
|
||||||
|
This is because the mock generator produces a larger diff, which the formatter will then clean up.
|
||||||
|
|
||||||
|
### Build
|
||||||
|
|
||||||
|
Builds binaries to `out/linux/lifecycle/` and `out/windows/lifecycle/`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ make build
|
||||||
|
```
|
||||||
|
|
||||||
|
> To clean the `out/` directory, run `make clean`.
|
||||||
|
|
||||||
|
### Package
|
||||||
|
|
||||||
|
Creates archives at `out/lifecycle-<LIFECYCLE_VERSION>+linux.x86-64.tgz` and `out/lifecycle-<LIFECYCLE_VERSION>+windows.x86-64.tgz`, using the contents of the
|
||||||
|
`out/linux/lifecycle/` directory, for the given (or default) `LIFECYCLE_VERSION`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ make package
|
||||||
|
```
|
|
@ -0,0 +1,44 @@
|
||||||
|
# Quick reference
|
||||||
|
|
||||||
|
This image is maintained by the [Cloud Native Buildpacks project](https://buildpacks.io/). The maintainers can be contacted via the [Cloud Native Buildpacks Slack](https://slack.buildpacks.io/), or by opening an issue on the `buildpacks/lifecycle` [GitHub repo](https://github.com/buildpacks/lifecycle).
|
||||||
|
|
||||||
|
# Supported tags
|
||||||
|
|
||||||
|
Supported tags are semver-versioned manifest lists - e.g., `0.12.0` or `0.12.0-rc.1`, pointing to one of the following os/architectures:
|
||||||
|
* `linux/amd64`
|
||||||
|
* `linux/arm64`
|
||||||
|
|
||||||
|
# About this image
|
||||||
|
|
||||||
|
Images are built in [GitHub actions](https://github.com/buildpacks/lifecycle/actions) and signed with [`cosign`](https://github.com/sigstore/cosign). To verify:
|
||||||
|
* Run:
|
||||||
|
```
|
||||||
|
cosign version # must be at least 2.0.0
|
||||||
|
cosign verify \
|
||||||
|
--certificate-identity-regexp "https://github.com/buildpacks/lifecycle/.github/workflows/post-release.yml" \
|
||||||
|
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
|
||||||
|
buildpacksio/lifecycle:<tag>
|
||||||
|
```
|
||||||
|
|
||||||
|
A CycloneDX SBOM is "attached" to the image and signed with [`cosign`](https://github.com/sigstore/cosign). To verify:
|
||||||
|
* Run:
|
||||||
|
```
|
||||||
|
cosign version # must be at least 2.0.0
|
||||||
|
cosign verify \
|
||||||
|
--certificate-identity-regexp "https://github.com/buildpacks/lifecycle/.github/workflows/post-release.yml" \
|
||||||
|
--certificate-oidc-issuer https://token.actions.githubusercontent.com \
|
||||||
|
-a tag=<tag> --attachment sbom \
|
||||||
|
buildpacksio/lifecycle:<tag>
|
||||||
|
cosign download sbom buildpacksio/lifecycle:<tag>
|
||||||
|
```
|
||||||
|
|
||||||
|
# Using this image
|
||||||
|
|
||||||
|
With [pack](https://github.com/buildpack/pack):
|
||||||
|
* `pack build <target> --lifecycle-image buildpacksio/lifecycle:<tag>`
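For example (the application name, builder, and lifecycle tag below are illustrative):
```
pack build my-app \
  --builder cnbs/sample-builder:jammy \
  --lifecycle-image buildpacksio/lifecycle:0.20.0
```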
|
||||||
|
|
||||||
|
With [tekton](https://github.com/tektoncd/catalog/tree/main/task/buildpacks-phases/0.2):
|
||||||
|
* Provide as param `LIFECYCLE_IMAGE` in taskrun
|
||||||
|
|
||||||
|
***
|
||||||
|
[Source](https://github.com/buildpacks/lifecycle/blob/main/IMAGE.md) for this page
|
305
Makefile
305
Makefile
|
@ -1,112 +1,168 @@
|
||||||
GOCMD?=go
|
ifeq ($(OS),Windows_NT)
|
||||||
GOARCH?=amd64
|
SHELL:=cmd.exe
|
||||||
GOENV=GOARCH=$(GOARCH) CGO_ENABLED=0
|
PWD?=$(subst /,\,${CURDIR})
|
||||||
|
LDFLAGS=-s -w
|
||||||
|
BLANK:=
|
||||||
|
/:=\$(BLANK)
|
||||||
|
else
|
||||||
|
/:=/
|
||||||
|
endif
|
||||||
|
|
||||||
|
PARSED_COMMIT:=$(shell git rev-parse --short HEAD)
|
||||||
|
|
||||||
|
ifeq ($(LIFECYCLE_VERSION),)
|
||||||
|
LIFECYCLE_VERSION:=$(shell go run tools/version/main.go)
|
||||||
|
LIFECYCLE_IMAGE_TAG?=$(PARSED_COMMIT)
|
||||||
|
else
|
||||||
|
LIFECYCLE_IMAGE_TAG?=$(LIFECYCLE_VERSION)
|
||||||
|
endif
|
||||||
|
|
||||||
|
ACCEPTANCE_TIMEOUT?=2400s
|
||||||
|
GOCMD?=go
|
||||||
|
GOENV=GOARCH=$(GOARCH) CGO_ENABLED=0
|
||||||
|
LIFECYCLE_DESCRIPTOR_PATH?=lifecycle.toml
|
||||||
|
SCM_REPO?=github.com/buildpacks/lifecycle
|
||||||
|
SCM_COMMIT?=$(PARSED_COMMIT)
|
||||||
LDFLAGS=-s -w
|
LDFLAGS=-s -w
|
||||||
LDFLAGS+=-X 'github.com/buildpacks/lifecycle/cmd.Version=$(LIFECYCLE_VERSION)'
|
|
||||||
LDFLAGS+=-X 'github.com/buildpacks/lifecycle/cmd.SCMRepository=$(SCM_REPO)'
|
LDFLAGS+=-X 'github.com/buildpacks/lifecycle/cmd.SCMRepository=$(SCM_REPO)'
|
||||||
LDFLAGS+=-X 'github.com/buildpacks/lifecycle/cmd.SCMCommit=$(SCM_COMMIT)'
|
LDFLAGS+=-X 'github.com/buildpacks/lifecycle/cmd.SCMCommit=$(SCM_COMMIT)'
|
||||||
LDFLAGS+=-X 'github.com/buildpacks/lifecycle/cmd.PlatformAPI=$(PLATFORM_API)'
|
LDFLAGS+=-X 'github.com/buildpacks/lifecycle/cmd.Version=$(LIFECYCLE_VERSION)'
|
||||||
GOBUILD=go build $(GOFLAGS) -ldflags "$(LDFLAGS)"
|
GOBUILD:=go build $(GOFLAGS) -ldflags "$(LDFLAGS)"
|
||||||
GOTEST=$(GOCMD) test $(GOFLAGS)
|
GOTEST=$(GOCMD) test $(GOFLAGS)
|
||||||
LIFECYCLE_VERSION?=$(shell cat VERSION)
|
BUILD_DIR?=$(PWD)$/out
|
||||||
PLATFORM_API?=0.3
|
SOURCE_COMPILATION_IMAGE?=lifecycle-img
|
||||||
BUILDPACK_API?=0.2
|
BUILD_CTR?=lifecycle-ctr
|
||||||
SCM_REPO?=github.com/buildpacks/lifecycle
|
DOCKER_CMD?=make test
|
||||||
PARSED_COMMIT:=$(shell git rev-parse --short HEAD)
|
|
||||||
SCM_COMMIT?=$(PARSED_COMMIT)
|
|
||||||
BUILD_DIR?=$(PWD)/out
|
|
||||||
COMPILATION_IMAGE?=golang:1.13-alpine
|
|
||||||
|
|
||||||
define LIFECYCLE_DESCRIPTOR
|
GOFILES := $(shell $(GOCMD) run tools$/lister$/main.go)
|
||||||
[api]
|
|
||||||
platform = "$(PLATFORM_API)"
|
|
||||||
buildpack = "$(BUILDPACK_API)"
|
|
||||||
|
|
||||||
[lifecycle]
|
|
||||||
version = "$(LIFECYCLE_VERSION)"
|
|
||||||
endef
|
|
||||||
|
|
||||||
all: test build package
|
all: test build package
|
||||||
|
|
||||||
build: build-linux build-windows
|
GOOS_ARCHS = linux/amd64 linux/arm64 linux/ppc64le linux/s390x darwin/amd64 darwin/arm64
|
||||||
|
|
||||||
build-linux-lifecycle: export GOOS:=linux
|
build: build-linux-amd64 build-linux-arm64 build-linux-ppc64le build-linux-s390x
|
||||||
build-linux-lifecycle: OUT_DIR:=$(BUILD_DIR)/$(GOOS)/lifecycle
|
|
||||||
build-linux-lifecycle: GOENV:=GOARCH=$(GOARCH) CGO_ENABLED=1
|
|
||||||
build-linux-lifecycle: DOCKER_RUN=docker run --workdir=/lifecycle -v $(OUT_DIR):/out -v $(PWD):/lifecycle $(COMPILATION_IMAGE)
|
|
||||||
build-linux-lifecycle:
|
|
||||||
@echo "> Building lifecycle/lifecycle for linux..."
|
|
||||||
mkdir -p $(OUT_DIR)
|
|
||||||
$(DOCKER_RUN) sh -c 'apk add build-base && $(GOENV) $(GOBUILD) -o /out/lifecycle -a ./cmd/lifecycle'
|
|
||||||
|
|
||||||
|
build-image-linux-amd64: build-linux-amd64 package-linux-amd64
|
||||||
|
build-image-linux-amd64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.x86-64.tgz
|
||||||
|
build-image-linux-amd64:
|
||||||
|
$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch amd64 -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
|
||||||
|
|
||||||
build-linux-launcher: export GOOS:=linux
|
build-image-linux-arm64: build-linux-arm64 package-linux-arm64
|
||||||
build-linux-launcher: OUT_DIR?=$(BUILD_DIR)/$(GOOS)/lifecycle
|
build-image-linux-arm64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.arm64.tgz
|
||||||
build-linux-launcher:
|
build-image-linux-arm64:
|
||||||
@echo "> Building lifecycle/launcher for linux..."
|
$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch arm64 -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
|
||||||
mkdir -p $(OUT_DIR)
|
|
||||||
$(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher
|
|
||||||
test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 3
|
|
||||||
|
|
||||||
build-linux-symlinks: export GOOS:=linux
|
build-image-linux-ppc64le: build-linux-ppc64le package-linux-ppc64le
|
||||||
build-linux-symlinks: OUT_DIR:=$(BUILD_DIR)/$(GOOS)/lifecycle
|
build-image-linux-ppc64le: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.ppc64le.tgz
|
||||||
build-linux-symlinks:
|
build-image-linux-ppc64le:
|
||||||
@echo "> Creating phase symlinks for linux..."
|
$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch ppc64le -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
|
||||||
ln -sf lifecycle $(OUT_DIR)/detector
|
|
||||||
ln -sf lifecycle $(OUT_DIR)/analyzer
|
|
||||||
ln -sf lifecycle $(OUT_DIR)/restorer
|
|
||||||
ln -sf lifecycle $(OUT_DIR)/builder
|
|
||||||
ln -sf lifecycle $(OUT_DIR)/exporter
|
|
||||||
ln -sf lifecycle $(OUT_DIR)/rebaser
|
|
||||||
ln -sf lifecycle $(OUT_DIR)/creator
|
|
||||||
|
|
||||||
build-linux: build-linux-lifecycle build-linux-symlinks build-linux-launcher
|
build-image-linux-s390x: build-linux-s390x package-linux-s390x
|
||||||
|
build-image-linux-s390x: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.s390x.tgz
|
||||||
|
build-image-linux-s390x:
|
||||||
|
$(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch s390x -tag lifecycle:$(LIFECYCLE_IMAGE_TAG)
|
||||||
|
|
||||||
build-windows: export GOOS:=windows
|
define build_targets
|
||||||
build-windows: OUT_DIR:=$(BUILD_DIR)/$(GOOS)/lifecycle
|
build-$(1)-$(2): build-$(1)-$(2)-lifecycle build-$(1)-$(2)-symlinks build-$(1)-$(2)-launcher
|
||||||
build-windows:
|
|
||||||
@echo "> Building for windows..."
|
|
||||||
mkdir -p $(OUT_DIR)
|
|
||||||
$(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher
|
|
||||||
test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 3
|
|
||||||
$(GOENV) $(GOBUILD) -o $(OUT_DIR)/lifecycle.exe -a ./cmd/lifecycle
|
|
||||||
ln -sf lifecycle.exe $(OUT_DIR)/analyzer.exe
|
|
||||||
ln -sf lifecycle.exe $(OUT_DIR)/restorer.exe
|
|
||||||
ln -sf lifecycle.exe $(OUT_DIR)/builder.exe
|
|
||||||
ln -sf lifecycle.exe $(OUT_DIR)/exporter.exe
|
|
||||||
ln -sf lifecycle.exe $(OUT_DIR)/rebaser.exe
|
|
||||||
ln -sf lifecycle.exe $(OUT_DIR)/creator.exe
|
|
||||||
|
|
||||||
build-darwin: export GOOS:=darwin
|
build-$(1)-$(2)-lifecycle: $(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle
|
||||||
build-darwin: OUT_DIR:=$(BUILD_DIR)/$(GOOS)/lifecycle
|
|
||||||
build-darwin:
|
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: export GOOS:=$(1)
|
||||||
@echo "> Building for macos..."
|
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: export GOARCH:=$(2)
|
||||||
mkdir -p $(OUT_DIR)
|
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: OUT_DIR?=$$(BUILD_DIR)/$$(GOOS)-$$(GOARCH)/lifecycle
|
||||||
$(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher
|
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle: $$(GOFILES)
|
||||||
test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 3
|
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/lifecycle:
|
||||||
$(GOENV) $(GOBUILD) -o $(OUT_DIR)/lifecycle -a ./cmd/lifecycle
|
@echo "> Building lifecycle/lifecycle for $$(GOOS)/$$(GOARCH)..."
|
||||||
ln -sf lifecycle $(OUT_DIR)/detector
|
mkdir -p $$(OUT_DIR)
|
||||||
ln -sf lifecycle $(OUT_DIR)/analyzer
|
$$(GOENV) $$(GOBUILD) -o $$(OUT_DIR)/lifecycle -a ./cmd/lifecycle
|
||||||
ln -sf lifecycle $(OUT_DIR)/restorer
|
|
||||||
ln -sf lifecycle $(OUT_DIR)/builder
|
build-$(1)-$(2)-symlinks: export GOOS:=$(1)
|
||||||
ln -sf lifecycle $(OUT_DIR)/exporter
|
build-$(1)-$(2)-symlinks: export GOARCH:=$(2)
|
||||||
ln -sf lifecycle $(OUT_DIR)/rebaser
|
build-$(1)-$(2)-symlinks: OUT_DIR?=$$(BUILD_DIR)/$$(GOOS)-$$(GOARCH)/lifecycle
|
||||||
|
build-$(1)-$(2)-symlinks:
|
||||||
|
@echo "> Creating phase symlinks for $$(GOOS)/$$(GOARCH)..."
|
||||||
|
ln -sf lifecycle $$(OUT_DIR)/detector
|
||||||
|
ln -sf lifecycle $$(OUT_DIR)/analyzer
|
||||||
|
ln -sf lifecycle $$(OUT_DIR)/restorer
|
||||||
|
ln -sf lifecycle $$(OUT_DIR)/builder
|
||||||
|
ln -sf lifecycle $$(OUT_DIR)/exporter
|
||||||
|
ln -sf lifecycle $$(OUT_DIR)/rebaser
|
||||||
|
ln -sf lifecycle $$(OUT_DIR)/creator
|
||||||
|
ln -sf lifecycle $$(OUT_DIR)/extender
|
||||||
|
|
||||||
|
build-$(1)-$(2)-launcher: $$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher
|
||||||
|
|
||||||
|
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: export GOOS:=$(1)
|
||||||
|
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: export GOARCH:=$(2)
|
||||||
|
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: OUT_DIR?=$$(BUILD_DIR)/$$(GOOS)-$$(GOARCH)/lifecycle
|
||||||
|
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher: $$(GOFILES)
|
||||||
|
$$(BUILD_DIR)/$(1)-$(2)/lifecycle/launcher:
|
||||||
|
@echo "> Building lifecycle/launcher for $$(GOOS)/$$(GOARCH)..."
|
||||||
|
mkdir -p $$(OUT_DIR)
|
||||||
|
$$(GOENV) $$(GOBUILD) -o $$(OUT_DIR)/launcher -a ./cmd/launcher
|
||||||
|
test $$$$(du -m $$(OUT_DIR)/launcher|cut -f 1) -le 3
|
||||||
|
endef
|
||||||
|
|
||||||
|
$(foreach ga,$(GOOS_ARCHS),$(eval $(call build_targets,$(word 1, $(subst /, ,$(ga))),$(word 2, $(subst /, ,$(ga))))))
|
||||||
|
|
||||||
|
generate-sbom: run-syft-linux-amd64 run-syft-linux-arm64 run-syft-linux-ppc64le run-syft-linux-s390x
|
||||||
|
|
||||||
|
run-syft-linux-amd64: install-syft
|
||||||
|
run-syft-linux-amd64: export GOOS:=linux
|
||||||
|
run-syft-linux-amd64: export GOARCH:=amd64
|
||||||
|
run-syft-linux-amd64:
|
||||||
|
@echo "> Running syft..."
|
||||||
|
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.cdx.json
|
||||||
|
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.cdx.json
|
||||||
|
|
||||||
|
run-syft-linux-arm64: install-syft
|
||||||
|
run-syft-linux-arm64: export GOOS:=linux
|
||||||
|
run-syft-linux-arm64: export GOARCH:=arm64
|
||||||
|
run-syft-linux-arm64:
|
||||||
|
@echo "> Running syft..."
|
||||||
|
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.cdx.json
|
||||||
|
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.cdx.json
|
||||||
|
|
||||||
|
run-syft-linux-ppc64le: install-syft
|
||||||
|
run-syft-linux-ppc64le: export GOOS:=linux
|
||||||
|
run-syft-linux-ppc64le: export GOARCH:=ppc64le
|
||||||
|
run-syft-linux-ppc64le:
|
||||||
|
@echo "> Running syft..."
|
||||||
|
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.cdx.json
|
||||||
|
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.cdx.json
|
||||||
|
|
||||||
|
run-syft-linux-s390x: install-syft
|
||||||
|
run-syft-linux-s390x: export GOOS:=linux
|
||||||
|
run-syft-linux-s390x: export GOARCH:=s390x
|
||||||
|
run-syft-linux-s390x:
|
||||||
|
@echo "> Running syft..."
|
||||||
|
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/lifecycle.sbom.cdx.json
|
||||||
|
syft $(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher -o json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.syft.json -o spdx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.spdx.json -o cyclonedx-json=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle/launcher.sbom.cdx.json
|
||||||
|
|
||||||
|
install-syft:
|
||||||
|
@echo "> Installing syft..."
|
||||||
|
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin
|
||||||
|
|
||||||
|
define install-go-tool
|
||||||
|
@echo "> Installing $(1)..."
|
||||||
|
$(GOCMD) install $(1)@$(shell $(GOCMD) list -m -f '{{.Version}}' $(2))
|
||||||
|
endef
|
||||||
|
|
||||||
install-goimports:
|
install-goimports:
|
||||||
@echo "> Installing goimports..."
|
@echo "> Installing goimports..."
|
||||||
cd tools; $(GOCMD) install golang.org/x/tools/cmd/goimports
|
$(call install-go-tool,golang.org/x/tools/cmd/goimports,golang.org/x/tools)
|
||||||
|
|
||||||
install-yj:
|
install-yj:
|
||||||
@echo "> Installing yj..."
|
@echo "> Installing yj..."
|
||||||
cd tools; $(GOCMD) install github.com/sclevine/yj
|
$(call install-go-tool,github.com/sclevine/yj,github.com/sclevine/yj)
|
||||||
|
|
||||||
install-mockgen:
|
install-mockgen:
|
||||||
@echo "> Installing mockgen..."
|
@echo "> Installing mockgen..."
|
||||||
cd tools; $(GOCMD) install github.com/golang/mock/mockgen
|
$(call install-go-tool,github.com/golang/mock/mockgen,github.com/golang/mock)
|
||||||
|
|
||||||
install-golangci-lint:
|
install-golangci-lint:
|
||||||
@echo "> Installing golangci-lint..."
|
@echo "> Installing golangci-lint..."
|
||||||
cd tools; $(GOCMD) install github.com/golangci/golangci-lint/cmd/golangci-lint
|
$(call install-go-tool,github.com/golangci/golangci-lint/v2/cmd/golangci-lint,github.com/golangci/golangci-lint/v2)
|
||||||
|
|
||||||
lint: install-golangci-lint
|
lint: install-golangci-lint
|
||||||
@echo "> Linting code..."
|
@echo "> Linting code..."
|
||||||
|
@ -119,55 +175,70 @@ generate: install-mockgen
|
||||||
|
|
||||||
format: install-goimports
|
format: install-goimports
|
||||||
@echo "> Formating code..."
|
@echo "> Formating code..."
|
||||||
test -z $$(goimports -l -w -local github.com/buildpacks/lifecycle .)
|
$(if $(shell goimports -l -w -local github.com/buildpacks/lifecycle .), @echo Fixed formatting errors. Re-run && exit 1)
|
||||||
|
|
||||||
verify-jq:
|
tidy:
|
||||||
ifeq (, $(shell which jq))
|
@echo "> Tidying modules..."
|
||||||
$(error "No jq in $$PATH, please install jq")
|
$(GOCMD) mod tidy
|
||||||
endif
|
|
||||||
|
|
||||||
test: unit acceptance
|
test: unit acceptance
|
||||||
|
|
||||||
unit: verify-jq format lint install-yj
|
# append coverage arguments
|
||||||
|
ifeq ($(TEST_COVERAGE), 1)
|
||||||
|
unit: GOTESTFLAGS:=$(GOTESTFLAGS) -coverprofile=./out/tests/coverage-unit.txt -covermode=atomic
|
||||||
|
endif
|
||||||
|
unit: out
|
||||||
|
unit: UNIT_PACKAGES=$(shell $(GOCMD) list ./... | grep -v acceptance)
|
||||||
|
unit: format lint tidy install-yj
|
||||||
@echo "> Running unit tests..."
|
@echo "> Running unit tests..."
|
||||||
$(GOTEST) -v -count=1 ./...
|
$(GOTEST) $(GOTESTFLAGS) -v -count=1 $(UNIT_PACKAGES)
|
||||||
|
|
||||||
acceptance: format lint
|
out:
|
||||||
@echo "> Running acceptance tests..."
|
@mkdir out || (exit 0)
|
||||||
$(GOTEST) -v -count=1 -tags=acceptance ./acceptance/...
|
mkdir out$/tests || (exit 0)
|
||||||
|
|
||||||
acceptance-darwin: format lint
|
acceptance: format tidy
|
||||||
@echo "> Running acceptance tests..."
|
@echo "> Running acceptance tests..."
|
||||||
$(GOTEST) -v -count=1 -tags=acceptance ./acceptance/...
|
$(GOTEST) -v -count=1 -tags=acceptance -timeout=$(ACCEPTANCE_TIMEOUT) ./acceptance/...
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
@echo "> Cleaning workspace..."
|
@echo "> Cleaning workspace..."
|
||||||
rm -rf $(BUILD_DIR)
|
rm -rf $(BUILD_DIR)
|
||||||
|
|
||||||
package: package-linux package-windows
|
package: generate-sbom package-linux-amd64 package-linux-arm64 package-linux-ppc64le package-linux-s390x
|
||||||
|
|
||||||
package-linux: export LIFECYCLE_DESCRIPTOR:=$(LIFECYCLE_DESCRIPTOR)
|
package-linux-amd64: GOOS:=linux
|
||||||
package-linux: GOOS:=linux
|
package-linux-amd64: GOARCH:=amd64
|
||||||
package-linux: GOOS_DIR:=$(BUILD_DIR)/$(GOOS)
|
package-linux-amd64: INPUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
|
||||||
package-linux: ARCHIVE_NAME=lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).x86-64
|
package-linux-amd64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).x86-64.tgz
|
||||||
package-linux:
|
package-linux-amd64: PACKAGER=./tools/packager/main.go
|
||||||
@echo "> Writing descriptor file for $(GOOS)..."
|
package-linux-amd64:
|
||||||
mkdir -p $(GOOS_DIR)
|
@echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..."
|
||||||
echo "$${LIFECYCLE_DESCRIPTOR}" > $(GOOS_DIR)/lifecycle.toml
|
$(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION)
|
||||||
|
|
||||||
@echo "> Packaging lifecycle for $(GOOS)..."
|
package-linux-arm64: GOOS:=linux
|
||||||
tar czf $(BUILD_DIR)/$(ARCHIVE_NAME).tgz -C $(GOOS_DIR) lifecycle.toml lifecycle
|
package-linux-arm64: GOARCH:=arm64
|
||||||
|
package-linux-arm64: INPUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
|
||||||
|
package-linux-arm64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).arm64.tgz
|
||||||
|
package-linux-arm64: PACKAGER=./tools/packager/main.go
|
||||||
|
package-linux-arm64:
|
||||||
|
@echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..."
|
||||||
|
$(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION)
|
||||||
|
|
||||||
package-windows: export LIFECYCLE_DESCRIPTOR:=$(LIFECYCLE_DESCRIPTOR)
|
package-linux-ppc64le: GOOS:=linux
|
||||||
package-windows: GOOS:=windows
|
package-linux-ppc64le: GOARCH:=ppc64le
|
||||||
package-windows: GOOS_DIR:=$(BUILD_DIR)/$(GOOS)
|
package-linux-ppc64le: INPUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
|
||||||
package-windows: ARCHIVE_NAME=lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).x86-64
|
package-linux-ppc64le: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).ppc64le.tgz
|
||||||
package-windows:
|
package-linux-ppc64le: PACKAGER=./tools/packager/main.go
|
||||||
@echo "> Writing descriptor file for $(GOOS)..."
|
package-linux-ppc64le:
|
||||||
mkdir -p $(GOOS_DIR)
|
@echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..."
|
||||||
echo "$${LIFECYCLE_DESCRIPTOR}" > $(GOOS_DIR)/lifecycle.toml
|
$(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION)
|
||||||
|
|
||||||
@echo "> Packaging lifecycle for $(GOOS)..."
|
package-linux-s390x: GOOS:=linux
|
||||||
tar czf $(BUILD_DIR)/$(ARCHIVE_NAME).tgz -C $(GOOS_DIR) lifecycle.toml lifecycle
|
package-linux-s390x: GOARCH:=s390x
|
||||||
|
package-linux-s390x: INPUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle
|
||||||
.PHONY: verify-jq
|
package-linux-s390x: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).s390x.tgz
|
||||||
|
package-linux-s390x: PACKAGER=./tools/packager/main.go
|
||||||
|
package-linux-s390x:
|
||||||
|
@echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..."
|
||||||
|
$(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION)
|
||||||
|
|
112
README.md
112
README.md
|
@ -1,72 +1,74 @@
|
||||||
# Lifecycle
|
# Lifecycle
|
||||||
|
|
||||||

|
[](https://github.com/buildpacks/lifecycle/actions)
|
||||||
[](https://godoc.org/github.com/buildpacks/lifecycle)
|
[](https://godoc.org/github.com/buildpacks/lifecycle)
|
||||||
|
[](https://codecov.io/gh/buildpacks/lifecycle/tree/main)
|
||||||
|
[](https://bestpractices.coreinfrastructure.org/projects/4748)
|
||||||
|
[](https://gitpod.io/#https://github.com/buildpacks/lifecycle)
|
||||||
|
|
||||||
A reference implementation of the [Cloud Native Buildpacks specification](https://github.com/buildpacks/spec).
|
A reference implementation of the [Cloud Native Buildpacks specification](https://github.com/buildpacks/spec).
|
||||||
|
|
||||||
This lifecycle implements the following versioned APIs
|
## Supported APIs
|
||||||
* Buildpack API 0.2
|
| Lifecycle Version | Platform APIs | Buildpack APIs |
|
||||||
* Platform API 0.3
|
|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| 0.20.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12], [0.13][p/0.13], [0.14][p/0.14] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10], [0.11][b/0.11] |
|
||||||
|
| 0.19.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12], [0.13][p/0.13] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10], [0.11][b/0.11] |
|
||||||
|
| 0.18.x | [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12] | [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10] |
|
||||||
|
| 0.17.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11], [0.12][p/0.12] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9], [0.10][b/0.10] |
|
||||||
|
| 0.16.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10], [0.11][p/0.11] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9] |
|
||||||
|
| 0.15.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9], [0.10][p/0.10] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8], [0.9][b/0.9] |
|
||||||
|
| 0.14.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8], [0.9][p/0.9] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7], [0.8][b/0.8] |
|
||||||
|
| 0.13.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7], [0.8][p/0.8] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6], [0.7][b/0.7] |
|
||||||
|
|
||||||
## Commands
|
[b/0.2]: https://github.com/buildpacks/spec/blob/buildpack/v0.2/buildpack.md
|
||||||
|
[b/0.3]: https://github.com/buildpacks/spec/tree/buildpack/v0.3/buildpack.md
|
||||||
|
[b/0.4]: https://github.com/buildpacks/spec/tree/buildpack/v0.4/buildpack.md
|
||||||
|
[b/0.5]: https://github.com/buildpacks/spec/tree/buildpack/v0.5/buildpack.md
|
||||||
|
[b/0.6]: https://github.com/buildpacks/spec/tree/buildpack/v0.6/buildpack.md
|
||||||
|
[b/0.7]: https://github.com/buildpacks/spec/tree/buildpack/v0.7/buildpack.md
|
||||||
|
[b/0.8]: https://github.com/buildpacks/spec/tree/buildpack/v0.8/buildpack.md
|
||||||
|
[b/0.9]: https://github.com/buildpacks/spec/tree/buildpack/v0.9/buildpack.md
|
||||||
|
[b/0.10]: https://github.com/buildpacks/spec/tree/buildpack/v0.10/buildpack.md
|
||||||
|
[b/0.11]: https://github.com/buildpacks/spec/tree/buildpack/v0.11/buildpack.md
|
||||||
|
[p/0.2]: https://github.com/buildpacks/spec/blob/platform/v0.2/platform.md
|
||||||
|
[p/0.3]: https://github.com/buildpacks/spec/blob/platform/v0.3/platform.md
|
||||||
|
[p/0.4]: https://github.com/buildpacks/spec/blob/platform/v0.4/platform.md
|
||||||
|
[p/0.5]: https://github.com/buildpacks/spec/blob/platform/v0.5/platform.md
|
||||||
|
[p/0.6]: https://github.com/buildpacks/spec/blob/platform/v0.6/platform.md
|
||||||
|
[p/0.7]: https://github.com/buildpacks/spec/blob/platform/v0.7/platform.md
|
||||||
|
[p/0.8]: https://github.com/buildpacks/spec/blob/platform/v0.8/platform.md
|
||||||
|
[p/0.9]: https://github.com/buildpacks/spec/blob/platform/v0.9/platform.md
|
||||||
|
[p/0.10]: https://github.com/buildpacks/spec/blob/platform/v0.10/platform.md
|
||||||
|
[p/0.11]: https://github.com/buildpacks/spec/blob/platform/v0.11/platform.md
|
||||||
|
[p/0.12]: https://github.com/buildpacks/spec/blob/platform/v0.12/platform.md
|
||||||
|
[p/0.13]: https://github.com/buildpacks/spec/blob/platform/v0.13/platform.md
|
||||||
|
[p/0.14]: https://github.com/buildpacks/spec/blob/platform/v0.14/platform.md
|
||||||
|
|
||||||
|
\* denotes unreleased version
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
### Build
|
### Build
|
||||||
|
|
||||||
* `detector` - chooses buildpacks (via `/bin/detect`)
|
Either:
|
||||||
* `analyzer` - restores launch layer metadata from the previous build
|
* `analyzer` - Reads metadata from the previous image and ensures registry access.
|
||||||
* `restorer` - restores cache
|
* `detector` - Chooses buildpacks (via `/bin/detect`) and produces a build plan.
|
||||||
* `builder` - executes buildpacks (via `/bin/build`)
|
* `restorer` - Restores layer metadata from the previous image and from the cache, and restores cached layers.
|
||||||
* `exporter` - creates image and stores cache
|
* `builder` - Executes buildpacks (via `/bin/build`).
|
||||||
|
* `exporter` - Creates an image and caches layers.
|
||||||
|
|
||||||
|
Or:
|
||||||
|
* `creator` - Runs the five phases listed above in order.
|
||||||
|
|
||||||
### Run
|
### Run
|
||||||
|
|
||||||
* `launcher` - invokes choice of process
|
* `launcher` - Invokes a chosen process.
|
||||||
|
|
||||||
### Rebase
|
### Rebase
|
||||||
|
|
||||||
* `rebaser` - remotely patches images with new base image
|
* `rebaser` - Creates an image from a previous image with updated base layers.
|
||||||
|
|
||||||
## Development
|
## Contributing
|
||||||
To test, build, and package binaries into an archive, simply run:
|
- [CONTRIBUTING](CONTRIBUTING.md) - Information on how to contribute and grow your understanding of the lifecycle.
|
||||||
|
- [DEVELOPMENT](DEVELOPMENT.md) - Further detail to help you during the development process.
|
||||||
```bash
|
- [RELEASE](RELEASE.md) - Further details about our release process.
|
||||||
$ make all
|
|
||||||
```
|
|
||||||
This will create an archive at `out/lifecycle-<LIFECYCLE_VERSION>+linux.x86-64.tgz`.
|
|
||||||
|
|
||||||
`LIFECYCLE_VERSION` defaults to the value in the `VERSION` file at the root of the repo . It can be changed by prepending `LIFECYCLE_VERSION=<some version>` to the
|
|
||||||
`make` command. For example:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ LIFECYCLE_VERSION=1.2.3 make all
|
|
||||||
```
|
|
||||||
|
|
||||||
Steps can also be run individually as shown below.
|
|
||||||
|
|
||||||
### Test
|
|
||||||
|
|
||||||
Formats, vets, and tests the code.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ make test
|
|
||||||
```
|
|
||||||
|
|
||||||
### Build
|
|
||||||
|
|
||||||
Builds binaries to `out/linux/lifecycle/`.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ make build
|
|
||||||
```
|
|
||||||
|
|
||||||
> To clean the `out/` directory, run `make clean`.
|
|
||||||
|
|
||||||
### Package
|
|
||||||
|
|
||||||
Creates an archive at `out/lifecycle-<LIFECYCLE_VERSION>+linux.x86-64.tgz`, using the contents of the
|
|
||||||
`out/linux/lifecycle/` directory, for the given (or default) `LIFECYCLE_VERSION`.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ make package
|
|
||||||
```
|
|
||||||
|
|
|
@ -0,0 +1,73 @@
|
||||||
|
# Release Finalization
|
||||||
|
|
||||||
|
## Types of releases
|
||||||
|
|
||||||
|
#### New minor
|
||||||
|
* For newly supported Platform or Buildpack API versions, or breaking changes (e.g., API deprecations).
|
||||||
|
|
||||||
|
#### Pre-release aka release candidate
|
||||||
|
* Ideally we should ship a pre-release (waiting a few days for folks to try it out) before we ship a new minor.
|
||||||
|
* We typically don't ship pre-releases for patches or backports.
|
||||||
|
|
||||||
|
#### New patch
|
||||||
|
* For go version updates, CVE fixes / dependency bumps, bug fixes, etc.
|
||||||
|
* Review the latest commits on `main` to determine if any are unacceptable for a patch - if there are commits that should be excluded, branch off the latest tag for the current minor and cherry-pick commits over.
|
||||||
|
|
||||||
|
#### Backport
|
||||||
|
* New patch for an old minor. Typically, to help folks out who haven't yet upgraded from [unsupported APIs](https://github.com/buildpacks/rfcs/blob/main/text/0110-deprecate-apis.md).
|
||||||
|
* For go version updates, CVE fixes / dependency bumps, bug fixes, etc.
|
||||||
|
* Branch off the latest tag for the desired minor.
|
||||||
|
|
||||||
|
## Release Finalization Steps
|
||||||
|
|
||||||
|
### Step 1 - Prepare
|
||||||
|
|
||||||
|
Determine the type of release ([new minor](#new-minor), [pre-release](#pre-release-aka-release-candidate), [new patch](#new-patch), or [backport](#backport)) and prepare the branch accordingly.
|
||||||
|
|
||||||
|
**To prepare the release branch:**
|
||||||
|
1. Check open PRs for any dependabot updates that should be merged.
|
||||||
|
1. Create a release branch in the format `release/0.99.0-rc.1` (for pre-releases) or `release/0.99.0` (for final releases); a minimal sketch appears after this list.
|
||||||
|
* New commits to this branch will trigger the `build` workflow and produce a lifecycle image: `buildpacksio/lifecycle:<commit sha>`.
|
||||||
|
1. If applicable, ensure the README is updated with the latest supported apis (example PR: https://github.com/buildpacks/lifecycle/pull/550).
|
||||||
|
* For final releases (not pre-releases), remove the pre-release note (`*`) for the latest apis.
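A minimal sketch of preparing the release branch (the version number is illustrative):

```bash
git checkout main && git pull
git checkout -b release/0.99.0
git push origin release/0.99.0   # pushing triggers the `build` workflow for this branch
```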
|
||||||
|
|
||||||
|
**For final releases (not pre-releases):**
|
||||||
|
1. Ensure the relevant spec APIs have been released.
|
||||||
|
1. Ensure the `lifecycle/0.99.0` milestone on the [docs repo](https://github.com/buildpacks/docs/blob/main/RELEASE.md#lump-changes) is complete, such that every new feature in the lifecycle is fully explained in the `release/lifecycle/0.99` branch on the docs repo, and [migration guides](https://github.com/buildpacks/docs/tree/main/content/docs/reference/spec/migration) (if relevant) are included.
|
||||||
|
|
||||||
|
### Step 2 - Publish the Release
|
||||||
|
|
||||||
|
1. Manually trigger the `draft-release` workflow: Actions -> draft-release -> Run workflow -> Use workflow from branch: `release/<release version>`. This will create a draft release on GitHub using the artifacts from the `build` workflow run for the latest commit on the release branch. (The same dispatch can be issued from the CLI, as sketched after this list.)
|
||||||
|
1. Edit the release notes as necessary.
|
||||||
|
1. Perform any manual validation of the artifacts as necessary (usually none).
|
||||||
|
1. Edit the release page and click "Publish release".
|
||||||
|
* This will trigger the `post-release` workflow that will re-tag the lifecycle image from `buildpacksio/lifecycle:<commit sha>` to `buildpacksio/lifecycle:<release version>`.
|
||||||
|
* For final releases ONLY, this will also re-tag the lifecycle image from `buildpacksio/lifecycle:<commit sha>` to `buildpacksio/lifecycle:latest`.
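If the GitHub CLI is installed and authenticated against the repository, the same dispatch can be issued from a terminal (the version number is illustrative):

```bash
gh workflow run draft-release --ref release/0.99.0
gh run watch   # optionally follow the workflow run until it completes
```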
|
||||||
|
|
||||||
|
### Step 3 - Follow-up
|
||||||
|
|
||||||
|
**For pre-releases:**
|
||||||
|
* Ask the relevant teams to try out the pre-released artifacts.
|
||||||
|
|
||||||
|
**For final releases:**
|
||||||
|
* Update the `main` branch to remove the pre-release note in [README.md](https://github.com/buildpacks/lifecycle/blob/main/README.md) and/or merge `release/0.99.0` into `main`.
|
||||||
|
* Ask the learning team to merge the `release/lifecycle/0.99` branch into `main` on the docs repo.
|
||||||
|
|
||||||
|
## Go version updates
|
||||||
|
|
||||||
|
Go version updates should be released as a [new minor](#new-minor) or [new patch](#new-patch) release.
|
||||||
|
|
||||||
|
### New Patch
|
||||||
|
|
||||||
|
If the go patch is in [actions/go-versions](https://github.com/actions/go-versions/pulls?q=is%3Apr+is%3Aclosed) then CI should pull it in automatically without any action needed.
|
||||||
|
We simply need to create the release branch and let the pipeline run.
|
||||||
|
|
||||||
|
### New Minor
|
||||||
|
|
||||||
|
We typically do this when the existing patch version reaches about 6 (e.g., `1.22.6`). This means we have roughly 6 months to upgrade before the current minor becomes unsupported due to the introduction of the new n+2 minor.
|
||||||
|
|
||||||
|
#### Steps
|
||||||
|
1. Update go.mod (see the sketch after this list)
|
||||||
|
1. Search for the old `major.minor`; there are a few files that need to be updated (example PR: https://github.com/buildpacks/lifecycle/pull/1405/files)
|
||||||
|
1. Update the linter to a version that supports the current `major.minor`
|
||||||
|
1. Fix any lint errors as necessary
|
|
@@ -1,9 +1,7 @@
-// +build acceptance
-
 package acceptance

 import (
-	"io/ioutil"
+	"fmt"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -13,52 +11,50 @@ import (
 	"github.com/sclevine/spec"
 	"github.com/sclevine/spec/report"

+	"github.com/buildpacks/lifecycle/api"
 	h "github.com/buildpacks/lifecycle/testhelpers"
 )

-var buildDir string
+const (
+	expectedVersion = "some-version"
+	expectedCommit  = "asdf123"
+)
+
+var (
+	latestPlatformAPI = api.Platform.Latest().String()
+	buildDir          string
+)

 func TestVersion(t *testing.T) {
 	var err error
-	buildDir, err = ioutil.TempDir("", "lifecycle-acceptance")
+	buildDir, err = os.MkdirTemp("", "lifecycle-acceptance")
 	h.AssertNil(t, err)
 	defer func() {
 		h.AssertNil(t, os.RemoveAll(buildDir))
 	}()
-	buildBinaries(t, buildDir, runtime.GOOS)
+	outDir := filepath.Join(buildDir, fmt.Sprintf("%s-%s", runtime.GOOS, runtime.GOARCH), "lifecycle")
+	h.AssertNil(t, os.MkdirAll(outDir, 0755))
+
+	h.MakeAndCopyLifecycle(t,
+		runtime.GOOS,
+		runtime.GOARCH,
+		outDir,
+		"LIFECYCLE_VERSION=some-version",
+		"SCM_COMMIT="+expectedCommit,
+	)
 	spec.Run(t, "acceptance", testVersion, spec.Parallel(), spec.Report(report.Terminal{}))
 }

 type testCase struct {
 	description string
+	focus       bool
 	command     string
 	args        []string
 }

 func testVersion(t *testing.T, when spec.G, it spec.S) {
 	when("All", func() {
-		when("CNB_PLATFORM_API is set and incompatible", func() {
-			for _, binary := range []string{
-				"analyzer",
-				"builder",
-				"detector",
-				"exporter",
-				"restorer",
-				"rebaser",
-				"lifecycle",
-			} {
-				binary := binary
-				it(binary+"/should fail with error message and exit code 11", func() {
-					cmd := lifecycleCmd(binary)
-					cmd.Env = append(os.Environ(), "CNB_PLATFORM_API=0.8")
-
-					_, exitCode, err := h.RunE(cmd)
-					h.AssertError(t, err, "the Lifecycle's Platform API version is 0.9 which is incompatible with Platform API version 0.8")
-					h.AssertEq(t, exitCode, 11)
-				})
-			}
-		})
-
 		when("version flag is set", func() {
 			for _, tc := range []testCase{
 				{
@@ -138,14 +134,19 @@ func testVersion(t *testing.T, when spec.G, it spec.S) {
 				},
 			} {
 				tc := tc
-				when(tc.description, func() {
+				w := when
+				if tc.focus {
+					w = when.Focus
+				}
+				w(tc.description, func() {
 					it("only prints the version", func() {
 						cmd := lifecycleCmd(tc.command, tc.args...)
+						cmd.Env = []string{fmt.Sprintf("CNB_PLATFORM_API=%s", api.Platform.Latest().String())}
 						output, err := cmd.CombinedOutput()
 						if err != nil {
 							t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
 						}
-						h.AssertStringContains(t, string(output), "some-version+asdf123")
+						h.AssertStringContains(t, string(output), expectedVersion+"+"+expectedCommit)
 					})
 				})
 			}
@@ -153,27 +154,6 @@ func testVersion(t *testing.T, when spec.G, it spec.S) {
 	})
 }

-func lifecycleCmd(binary string, args ...string) *exec.Cmd {
-	return exec.Command(filepath.Join(buildDir, runtime.GOOS, "lifecycle", binary), args...)
-}
-
-func buildBinaries(t *testing.T, dir string, goos string) {
-	cmd := exec.Command("make", "build-"+runtime.GOOS)
-	wd, err := os.Getwd()
-	h.AssertNil(t, err)
-	cmd.Dir = filepath.Join(wd, "..")
-	cmd.Env = append(
-		os.Environ(),
-		"GOOS="+goos,
-		"PWD="+cmd.Dir,
-		"BUILD_DIR="+dir,
-		"PLATFORM_API=0.9",
-		"LIFECYCLE_VERSION=some-version",
-		"SCM_COMMIT=asdf123",
-	)
-
-	t.Log("Building binaries: ", cmd.Args)
-	if output, err := cmd.CombinedOutput(); err != nil {
-		t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
-	}
-}
+func lifecycleCmd(phase string, args ...string) *exec.Cmd {
+	return exec.Command(filepath.Join(buildDir, fmt.Sprintf("%s-%s", runtime.GOOS, runtime.GOARCH), "lifecycle", phase), args...) // #nosec G204
+}

@@ -0,0 +1,541 @@
|
||||||
|
package acceptance
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/sclevine/spec"
|
||||||
|
"github.com/sclevine/spec/report"
|
||||||
|
|
||||||
|
"github.com/buildpacks/lifecycle/api"
|
||||||
|
"github.com/buildpacks/lifecycle/cmd"
|
||||||
|
"github.com/buildpacks/lifecycle/internal/path"
|
||||||
|
"github.com/buildpacks/lifecycle/platform/files"
|
||||||
|
h "github.com/buildpacks/lifecycle/testhelpers"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
analyzeImage string
|
||||||
|
analyzeRegAuthConfig string
|
||||||
|
analyzeRegNetwork string
|
||||||
|
analyzerPath string
|
||||||
|
analyzeDaemonFixtures *daemonImageFixtures
|
||||||
|
analyzeRegFixtures *regImageFixtures
|
||||||
|
analyzeTest *PhaseTest
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAnalyzer(t *testing.T) {
|
||||||
|
testImageDockerContext := filepath.Join("testdata", "analyzer")
|
||||||
|
analyzeTest = NewPhaseTest(t, "analyzer", testImageDockerContext)
|
||||||
|
analyzeTest.Start(t)
|
||||||
|
defer analyzeTest.Stop(t)
|
||||||
|
|
||||||
|
analyzeImage = analyzeTest.testImageRef
|
||||||
|
analyzerPath = analyzeTest.containerBinaryPath
|
||||||
|
analyzeRegAuthConfig = analyzeTest.targetRegistry.authConfig
|
||||||
|
analyzeRegNetwork = analyzeTest.targetRegistry.network
|
||||||
|
analyzeDaemonFixtures = analyzeTest.targetDaemon.fixtures
|
||||||
|
analyzeRegFixtures = analyzeTest.targetRegistry.fixtures
|
||||||
|
|
||||||
|
for _, platformAPI := range api.Platform.Supported {
|
||||||
|
spec.Run(t, "acceptance-analyzer/"+platformAPI.String(), testAnalyzerFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAnalyzerFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
|
||||||
|
return func(t *testing.T, when spec.G, it spec.S) {
|
||||||
|
var copyDir, containerName, cacheVolume string
|
||||||
|
|
||||||
|
it.Before(func() {
|
||||||
|
containerName = "test-container-" + h.RandString(10)
|
||||||
|
var err error
|
||||||
|
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.After(func() {
|
||||||
|
if h.DockerContainerExists(t, containerName) {
|
||||||
|
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||||
|
}
|
||||||
|
if h.DockerVolumeExists(t, cacheVolume) {
|
||||||
|
h.DockerVolumeRemove(t, cacheVolume)
|
||||||
|
}
|
||||||
|
os.RemoveAll(copyDir)
|
||||||
|
})
|
||||||
|
|
||||||
|
when("CNB_PLATFORM_API not provided", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
cmd := exec.Command(
|
||||||
|
"docker", "run", "--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API= ",
|
||||||
|
analyzeImage,
|
||||||
|
ctrPath(analyzerPath),
|
||||||
|
"some-image",
|
||||||
|
) // #nosec G204
|
||||||
|
output, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "please set 'CNB_PLATFORM_API'"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("called without an app image", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
cmd := exec.Command(
|
||||||
|
"docker", "run", "--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
analyzeImage,
|
||||||
|
ctrPath(analyzerPath),
|
||||||
|
) // #nosec G204
|
||||||
|
output, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "failed to parse arguments: received 0 arguments, but expected 1"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("called with skip layers", func() {
|
||||||
|
it("writes analyzed.toml and does not restore previous image SBOM", func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "Platform API < 0.9 does not accept a -skip-layers flag")
|
||||||
|
output := h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers"),
|
||||||
|
analyzeImage,
|
||||||
|
h.WithFlags(append(
|
||||||
|
dockerSocketMount,
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
)...),
|
||||||
|
h.WithArgs(
|
||||||
|
ctrPath(analyzerPath),
|
||||||
|
"-daemon",
|
||||||
|
"-run-image", analyzeRegFixtures.ReadOnlyRunImage,
|
||||||
|
"-skip-layers",
|
||||||
|
analyzeDaemonFixtures.AppImage,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
assertAnalyzedMetadata(t, filepath.Join(copyDir, "layers", "analyzed.toml"))
|
||||||
|
h.AssertStringDoesNotContain(t, output, "Restoring data for SBOM from previous image")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("the provided layers directory isn't writeable", func() {
|
||||||
|
it("recursively chowns the directory", func() {
|
||||||
|
analyzeFlags := []string{"-run-image", analyzeRegFixtures.ReadOnlyRunImage}
|
||||||
|
|
||||||
|
output := h.DockerRun(t,
|
||||||
|
analyzeImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||||
|
"--network", analyzeRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithBash(
|
||||||
|
fmt.Sprintf("chown -R 9999:9999 /layers; chmod -R 775 /layers; %s %s %s; ls -al /layers",
|
||||||
|
analyzerPath,
|
||||||
|
flatPrint(analyzeFlags),
|
||||||
|
analyzeRegFixtures.SomeAppImage),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
h.AssertMatch(t, output, "2222 3333 .+ \\.")
|
||||||
|
h.AssertMatch(t, output, "2222 3333 .+ group.toml")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("called with analyzed", func() {
|
||||||
|
it("uses the provided analyzed.toml path", func() {
|
||||||
|
analyzeFlags := []string{
|
||||||
|
"-analyzed", ctrPath("/some-dir/some-analyzed.toml"),
|
||||||
|
"-run-image", analyzeRegFixtures.ReadOnlyRunImage,
|
||||||
|
}
|
||||||
|
|
||||||
|
var execArgs []string
|
||||||
|
execArgs = append([]string{ctrPath(analyzerPath)}, analyzeFlags...)
|
||||||
|
execArgs = append(execArgs, analyzeRegFixtures.SomeAppImage)
|
||||||
|
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/some-dir/some-analyzed.toml"),
|
||||||
|
analyzeImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||||
|
"--network", analyzeRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithArgs(execArgs...),
|
||||||
|
)
|
||||||
|
|
||||||
|
assertAnalyzedMetadata(t, filepath.Join(copyDir, "some-analyzed.toml"))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("called with run", func() {
|
||||||
|
it("uses the provided run.toml path", func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept -run")
|
||||||
|
cmd := exec.Command(
|
||||||
|
"docker", "run", "--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||||
|
"--network", analyzeRegNetwork,
|
||||||
|
analyzeImage,
|
||||||
|
ctrPath(analyzerPath),
|
||||||
|
"-run", "/cnb/run.toml",
|
||||||
|
analyzeRegFixtures.SomeAppImage,
|
||||||
|
) // #nosec G204
|
||||||
|
output, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "failed to find accessible run image"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
it("drops privileges", func() {
|
||||||
|
analyzeArgs := []string{
|
||||||
|
"-analyzed", "/some-dir/some-analyzed.toml",
|
||||||
|
"-run-image", analyzeRegFixtures.ReadOnlyRunImage,
|
||||||
|
}
|
||||||
|
|
||||||
|
output := h.DockerRun(t,
|
||||||
|
analyzeImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||||
|
"--network", analyzeRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithBash(
|
||||||
|
fmt.Sprintf("%s %s %s; ls -al /some-dir",
|
||||||
|
ctrPath(analyzerPath),
|
||||||
|
flatPrint(analyzeArgs),
|
||||||
|
analyzeRegFixtures.SomeAppImage,
|
||||||
|
),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
h.AssertMatch(t, output, "2222 3333 .+ some-analyzed.toml")
|
||||||
|
})
|
||||||
|
|
||||||
|
when("run image", func() {
|
||||||
|
when("provided", func() {
|
||||||
|
it("is recorded in analyzed.toml", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers/analyzed.toml"),
|
||||||
|
analyzeImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||||
|
"--network", analyzeRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithArgs(ctrPath(analyzerPath), "-run-image", analyzeRegFixtures.ReadOnlyRunImage, analyzeRegFixtures.SomeAppImage),
|
||||||
|
)
|
||||||
|
|
||||||
|
analyzedMD := assertAnalyzedMetadata(t, filepath.Join(copyDir, "analyzed.toml"))
|
||||||
|
h.AssertStringContains(t, analyzedMD.RunImage.Reference, analyzeRegFixtures.ReadOnlyRunImage+"@sha256:")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("not provided", func() {
|
||||||
|
it("falls back to CNB_RUN_IMAGE", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers/analyzed.toml"),
|
||||||
|
analyzeImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||||
|
"--env", "CNB_RUN_IMAGE="+analyzeRegFixtures.ReadOnlyRunImage,
|
||||||
|
"--network", analyzeRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithArgs(ctrPath(analyzerPath), analyzeRegFixtures.SomeAppImage),
|
||||||
|
)
|
||||||
|
|
||||||
|
analyzedMD := assertAnalyzedMetadata(t, filepath.Join(copyDir, "analyzed.toml"))
|
||||||
|
h.AssertStringContains(t, analyzedMD.RunImage.Reference, analyzeRegFixtures.ReadOnlyRunImage+"@sha256:")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("daemon case", func() {
|
||||||
|
it("writes analyzed.toml", func() {
|
||||||
|
analyzeFlags := []string{
|
||||||
|
"-daemon",
|
||||||
|
"-run-image", "some-run-image",
|
||||||
|
}
|
||||||
|
|
||||||
|
var execArgs []string
|
||||||
|
execArgs = append([]string{ctrPath(analyzerPath)}, analyzeFlags...)
|
||||||
|
execArgs = append(execArgs, analyzeRegFixtures.ReadOnlyAppImage)
|
||||||
|
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers/analyzed.toml"),
|
||||||
|
analyzeImage,
|
||||||
|
h.WithFlags(append(
|
||||||
|
dockerSocketMount,
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
)...),
|
||||||
|
h.WithArgs(execArgs...),
|
||||||
|
)
|
||||||
|
|
||||||
|
assertAnalyzedMetadata(t, filepath.Join(copyDir, "analyzed.toml"))
|
||||||
|
})
|
||||||
|
|
||||||
|
when("app image exists", func() {
|
||||||
|
it("does not restore app metadata to the layers directory", func() {
|
||||||
|
analyzeFlags := []string{"-daemon", "-run-image", "some-run-image"}
|
||||||
|
|
||||||
|
var execArgs []string
|
||||||
|
execArgs = append([]string{ctrPath(analyzerPath)}, analyzeFlags...)
|
||||||
|
execArgs = append(execArgs, analyzeDaemonFixtures.AppImage)
|
||||||
|
|
||||||
|
output := h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers"),
|
||||||
|
analyzeImage,
|
||||||
|
h.WithFlags(append(
|
||||||
|
dockerSocketMount,
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
)...),
|
||||||
|
h.WithArgs(execArgs...),
|
||||||
|
)
|
||||||
|
|
||||||
|
assertNoRestoreOfAppMetadata(t, copyDir, output)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("registry case", func() {
|
||||||
|
it("writes analyzed.toml", func() {
|
||||||
|
analyzeFlags := []string{"-run-image", analyzeRegFixtures.ReadOnlyRunImage}
|
||||||
|
|
||||||
|
var execArgs []string
|
||||||
|
execArgs = append([]string{ctrPath(analyzerPath)}, analyzeFlags...)
|
||||||
|
execArgs = append(execArgs, analyzeRegFixtures.SomeAppImage)
|
||||||
|
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers/analyzed.toml"),
|
||||||
|
analyzeImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||||
|
"--network", analyzeRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithArgs(execArgs...),
|
||||||
|
)
|
||||||
|
|
||||||
|
assertAnalyzedMetadata(t, filepath.Join(copyDir, "analyzed.toml"))
|
||||||
|
})
|
||||||
|
|
||||||
|
when("called with previous image", func() {
|
||||||
|
when("auth registry", func() {
|
||||||
|
when("the destination image does not exist", func() {
|
||||||
|
it("writes analyzed.toml with previous image identifier", func() {
|
||||||
|
analyzeFlags := []string{
|
||||||
|
"-previous-image", analyzeRegFixtures.ReadWriteAppImage,
|
||||||
|
"-run-image", analyzeRegFixtures.ReadOnlyRunImage,
|
||||||
|
}
|
||||||
|
|
||||||
|
var execArgs []string
|
||||||
|
execArgs = append([]string{ctrPath(analyzerPath)}, analyzeFlags...)
|
||||||
|
execArgs = append(execArgs, analyzeRegFixtures.ReadWriteOtherAppImage)
|
||||||
|
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers/analyzed.toml"),
|
||||||
|
analyzeImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||||
|
"--network", analyzeRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithArgs(execArgs...),
|
||||||
|
)
|
||||||
|
analyzedMD := assertAnalyzedMetadata(t, filepath.Join(copyDir, "analyzed.toml"))
|
||||||
|
h.AssertStringContains(t, analyzedMD.PreviousImageRef(), analyzeRegFixtures.ReadWriteAppImage)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("the destination image exists", func() {
|
||||||
|
it("writes analyzed.toml with previous image identifier", func() {
|
||||||
|
analyzeFlags := []string{
|
||||||
|
"-previous-image", analyzeRegFixtures.ReadWriteAppImage,
|
||||||
|
"-run-image", analyzeRegFixtures.ReadOnlyRunImage,
|
||||||
|
}
|
||||||
|
|
||||||
|
var execArgs []string
|
||||||
|
execArgs = append([]string{ctrPath(analyzerPath)}, analyzeFlags...)
|
||||||
|
execArgs = append(execArgs, analyzeRegFixtures.ReadWriteOtherAppImage)
|
||||||
|
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers/analyzed.toml"),
|
||||||
|
analyzeImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||||
|
"--network", analyzeRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithArgs(execArgs...),
|
||||||
|
)
|
||||||
|
|
||||||
|
analyzedMD := assertAnalyzedMetadata(t, filepath.Join(copyDir, "analyzed.toml"))
|
||||||
|
h.AssertStringContains(t, analyzedMD.PreviousImageRef(), analyzeRegFixtures.ReadWriteAppImage)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("called with tag", func() {
|
||||||
|
when("read/write access to registry", func() {
|
||||||
|
it("passes read/write validation and writes analyzed.toml", func() {
|
||||||
|
execArgs := []string{
|
||||||
|
ctrPath(analyzerPath),
|
||||||
|
"-tag", analyzeRegFixtures.ReadWriteOtherAppImage,
|
||||||
|
analyzeRegFixtures.ReadWriteAppImage,
|
||||||
|
}
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers/analyzed.toml"),
|
||||||
|
analyzeImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+analyzeRegAuthConfig,
|
||||||
|
"--env", "CNB_RUN_IMAGE="+analyzeRegFixtures.ReadOnlyRunImage,
|
||||||
|
"--network", analyzeRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithArgs(execArgs...),
|
||||||
|
)
|
||||||
|
analyzedMD := assertAnalyzedMetadata(t, filepath.Join(copyDir, "analyzed.toml"))
|
||||||
|
h.AssertStringContains(t, analyzedMD.PreviousImageRef(), analyzeRegFixtures.ReadWriteAppImage)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("no read/write access to registry", func() {
|
||||||
|
it("throws read/write error accessing destination tag", func() {
|
||||||
|
cmd := exec.Command(
|
||||||
|
"docker", "run", "--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_RUN_IMAGE="+analyzeRegFixtures.ReadOnlyRunImage,
|
||||||
|
"--name", containerName,
|
||||||
|
"--network", analyzeRegNetwork,
|
||||||
|
analyzeImage,
|
||||||
|
ctrPath(analyzerPath),
|
||||||
|
"-tag", analyzeRegFixtures.InaccessibleImage,
|
||||||
|
analyzeRegFixtures.ReadWriteAppImage,
|
||||||
|
) // #nosec G204
|
||||||
|
output, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "ensure registry read/write access to " + analyzeRegFixtures.InaccessibleImage
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("layout case", func() {
|
||||||
|
layoutDir := filepath.Join(path.RootDir, "layout-repo")
|
||||||
|
when("experimental mode is enabled", func() {
|
||||||
|
it("writes analyzed.toml", func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
|
||||||
|
|
||||||
|
analyzeFlags := []string{
|
||||||
|
"-layout",
|
||||||
|
"-layout-dir", layoutDir,
|
||||||
|
"-run-image", "busybox",
|
||||||
|
}
|
||||||
|
var execArgs []string
|
||||||
|
execArgs = append([]string{ctrPath(analyzerPath)}, analyzeFlags...)
|
||||||
|
execArgs = append(execArgs, "my-app")
|
||||||
|
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers/analyzed.toml"),
|
||||||
|
analyzeImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_EXPERIMENTAL_MODE=warn",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
),
|
||||||
|
h.WithArgs(execArgs...),
|
||||||
|
)
|
||||||
|
|
||||||
|
analyzer := assertAnalyzedMetadata(t, filepath.Join(copyDir, "analyzed.toml"))
|
||||||
|
h.AssertNotNil(t, analyzer.RunImage)
|
||||||
|
analyzedImagePath := filepath.Join(path.RootDir, "layout-repo", "index.docker.io", "library", "busybox", "latest")
|
||||||
|
reference := fmt.Sprintf("%s@%s", analyzedImagePath, "sha256:f75f3d1a317fc82c793d567de94fc8df2bece37acd5f2bd364a0d91a0d1f3dab")
|
||||||
|
h.AssertEq(t, analyzer.RunImage.Reference, reference)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("experimental mode is not enabled", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
|
||||||
|
cmd := exec.Command(
|
||||||
|
"docker", "run", "--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_LAYOUT_DIR="+layoutDir,
|
||||||
|
analyzeImage,
|
||||||
|
ctrPath(analyzerPath),
|
||||||
|
"-layout",
|
||||||
|
"-run-image", "busybox",
|
||||||
|
"some-image",
|
||||||
|
) // #nosec G204
|
||||||
|
output, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "experimental features are disabled by CNB_EXPERIMENTAL_MODE=error"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func assertAnalyzedMetadata(t *testing.T, path string) *files.Analyzed {
|
||||||
|
contents, err := os.ReadFile(path)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, len(contents) > 0, true)
|
||||||
|
|
||||||
|
analyzedMD, err := files.Handler.ReadAnalyzed(path, cmd.DefaultLogger)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
|
||||||
|
return &analyzedMD
|
||||||
|
}
|
||||||
|
|
||||||
|
func assertNoRestoreOfAppMetadata(t *testing.T, dir, output string) {
|
||||||
|
layerFilenames := []string{
|
||||||
|
"launch-build-cache-layer.sha",
|
||||||
|
"launch-build-cache-layer.toml",
|
||||||
|
"launch-cache-layer.sha",
|
||||||
|
"launch-cache-layer.toml",
|
||||||
|
"launch-layer.sha",
|
||||||
|
"launch-layer.toml",
|
||||||
|
"store.toml",
|
||||||
|
}
|
||||||
|
for _, filename := range layerFilenames {
|
||||||
|
h.AssertPathDoesNotExist(t, filepath.Join(dir, "layers", "some-buildpack-id", filename))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func flatPrint(arr []string) string {
|
||||||
|
return strings.Join(arr, " ")
|
||||||
|
}
|
|
@@ -0,0 +1,535 @@
|
||||||
|
package acceptance
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/sclevine/spec"
|
||||||
|
"github.com/sclevine/spec/report"
|
||||||
|
|
||||||
|
"github.com/buildpacks/lifecycle/api"
|
||||||
|
"github.com/buildpacks/lifecycle/platform/files"
|
||||||
|
h "github.com/buildpacks/lifecycle/testhelpers"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
builderDockerContext = filepath.Join("testdata", "builder")
|
||||||
|
builderBinaryDir = filepath.Join("testdata", "builder", "container", "cnb", "lifecycle")
|
||||||
|
builderImage = "lifecycle/acceptance/builder"
|
||||||
|
builderDaemonOS, builderDaemonArch string
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestBuilder(t *testing.T) {
|
||||||
|
info, err := h.DockerCli(t).Info(context.TODO())
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
|
||||||
|
// These variables are clones of the variables in analyzer_test.go.
|
||||||
|
// You can find the same variables there without `builder` prefix.
|
||||||
|
// These lines are added for supporting windows tests.
|
||||||
|
builderDaemonOS = info.OSType
|
||||||
|
builderDaemonArch = info.Architecture
|
||||||
|
if builderDaemonArch == "x86_64" {
|
||||||
|
builderDaemonArch = "amd64"
|
||||||
|
} else if builderDaemonArch == "aarch64" {
|
||||||
|
builderDaemonArch = "arm64"
|
||||||
|
}
|
||||||
|
|
||||||
|
h.MakeAndCopyLifecycle(t, builderDaemonOS, builderDaemonArch, builderBinaryDir)
|
||||||
|
h.DockerBuild(t,
|
||||||
|
builderImage,
|
||||||
|
builderDockerContext,
|
||||||
|
h.WithArgs("--build-arg", fmt.Sprintf("cnb_platform_api=%s", api.Platform.Latest())),
|
||||||
|
h.WithFlags(
|
||||||
|
"-f", filepath.Join(builderDockerContext, dockerfileName),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
defer h.DockerImageRemove(t, builderImage)
|
||||||
|
|
||||||
|
spec.Run(t, "acceptance-builder", testBuilder, spec.Parallel(), spec.Report(report.Terminal{}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func testBuilder(t *testing.T, when spec.G, it spec.S) {
|
||||||
|
var copyDir, containerName, cacheVolume string
|
||||||
|
|
||||||
|
it.Before(func() {
|
||||||
|
containerName = "test-container-" + h.RandString(10)
|
||||||
|
var err error
|
||||||
|
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.After(func() {
|
||||||
|
if h.DockerContainerExists(t, containerName) {
|
||||||
|
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||||
|
}
|
||||||
|
if h.DockerVolumeExists(t, cacheVolume) {
|
||||||
|
h.DockerVolumeRemove(t, cacheVolume)
|
||||||
|
}
|
||||||
|
os.RemoveAll(copyDir)
|
||||||
|
})
|
||||||
|
|
||||||
|
// .../cmd/lifecycle/builder.go#Args
|
||||||
|
when("called with arguments", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
command := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"run",
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
builderImage,
|
||||||
|
"some-arg",
|
||||||
|
)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "failed to parse arguments: received unexpected arguments"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
// .../cmd/lifecycle/builder.go#Privileges
|
||||||
|
when("running as a root", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
command := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"run",
|
||||||
|
"--rm",
|
||||||
|
"--user",
|
||||||
|
"root",
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
builderImage,
|
||||||
|
)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "failed to build: refusing to run as root"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("correct and full group.toml and plan.toml", func() {
|
||||||
|
it("succeeds", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers"),
|
||||||
|
builderImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
|
||||||
|
),
|
||||||
|
)
|
||||||
|
// check builder metadata.toml for success test
|
||||||
|
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
|
||||||
|
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("writing metadata.toml", func() {
|
||||||
|
it("writes and reads successfully", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers"),
|
||||||
|
builderImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
|
||||||
|
),
|
||||||
|
)
|
||||||
|
// check builder metadata.toml for success test
|
||||||
|
contents, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
|
||||||
|
|
||||||
|
// prevent regression of inline table serialization
|
||||||
|
h.AssertStringDoesNotContain(t, contents, "processes =")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
|
||||||
|
h.AssertEq(t, len(md.Processes), 1)
|
||||||
|
h.AssertEq(t, md.Processes[0].Type, "hello")
|
||||||
|
h.AssertEq(t, len(md.Processes[0].Command.Entries), 1)
|
||||||
|
h.AssertEq(t, md.Processes[0].Command.Entries[0], "echo world")
|
||||||
|
h.AssertEq(t, len(md.Processes[0].Args), 1)
|
||||||
|
h.AssertEq(t, md.Processes[0].Args[0], "arg1")
|
||||||
|
h.AssertEq(t, md.Processes[0].Direct, true)
|
||||||
|
h.AssertEq(t, md.Processes[0].WorkingDirectory, "")
|
||||||
|
h.AssertEq(t, md.Processes[0].Default, false)
|
||||||
|
})
|
||||||
|
|
||||||
|
when("the platform < 0.10", func() {
|
||||||
|
it("writes and reads successfully", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers"),
|
||||||
|
builderImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API=0.9",
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
|
||||||
|
),
|
||||||
|
)
|
||||||
|
// check builder metadata.toml for success test
|
||||||
|
contents, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
|
||||||
|
|
||||||
|
// prevent regression of inline table serialization
|
||||||
|
h.AssertStringDoesNotContain(t, contents, "processes =")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
|
||||||
|
h.AssertEq(t, len(md.Processes), 1)
|
||||||
|
h.AssertEq(t, md.Processes[0].Type, "hello")
|
||||||
|
h.AssertEq(t, len(md.Processes[0].Command.Entries), 1)
|
||||||
|
h.AssertEq(t, md.Processes[0].Command.Entries[0], "echo world")
|
||||||
|
h.AssertEq(t, len(md.Processes[0].Args), 1)
|
||||||
|
h.AssertEq(t, md.Processes[0].Args[0], "arg1")
|
||||||
|
h.AssertEq(t, md.Processes[0].Direct, true)
|
||||||
|
h.AssertEq(t, md.Processes[0].WorkingDirectory, "")
|
||||||
|
h.AssertEq(t, md.Processes[0].Default, false)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("-group contains extensions", func() {
|
||||||
|
it("includes the provided extensions in <layers>/config/metadata.toml", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers"),
|
||||||
|
builderImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/group_with_ext.toml",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
|
||||||
|
),
|
||||||
|
)
|
||||||
|
// check builder metadata.toml for success test
|
||||||
|
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
|
||||||
|
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
|
||||||
|
h.AssertStringContains(t, md.Extensions[0].API, "0.10")
|
||||||
|
h.AssertStringContains(t, md.Extensions[0].ID, "hello_world")
|
||||||
|
h.AssertStringContains(t, md.Extensions[0].Version, "0.0.1")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("invalid input files", func() {
|
||||||
|
// .../cmd/lifecycle/builder.go#readData
|
||||||
|
when("group.toml", func() {
|
||||||
|
when("not found", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
command := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"run",
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
|
||||||
|
builderImage,
|
||||||
|
)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "failed to read group file: open /layers/group.toml: no such file or directory"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("empty", func() {
|
||||||
|
it("succeeds", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers"),
|
||||||
|
builderImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/empty_group.toml",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
|
||||||
|
),
|
||||||
|
)
|
||||||
|
// check builder metadata.toml for success test
|
||||||
|
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
|
||||||
|
h.AssertEq(t, len(md.Processes), 0)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("invalid", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
command := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"run",
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/wrong_group.toml",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
|
||||||
|
builderImage,
|
||||||
|
)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "failed to read group file: toml: line 1: expected '.' or '=', but got 'a' instead"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
// .../cmd/lifecycle/builder.go#Exec
|
||||||
|
when("invalid builpack api", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
command := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"run",
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/invalid_buildpack_api_group.toml",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
|
||||||
|
builderImage,
|
||||||
|
)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "parse buildpack API '<nil>' for buildpack 'hello_world@0.0.1'"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
// .../cmd/lifecycle/builder.go#readData
|
||||||
|
when("plan.toml", func() {
|
||||||
|
when("not found", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
command := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"run",
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
|
||||||
|
builderImage,
|
||||||
|
)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "failed to read plan file: open /layers/plan.toml: no such file or directory"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("empty", func() {
|
||||||
|
it("succeeds", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers"),
|
||||||
|
builderImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/empty_plan.toml",
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
|
||||||
|
),
|
||||||
|
)
|
||||||
|
// check builder metadata.toml for success test
|
||||||
|
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers", "config", "metadata.toml"))
|
||||||
|
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.1")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("invalid", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
command := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"run",
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/wrong_plan.toml",
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
|
||||||
|
builderImage,
|
||||||
|
)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "failed to read plan file: toml: line 1: expected '.' or '=', but got 'a' instead"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("determining the location of input files", func() {
|
||||||
|
// .../cmd/lifecycle/builder.go#Args
|
||||||
|
when("group.toml path is not specified", func() {
|
||||||
|
it("will look for group.toml in the provided layers directory", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers"),
|
||||||
|
builderImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_LAYERS_DIR=/layers/different_layer_dir_from_env",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan_buildpack_2.toml",
|
||||||
|
),
|
||||||
|
)
|
||||||
|
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers/different_layer_dir_from_env/config/metadata.toml"))
|
||||||
|
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world_2")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.2")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
// .../cmd/lifecycle/builder.go#Args
|
||||||
|
when("plan.toml path is not specified", func() {
|
||||||
|
it("will look for plan.toml in the provided layers directory", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers"),
|
||||||
|
builderImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_LAYERS_DIR=/layers/different_layer_dir_from_env",
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group_buildpack2.toml",
|
||||||
|
),
|
||||||
|
)
|
||||||
|
_, md := getBuilderMetadata(t, filepath.Join(copyDir, "layers/different_layer_dir_from_env/config/metadata.toml"))
|
||||||
|
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].API, "0.10")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].ID, "hello_world_2")
|
||||||
|
h.AssertStringContains(t, md.Buildpacks[0].Version, "0.0.2")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("CNB_APP_DIR is set", func() {
|
||||||
|
it("sets the buildpacks' working directory to CNB_APP_DIR", func() {
|
||||||
|
command := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"run",
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
|
||||||
|
"--env", "CNB_APP_DIR=/env_folders/different_cnb_app_dir_from_env",
|
||||||
|
builderImage,
|
||||||
|
)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
expected := "CNB_APP_DIR: /env_folders/different_cnb_app_dir_from_env"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("CNB_BUILDPACKS_DIR is set", func() {
|
||||||
|
it("uses buildpacks from CNB_BUILDPACKS_DIR", func() {
|
||||||
|
command := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"run",
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
|
||||||
|
"--env", "CNB_BUILDPACKS_DIR=/env_folders/different_buildpack_dir_from_env",
|
||||||
|
builderImage,
|
||||||
|
)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
expected := "CNB_BUILDPACK_DIR: /env_folders/different_buildpack_dir_from_env"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("CNB_LAYERS_DIR is set", func() {
|
||||||
|
it("CNB_LAYERS_DIR is a parent of the buildpack layers dir", func() {
|
||||||
|
command := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"run",
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
|
||||||
|
"--env", "CNB_LAYERS_DIR=/layers/different_layer_dir_from_env",
|
||||||
|
builderImage,
|
||||||
|
)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
expected := "LAYERS_DIR: /layers/different_layer_dir_from_env/hello_world"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("CNB_PLAN_PATH is set", func() {
|
||||||
|
it("provides the buildpack a filtered version of the plan found at CNB_PLAN_PATH", func() {
|
||||||
|
command := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"run",
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/different_plan_from_env.toml",
|
||||||
|
builderImage,
|
||||||
|
)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
expected := "name = \"different_plan_from_env.toml_reqires_subset_content\""
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("CNB_PLATFORM_DIR is set", func() {
|
||||||
|
it("CNB_PLATFORM_DIR is successfully transmitted to build script", func() {
|
||||||
|
command := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"run",
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_GROUP_PATH=/cnb/group_tomls/always_detect_group.toml",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan.toml",
|
||||||
|
"--env", "CNB_PLATFORM_DIR=/env_folders/different_platform_dir_from_env",
|
||||||
|
builderImage,
|
||||||
|
)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
expected := "PLATFORM_DIR: /env_folders/different_platform_dir_from_env"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("It runs", func() {
|
||||||
|
it("sets CNB_TARGET_* vars", func() {
|
||||||
|
command := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"run",
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
"--env", "CNB_LAYERS_DIR=/layers/03_layer",
|
||||||
|
"--env", "CNB_PLAN_PATH=/cnb/plan_tomls/always_detect_plan_buildpack_3.toml",
|
||||||
|
builderImage,
|
||||||
|
)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
fmt.Println(string(output))
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertStringContains(t, string(output), "CNB_TARGET_ARCH: amd64")
|
||||||
|
h.AssertStringContains(t, string(output), "CNB_TARGET_ARCH_VARIANT: some-variant")
|
||||||
|
h.AssertStringContains(t, string(output), "CNB_TARGET_OS: linux")
|
||||||
|
h.AssertStringContains(t, string(output), "CNB_TARGET_DISTRO_NAME: ubuntu")
|
||||||
|
h.AssertStringContains(t, string(output), "CNB_TARGET_DISTRO_VERSION: some-cute-version")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func getBuilderMetadata(t *testing.T, path string) (string, *files.BuildMetadata) {
|
||||||
|
t.Helper()
|
||||||
|
contents, _ := os.ReadFile(path)
|
||||||
|
h.AssertEq(t, len(contents) > 0, true)
|
||||||
|
|
||||||
|
buildMD, err := files.Handler.ReadBuildMetadata(path, api.MustParse(latestPlatformAPI))
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
|
||||||
|
return string(contents), buildMD
|
||||||
|
}
|
|
@@ -0,0 +1,445 @@
|
||||||
|
//go:build acceptance
|
||||||
|
|
||||||
|
package acceptance
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/buildpacks/lifecycle/internal/path"
|
||||||
|
|
||||||
|
"github.com/sclevine/spec"
|
||||||
|
"github.com/sclevine/spec/report"
|
||||||
|
|
||||||
|
"github.com/buildpacks/lifecycle/api"
|
||||||
|
h "github.com/buildpacks/lifecycle/testhelpers"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
createImage string
|
||||||
|
createRegAuthConfig string
|
||||||
|
createRegNetwork string
|
||||||
|
creatorPath string
|
||||||
|
createDaemonFixtures *daemonImageFixtures
|
||||||
|
createRegFixtures *regImageFixtures
|
||||||
|
createTest *PhaseTest
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCreator(t *testing.T) {
|
||||||
|
testImageDockerContext := filepath.Join("testdata", "creator")
|
||||||
|
createTest = NewPhaseTest(t, "creator", testImageDockerContext)
|
||||||
|
createTest.Start(t)
|
||||||
|
defer createTest.Stop(t)
|
||||||
|
|
||||||
|
createImage = createTest.testImageRef
|
||||||
|
creatorPath = createTest.containerBinaryPath
|
||||||
|
createRegAuthConfig = createTest.targetRegistry.authConfig
|
||||||
|
createRegNetwork = createTest.targetRegistry.network
|
||||||
|
createDaemonFixtures = createTest.targetDaemon.fixtures
|
||||||
|
createRegFixtures = createTest.targetRegistry.fixtures
|
||||||
|
|
||||||
|
for _, platformAPI := range api.Platform.Supported {
|
||||||
|
spec.Run(t, "acceptance-creator/"+platformAPI.String(), testCreatorFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testCreatorFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
|
||||||
|
return func(t *testing.T, when spec.G, it spec.S) {
|
||||||
|
var createdImageName string
|
||||||
|
|
||||||
|
when("called with run", func() {
|
||||||
|
it("uses the provided run.toml path", func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept -run")
|
||||||
|
cmd := exec.Command(
|
||||||
|
"docker", "run", "--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
|
||||||
|
"--network", createRegNetwork,
|
||||||
|
createImage,
|
||||||
|
ctrPath(creatorPath),
|
||||||
|
"-run", "/cnb/run.toml",
|
||||||
|
createRegFixtures.SomeAppImage,
|
||||||
|
) // #nosec G204
|
||||||
|
output, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "failed to resolve inputs: failed to find accessible run image"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("detected order contains extensions", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.10"), "")
|
||||||
|
cmd := exec.Command(
|
||||||
|
"docker", "run", "--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
|
||||||
|
"--network", createRegNetwork,
|
||||||
|
createImage,
|
||||||
|
ctrPath(creatorPath),
|
||||||
|
"-log-level", "debug",
|
||||||
|
"-order", "/cnb/order-with-extensions.toml",
|
||||||
|
"-run-image", createRegFixtures.ReadOnlyRunImage,
|
||||||
|
createRegFixtures.SomeAppImage,
|
||||||
|
) // #nosec G204
|
||||||
|
output, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "detected order contains extensions which is not supported by the creator"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("daemon case", func() {
|
||||||
|
it.After(func() {
|
||||||
|
h.DockerImageRemove(t, createdImageName)
|
||||||
|
})
|
||||||
|
|
||||||
|
it("creates app", func() {
|
||||||
|
createFlags := []string{"-daemon"}
|
||||||
|
createFlags = append(createFlags, []string{"-run-image", createRegFixtures.ReadOnlyRunImage}...)
|
||||||
|
|
||||||
|
createArgs := append([]string{ctrPath(creatorPath)}, createFlags...)
|
||||||
|
createdImageName = "some-created-image-" + h.RandString(10)
|
||||||
|
createArgs = append(createArgs, createdImageName)
|
||||||
|
|
||||||
|
output := h.DockerRun(t,
|
||||||
|
createImage,
|
||||||
|
h.WithFlags(append(
|
||||||
|
dockerSocketMount,
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
|
||||||
|
"--network", createRegNetwork,
|
||||||
|
)...),
|
||||||
|
h.WithArgs(createArgs...),
|
||||||
|
)
|
||||||
|
h.AssertStringContains(t, output, "Saving "+createdImageName)
|
||||||
|
|
||||||
|
assertImageOSAndArch(t, createdImageName, createTest)
|
||||||
|
|
||||||
|
output = h.DockerRun(t,
|
||||||
|
createdImageName,
|
||||||
|
h.WithFlags(
|
||||||
|
"--entrypoint", "/cnb/lifecycle/launcher",
|
||||||
|
),
|
||||||
|
h.WithArgs("env"),
|
||||||
|
)
|
||||||
|
h.AssertStringContains(t, output, "SOME_VAR=some-val") // set by buildpack
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("registry case", func() {
|
||||||
|
it.After(func() {
|
||||||
|
h.DockerImageRemove(t, createdImageName)
|
||||||
|
})
|
||||||
|
|
||||||
|
it("creates app", func() {
|
||||||
|
var createFlags []string
|
||||||
|
createFlags = append(createFlags, []string{"-run-image", createRegFixtures.ReadOnlyRunImage}...)
|
||||||
|
|
||||||
|
createArgs := append([]string{ctrPath(creatorPath)}, createFlags...)
|
||||||
|
createdImageName = createTest.RegRepoName("some-created-image-" + h.RandString(10))
|
||||||
|
createArgs = append(createArgs, createdImageName)
|
||||||
|
|
||||||
|
output := h.DockerRun(t,
|
||||||
|
createImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
|
||||||
|
"--network", createRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithArgs(createArgs...),
|
||||||
|
)
|
||||||
|
h.AssertStringContains(t, output, "Saving "+createdImageName)
|
||||||
|
|
||||||
|
h.Run(t, exec.Command("docker", "pull", createdImageName))
|
||||||
|
assertImageOSAndArch(t, createdImageName, createTest)
|
||||||
|
|
||||||
|
output = h.DockerRun(t,
|
||||||
|
createdImageName,
|
||||||
|
h.WithFlags(
|
||||||
|
"--entrypoint", "/cnb/lifecycle/launcher",
|
||||||
|
),
|
||||||
|
h.WithArgs("env"),
|
||||||
|
)
|
||||||
|
h.AssertStringContains(t, output, "SOME_VAR=some-val") // set by buildpack
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("multiple builds", func() {
|
||||||
|
var (
|
||||||
|
container1 string
|
||||||
|
container2 string
|
||||||
|
container3 string
|
||||||
|
container4 string
|
||||||
|
dirBuild1 string
|
||||||
|
dirBuild2 string
|
||||||
|
dirCache string
|
||||||
|
dirLaunchCache string
|
||||||
|
dirRun1 string
|
||||||
|
dirRun2 string
|
||||||
|
imageName string
|
||||||
|
)
|
||||||
|
|
||||||
|
it.Before(func() {
|
||||||
|
// assign container names
|
||||||
|
for _, cPtr := range []*string{&container1, &container2, &container3, &container4} {
|
||||||
|
*cPtr = "test-container-" + h.RandString(10)
|
||||||
|
}
|
||||||
|
// create temp dirs
|
||||||
|
for _, dirPtr := range []*string{&dirCache, &dirLaunchCache, &dirBuild1, &dirRun1, &dirBuild2, &dirRun2} {
dir, err := os.MkdirTemp("", "creator-acceptance")
h.AssertNil(t, err)
h.AssertNil(t, os.Chmod(dir, 0777)) // Override umask

// Resolve temp dir so it can be properly mounted by the Docker daemon.
*dirPtr, err = filepath.EvalSymlinks(dir)
h.AssertNil(t, err)
}
// assign image name
imageName = "some-created-image-" + h.RandString(10)
})

it.After(func() {
// remove containers if needed
for _, container := range []string{container1, container2, container3, container4} {
if h.DockerContainerExists(t, container) {
h.Run(t, exec.Command("docker", "rm", container))
}
}
// remove temp dirs
for _, dir := range []string{dirCache, dirLaunchCache, dirBuild1, dirRun1, dirBuild2, dirRun2} {
_ = os.RemoveAll(dir)
}
// remove image
h.DockerImageRemove(t, imageName)
})

when("multiple builds", func() {
var (
createFlags []string
createArgs []string
duration1, duration2 time.Duration
)

it.Before(func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.8"), "Platform API < 0.8 does not support standardized SBOM")

createFlags = []string{"-daemon"}
createFlags = append(createFlags, []string{
"-run-image", createRegFixtures.ReadOnlyRunImage,
"-cache-dir", ctrPath("/cache"),
"-launch-cache", ctrPath("/launch-cache"),
"-log-level", "debug",
}...)
createArgs = append([]string{ctrPath(creatorPath)}, createFlags...)
createArgs = append(createArgs, imageName)

startTime := time.Now()
// first build
output := h.DockerRunAndCopy(t,
container1,
dirBuild1,
ctrPath("/layers"),
createImage,
h.WithFlags(append(
dockerSocketMount,
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
"--network", createRegNetwork,
"--volume", dirCache+":"+ctrPath("/cache"),
"--volume", dirLaunchCache+":"+ctrPath("/launch-cache"),
)...),
h.WithArgs(createArgs...),
)
duration1 = time.Now().Sub(startTime)
t.Logf("First build duration: %s", duration1)
h.AssertStringDoesNotContain(t, output, "restored with content")
h.AssertPathExists(t, filepath.Join(dirBuild1, "layers", "sbom", "build", "samples_hello-world", "sbom.cdx.json"))
h.AssertPathExists(t, filepath.Join(dirBuild1, "layers", "sbom", "build", "samples_hello-world", "some-build-layer", "sbom.cdx.json"))

// first run
output = h.DockerRunAndCopy(t,
container2,
dirRun1,
ctrPath("/layers"),
imageName,
h.WithFlags(
"--entrypoint", "/cnb/lifecycle/launcher",
),
h.WithArgs("env"),
)
h.AssertPathExists(t, filepath.Join(dirRun1, "layers", "sbom", "launch", "samples_hello-world", "sbom.cdx.json"))
h.AssertPathExists(t, filepath.Join(dirRun1, "layers", "sbom", "launch", "samples_hello-world", "some-launch-cache-layer", "sbom.cdx.json"))
h.AssertPathExists(t, filepath.Join(dirRun1, "layers", "sbom", "launch", "samples_hello-world", "some-layer", "sbom.cdx.json"))
h.AssertPathDoesNotExist(t, filepath.Join(dirRun1, "layers", "sbom", "build"))
h.AssertPathDoesNotExist(t, filepath.Join(dirRun1, "layers", "sbom", "cache"))
})

when("rebuild with cache", func() {
it("exports SBOM in the app image", func() {
startTime := time.Now()
// second build
output := h.DockerRunAndCopy(t,
container3,
dirBuild2,
ctrPath("/layers"),
createImage,
h.WithFlags(append(
dockerSocketMount,
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
"--network", createRegNetwork,
"--volume", dirCache+":/cache",
"--volume", dirLaunchCache+":"+ctrPath("/launch-cache"),
)...),
h.WithArgs(createArgs...),
)
// check that launch cache was used
duration2 = time.Now().Sub(startTime)
t.Logf("Second build duration: %s", duration2)
if duration2+time.Duration(0.1*float64(time.Second)) >= duration1 {
t.Logf("Second build output: %s", output)
t.Fatalf("Expected second build to complete 0.1s faster than first build; first build took %s, second build took %s", duration1, duration2)
}
h.AssertStringContains(t, output, "some-layer.sbom.cdx.json restored with content: {\"key\": \"some-launch-true-bom-content\"}")
h.AssertStringContains(t, output, "some-cache-layer.sbom.cdx.json restored with content: {\"key\": \"some-cache-true-bom-content\"}")
h.AssertStringContains(t, output, "some-launch-cache-layer.sbom.cdx.json restored with content: {\"key\": \"some-launch-true-cache-true-bom-content\"}")
h.AssertStringContains(t, output, "Reusing layer 'buildpacksio/lifecycle:launch.sbom'")
h.AssertPathExists(t, filepath.Join(dirBuild2, "layers", "sbom", "build", "samples_hello-world", "sbom.cdx.json"))
h.AssertPathExists(t, filepath.Join(dirBuild2, "layers", "sbom", "build", "samples_hello-world", "some-build-layer", "sbom.cdx.json"))
t.Log("restores store.toml")
h.AssertStringContains(t, output, "store.toml restored with content: [metadata]")

// second run
output = h.DockerRunAndCopy(t,
container4,
dirRun2,
ctrPath("/layers"),
imageName,
h.WithFlags(
"--entrypoint", "/cnb/lifecycle/launcher",
),
h.WithArgs("env"),
)
h.AssertPathExists(t, filepath.Join(dirRun1, "layers", "sbom", "launch", "samples_hello-world", "sbom.cdx.json"))
h.AssertPathExists(t, filepath.Join(dirRun1, "layers", "sbom", "launch", "samples_hello-world", "some-launch-cache-layer", "sbom.cdx.json"))
h.AssertPathExists(t, filepath.Join(dirRun1, "layers", "sbom", "launch", "samples_hello-world", "some-layer", "sbom.cdx.json"))
h.AssertPathDoesNotExist(t, filepath.Join(dirRun1, "layers", "sbom", "build"))
h.AssertPathDoesNotExist(t, filepath.Join(dirRun1, "layers", "sbom", "cache"))
})
})

when("rebuild with clear cache", func() {
it("exports SBOM in the app image", func() {
createArgs = append([]string{ctrPath(creatorPath)}, append(createFlags, "-skip-restore")...)
createArgs = append(createArgs, imageName)

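// with -skip-restore, layer SBOM files previously saved to the cache should not be restored (asserted below)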
// second build
output := h.DockerRunAndCopy(t,
container3,
dirBuild2,
ctrPath("/layers"),
createImage,
h.WithFlags(append(
dockerSocketMount,
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+createRegAuthConfig,
"--network", createRegNetwork,
"--volume", dirCache+":/cache",
"--volume", dirLaunchCache+":"+ctrPath("/launch-cache"),
)...),
h.WithArgs(createArgs...),
)
h.AssertStringDoesNotContain(t, output, "some-layer.sbom.cdx.json restored with content: {\"key\": \"some-launch-true-bom-content\"}")
h.AssertStringDoesNotContain(t, output, "some-cache-layer.sbom.cdx.json restored with content: {\"key\": \"some-cache-true-bom-content\"}")
h.AssertStringDoesNotContain(t, output, "some-launch-cache-layer.sbom.cdx.json restored with content: {\"key\": \"some-launch-true-cache-true-bom-content\"}")
// check that store.toml was restored
if api.MustParse(platformAPI).AtLeast("0.10") {
h.AssertStringContains(t, output, "store.toml restored with content: [metadata]")
} else {
h.AssertStringDoesNotContain(t, output, "store.toml restored with content")
}
})
})
})
})

when("layout case", func() {
var (
containerName string
err error
layoutDir string
tmpDir string
)
when("experimental mode is enabled", func() {
it.Before(func() {
// creates the directory to save all the OCI images on disk
tmpDir, err = os.MkdirTemp("", "layout")
h.AssertNil(t, err)

containerName = "test-container-" + h.RandString(10)
layoutDir = filepath.Join(path.RootDir, "layout-repo")
})

it.After(func() {
if h.DockerContainerExists(t, containerName) {
h.Run(t, exec.Command("docker", "rm", containerName))
}
h.DockerImageRemove(t, createdImageName)

// removes all images created
os.RemoveAll(tmpDir)

})

it("creates app", func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
var createFlags []string
createFlags = append(createFlags, []string{"-layout", "-layout-dir", layoutDir, "-run-image", "busybox"}...)

createArgs := append([]string{ctrPath(creatorPath)}, createFlags...)
createdImageName = "some-created-image-" + h.RandString(10)
createArgs = append(createArgs, createdImageName)

output := h.DockerRunAndCopy(t, containerName, tmpDir, layoutDir, createImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_EXPERIMENTAL_MODE=warn",
),
h.WithArgs(createArgs...))

h.AssertStringContains(t, output, "Saving /layout-repo/index.docker.io/library/"+createdImageName+"/latest")
index := h.ReadIndexManifest(t, filepath.Join(tmpDir, layoutDir, "index.docker.io", "library", createdImageName+"/latest"))
h.AssertEq(t, len(index.Manifests), 1)
})
})

when("experimental mode is not enabled", func() {
it("errors", func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")

cmd := exec.Command(
"docker", "run", "--rm",
"--env", "CNB_PLATFORM_API="+platformAPI,
createImage,
ctrPath(creatorPath),
"-layout",
"-layout-dir", layoutDir,
"-run-image", "busybox",
"some-image",
) // #nosec G204
output, err := cmd.CombinedOutput()

h.AssertNotNil(t, err)
expected := "experimental features are disabled by CNB_EXPERIMENTAL_MODE=error"
h.AssertStringContains(t, string(output), expected)
})
})
})
}
}

@@ -0,0 +1,422 @@
//go:build acceptance

package acceptance

import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"testing"

"github.com/sclevine/spec"
"github.com/sclevine/spec/report"

"github.com/buildpacks/lifecycle/api"
"github.com/buildpacks/lifecycle/cmd"
"github.com/buildpacks/lifecycle/platform/files"
h "github.com/buildpacks/lifecycle/testhelpers"
)

var (
detectDockerContext = filepath.Join("testdata", "detector")
detectorBinaryDir = filepath.Join("testdata", "detector", "container", "cnb", "lifecycle")
detectImage = "lifecycle/acceptance/detector"
userID = "1234"
detectorDaemonOS, detectorDaemonArch string
)

func TestDetector(t *testing.T) {
info, err := h.DockerCli(t).Info(context.TODO())
h.AssertNil(t, err)

detectorDaemonOS = info.OSType
detectorDaemonArch = info.Architecture
if detectorDaemonArch == "x86_64" {
detectorDaemonArch = "amd64"
}
if detectorDaemonArch == "aarch64" {
detectorDaemonArch = "arm64"
}

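// build the detector test image for the daemon's OS/arch, targeting the latest Platform API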
h.MakeAndCopyLifecycle(t, detectorDaemonOS, detectorDaemonArch, detectorBinaryDir)
h.DockerBuild(t,
detectImage,
detectDockerContext,
h.WithArgs("--build-arg", fmt.Sprintf("cnb_platform_api=%s", api.Platform.Latest())),
)
defer h.DockerImageRemove(t, detectImage)

for _, platformAPI := range api.Platform.Supported {
if platformAPI.LessThan("0.12") {
continue
}

spec.Run(t, "acceptance-detector/"+platformAPI.String(), testDetectorFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{}))
}
}

func testDetectorFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
return func(t *testing.T, when spec.G, it spec.S) {
when("called with arguments", func() {
it("errors", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+platformAPI,
detectImage,
"some-arg",
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to parse arguments: received unexpected arguments"
h.AssertStringContains(t, string(output), expected)
})
})

when("running as a root", func() {
it("errors", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--user",
"root",
"--env", "CNB_PLATFORM_API="+platformAPI,
detectImage,
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to detect: refusing to run as root"
h.AssertStringContains(t, string(output), expected)
})
})

when("read buildpack order file failed", func() {
it("errors", func() {
// no order.toml file in the default search locations
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_PLATFORM_API="+platformAPI,
detectImage,
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
expected := "failed to initialize detector: reading order"
h.AssertStringContains(t, string(output), expected)
})
})

when("no buildpack group passed detection", func() {
it("errors and exits with the expected code", func() {
command := exec.Command(
"docker",
"run",
"--rm",
"--env", "CNB_ORDER_PATH=/cnb/orders/fail_detect_order.toml",
"--env", "CNB_PLATFORM_API="+platformAPI,
detectImage,
)
output, err := command.CombinedOutput()
h.AssertNotNil(t, err)
failErr, ok := err.(*exec.ExitError)
if !ok {
t.Fatalf("expected an error of type exec.ExitError")
}
h.AssertEq(t, failErr.ExitCode(), 20) // platform code for failed detect

expected1 := `======== Output: fail_detect_buildpack@some_version ========
Opted out of detection
======== Results ========
fail: fail_detect_buildpack@some_version`
h.AssertStringContains(t, string(output), expected1)
expected2 := "No buildpack groups passed detection."
h.AssertStringContains(t, string(output), expected2)
})
})

when("there is a buildpack group that passes detection", func() {
var copyDir, containerName string

it.Before(func() {
containerName = "test-container-" + h.RandString(10)
var err error
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
h.AssertNil(t, err)
})

it.After(func() {
if h.DockerContainerExists(t, containerName) {
h.Run(t, exec.Command("docker", "rm", containerName))
}
os.RemoveAll(copyDir)
})

it("writes group.toml and plan.toml at the default locations", func() {
output := h.DockerRunAndCopy(t,
containerName,
copyDir,
"/layers",
detectImage,
h.WithFlags("--user", userID,
"--env", "CNB_ORDER_PATH=/cnb/orders/simple_order.toml",
"--env", "CNB_PLATFORM_API="+platformAPI,
),
h.WithArgs(),
)

// check group.toml
foundGroupTOML := filepath.Join(copyDir, "layers", "group.toml")
group, err := files.Handler.ReadGroup(foundGroupTOML)
h.AssertNil(t, err)
h.AssertEq(t, group.Group[0].ID, "simple_buildpack")
h.AssertEq(t, group.Group[0].Version, "simple_buildpack_version")

// check plan.toml
foundPlanTOML := filepath.Join(copyDir, "layers", "plan.toml")
buildPlan, err := files.Handler.ReadPlan(foundPlanTOML)
h.AssertNil(t, err)
h.AssertEq(t, buildPlan.Entries[0].Providers[0].ID, "simple_buildpack")
h.AssertEq(t, buildPlan.Entries[0].Providers[0].Version, "simple_buildpack_version")
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Name, "some_requirement")
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Metadata["some_metadata_key"], "some_metadata_val")
h.AssertEq(t, buildPlan.Entries[0].Requires[0].Metadata["version"], "some_version")

// check output
h.AssertStringContains(t, output, "simple_buildpack simple_buildpack_version")
h.AssertStringDoesNotContain(t, output, "======== Results ========") // log output is info level as detect passed
})
})

when("environment variables are provided for buildpack and app directories and for the output files", func() {
|
||||||
|
var copyDir, containerName string
|
||||||
|
|
||||||
|
it.Before(func() {
|
||||||
|
containerName = "test-container-" + h.RandString(10)
|
||||||
|
var err error
|
||||||
|
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.After(func() {
|
||||||
|
if h.DockerContainerExists(t, containerName) {
|
||||||
|
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||||
|
}
|
||||||
|
os.RemoveAll(copyDir)
|
||||||
|
})
|
||||||
|
|
||||||
|
it("writes group.toml and plan.toml in the right locations and with the right names", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
"/layers",
|
||||||
|
detectImage,
|
||||||
|
h.WithFlags("--user", userID,
|
||||||
|
"--env", "CNB_ORDER_PATH=/cnb/orders/always_detect_order.toml",
|
||||||
|
"--env", "CNB_BUILDPACKS_DIR=/cnb/custom_buildpacks",
|
||||||
|
"--env", "CNB_APP_DIR=/custom_workspace",
|
||||||
|
"--env", "CNB_GROUP_PATH=./custom_group.toml",
|
||||||
|
"--env", "CNB_PLAN_PATH=./custom_plan.toml",
|
||||||
|
"--env", "CNB_PLATFORM_DIR=/custom_platform",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
),
|
||||||
|
h.WithArgs("-log-level=debug"),
|
||||||
|
)
|
||||||
|
|
||||||
|
// check group.toml
|
||||||
|
foundGroupTOML := filepath.Join(copyDir, "layers", "custom_group.toml")
|
||||||
|
group, err := files.Handler.ReadGroup(foundGroupTOML)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, group.Group[0].ID, "always_detect_buildpack")
|
||||||
|
h.AssertEq(t, group.Group[0].Version, "always_detect_buildpack_version")
|
||||||
|
|
||||||
|
// check plan.toml - should be empty since we're using always_detect_order.toml so there is no "actual plan"
|
||||||
|
tempPlanToml := filepath.Join(copyDir, "layers", "custom_plan.toml")
|
||||||
|
planContents, err := os.ReadFile(tempPlanToml)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, len(planContents) == 0, true)
|
||||||
|
|
||||||
|
// check platform directory
|
||||||
|
logs := h.Run(t, exec.Command("docker", "logs", containerName))
|
||||||
|
expectedPlatformPath := "platform_path: /custom_platform"
|
||||||
|
expectedAppDir := "app_dir: /custom_workspace"
|
||||||
|
h.AssertStringContains(t, logs, expectedPlatformPath)
|
||||||
|
h.AssertStringContains(t, logs, expectedAppDir)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("-order is provided", func() {
|
||||||
|
var copyDir, containerName, expectedOrderTOMLPath string
|
||||||
|
|
||||||
|
it.Before(func() {
|
||||||
|
containerName = "test-container-" + h.RandString(10)
|
||||||
|
var err error
|
||||||
|
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
|
||||||
|
simpleOrderTOML := filepath.Join("testdata", "detector", "container", "cnb", "orders", "simple_order.toml")
|
||||||
|
expectedOrderTOMLPath, err = filepath.Abs(simpleOrderTOML)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.After(func() {
|
||||||
|
if h.DockerContainerExists(t, containerName) {
|
||||||
|
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||||
|
}
|
||||||
|
os.RemoveAll(copyDir)
|
||||||
|
})
|
||||||
|
|
||||||
|
when("the order.toml exists", func() {
|
||||||
|
it("processes the provided order.toml", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
"/layers",
|
||||||
|
detectImage,
|
||||||
|
h.WithFlags("--user", userID,
|
||||||
|
"--volume", expectedOrderTOMLPath+":/custom/order.toml",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
),
|
||||||
|
h.WithArgs(
|
||||||
|
"-log-level=debug",
|
||||||
|
"-order=/custom/order.toml",
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
// check group.toml
|
||||||
|
foundGroupTOML := filepath.Join(copyDir, "layers", "group.toml")
|
||||||
|
group, err := files.Handler.ReadGroup(foundGroupTOML)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, group.Group[0].ID, "simple_buildpack")
|
||||||
|
h.AssertEq(t, group.Group[0].Version, "simple_buildpack_version")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("the order.toml does not exist", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
command := exec.Command("docker", "run",
|
||||||
|
"--user", userID,
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
detectImage,
|
||||||
|
"-order=/custom/order.toml")
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "failed to initialize detector: reading order: failed to read order file: open /custom/order.toml: no such file or directory"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("the order.toml contains a buildpack using an unsupported api", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
command := exec.Command("docker", "run",
|
||||||
|
"--user", userID,
|
||||||
|
"--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
detectImage,
|
||||||
|
"-order=/cnb/orders/bad_api.toml")
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
failErr, ok := err.(*exec.ExitError)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected an error of type exec.ExitError")
|
||||||
|
}
|
||||||
|
h.AssertEq(t, failErr.ExitCode(), 12) // platform code for buildpack api error
|
||||||
|
expected := "buildpack API version '0.1' is incompatible with the lifecycle"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("-order contains extensions", func() {
|
||||||
|
var containerName, copyDir, orderPath string
|
||||||
|
|
||||||
|
it.Before(func() {
|
||||||
|
containerName = "test-container-" + h.RandString(10)
|
||||||
|
var err error
|
||||||
|
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
orderPath, err = filepath.Abs(filepath.Join("testdata", "detector", "container", "cnb", "orders", "order_with_ext.toml"))
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.After(func() {
|
||||||
|
if h.DockerContainerExists(t, containerName) {
|
||||||
|
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||||
|
}
|
||||||
|
os.RemoveAll(copyDir)
|
||||||
|
})
|
||||||
|
|
||||||
|
it("processes the provided order.toml", func() {
|
||||||
|
experimentalMode := "warn"
|
||||||
|
if api.MustParse(platformAPI).AtLeast("0.13") {
|
||||||
|
experimentalMode = "error"
|
||||||
|
}
|
||||||
|
|
||||||
|
output := h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
"/layers",
|
||||||
|
detectImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--user", userID,
|
||||||
|
"--volume", orderPath+":/layers/order.toml",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode,
|
||||||
|
),
|
||||||
|
h.WithArgs(
|
||||||
|
"-analyzed=/layers/analyzed.toml",
|
||||||
|
"-extensions=/cnb/extensions",
|
||||||
|
"-generated=/layers/generated",
|
||||||
|
"-log-level=debug",
|
||||||
|
"-run=/layers/run.toml", // /cnb/run.toml is the default location of run.toml
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
t.Log("runs /bin/detect for buildpacks and extensions")
|
||||||
|
if api.MustParse(platformAPI).LessThan("0.13") {
|
||||||
|
h.AssertStringContains(t, output, "Platform requested experimental feature 'Dockerfiles'")
|
||||||
|
}
|
||||||
|
h.AssertStringContains(t, output, "FOO=val-from-build-config")
|
||||||
|
h.AssertStringContains(t, output, "simple_extension: output from /bin/detect")
|
||||||
|
t.Log("writes group.toml")
|
||||||
|
foundGroupTOML := filepath.Join(copyDir, "layers", "group.toml")
|
||||||
|
group, err := files.Handler.ReadGroup(foundGroupTOML)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, group.GroupExtensions[0].ID, "simple_extension")
|
||||||
|
h.AssertEq(t, group.GroupExtensions[0].Version, "simple_extension_version")
|
||||||
|
h.AssertEq(t, group.Group[0].ID, "buildpack_for_ext")
|
||||||
|
h.AssertEq(t, group.Group[0].Version, "buildpack_for_ext_version")
|
||||||
|
h.AssertEq(t, group.Group[0].Extension, false)
|
||||||
|
t.Log("writes plan.toml")
|
||||||
|
foundPlanTOML := filepath.Join(copyDir, "layers", "plan.toml")
|
||||||
|
buildPlan, err := files.Handler.ReadPlan(foundPlanTOML)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, len(buildPlan.Entries), 0) // this shows that the plan was filtered to remove `requires` provided by extensions
|
||||||
|
|
||||||
|
t.Log("runs /bin/generate for extensions")
|
||||||
|
h.AssertStringContains(t, output, "simple_extension: output from /bin/generate")
|
||||||
|
|
||||||
|
var dockerfilePath string
|
||||||
|
if api.MustParse(platformAPI).LessThan("0.13") {
|
||||||
|
t.Log("copies the generated Dockerfiles to the output directory")
|
||||||
|
dockerfilePath = filepath.Join(copyDir, "layers", "generated", "run", "simple_extension", "Dockerfile")
|
||||||
|
} else {
|
||||||
|
dockerfilePath = filepath.Join(copyDir, "layers", "generated", "simple_extension", "run.Dockerfile")
|
||||||
|
}
|
||||||
|
h.AssertPathExists(t, dockerfilePath)
|
||||||
|
contents, err := os.ReadFile(dockerfilePath)
|
||||||
|
h.AssertEq(t, string(contents), "FROM some-run-image-from-extension\n")
|
||||||
|
t.Log("records the new run image in analyzed.toml")
|
||||||
|
foundAnalyzedTOML := filepath.Join(copyDir, "layers", "analyzed.toml")
|
||||||
|
analyzedMD, err := files.Handler.ReadAnalyzed(foundAnalyzedTOML, cmd.DefaultLogger)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, analyzedMD.RunImage.Image, "some-run-image-from-extension")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,677 @@
|
||||||
|
//go:build acceptance

package acceptance

import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"

"github.com/buildpacks/imgutil"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/pkg/errors"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"

"github.com/buildpacks/lifecycle/api"
"github.com/buildpacks/lifecycle/auth"
"github.com/buildpacks/lifecycle/cache"
"github.com/buildpacks/lifecycle/cmd"
"github.com/buildpacks/lifecycle/internal/fsutil"
"github.com/buildpacks/lifecycle/internal/path"
"github.com/buildpacks/lifecycle/platform/files"
h "github.com/buildpacks/lifecycle/testhelpers"
)

var (
exportImage string
exportRegAuthConfig string
exportRegNetwork string
exporterPath string
exportDaemonFixtures *daemonImageFixtures
exportRegFixtures *regImageFixtures
exportTest *PhaseTest
)

func TestExporter(t *testing.T) {
testImageDockerContext := filepath.Join("testdata", "exporter")
exportTest = NewPhaseTest(t, "exporter", testImageDockerContext)

exportTest.Start(t, updateTOMLFixturesWithTestRegistry)
defer exportTest.Stop(t)

exportImage = exportTest.testImageRef
exporterPath = exportTest.containerBinaryPath
exportRegAuthConfig = exportTest.targetRegistry.authConfig
exportRegNetwork = exportTest.targetRegistry.network
exportDaemonFixtures = exportTest.targetDaemon.fixtures
exportRegFixtures = exportTest.targetRegistry.fixtures

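// run the exporter acceptance suite once per supported Platform API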
for _, platformAPI := range api.Platform.Supported {
spec.Run(t, "acceptance-exporter/"+platformAPI.String(), testExporterFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{}))
}
}

func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
return func(t *testing.T, when spec.G, it spec.S) {

when("daemon case", func() {
var exportedImageName string

it.After(func() {
_, _, _ = h.RunE(exec.Command("docker", "rmi", exportedImageName)) // #nosec G204
})

it("app is created", func() {
exportFlags := []string{"-daemon", "-log-level", "debug"}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = "some-exported-image-" + h.RandString(10)
exportArgs = append(exportArgs, exportedImageName)

output := h.DockerRun(t,
exportImage,
h.WithFlags(append(
dockerSocketMount,
"--env", "CNB_PLATFORM_API="+platformAPI,
)...),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)

if api.MustParse(platformAPI).AtLeast("0.11") {
extensions := []string{"sbom.cdx.json", "sbom.spdx.json", "sbom.syft.json"}
for _, extension := range extensions {
h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM lifecycle.%s to %s", extension, filepath.Join(path.RootDir, "layers", "sbom", "build", "buildpacksio_lifecycle", extension)))
h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM launcher.%s to %s", extension, filepath.Join(path.RootDir, "layers", "sbom", "launch", "buildpacksio_lifecycle", "launcher", extension)))
}
} else {
h.AssertStringDoesNotContain(t, output, "Copying SBOM")
}

if api.MustParse(platformAPI).AtLeast("0.12") {
expectedHistory := []string{
"Buildpacks Launcher Config",
"Buildpacks Application Launcher",
"Application Layer",
"Software Bill-of-Materials",
"Layer: 'corrupted-layer', Created by buildpack: corrupted_buildpack@corrupted_v1",
"Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1",
"", // run image layer
}
assertDaemonImageHasHistory(t, exportedImageName, expectedHistory)
} else {
assertDaemonImageDoesNotHaveHistory(t, exportedImageName)
}

assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
})

when("using extensions", func() {
|
||||||
|
it.Before(func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
|
||||||
|
})
|
||||||
|
|
||||||
|
it("app is created from the extended run image", func() {
|
||||||
|
exportFlags := []string{
|
||||||
|
"-analyzed", "/layers/run-image-extended-analyzed.toml", // though the run image is a registry image, it also exists in the daemon with the same tag
|
||||||
|
"-daemon",
|
||||||
|
"-extended", "/layers/some-extended-dir",
|
||||||
|
"-log-level", "debug",
|
||||||
|
"-run", "/cnb/run.toml", // though the run image is a registry image, it also exists in the daemon with the same tag
|
||||||
|
}
|
||||||
|
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||||
|
exportedImageName = "some-exported-image-" + h.RandString(10)
|
||||||
|
exportArgs = append(exportArgs, exportedImageName)
|
||||||
|
|
||||||
|
// get run image top layer
|
||||||
|
inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportTest.targetRegistry.fixtures.ReadOnlyRunImage)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
layers := inspect.RootFS.Layers
|
||||||
|
runImageFixtureTopLayerSHA := layers[len(layers)-1]
|
||||||
|
runImageFixtureSHA := inspect.ID
|
||||||
|
|
||||||
|
experimentalMode := "warn"
|
||||||
|
if api.MustParse(platformAPI).AtLeast("0.13") {
|
||||||
|
experimentalMode = "error"
|
||||||
|
}
|
||||||
|
|
||||||
|
output := h.DockerRun(t,
|
||||||
|
exportImage,
|
||||||
|
h.WithFlags(append(
|
||||||
|
dockerSocketMount,
|
||||||
|
"--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode,
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
)...),
|
||||||
|
h.WithArgs(exportArgs...),
|
||||||
|
)
|
||||||
|
h.AssertStringContains(t, output, "Saving "+exportedImageName)
|
||||||
|
|
||||||
|
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
|
||||||
|
expectedHistory := []string{
|
||||||
|
"Buildpacks Launcher Config",
|
||||||
|
"Buildpacks Application Launcher",
|
||||||
|
"Application Layer",
|
||||||
|
"Software Bill-of-Materials",
|
||||||
|
"Layer: 'corrupted-layer', Created by buildpack: corrupted_buildpack@corrupted_v1",
|
||||||
|
"Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1",
|
||||||
|
"Layer: 'RUN mkdir /some-other-dir && echo some-data > /some-other-dir/some-file && echo some-data > /some-other-file', Created by extension: second-extension",
|
||||||
|
"Layer: 'RUN mkdir /some-dir && echo some-data > /some-dir/some-file && echo some-data > /some-file', Created by extension: first-extension",
|
||||||
|
"", // run image layer
|
||||||
|
}
|
||||||
|
assertDaemonImageHasHistory(t, exportedImageName, expectedHistory)
|
||||||
|
t.Log("bases the exported image on the extended run image")
|
||||||
|
inspect, _, err = h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportedImageName)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, inspect.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<sha>/blobs/sha256/<config>
|
||||||
|
t.Log("Adds extension layers")
|
||||||
|
type testCase struct {
|
||||||
|
expectedDiffID string
|
||||||
|
layerIndex int
|
||||||
|
}
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
expectedDiffID: "sha256:fb54d2566824d6630d94db0b008d9a544a94d3547a424f52e2fd282b648c0601", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing and zeroing timestamps
|
||||||
|
layerIndex: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expectedDiffID: "sha256:1018c7d3584c4f7fa3ef4486d1a6a11b93956b9d8bfe0898a3e0fbd248c984d8", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing and zeroing timestamps
|
||||||
|
layerIndex: 2,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
h.AssertEq(t, inspect.RootFS.Layers[tc.layerIndex], tc.expectedDiffID)
|
||||||
|
}
|
||||||
|
t.Log("sets the layers metadata label according to the new spec")
|
||||||
|
var lmd files.LayersMetadata
|
||||||
|
lmdJSON := inspect.Config.Labels["io.buildpacks.lifecycle.metadata"]
|
||||||
|
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd))
|
||||||
|
h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml
|
||||||
|
h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml
|
||||||
|
h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA)
|
||||||
|
h.AssertEq(t, lmd.RunImage.Reference, strings.TrimPrefix(runImageFixtureSHA, "sha256:"))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("SOURCE_DATE_EPOCH is set", func() {
|
||||||
|
it("app is created with config CreatedAt set to SOURCE_DATE_EPOCH", func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9")
|
||||||
|
expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC)
|
||||||
|
exportFlags := []string{"-daemon"}
|
||||||
|
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||||
|
exportedImageName = "some-exported-image-" + h.RandString(10)
|
||||||
|
exportArgs = append(exportArgs, exportedImageName)
|
||||||
|
|
||||||
|
output := h.DockerRun(t,
|
||||||
|
exportImage,
|
||||||
|
h.WithFlags(append(
|
||||||
|
dockerSocketMount,
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
|
||||||
|
"--env", "SOURCE_DATE_EPOCH="+fmt.Sprintf("%d", expectedTime.Unix()),
|
||||||
|
"--network", exportRegNetwork,
|
||||||
|
)...),
|
||||||
|
h.WithArgs(exportArgs...),
|
||||||
|
)
|
||||||
|
h.AssertStringContains(t, output, "Saving "+exportedImageName)
|
||||||
|
|
||||||
|
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, expectedTime)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("registry case", func() {
|
||||||
|
var exportedImageName string
|
||||||
|
|
||||||
|
it.After(func() {
|
||||||
|
_, _, _ = h.RunE(exec.Command("docker", "rmi", exportedImageName)) // #nosec G204
|
||||||
|
})
|
||||||
|
|
||||||
|
it("app is created", func() {
|
||||||
|
var exportFlags []string
|
||||||
|
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||||
|
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
|
||||||
|
exportArgs = append(exportArgs, exportedImageName)
|
||||||
|
|
||||||
|
output := h.DockerRun(t,
|
||||||
|
exportImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
|
||||||
|
"--network", exportRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithArgs(exportArgs...),
|
||||||
|
)
|
||||||
|
h.AssertStringContains(t, output, "Saving "+exportedImageName)
|
||||||
|
|
||||||
|
h.Run(t, exec.Command("docker", "pull", exportedImageName))
|
||||||
|
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
|
||||||
|
})
|
||||||
|
|
||||||
|
when("registry is insecure", func() {
|
||||||
|
it.Before(func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
|
||||||
|
})
|
||||||
|
|
||||||
|
it("uses http protocol", func() {
|
||||||
|
var exportFlags []string
|
||||||
|
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||||
|
exportedImageName = exportTest.RegRepoName("some-insecure-exported-image-" + h.RandString(10))
|
||||||
|
exportArgs = append(exportArgs, exportedImageName)
|
||||||
|
insecureRegistry := "host.docker.internal/bar"
|
||||||
|
insecureAnalyzed := "/layers/analyzed_insecure.toml"
|
||||||
|
|
||||||
|
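// the export itself is expected to fail; the assertion below only checks that plain HTTP was attempted against the insecure registry host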
_, _, err := h.DockerRunWithError(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_INSECURE_REGISTRIES="+insecureRegistry,
"--env", "CNB_ANALYZED_PATH="+insecureAnalyzed,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, err.Error(), "http://host.docker.internal")
})
})

when("SOURCE_DATE_EPOCH is set", func() {
it("app is created with config CreatedAt set to SOURCE_DATE_EPOCH", func() {
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9")
expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC)

var exportFlags []string
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)

output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--env", "SOURCE_DATE_EPOCH="+fmt.Sprintf("%d", expectedTime.Unix()),
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)

h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, expectedTime)
})
})

// FIXME: move this out of the registry block
when("cache", func() {
when("image case", func() {
it("cache is created", func() {
cacheImageName := exportTest.RegRepoName("some-cache-image-" + h.RandString(10))
exportFlags := []string{"-cache-image", cacheImageName}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)

output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)
// To detect whether the export of cacheImage and exportedImage is successful
h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
h.Run(t, exec.Command("docker", "pull", cacheImageName))
})

when("parallel export is enabled", func() {
it("cache is created", func() {
cacheImageName := exportTest.RegRepoName("some-cache-image-" + h.RandString(10))
exportFlags := []string{"-cache-image", cacheImageName, "-parallel"}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)

output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)

h.Run(t, exec.Command("docker", "pull", exportedImageName))
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
h.Run(t, exec.Command("docker", "pull", cacheImageName))
})
})

when("cache is provided but no data was cached", func() {
it("cache is created with an empty layer", func() {
cacheImageName := exportTest.RegRepoName("some-empty-cache-image-" + h.RandString(10))
exportFlags := []string{"-cache-image", cacheImageName, "-layers", "/other_layers"}
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
exportArgs = append(exportArgs, exportedImageName)

output := h.DockerRun(t,
exportImage,
h.WithFlags(
"--env", "CNB_PLATFORM_API="+platformAPI,
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
"--network", exportRegNetwork,
),
h.WithArgs(exportArgs...),
)
h.AssertStringContains(t, output, "Saving "+exportedImageName)

testEmptyLayerSHA := calculateEmptyLayerSha(t)

// Retrieve the cache image from the ephemeral registry
h.Run(t, exec.Command("docker", "pull", cacheImageName))
logger := cmd.DefaultLogger

subject, err := cache.NewImageCacheFromName(cacheImageName, authn.DefaultKeychain, logger, cache.NewImageDeleter(cache.NewImageComparer(), logger, api.MustParse(platformAPI).LessThan("0.13")))
h.AssertNil(t, err)

// Assert the cache image was created with an empty layer
layer, err := subject.RetrieveLayer(testEmptyLayerSHA)
h.AssertNil(t, err)
defer layer.Close()
})
})
})

when("directory case", func() {
|
||||||
|
when("original cache was corrupted", func() {
|
||||||
|
var cacheDir string
|
||||||
|
|
||||||
|
it.Before(func() {
|
||||||
|
var err error
|
||||||
|
cacheDir, err = os.MkdirTemp("", "cache")
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertNil(t, os.Chmod(cacheDir, 0777)) // Override umask
|
||||||
|
|
||||||
|
cacheFixtureDir := filepath.Join("testdata", "exporter", "cache-dir")
|
||||||
|
h.AssertNil(t, fsutil.Copy(cacheFixtureDir, cacheDir))
|
||||||
|
// We have to pre-create the tar files so that their digests do not change due to timestamps
|
||||||
|
// But, ':' in the filepath on Windows is not allowed
|
||||||
|
h.AssertNil(t, os.Rename(
|
||||||
|
filepath.Join(cacheDir, "committed", "sha256_258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar"),
|
||||||
|
filepath.Join(cacheDir, "committed", "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar"),
|
||||||
|
))
|
||||||
|
})
|
||||||
|
|
||||||
|
it.After(func() {
|
||||||
|
_ = os.RemoveAll(cacheDir)
|
||||||
|
})
|
||||||
|
|
||||||
|
it("overwrites the original layer", func() {
|
||||||
|
exportFlags := []string{
|
||||||
|
"-cache-dir", "/cache",
|
||||||
|
"-log-level", "debug",
|
||||||
|
}
|
||||||
|
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||||
|
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
|
||||||
|
exportArgs = append(exportArgs, exportedImageName)
|
||||||
|
|
||||||
|
output := h.DockerRun(t,
|
||||||
|
exportImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
|
||||||
|
"--network", exportRegNetwork,
|
||||||
|
"--volume", fmt.Sprintf("%s:/cache", cacheDir),
|
||||||
|
),
|
||||||
|
h.WithArgs(exportArgs...),
|
||||||
|
)
|
||||||
|
h.AssertStringContains(t, output, "Skipping reuse for layer corrupted_buildpack:corrupted-layer: expected layer contents to have SHA 'sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59'; found 'sha256:9e0b77ed599eafdab8611f7eeefef084077f91f02f1da0a3870c7ff20a08bee8'")
|
||||||
|
h.AssertStringContains(t, output, "Saving "+exportedImageName)
|
||||||
|
h.Run(t, exec.Command("docker", "pull", exportedImageName))
|
||||||
|
defer h.Run(t, exec.Command("docker", "image", "rm", exportedImageName))
|
||||||
|
// Verify the app has the correct sha for the layer
|
||||||
|
inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportedImageName)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
var lmd files.LayersMetadata
|
||||||
|
lmdJSON := inspect.Config.Labels["io.buildpacks.lifecycle.metadata"]
|
||||||
|
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd))
|
||||||
|
h.AssertEq(t, lmd.Buildpacks[2].Layers["corrupted-layer"].SHA, "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59")
|
||||||
|
// Verify the cache has correct contents now
|
||||||
|
foundDiffID, err := func() (string, error) {
|
||||||
|
layerPath := filepath.Join(cacheDir, "committed", "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar")
|
||||||
|
layerRC, err := os.Open(layerPath)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
_ = layerRC.Close()
|
||||||
|
}()
|
||||||
|
hasher := sha256.New()
|
||||||
|
if _, err = io.Copy(hasher, layerRC); err != nil {
|
||||||
|
return "", errors.Wrap(err, "hashing layer")
|
||||||
|
}
|
||||||
|
foundDiffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size())))
|
||||||
|
return foundDiffID, nil
|
||||||
|
}()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, foundDiffID, "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("using extensions", func() {
|
||||||
|
it.Before(func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
|
||||||
|
})
|
||||||
|
|
||||||
|
it("app is created from the extended run image", func() {
|
||||||
|
exportFlags := []string{
|
||||||
|
"-analyzed", "/layers/run-image-extended-analyzed.toml",
|
||||||
|
"-extended", "/layers/some-extended-dir",
|
||||||
|
"-log-level", "debug",
|
||||||
|
"-run", "/cnb/run.toml",
|
||||||
|
}
|
||||||
|
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||||
|
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10))
|
||||||
|
exportArgs = append(exportArgs, exportedImageName)
|
||||||
|
|
||||||
|
// get run image SHA & top layer
|
||||||
|
ref, imageAuth, err := auth.ReferenceForRepoName(authn.DefaultKeychain, exportTest.targetRegistry.fixtures.ReadOnlyRunImage)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
remoteImage, err := remote.Image(ref, remote.WithAuth(imageAuth))
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
layers, err := remoteImage.Layers()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
runImageFixtureTopLayerSHA, err := layers[len(layers)-1].DiffID()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
runImageFixtureSHA, err := remoteImage.Digest()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
|
||||||
|
experimentalMode := "warn"
|
||||||
|
if api.MustParse(platformAPI).AtLeast("0.13") {
|
||||||
|
experimentalMode = "error"
|
||||||
|
}
|
||||||
|
|
||||||
|
output := h.DockerRun(t,
|
||||||
|
exportImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode,
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig,
|
||||||
|
"--network", exportRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithArgs(exportArgs...),
|
||||||
|
)
|
||||||
|
h.AssertStringContains(t, output, "Saving "+exportedImageName)
|
||||||
|
|
||||||
|
h.Run(t, exec.Command("docker", "pull", exportedImageName))
|
||||||
|
assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime)
|
||||||
|
t.Log("bases the exported image on the extended run image")
|
||||||
|
ref, imageAuth, err = auth.ReferenceForRepoName(authn.DefaultKeychain, exportedImageName)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
remoteImage, err = remote.Image(ref, remote.WithAuth(imageAuth))
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
configFile, err := remoteImage.ConfigFile()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, configFile.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<sha>/blobs/sha256/<config>
|
||||||
|
t.Log("Adds extension layers")
|
||||||
|
layers, err = remoteImage.Layers()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
type testCase struct {
|
||||||
|
expectedDigest string
|
||||||
|
layerIndex int
|
||||||
|
}
|
||||||
|
testCases := []testCase{
|
||||||
|
{
|
||||||
|
expectedDigest: "sha256:08e7ad5ce17cf5e5f70affe68b341a93de86ee2ba074932c3a05b8770f66d772", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing, zeroing timestamps, and re-compressing
|
||||||
|
layerIndex: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expectedDigest: "sha256:0e74ef444ea437147e3fa0ce2aad371df5380c26b96875ae07b9b67f44cdb2ee", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_<c72eda1c>/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing, zeroing timestamps, and re-compressing
|
||||||
|
layerIndex: 2,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
layer := layers[tc.layerIndex]
|
||||||
|
digest, err := layer.Digest()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, digest.String(), tc.expectedDigest)
|
||||||
|
}
|
||||||
|
t.Log("sets the layers metadata label according to the new spec")
|
||||||
|
var lmd files.LayersMetadata
|
||||||
|
lmdJSON := configFile.Config.Labels["io.buildpacks.lifecycle.metadata"]
|
||||||
|
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd))
|
||||||
|
h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml
|
||||||
|
h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml
|
||||||
|
h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA.String())
|
||||||
|
h.AssertEq(t, lmd.RunImage.Reference, fmt.Sprintf("%s@%s", exportTest.targetRegistry.fixtures.ReadOnlyRunImage, runImageFixtureSHA.String()))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("layout case", func() {
|
||||||
|
var (
|
||||||
|
containerName string
|
||||||
|
err error
|
||||||
|
layoutDir string
|
||||||
|
tmpDir string
|
||||||
|
exportedImageName string
|
||||||
|
)
|
||||||
|
|
||||||
|
when("experimental mode is enabled", func() {
|
||||||
|
it.Before(func() {
|
||||||
|
// create the directory to save all OCI images on disk
|
||||||
|
tmpDir, err = os.MkdirTemp("", "layout")
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
|
||||||
|
containerName = "test-container-" + h.RandString(10)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.After(func() {
|
||||||
|
if h.DockerContainerExists(t, containerName) {
|
||||||
|
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||||
|
}
|
||||||
|
// removes all images created
|
||||||
|
os.RemoveAll(tmpDir)
|
||||||
|
})
|
||||||
|
|
||||||
|
when("using a custom layout directory", func() {
|
||||||
|
it.Before(func() {
|
||||||
|
exportedImageName = "my-custom-layout-app"
|
||||||
|
layoutDir = filepath.Join(path.RootDir, "my-layout-dir")
|
||||||
|
})
|
||||||
|
|
||||||
|
it("app is created", func() {
|
||||||
|
var exportFlags []string
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
|
||||||
|
exportFlags = append(exportFlags, []string{"-layout", "-layout-dir", layoutDir, "-analyzed", "/layers/layout-analyzed.toml"}...)
|
||||||
|
exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...)
|
||||||
|
exportArgs = append(exportArgs, exportedImageName)
|
||||||
|
|
||||||
|
output := h.DockerRunAndCopy(t, containerName, tmpDir, layoutDir, exportImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_EXPERIMENTAL_MODE=warn",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
),
|
||||||
|
h.WithArgs(exportArgs...))
|
||||||
|
|
||||||
|
h.AssertStringContains(t, output, "Saving /my-layout-dir/index.docker.io/library/my-custom-layout-app/latest")
|
||||||
|
|
||||||
|
// assert the image was saved on disk in OCI layout format
|
||||||
|
index := h.ReadIndexManifest(t, filepath.Join(tmpDir, layoutDir, "index.docker.io", "library", exportedImageName, "latest"))
|
||||||
|
h.AssertEq(t, len(index.Manifests), 1)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("experimental mode is not enabled", func() {
|
||||||
|
it.Before(func() {
|
||||||
|
layoutDir = filepath.Join(path.RootDir, "layout-dir")
|
||||||
|
})
|
||||||
|
|
||||||
|
it("errors", func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag")
|
||||||
|
|
||||||
|
cmd := exec.Command(
|
||||||
|
"docker", "run", "--rm",
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
exportImage,
|
||||||
|
ctrPath(exporterPath),
|
||||||
|
"-layout",
|
||||||
|
"-layout-dir", layoutDir,
|
||||||
|
"some-image",
|
||||||
|
) // #nosec G204
|
||||||
|
output, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "experimental features are disabled by CNB_EXPERIMENTAL_MODE=error"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// assertDaemonImageDoesNotHaveHistory asserts that every history entry of the image has the normalized created time and an empty "created by" field.
func assertDaemonImageDoesNotHaveHistory(t *testing.T, repoName string) {
history, err := h.DockerCli(t).ImageHistory(context.TODO(), repoName)
h.AssertNil(t, err)
for _, hs := range history {
h.AssertEq(t, hs.Created, imgutil.NormalizedDateTime.Unix())
h.AssertEq(t, hs.CreatedBy, "")
}
}

// assertDaemonImageHasHistory asserts that the image history matches expectedHistory entry by entry, all with the normalized created time.
func assertDaemonImageHasHistory(t *testing.T, repoName string, expectedHistory []string) {
history, err := h.DockerCli(t).ImageHistory(context.TODO(), repoName)
h.AssertNil(t, err)
h.AssertEq(t, len(history), len(expectedHistory))
for idx, hs := range history {
h.AssertEq(t, hs.Created, imgutil.NormalizedDateTime.Unix())
h.AssertEq(t, hs.CreatedBy, expectedHistory[idx])
}
}

// calculateEmptyLayerSha returns the sha256 digest of an empty tarball, the diff ID expected for the layer exported when nothing was cached.
func calculateEmptyLayerSha(t *testing.T) string {
tmpDir, err := os.MkdirTemp("", "")
h.AssertNil(t, err)
testLayerEmptyPath := filepath.Join(tmpDir, "empty.tar")
h.AssertNil(t, os.WriteFile(testLayerEmptyPath, []byte{}, 0600))
return "sha256:" + h.ComputeSHA256ForFile(t, testLayerEmptyPath)
}
|
|
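A minimal standalone sketch (assuming only the Go standard library) that reproduces the value calculateEmptyLayerSha computes: because the empty.tar fixture is written with no content, the result is always the SHA-256 digest of zero bytes.

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	sum := sha256.Sum256(nil) // digest of zero bytes, same as hashing an empty file
	fmt.Printf("sha256:%x\n", sum)
	// prints: sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}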
@@ -0,0 +1,288 @@
//go:build acceptance

package acceptance

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"testing"

	"github.com/buildpacks/imgutil/layout/sparse"
	"github.com/google/go-containerregistry/pkg/authn"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/layout"
	"github.com/google/go-containerregistry/pkg/v1/remote"
	"github.com/sclevine/spec"
	"github.com/sclevine/spec/report"

	"github.com/buildpacks/lifecycle/api"
	"github.com/buildpacks/lifecycle/auth"
	"github.com/buildpacks/lifecycle/cmd"
	"github.com/buildpacks/lifecycle/platform/files"
	h "github.com/buildpacks/lifecycle/testhelpers"
)

var (
	extendImage          string
	extendRegAuthConfig  string
	extendRegNetwork     string
	extenderPath         string
	extendDaemonFixtures *daemonImageFixtures
	extendRegFixtures    *regImageFixtures
	extendTest           *PhaseTest
)

const (
	// Log message emitted by kaniko;
	// if we provide cache directory as an option, kaniko looks there for the base image as a tarball;
	// however the base image is in OCI layout format, so we fail to initialize the base image;
	// we manage to provide the base image because we override image.RetrieveRemoteImage,
	// but the log message could be confusing to end users, hence we check that it is not printed.
	msgErrRetrievingImageFromCache = "Error while retrieving image from cache"
)

func TestExtender(t *testing.T) {
	testImageDockerContext := filepath.Join("testdata", "extender")
	extendTest = NewPhaseTest(t, "extender", testImageDockerContext)
	extendTest.Start(t)
	defer extendTest.Stop(t)

	extendImage = extendTest.testImageRef
	extenderPath = extendTest.containerBinaryPath
	extendRegAuthConfig = extendTest.targetRegistry.authConfig
	extendRegNetwork = extendTest.targetRegistry.network
	extendDaemonFixtures = extendTest.targetDaemon.fixtures
	extendRegFixtures = extendTest.targetRegistry.fixtures

	for _, platformAPI := range api.Platform.Supported {
		if platformAPI.LessThan("0.10") {
			continue
		}
		spec.Run(t, "acceptance-extender/"+platformAPI.String(), testExtenderFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{}))
	}
}

func testExtenderFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
|
||||||
|
return func(t *testing.T, when spec.G, it spec.S) {
|
||||||
|
var generatedDir = "/layers/generated"
|
||||||
|
|
||||||
|
it.Before(func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.10"), "")
|
||||||
|
if api.MustParse(platformAPI).AtLeast("0.13") {
|
||||||
|
generatedDir = "/layers/generated-with-contexts"
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
when("kaniko case", func() {
|
||||||
|
var extendedDir, kanikoDir, analyzedPath string
|
||||||
|
|
||||||
|
it.Before(func() {
|
||||||
|
var err error
|
||||||
|
extendedDir, err = os.MkdirTemp("", "lifecycle-acceptance")
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
kanikoDir, err = os.MkdirTemp("", "lifecycle-acceptance")
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
|
||||||
|
// push base image to test registry
|
||||||
|
h.Run(t, exec.Command("docker", "tag", extendImage, extendTest.RegRepoName(extendImage)))
|
||||||
|
h.AssertNil(t, h.PushImage(h.DockerCli(t), extendTest.RegRepoName(extendImage), extendTest.targetRegistry.registry.EncodedLabeledAuth()))
|
||||||
|
|
||||||
|
// mimic what the restorer would have done in the previous phase:
|
||||||
|
|
||||||
|
// warm kaniko cache
|
||||||
|
|
||||||
|
// get remote image
|
||||||
|
os.Setenv("DOCKER_CONFIG", extendTest.targetRegistry.dockerConfigDir)
|
||||||
|
ref, auth, err := auth.ReferenceForRepoName(authn.DefaultKeychain, extendTest.RegRepoName(extendImage))
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
remoteImage, err := remote.Image(ref, remote.WithAuth(auth))
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
baseImageHash, err := remoteImage.Digest()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
baseImageDigest := baseImageHash.String()
|
||||||
|
baseCacheDir := filepath.Join(kanikoDir, "cache", "base")
|
||||||
|
h.AssertNil(t, os.MkdirAll(baseCacheDir, 0755))
|
||||||
|
|
||||||
|
// write sparse image
|
||||||
|
layoutImage, err := sparse.NewImage(filepath.Join(baseCacheDir, baseImageDigest), remoteImage)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertNil(t, layoutImage.Save())
|
||||||
|
|
||||||
|
// write image reference in analyzed.toml
|
||||||
|
analyzedMD := files.Analyzed{
|
||||||
|
BuildImage: &files.ImageIdentifier{
|
||||||
|
Reference: fmt.Sprintf("%s@%s", extendTest.RegRepoName(extendImage), baseImageDigest),
|
||||||
|
},
|
||||||
|
RunImage: &files.RunImage{
|
||||||
|
Reference: fmt.Sprintf("%s@%s", extendTest.RegRepoName(extendImage), baseImageDigest),
|
||||||
|
Extend: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
analyzedPath = h.TempFile(t, "", "analyzed.toml")
|
||||||
|
h.AssertNil(t, files.Handler.WriteAnalyzed(analyzedPath, &analyzedMD, cmd.DefaultLogger))
|
||||||
|
})
|
||||||
|
|
||||||
|
it.After(func() {
|
||||||
|
_ = os.RemoveAll(kanikoDir)
|
||||||
|
_ = os.RemoveAll(extendedDir)
|
||||||
|
})
|
||||||
|
|
||||||
|
when("extending the build image", func() {
|
||||||
|
it("succeeds", func() {
|
||||||
|
extendArgs := []string{
|
||||||
|
ctrPath(extenderPath),
|
||||||
|
"-analyzed", "/layers/analyzed.toml",
|
||||||
|
"-generated", generatedDir,
|
||||||
|
"-log-level", "debug",
|
||||||
|
"-gid", "1000",
|
||||||
|
"-uid", "1234",
|
||||||
|
}
|
||||||
|
|
||||||
|
extendFlags := []string{
|
||||||
|
"--env", "CNB_PLATFORM_API=" + platformAPI,
|
||||||
|
"--volume", fmt.Sprintf("%s:/layers/analyzed.toml", analyzedPath),
|
||||||
|
"--volume", fmt.Sprintf("%s:/kaniko", kanikoDir),
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Log("first build extends the build image by running Dockerfile commands")
|
||||||
|
firstOutput := h.DockerRunWithCombinedOutput(t,
|
||||||
|
extendImage,
|
||||||
|
h.WithFlags(extendFlags...),
|
||||||
|
h.WithArgs(extendArgs...),
|
||||||
|
)
|
||||||
|
h.AssertStringDoesNotContain(t, firstOutput, msgErrRetrievingImageFromCache)
|
||||||
|
h.AssertStringContains(t, firstOutput, "ca-certificates")
|
||||||
|
h.AssertStringContains(t, firstOutput, "Hello Extensions buildpack\ncurl") // output by buildpack, shows that curl was installed on the build image
|
||||||
|
t.Log("sets environment variables from the extended build image in the build context")
|
||||||
|
h.AssertStringContains(t, firstOutput, "CNB_STACK_ID for buildpack: stack-id-from-ext-tree")
|
||||||
|
h.AssertStringContains(t, firstOutput, "HOME for buildpack: /home/cnb")
|
||||||
|
|
||||||
|
t.Log("cleans the kaniko directory")
|
||||||
|
fis, err := os.ReadDir(kanikoDir)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, len(fis), 1) // 1: /kaniko/cache
|
||||||
|
|
||||||
|
t.Log("second build extends the build image by pulling from the cache directory")
|
||||||
|
secondOutput := h.DockerRunWithCombinedOutput(t,
|
||||||
|
extendImage,
|
||||||
|
h.WithFlags(extendFlags...),
|
||||||
|
h.WithArgs(extendArgs...),
|
||||||
|
)
|
||||||
|
h.AssertStringDoesNotContain(t, secondOutput, msgErrRetrievingImageFromCache)
|
||||||
|
h.AssertStringDoesNotContain(t, secondOutput, "ca-certificates") // shows that first cache layer was used
|
||||||
|
h.AssertStringDoesNotContain(t, secondOutput, "No cached layer found for cmd RUN apt-get update && apt-get install -y tree") // shows that second cache layer was used
|
||||||
|
h.AssertStringContains(t, secondOutput, "Hello Extensions buildpack\ncurl") // output by buildpack, shows that curl is still installed in the unpacked cached layer
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("extending the run image", func() {
|
||||||
|
it.Before(func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not support run image extension")
|
||||||
|
})
|
||||||
|
|
||||||
|
it("succeeds", func() {
|
||||||
|
extendArgs := []string{
|
||||||
|
ctrPath(extenderPath),
|
||||||
|
"-analyzed", "/layers/analyzed.toml",
|
||||||
|
"-extended", "/layers/extended",
|
||||||
|
"-generated", generatedDir,
|
||||||
|
"-kind", "run",
|
||||||
|
"-log-level", "debug",
|
||||||
|
"-gid", "1000",
|
||||||
|
"-uid", "1234",
|
||||||
|
}
|
||||||
|
|
||||||
|
extendFlags := []string{
|
||||||
|
"--env", "CNB_PLATFORM_API=" + platformAPI,
|
||||||
|
"--volume", fmt.Sprintf("%s:/layers/analyzed.toml", analyzedPath),
|
||||||
|
"--volume", fmt.Sprintf("%s:/layers/extended", extendedDir),
|
||||||
|
"--volume", fmt.Sprintf("%s:/kaniko", kanikoDir),
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Log("first build extends the run image by running Dockerfile commands")
|
||||||
|
firstOutput := h.DockerRunWithCombinedOutput(t,
|
||||||
|
extendImage,
|
||||||
|
h.WithFlags(extendFlags...),
|
||||||
|
h.WithArgs(extendArgs...),
|
||||||
|
)
|
||||||
|
h.AssertStringDoesNotContain(t, firstOutput, msgErrRetrievingImageFromCache)
|
||||||
|
h.AssertStringContains(t, firstOutput, "ca-certificates")
|
||||||
|
h.AssertStringContains(t, firstOutput, "No cached layer found for cmd RUN apt-get update && apt-get install -y tree")
|
||||||
|
t.Log("does not run the build phase")
|
||||||
|
h.AssertStringDoesNotContain(t, firstOutput, "Hello Extensions buildpack\ncurl")
|
||||||
|
t.Log("outputs extended image layers to the extended directory")
|
||||||
|
images, err := os.ReadDir(filepath.Join(extendedDir, "run"))
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, len(images), 1) // sha256:<extended image digest>
|
||||||
|
assertExpectedImage(t, filepath.Join(extendedDir, "run", images[0].Name()), platformAPI)
|
||||||
|
t.Log("cleans the kaniko directory")
|
||||||
|
caches, err := os.ReadDir(kanikoDir)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, len(caches), 1) // 1: /kaniko/cache
|
||||||
|
|
||||||
|
t.Log("second build extends the build image by pulling from the cache directory")
|
||||||
|
secondOutput := h.DockerRunWithCombinedOutput(t,
|
||||||
|
extendImage,
|
||||||
|
h.WithFlags(extendFlags...),
|
||||||
|
h.WithArgs(extendArgs...),
|
||||||
|
)
|
||||||
|
h.AssertStringDoesNotContain(t, secondOutput, msgErrRetrievingImageFromCache)
|
||||||
|
h.AssertStringDoesNotContain(t, secondOutput, "ca-certificates") // shows that first cache layer was used
|
||||||
|
h.AssertStringDoesNotContain(t, secondOutput, "No cached layer found for cmd RUN apt-get update && apt-get install -y tree") // shows that second cache layer was used
|
||||||
|
t.Log("does not run the build phase")
|
||||||
|
h.AssertStringDoesNotContain(t, secondOutput, "Hello Extensions buildpack\ncurl")
|
||||||
|
t.Log("outputs extended image layers to the extended directory")
|
||||||
|
images, err = os.ReadDir(filepath.Join(extendedDir, "run"))
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, len(images), 1) // sha256:<first extended image digest>
|
||||||
|
assertExpectedImage(t, filepath.Join(extendedDir, "run", images[0].Name()), platformAPI)
|
||||||
|
t.Log("cleans the kaniko directory")
|
||||||
|
caches, err = os.ReadDir(kanikoDir)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, len(caches), 1) // 1: /kaniko/cache
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func assertExpectedImage(t *testing.T, imagePath, platformAPI string) {
	image, err := readOCI(imagePath)
	h.AssertNil(t, err)
	configFile, err := image.ConfigFile()
	h.AssertNil(t, err)
	h.AssertEq(t, configFile.Config.Labels["io.buildpacks.rebasable"], "false")
	layers, err := image.Layers()
	h.AssertNil(t, err)
	history := configFile.History
	h.AssertEq(t, len(history), len(configFile.RootFS.DiffIDs))
	if api.MustParse(platformAPI).AtLeast("0.13") {
		h.AssertEq(t, len(layers), 7) // base (3), curl (2), tree (2)
		h.AssertEq(t, history[3].CreatedBy, "Layer: 'RUN apt-get update && apt-get install -y curl', Created by extension: curl")
		h.AssertEq(t, history[4].CreatedBy, "Layer: 'COPY run-file /', Created by extension: curl")
		h.AssertEq(t, history[5].CreatedBy, "Layer: 'RUN apt-get update && apt-get install -y tree', Created by extension: tree")
		h.AssertEq(t, history[6].CreatedBy, "Layer: 'COPY shared-file /shared-run', Created by extension: tree")
	} else {
		h.AssertEq(t, len(layers), 5) // base (3), curl (1), tree (1)
		h.AssertEq(t, history[3].CreatedBy, "Layer: 'RUN apt-get update && apt-get install -y curl', Created by extension: curl")
		h.AssertEq(t, history[4].CreatedBy, "Layer: 'RUN apt-get update && apt-get install -y tree', Created by extension: tree")
	}
}

func readOCI(fromPath string) (v1.Image, error) {
	layoutPath, err := layout.FromPath(fromPath)
	if err != nil {
		return nil, fmt.Errorf("getting layout from path: %w", err)
	}
	hash, err := v1.NewHash(filepath.Base(fromPath))
	if err != nil {
		return nil, fmt.Errorf("getting hash from reference '%s': %w", fromPath, err)
	}
	v1Image, err := layoutPath.Image(hash)
	if err != nil {
		return nil, fmt.Errorf("getting image from hash '%s': %w", hash.String(), err)
	}
	return v1Image, nil
}
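readOCI recovers the image digest from the directory name; an alternative sketch (assuming the same go-containerregistry imports as above, and that the layout contains exactly one manifest; readOCIFromIndex is not part of this change) reads the digest from the layout's top-level index instead:

func readOCIFromIndex(fromPath string) (v1.Image, error) {
	idx, err := layout.ImageIndexFromPath(fromPath)
	if err != nil {
		return nil, fmt.Errorf("reading OCI layout index at '%s': %w", fromPath, err)
	}
	manifest, err := idx.IndexManifest()
	if err != nil {
		return nil, fmt.Errorf("reading index manifest: %w", err)
	}
	if len(manifest.Manifests) != 1 {
		return nil, fmt.Errorf("expected exactly 1 manifest, got %d", len(manifest.Manifests))
	}
	// look up the image by the digest recorded in the index, rather than by directory name
	return idx.Image(manifest.Manifests[0].Digest)
}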
@@ -1,7 +1,7 @@
|
||||||
package acceptance
|
package acceptance
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"fmt"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
@@ -14,166 +14,202 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
launchDockerContext = filepath.Join("testdata", "launcher")
|
launchImage string
|
||||||
launcherBinaryDir = filepath.Join("acceptance", "testdata", "launcher", "container", "cnb", "lifecycle")
|
launcherPath string
|
||||||
launchImage = "lifecycle/acceptance/launcher"
|
launchTest *PhaseTest
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestLauncher(t *testing.T) {
|
func TestLauncher(t *testing.T) {
|
||||||
buildLauncher(t)
|
testImageDockerContext := filepath.Join("testdata", "launcher")
|
||||||
buildLaunchImage(t)
|
launchTest = NewPhaseTest(t, "launcher", testImageDockerContext, withoutDaemonFixtures, withoutRegistry)
|
||||||
defer removeLaunchImage(t)
|
|
||||||
|
containerBinaryDir := filepath.Join("testdata", "launcher", "linux", "container", "cnb", "lifecycle")
|
||||||
|
withCustomContainerBinaryDir := func(_ *testing.T, phaseTest *PhaseTest) {
|
||||||
|
phaseTest.containerBinaryDir = containerBinaryDir
|
||||||
|
}
|
||||||
|
launchTest.Start(t, withCustomContainerBinaryDir)
|
||||||
|
defer launchTest.Stop(t)
|
||||||
|
|
||||||
|
launchImage = launchTest.testImageRef
|
||||||
|
launcherPath = launchTest.containerBinaryPath
|
||||||
|
|
||||||
spec.Run(t, "acceptance", testLauncher, spec.Parallel(), spec.Report(report.Terminal{}))
|
spec.Run(t, "acceptance", testLauncher, spec.Parallel(), spec.Report(report.Terminal{}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func testLauncher(t *testing.T, when spec.G, it spec.S) {
|
func testLauncher(t *testing.T, when spec.G, it spec.S) {
|
||||||
when("there is no CMD provided", func() {
|
when("exec.d", func() {
|
||||||
when("CNB_PROCESS_TYPE is NOT set", func() {
|
it("executes the binaries and modifies env before running profiles", func() {
|
||||||
it("web is the default process-type", func() {
|
cmd := exec.Command("docker", "run", "--rm", //nolint
|
||||||
cmd := exec.Command("docker", "run", "--rm", launchImage)
|
"--env=CNB_PLATFORM_API=0.7",
|
||||||
output, err := cmd.CombinedOutput()
|
"--entrypoint=exec.d-checker"+exe,
|
||||||
if err != nil {
|
"--env=VAR_FROM_EXEC_D=orig-val",
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
launchImage)
|
||||||
}
|
|
||||||
expected := "Executing web process-type"
|
helper := "helper" + exe
|
||||||
if !strings.Contains(string(output), expected) {
|
execDHelper := ctrPath("/layers", execDBpDir, "some_layer/exec.d", helper)
|
||||||
t.Fatalf("failed to execute web:\n\t got: %s\n\t want: %s", output, expected)
|
execDCheckerHelper := ctrPath("/layers", execDBpDir, "some_layer/exec.d/exec.d-checker", helper)
|
||||||
}
|
workDir := ctrPath("/workspace")
|
||||||
|
|
||||||
|
expected := fmt.Sprintf("%s was executed\n", execDHelper)
|
||||||
|
expected += fmt.Sprintf("Exec.d Working Dir: %s\n", workDir)
|
||||||
|
expected += fmt.Sprintf("%s was executed\n", execDCheckerHelper)
|
||||||
|
expected += fmt.Sprintf("Exec.d Working Dir: %s\n", workDir)
|
||||||
|
expected += "sourced bp profile\n"
|
||||||
|
expected += "sourced app profile\n"
|
||||||
|
expected += "VAR_FROM_EXEC_D: orig-val:val-from-exec.d:val-from-exec.d-for-process-type-exec.d-checker"
|
||||||
|
|
||||||
|
assertOutput(t, cmd, expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("entrypoint is a process", func() {
|
||||||
|
it("launches that process", func() {
|
||||||
|
cmd := exec.Command("docker", "run", "--rm", //nolint
|
||||||
|
"--entrypoint=web",
|
||||||
|
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
launchImage)
|
||||||
|
assertOutput(t, cmd, "Executing web process-type")
|
||||||
|
})
|
||||||
|
|
||||||
|
when("process contains a period", func() {
|
||||||
|
it("launches that process", func() {
|
||||||
|
cmd := exec.Command("docker", "run", "--rm",
|
||||||
|
"--entrypoint=process.with.period"+exe,
|
||||||
|
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
launchImage)
|
||||||
|
assertOutput(t, cmd, "Executing process.with.period process-type")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
it("appends any args to the process args", func() {
|
||||||
|
cmd := exec.Command( //nolint
|
||||||
|
"docker", "run", "--rm",
|
||||||
|
"--entrypoint=web",
|
||||||
|
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
launchImage, "with user provided args",
|
||||||
|
)
|
||||||
|
assertOutput(t, cmd, "Executing web process-type with user provided args")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("entrypoint is a not a process", func() {
|
||||||
|
it("builds a process from the arguments", func() {
|
||||||
|
cmd := exec.Command( //nolint
|
||||||
|
"docker", "run", "--rm",
|
||||||
|
"--entrypoint=launcher",
|
||||||
|
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
|
launchImage, "--",
|
||||||
|
"env",
|
||||||
|
)
|
||||||
|
|
||||||
|
assertOutput(t, cmd,
|
||||||
|
"SOME_VAR=some-bp-val",
|
||||||
|
"OTHER_VAR=other-bp-val",
|
||||||
|
)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
when("CNB_PROCESS_TYPE is set", func() {
|
when("CNB_PROCESS_TYPE is set", func() {
|
||||||
it("the value of CNB_PROCESS_TYPE is the default process-type", func() {
|
it("should warn", func() {
|
||||||
cmd := exec.Command("docker", "run", "--rm", "--env", "CNB_PROCESS_TYPE=other-process", launchImage)
|
|
||||||
output, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
|
||||||
}
|
|
||||||
expected := "Executing other-process process-type"
|
|
||||||
if !strings.Contains(string(output), expected) {
|
|
||||||
t.Fatalf("failed to execute other-process:\n\t got: %s\n\t want: %s", output, expected)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("process-type provided in CMD", func() {
|
|
||||||
it("launches that process-type", func() {
|
|
||||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "other-process")
|
|
||||||
output, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
|
||||||
}
|
|
||||||
expected := "Executing other-process process-type"
|
|
||||||
if !strings.Contains(string(output), expected) {
|
|
||||||
t.Fatalf("failed to execute other-process:\n\t got: %s\n\t want: %s", output, expected)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
it("respects CNB_APP_DIR and CNB_LAYERS_DIR environment variables", func() {
|
|
||||||
cmd := exec.Command("docker", "run", "--rm",
|
cmd := exec.Command("docker", "run", "--rm",
|
||||||
"--env", "CNB_APP_DIR=/other-app",
|
"--env=CNB_PROCESS_TYPE=direct-process",
|
||||||
"--env", "CNB_LAYERS_DIR=/other-layers",
|
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
launchImage)
|
"--env=CNB_NO_COLOR=true",
|
||||||
output, err := cmd.CombinedOutput()
|
launchImage,
|
||||||
if err != nil {
|
)
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
out, err := cmd.CombinedOutput()
|
||||||
}
|
h.AssertNotNil(t, err)
|
||||||
expected := "sourced other app profile\nExecuting other-layers web process-type"
|
h.AssertStringContains(t, string(out), "Warning: CNB_PROCESS_TYPE is not supported in Platform API "+latestPlatformAPI)
|
||||||
if !strings.Contains(string(output), expected) {
|
h.AssertStringContains(t, string(out), `Warning: Run with ENTRYPOINT 'direct-process' to invoke the 'direct-process' process type`)
|
||||||
t.Fatalf("failed to execute web:\n\t got: %s\n\t want: %s", output, expected)
|
h.AssertStringContains(t, string(out), "ERROR: failed to launch: determine start command: when there is no default process a command is required")
|
||||||
}
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
when("provided CMD is not a process-type", func() {
|
when("provided CMD is not a process-type", func() {
|
||||||
it("sources profiles and executes the command in a shell", func() {
|
it("sources profiles and executes the command in a shell", func() {
|
||||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "echo something")
|
cmd := exec.Command( //nolint
|
||||||
output, err := cmd.CombinedOutput()
|
"docker", "run", "--rm",
|
||||||
if err != nil {
|
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
launchImage,
|
||||||
}
|
"echo", "something",
|
||||||
expected := "sourced bp profile\nsourced app profile\nsomething"
|
)
|
||||||
if !strings.Contains(string(output), expected) {
|
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsomething")
|
||||||
t.Fatalf("failed to execute provided CMD:\n\t got: %s\n\t want: %s", output, expected)
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
|
|
||||||
it("sets env vars from layers", func() {
|
it("sets env vars from layers", func() {
|
||||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "echo $SOME_VAR $OTHER_VAR")
|
cmd := exec.Command( //nolint
|
||||||
output, err := cmd.CombinedOutput()
|
"docker", "run", "--rm",
|
||||||
if err != nil {
|
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
launchImage,
|
||||||
}
|
"echo", "$SOME_VAR", "$OTHER_VAR", "$WORKER_VAR",
|
||||||
expected := "sourced bp profile\nsourced app profile\nsome-bp-val other-bp-val"
|
)
|
||||||
if !strings.Contains(string(output), expected) {
|
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-bp-val other-bp-val worker-no-process-val")
|
||||||
t.Fatalf("failed to execute provided CMD:\n\t got: %s\n\t want: %s", output, expected)
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
|
|
||||||
it("passes through env vars from user, excluding blacklist", func() {
|
it("passes through env vars from user, excluding excluded vars", func() {
|
||||||
cmd := exec.Command("docker", "run", "--rm",
|
args := []string{"echo", "$SOME_USER_VAR, $CNB_APP_DIR, $OTHER_VAR"}
|
||||||
"--env", "CNB_APP_DIR=/workspace",
|
cmd := exec.Command("docker",
|
||||||
|
append(
|
||||||
|
[]string{
|
||||||
|
"run", "--rm",
|
||||||
|
"--env", "CNB_APP_DIR=" + ctrPath("/workspace"),
|
||||||
|
"--env=CNB_PLATFORM_API=" + latestPlatformAPI,
|
||||||
"--env", "SOME_USER_VAR=some-user-val",
|
"--env", "SOME_USER_VAR=some-user-val",
|
||||||
"--env", "OTHER_VAR=other-user-val",
|
"--env", "OTHER_VAR=other-user-val",
|
||||||
launchImage,
|
launchImage,
|
||||||
"echo $SOME_USER_VAR $CNB_APP_DIR $OTHER_VAR")
|
},
|
||||||
output, err := cmd.CombinedOutput()
|
args...)...,
|
||||||
if err != nil {
|
) // #nosec G204
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
|
||||||
}
|
assertOutput(t, cmd, "sourced bp profile\nsourced app profile\nsome-user-val, , other-user-val**other-bp-val")
|
||||||
// bp appends other-bp-val with delimiter '**'
|
|
||||||
expected := "sourced bp profile\nsourced app profile\nsome-user-val other-user-val**other-bp-val"
|
|
||||||
if !strings.Contains(string(output), expected) {
|
|
||||||
t.Fatalf("failed to execute provided CMD:\n\t got: %s\n\t want: %s", output, expected)
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
|
|
||||||
it("adds buildpack bin dirs to the path", func() {
|
it("adds buildpack bin dirs to the path", func() {
|
||||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "bp-executable")
|
cmd := exec.Command( //nolint
|
||||||
output, err := cmd.CombinedOutput()
|
"docker", "run", "--rm",
|
||||||
if err != nil {
|
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
launchImage,
|
||||||
}
|
"bp-executable",
|
||||||
expected := "bp executable"
|
)
|
||||||
if !strings.Contains(string(output), expected) {
|
assertOutput(t, cmd, "bp executable")
|
||||||
t.Fatalf("failed to execute provided CMD:\n\t got: %s\n\t want: %s", output, expected)
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
when("CMD provided starts with --", func() {
|
when("CMD provided starts with --", func() {
|
||||||
it("launches command directly", func() {
|
it("launches command directly", func() {
|
||||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "--", "echo", "something")
|
cmd := exec.Command( //nolint
|
||||||
output, err := cmd.CombinedOutput()
|
"docker", "run", "--rm",
|
||||||
if err != nil {
|
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
launchImage, "--",
|
||||||
}
|
"echo", "something",
|
||||||
expected := "something"
|
)
|
||||||
if !strings.Contains(string(output), expected) {
|
assertOutput(t, cmd, "something")
|
||||||
t.Fatalf("failed to execute provided CMD:\n\t got: %s\n\t want: %s", output, expected)
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
|
|
||||||
it("sets env vars from layers", func() {
|
it("sets env vars from layers", func() {
|
||||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "--", "env")
|
cmd := exec.Command( //nolint
|
||||||
output, err := cmd.CombinedOutput()
|
"docker", "run", "--rm",
|
||||||
if err != nil {
|
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
launchImage, "--",
|
||||||
}
|
"env",
|
||||||
if !strings.Contains(string(output), "SOME_VAR=some-bp-val") {
|
)
|
||||||
t.Fatalf("failed to execute provided CMD:\n\t got: %s\n\t want: %s", output, "SOME_VAR=some-bp-val")
|
|
||||||
}
|
assertOutput(t, cmd,
|
||||||
if !strings.Contains(string(output), "OTHER_VAR=other-bp-val") {
|
"SOME_VAR=some-bp-val",
|
||||||
t.Fatalf("failed to execute provided CMD:\n\t got: %s\n\t want: %s", output, "OTHER_VAR=other-bp-val")
|
"OTHER_VAR=other-bp-val",
|
||||||
}
|
)
|
||||||
})
|
})
|
||||||
|
|
||||||
it("passes through env vars from user, excluding blacklist", func() {
|
it("passes through env vars from user, excluding excluded vars", func() {
|
||||||
cmd := exec.Command("docker", "run", "--rm",
|
cmd := exec.Command( //nolint
|
||||||
|
"docker", "run", "--rm",
|
||||||
"--env", "CNB_APP_DIR=/workspace",
|
"--env", "CNB_APP_DIR=/workspace",
|
||||||
|
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
"--env", "SOME_USER_VAR=some-user-val",
|
"--env", "SOME_USER_VAR=some-user-val",
|
||||||
launchImage, "--", "env")
|
launchImage, "--",
|
||||||
|
"env",
|
||||||
|
)
|
||||||
|
|
||||||
output, err := cmd.CombinedOutput()
|
output, err := cmd.CombinedOutput()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
||||||
|
@@ -189,49 +225,26 @@ func testLauncher(t *testing.T, when spec.G, it spec.S) {
|
||||||
})
|
})
|
||||||
|
|
||||||
it("adds buildpack bin dirs to the path before looking up command", func() {
|
it("adds buildpack bin dirs to the path before looking up command", func() {
|
||||||
cmd := exec.Command("docker", "run", "--rm", launchImage, "--", "bp-executable")
|
cmd := exec.Command( //nolint
|
||||||
output, err := cmd.CombinedOutput()
|
"docker", "run", "--rm",
|
||||||
if err != nil {
|
"--env=CNB_PLATFORM_API="+latestPlatformAPI,
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
launchImage, "--",
|
||||||
}
|
"bp-executable",
|
||||||
expected := "bp executable"
|
|
||||||
if !strings.Contains(string(output), expected) {
|
|
||||||
t.Fatalf("failed to execute provided CMD:\n\t got: %s\n\t want: %s", output, expected)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildLaunchImage(t *testing.T) {
|
|
||||||
cmd := exec.Command("docker", "build", "-t", launchImage, launchDockerContext)
|
|
||||||
if output, err := cmd.CombinedOutput(); err != nil {
|
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, string(output), err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeLaunchImage(t *testing.T) {
|
|
||||||
cmd := exec.Command("docker", "rmi", launchImage)
|
|
||||||
if output, err := cmd.CombinedOutput(); err != nil {
|
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, string(output), err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildLauncher(t *testing.T) {
|
|
||||||
cmd := exec.Command("make", "clean", "build-linux-launcher")
|
|
||||||
wd, err := os.Getwd()
|
|
||||||
h.AssertNil(t, err)
|
|
||||||
cmd.Dir = filepath.Join(wd, "..")
|
|
||||||
cmd.Env = append(
|
|
||||||
os.Environ(),
|
|
||||||
"PWD="+cmd.Dir,
|
|
||||||
"OUT_DIR="+launcherBinaryDir,
|
|
||||||
"LIFECYCLE_VERSION=some-version",
|
|
||||||
"SCM_COMMIT=asdf123",
|
|
||||||
)
|
)
|
||||||
|
assertOutput(t, cmd, "bp executable")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
t.Log("Building binaries: ", cmd.Args)
|
func assertOutput(t *testing.T, cmd *exec.Cmd, expected ...string) {
|
||||||
|
t.Helper()
|
||||||
output, err := cmd.CombinedOutput()
|
output, err := cmd.CombinedOutput()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
t.Fatalf("failed to run %v\n OUTPUT: %s\n ERROR: %s\n", cmd.Args, output, err)
|
||||||
}
|
}
|
||||||
|
for _, ex := range expected {
|
||||||
|
if !strings.Contains(strings.ReplaceAll(string(output), "\r\n", "\n"), ex) {
|
||||||
|
t.Fatalf("failed:\n\t output: %s\n\t should include: %s", output, ex)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -0,0 +1,522 @@
package acceptance

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"testing"
	"time"

	"github.com/docker/docker/api/types/image"

	ih "github.com/buildpacks/imgutil/testhelpers"
	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/registry"

	"github.com/buildpacks/lifecycle/auth"
	"github.com/buildpacks/lifecycle/cmd"
	"github.com/buildpacks/lifecycle/internal/encoding"
	"github.com/buildpacks/lifecycle/platform"
	"github.com/buildpacks/lifecycle/platform/files"
	h "github.com/buildpacks/lifecycle/testhelpers"
)

type PhaseTest struct {
	containerBinaryDir     string // The path to copy lifecycle binaries to before building the test image.
	containerBinaryPath    string // The path to invoke when running the test container.
	phaseName              string // The phase name, such as detect, analyze, restore, build, export, or create.
	testImageDockerContext string // The directory containing the Dockerfile for the test image.
	testImageRef           string // The test image to run.
	targetDaemon           *targetDaemon
	targetRegistry         *targetRegistry // The target registry to use. Remove by passing `withoutRegistry` to the constructor.
}

type targetDaemon struct {
	os       string
	arch     string
	fixtures *daemonImageFixtures
}

type daemonImageFixtures struct {
	AppImage   string
	CacheImage string
	RunImage   string
}

type targetRegistry struct {
	authConfig      string
	dockerConfigDir string
	network         string
	fixtures        *regImageFixtures
	registry        *ih.DockerRegistry
}

type regImageFixtures struct {
	InaccessibleImage      string
	ReadOnlyAppImage       string
	ReadOnlyCacheImage     string
	ReadOnlyRunImage       string
	ReadWriteAppImage      string
	ReadWriteCacheImage    string
	ReadWriteOtherAppImage string
	SomeAppImage           string
	SomeCacheImage         string
}

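Each phase test wires these helpers together in the same way (compare TestExtender and TestRebaser elsewhere in this change). A minimal sketch of that pattern, where the "creator" phase name and testCreatorFunc are hypothetical placeholders rather than code from this change:

func TestCreator(t *testing.T) {
	phaseTest := NewPhaseTest(t, "creator", filepath.Join("testdata", "creator"))
	phaseTest.Start(t, updateTOMLFixturesWithTestRegistry) // builds the lifecycle, fixtures, test image, and a local registry
	defer phaseTest.Stop(t)

	for _, platformAPI := range api.Platform.Supported {
		spec.Run(t, "acceptance-creator/"+platformAPI.String(), testCreatorFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{}))
	}
}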
func NewPhaseTest(t *testing.T, phaseName, testImageDockerContext string, phaseOp ...func(*PhaseTest)) *PhaseTest {
|
||||||
|
phaseTest := &PhaseTest{
|
||||||
|
containerBinaryDir: filepath.Join(testImageDockerContext, "container", "cnb", "lifecycle"),
|
||||||
|
containerBinaryPath: "/cnb/lifecycle/" + phaseName,
|
||||||
|
phaseName: phaseName,
|
||||||
|
targetDaemon: newTargetDaemon(t),
|
||||||
|
targetRegistry: &targetRegistry{},
|
||||||
|
testImageDockerContext: testImageDockerContext,
|
||||||
|
testImageRef: "lifecycle/acceptance/" + phaseName,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, op := range phaseOp {
|
||||||
|
op(phaseTest)
|
||||||
|
}
|
||||||
|
|
||||||
|
return phaseTest
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTargetDaemon(t *testing.T) *targetDaemon {
|
||||||
|
info, err := h.DockerCli(t).Info(context.TODO())
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
|
||||||
|
arch := info.Architecture
|
||||||
|
if arch == "x86_64" {
|
||||||
|
arch = "amd64"
|
||||||
|
}
|
||||||
|
if arch == "aarch64" {
|
||||||
|
arch = "arm64"
|
||||||
|
}
|
||||||
|
|
||||||
|
return &targetDaemon{
|
||||||
|
os: info.OSType,
|
||||||
|
arch: arch,
|
||||||
|
fixtures: nil,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PhaseTest) RegRepoName(repoName string) string {
|
||||||
|
return p.targetRegistry.registry.RepoName(repoName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PhaseTest) Start(t *testing.T, phaseOp ...func(*testing.T, *PhaseTest)) {
|
||||||
|
p.targetDaemon.createFixtures(t)
|
||||||
|
|
||||||
|
if p.targetRegistry != nil {
|
||||||
|
p.targetRegistry.start(t)
|
||||||
|
containerDockerConfigDir := filepath.Join(p.testImageDockerContext, "container", "docker-config")
|
||||||
|
h.AssertNil(t, os.RemoveAll(containerDockerConfigDir))
|
||||||
|
h.AssertNil(t, os.MkdirAll(containerDockerConfigDir, 0755))
|
||||||
|
h.RecursiveCopy(t, p.targetRegistry.dockerConfigDir, containerDockerConfigDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, op := range phaseOp {
|
||||||
|
op(t, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
h.MakeAndCopyLifecycle(t, p.targetDaemon.os, p.targetDaemon.arch, p.containerBinaryDir)
|
||||||
|
// calculate lifecycle digest
|
||||||
|
hasher := sha256.New()
|
||||||
|
f, err := os.Open(filepath.Join(p.containerBinaryDir, "lifecycle"+exe)) //#nosec G304
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
_, err = io.Copy(hasher, f)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
t.Logf("Built lifecycle binary with digest: %s", hex.EncodeToString(hasher.Sum(nil)))
|
||||||
|
|
||||||
|
copyFakeSboms(t)
|
||||||
|
h.DockerBuild(
|
||||||
|
t,
|
||||||
|
p.testImageRef,
|
||||||
|
p.testImageDockerContext,
|
||||||
|
h.WithArgs("-f", filepath.Join(p.testImageDockerContext, dockerfileName)),
|
||||||
|
)
|
||||||
|
t.Logf("Using image %s with lifecycle version %s",
|
||||||
|
p.testImageRef,
|
||||||
|
h.DockerRun(
|
||||||
|
t,
|
||||||
|
p.testImageRef,
|
||||||
|
h.WithFlags("--env", "CNB_PLATFORM_API="+latestPlatformAPI, "--entrypoint", ctrPath("/cnb/lifecycle/lifecycle"+exe)),
|
||||||
|
h.WithArgs("-version"),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PhaseTest) Stop(t *testing.T) {
|
||||||
|
p.targetDaemon.removeFixtures(t)
|
||||||
|
|
||||||
|
if p.targetRegistry != nil {
|
||||||
|
p.targetRegistry.stop(t)
|
||||||
|
// remove images that were built locally before being pushed to test registry
|
||||||
|
cleanupDaemonFixtures(t, *p.targetRegistry.fixtures)
|
||||||
|
}
|
||||||
|
|
||||||
|
h.DockerImageRemove(t, p.testImageRef)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *targetDaemon) createFixtures(t *testing.T) {
|
||||||
|
if d.fixtures != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var fixtures daemonImageFixtures
|
||||||
|
|
||||||
|
appMeta := minifyMetadata(t, filepath.Join("testdata", "app_image_metadata.json"), files.LayersMetadata{})
|
||||||
|
cacheMeta := minifyMetadata(t, filepath.Join("testdata", "cache_image_metadata.json"), platform.CacheMetadata{})
|
||||||
|
|
||||||
|
fixtures.AppImage = "some-app-image-" + h.RandString(10)
|
||||||
|
cmd := exec.Command(
|
||||||
|
"docker",
|
||||||
|
"build",
|
||||||
|
"-t", fixtures.AppImage,
|
||||||
|
"--build-arg", "fromImage="+containerBaseImage,
|
||||||
|
"--build-arg", "metadata="+appMeta,
|
||||||
|
filepath.Join("testdata", "app-image"),
|
||||||
|
) // #nosec G204
|
||||||
|
h.Run(t, cmd)
|
||||||
|
|
||||||
|
fixtures.CacheImage = "some-cache-image-" + h.RandString(10)
|
||||||
|
cmd = exec.Command(
|
||||||
|
"docker",
|
||||||
|
"build",
|
||||||
|
"-t", fixtures.CacheImage,
|
||||||
|
"--build-arg", "fromImage="+containerBaseImage,
|
||||||
|
"--build-arg", "metadata="+cacheMeta,
|
||||||
|
filepath.Join("testdata", "cache-image"),
|
||||||
|
) // #nosec G204
|
||||||
|
h.Run(t, cmd)
|
||||||
|
|
||||||
|
fixtures.RunImage = "some-run-image-" + h.RandString(10)
|
||||||
|
cmd = exec.Command(
|
||||||
|
"docker",
|
||||||
|
"build",
|
||||||
|
"-t", fixtures.RunImage,
|
||||||
|
"--build-arg", "fromImage="+containerBaseImage,
|
||||||
|
filepath.Join("testdata", "cache-image"),
|
||||||
|
) // #nosec G204
|
||||||
|
h.Run(t, cmd)
|
||||||
|
|
||||||
|
d.fixtures = &fixtures
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *targetDaemon) removeFixtures(t *testing.T) {
|
||||||
|
cleanupDaemonFixtures(t, *d.fixtures)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *targetRegistry) start(t *testing.T) {
|
||||||
|
var err error
|
||||||
|
|
||||||
|
r.dockerConfigDir, err = os.MkdirTemp("", "test.docker.config.dir")
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
|
||||||
|
sharedRegHandler := registry.New(registry.Logger(log.New(io.Discard, "", log.Lshortfile)))
|
||||||
|
r.registry = ih.NewDockerRegistry(
|
||||||
|
ih.WithAuth(r.dockerConfigDir),
|
||||||
|
ih.WithSharedHandler(sharedRegHandler),
|
||||||
|
ih.WithImagePrivileges(),
|
||||||
|
)
|
||||||
|
r.registry.Start(t)
|
||||||
|
|
||||||
|
// if registry is listening on localhost, use host networking to allow containers to reach it
|
||||||
|
r.network = "default"
|
||||||
|
if r.registry.Host == "localhost" {
|
||||||
|
r.network = "host"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save auth config
|
||||||
|
os.Setenv("DOCKER_CONFIG", r.dockerConfigDir)
|
||||||
|
r.authConfig, err = auth.BuildEnvVar(authn.DefaultKeychain, r.registry.RepoName("some-repo")) // repo name doesn't matter
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
|
||||||
|
r.createFixtures(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *targetRegistry) createFixtures(t *testing.T) {
|
||||||
|
var fixtures regImageFixtures
|
||||||
|
|
||||||
|
appMeta := minifyMetadata(t, filepath.Join("testdata", "app_image_metadata.json"), files.LayersMetadata{})
|
||||||
|
cacheMeta := minifyMetadata(t, filepath.Join("testdata", "cache_image_metadata.json"), platform.CacheMetadata{})
|
||||||
|
|
||||||
|
// With Permissions
|
||||||
|
|
||||||
|
fixtures.InaccessibleImage = r.registry.SetInaccessible("inaccessible-image")
|
||||||
|
|
||||||
|
someReadOnlyAppName := "some-read-only-app-image-" + h.RandString(10)
|
||||||
|
fixtures.ReadOnlyAppImage = buildRegistryImage(
|
||||||
|
t,
|
||||||
|
someReadOnlyAppName,
|
||||||
|
filepath.Join("testdata", "app-image"),
|
||||||
|
r.registry,
|
||||||
|
"--build-arg", "fromImage="+containerBaseImage,
|
||||||
|
"--build-arg", "metadata="+appMeta,
|
||||||
|
)
|
||||||
|
r.registry.SetReadOnly(someReadOnlyAppName)
|
||||||
|
|
||||||
|
someReadOnlyCacheImage := "some-read-only-cache-image-" + h.RandString(10)
|
||||||
|
fixtures.ReadOnlyCacheImage = buildRegistryImage(
|
||||||
|
t,
|
||||||
|
someReadOnlyCacheImage,
|
||||||
|
filepath.Join("testdata", "cache-image"),
|
||||||
|
r.registry,
|
||||||
|
"--build-arg", "fromImage="+containerBaseImage,
|
||||||
|
"--build-arg", "metadata="+cacheMeta,
|
||||||
|
)
|
||||||
|
r.registry.SetReadOnly(someReadOnlyCacheImage)
|
||||||
|
|
||||||
|
someRunImageName := "some-read-only-run-image-" + h.RandString(10)
|
||||||
|
buildRegistryImage(
|
||||||
|
t,
|
||||||
|
someRunImageName,
|
||||||
|
filepath.Join("testdata", "cache-image"),
|
||||||
|
r.registry,
|
||||||
|
"--build-arg", "fromImage="+containerBaseImageFull,
|
||||||
|
)
|
||||||
|
fixtures.ReadOnlyRunImage = r.registry.SetReadOnly(someRunImageName)
|
||||||
|
|
||||||
|
readWriteAppName := "some-read-write-app-image-" + h.RandString(10)
|
||||||
|
fixtures.ReadWriteAppImage = buildRegistryImage(
|
||||||
|
t,
|
||||||
|
readWriteAppName,
|
||||||
|
filepath.Join("testdata", "app-image"),
|
||||||
|
r.registry,
|
||||||
|
"--build-arg", "fromImage="+containerBaseImage,
|
||||||
|
"--build-arg", "metadata="+appMeta,
|
||||||
|
)
|
||||||
|
r.registry.SetReadWrite(readWriteAppName)
|
||||||
|
|
||||||
|
someReadWriteCacheName := "some-read-write-cache-image-" + h.RandString(10)
|
||||||
|
fixtures.ReadWriteCacheImage = buildRegistryImage(
|
||||||
|
t,
|
||||||
|
someReadWriteCacheName,
|
||||||
|
filepath.Join("testdata", "cache-image"),
|
||||||
|
r.registry,
|
||||||
|
"--build-arg", "fromImage="+containerBaseImage,
|
||||||
|
"--build-arg", "metadata="+cacheMeta,
|
||||||
|
)
|
||||||
|
r.registry.SetReadWrite(someReadWriteCacheName)
|
||||||
|
|
||||||
|
readWriteOtherAppName := "some-other-read-write-app-image-" + h.RandString(10)
|
||||||
|
fixtures.ReadWriteOtherAppImage = buildRegistryImage(
|
||||||
|
t,
|
||||||
|
readWriteOtherAppName,
|
||||||
|
filepath.Join("testdata", "app-image"),
|
||||||
|
r.registry,
|
||||||
|
"--build-arg", "fromImage="+containerBaseImage,
|
||||||
|
"--build-arg", "metadata="+appMeta,
|
||||||
|
)
|
||||||
|
r.registry.SetReadWrite(readWriteOtherAppName)
|
||||||
|
|
||||||
|
// Without Permissions
|
||||||
|
|
||||||
|
fixtures.SomeAppImage = buildRegistryImage(
|
||||||
|
t,
|
||||||
|
"some-app-image-"+h.RandString(10),
|
||||||
|
filepath.Join("testdata", "app-image"),
|
||||||
|
r.registry,
|
||||||
|
"--build-arg", "fromImage="+containerBaseImage,
|
||||||
|
"--build-arg", "metadata="+appMeta,
|
||||||
|
)
|
||||||
|
|
||||||
|
fixtures.SomeCacheImage = buildRegistryImage(
|
||||||
|
t,
|
||||||
|
"some-cache-image-"+h.RandString(10),
|
||||||
|
filepath.Join("testdata", "cache-image"),
|
||||||
|
r.registry,
|
||||||
|
"--build-arg", "fromImage="+containerBaseImage,
|
||||||
|
"--build-arg", "metadata="+cacheMeta,
|
||||||
|
)
|
||||||
|
|
||||||
|
r.fixtures = &fixtures
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *targetRegistry) stop(t *testing.T) {
|
||||||
|
r.registry.Stop(t)
|
||||||
|
os.Unsetenv("DOCKER_CONFIG")
|
||||||
|
os.RemoveAll(r.dockerConfigDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildRegistryImage(t *testing.T, repoName, context string, registry *ih.DockerRegistry, buildArgs ...string) string {
|
||||||
|
// Build image
|
||||||
|
regRepoName := registry.RepoName(repoName)
|
||||||
|
h.DockerBuild(t, regRepoName, context, h.WithArgs(buildArgs...))
|
||||||
|
|
||||||
|
// Push image
|
||||||
|
h.AssertNil(t, h.PushImage(h.DockerCli(t), regRepoName, registry.EncodedLabeledAuth()))
|
||||||
|
|
||||||
|
// Return registry repo name
|
||||||
|
return regRepoName
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanupDaemonFixtures(t *testing.T, fixtures interface{}) {
|
||||||
|
v := reflect.ValueOf(fixtures)
|
||||||
|
|
||||||
|
for i := 0; i < v.NumField(); i++ {
|
||||||
|
imageName := fmt.Sprintf("%v", v.Field(i).Interface())
|
||||||
|
if imageName == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if strings.Contains(imageName, "inaccessible") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
h.DockerImageRemove(t, imageName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func minifyMetadata(t *testing.T, path string, metadataStruct interface{}) string {
|
||||||
|
metadata, err := os.ReadFile(path)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
|
||||||
|
// Unmarshal and marshal to strip unnecessary whitespace
|
||||||
|
h.AssertNil(t, json.Unmarshal(metadata, &metadataStruct))
|
||||||
|
flatMetadata, err := json.Marshal(metadataStruct)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
|
||||||
|
return string(flatMetadata)
|
||||||
|
}
|
||||||
|
|
||||||
|
func withoutDaemonFixtures(phaseTest *PhaseTest) {
|
||||||
|
phaseTest.targetDaemon.fixtures = &daemonImageFixtures{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func withoutRegistry(phaseTest *PhaseTest) {
|
||||||
|
phaseTest.targetRegistry = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyFakeSboms(t *testing.T) {
|
||||||
|
goos := runtime.GOOS
|
||||||
|
|
||||||
|
// Check Target Daemon != runtime.GOOS
|
||||||
|
if goos == "darwin" {
|
||||||
|
goos = "linux"
|
||||||
|
}
|
||||||
|
buildLifecycleDir, err := filepath.Abs(filepath.Join("..", "out", fmt.Sprintf("%s-%s", goos, runtime.GOARCH), "lifecycle"))
|
||||||
|
if err != nil {
|
||||||
|
t.Log("Fail to locate lifecycle directory")
|
||||||
|
}
|
||||||
|
|
||||||
|
extensions := SBOMExtensions()
|
||||||
|
components := SBOMComponents()
|
||||||
|
|
||||||
|
for _, component := range components {
|
||||||
|
for _, extension := range extensions {
|
||||||
|
if err := encoding.WriteJSON(filepath.Join(buildLifecycleDir, component+extension), "fake data"); err != nil {
|
||||||
|
t.Log("Fail to write:" + component + extension)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func SBOMExtensions() []string {
|
||||||
|
return []string{".sbom.cdx.json", ".sbom.spdx.json", ".sbom.syft.json"}
|
||||||
|
}
|
||||||
|
|
||||||
|
func SBOMComponents() []string {
|
||||||
|
return []string{"lifecycle", "launcher"}
|
||||||
|
}
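copyFakeSboms above writes the cross product of these components and extensions into the build output directory. A standalone sketch (assuming only the standard library) that lists the six resulting file names:

package main

import "fmt"

func main() {
	components := []string{"lifecycle", "launcher"}
	extensions := []string{".sbom.cdx.json", ".sbom.spdx.json", ".sbom.syft.json"}
	for _, c := range components {
		for _, e := range extensions {
			fmt.Println(c + e) // e.g., lifecycle.sbom.cdx.json
		}
	}
}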
|
||||||
|
|
||||||
|
func assertImageOSAndArch(t *testing.T, imageName string, phaseTest *PhaseTest) { //nolint - these functions are in fact used, I promise
|
||||||
|
inspect, err := h.DockerCli(t).ImageInspect(context.TODO(), imageName)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, inspect.Os, phaseTest.targetDaemon.os)
|
||||||
|
h.AssertEq(t, inspect.Architecture, phaseTest.targetDaemon.arch)
|
||||||
|
}
|
||||||
|
|
||||||
|
func assertImageOSAndArchAndCreatedAt(t *testing.T, imageName string, phaseTest *PhaseTest, expectedCreatedAt time.Time) { //nolint
|
||||||
|
inspect, err := h.DockerCli(t).ImageInspect(context.TODO(), imageName)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
list, _ := h.DockerCli(t).ImageList(context.TODO(), image.ListOptions{})
|
||||||
|
fmt.Println("Error encountered running ImageInspectWithRaw. imageName: ", imageName)
|
||||||
|
fmt.Println(err)
|
||||||
|
for _, value := range list {
|
||||||
|
fmt.Println("Image Name: ", value)
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.Contains(err.Error(), "No such image") {
|
||||||
|
t.Log("Image not found, retrying...")
|
||||||
|
time.Sleep(1 * time.Second)
|
||||||
|
inspect, err = h.DockerCli(t).ImageInspect(context.TODO(), imageName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, inspect.Os, phaseTest.targetDaemon.os)
|
||||||
|
h.AssertEq(t, inspect.Architecture, phaseTest.targetDaemon.arch)
|
||||||
|
h.AssertEq(t, inspect.Created, expectedCreatedAt.Format(time.RFC3339))
|
||||||
|
}
|
||||||
|
|
||||||
|
func assertRunMetadata(t *testing.T, path string) *files.Run { //nolint
|
||||||
|
contents, err := os.ReadFile(path)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, len(contents) > 0, true)
|
||||||
|
|
||||||
|
runMD, err := files.Handler.ReadRun(path, cmd.DefaultLogger)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
|
||||||
|
return &runMD
|
||||||
|
}
|
||||||
|
|
||||||
|
func updateTOMLFixturesWithTestRegistry(t *testing.T, phaseTest *PhaseTest) { //nolint
|
||||||
|
analyzedTOMLPlaceholders := []string{
|
||||||
|
filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "analyzed.toml.placeholder"),
|
||||||
|
filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "run-image-extended-analyzed.toml.placeholder"),
|
||||||
|
filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "some-analyzed.toml.placeholder"),
|
||||||
|
filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "some-extend-false-analyzed.toml.placeholder"),
|
||||||
|
filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "some-extend-true-analyzed.toml.placeholder"),
|
||||||
|
filepath.Join(phaseTest.testImageDockerContext, "container", "other_layers", "analyzed.toml.placeholder"),
|
||||||
|
}
|
||||||
|
runTOMLPlaceholders := []string{
|
||||||
|
filepath.Join(phaseTest.testImageDockerContext, "container", "cnb", "run.toml.placeholder"),
|
||||||
|
}
|
||||||
|
layoutPlaceholders := []string{
|
||||||
|
filepath.Join(phaseTest.testImageDockerContext, "container", "layers", "layout-analyzed.toml.placeholder"),
|
||||||
|
}
|
||||||
|
for _, pPath := range analyzedTOMLPlaceholders {
|
||||||
|
if _, err := os.Stat(pPath); os.IsNotExist(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
analyzedMD := assertAnalyzedMetadata(t, pPath)
|
||||||
|
if analyzedMD.RunImage != nil {
|
||||||
|
analyzedMD.RunImage.Reference = phaseTest.targetRegistry.fixtures.ReadOnlyRunImage // don't override extend
|
||||||
|
if analyzedMD.RunImage.Image == "REPLACE" {
|
||||||
|
analyzedMD.RunImage.Image = phaseTest.targetRegistry.fixtures.ReadOnlyRunImage
|
||||||
|
}
|
||||||
|
}
|
||||||
|
h.AssertNil(t, encoding.WriteTOML(strings.TrimSuffix(pPath, ".placeholder"), analyzedMD))
|
||||||
|
}
|
||||||
|
for _, pPath := range runTOMLPlaceholders {
|
||||||
|
if _, err := os.Stat(pPath); os.IsNotExist(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
runMD := assertRunMetadata(t, pPath)
|
||||||
|
for idx, image := range runMD.Images {
|
||||||
|
image.Image = phaseTest.targetRegistry.fixtures.ReadOnlyRunImage
|
||||||
|
runMD.Images[idx] = image
|
||||||
|
}
|
||||||
|
h.AssertNil(t, encoding.WriteTOML(strings.TrimSuffix(pPath, ".placeholder"), runMD))
|
||||||
|
}
|
||||||
|
for _, pPath := range layoutPlaceholders {
|
||||||
|
if _, err := os.Stat(pPath); os.IsNotExist(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
analyzedMD := assertAnalyzedMetadata(t, pPath)
|
||||||
|
if analyzedMD.RunImage != nil {
|
||||||
|
// Values from image acceptance/testdata/exporter/container/layout-repo in OCI layout format
|
||||||
|
analyzedMD.RunImage = &files.RunImage{Reference: "/layout-repo/index.docker.io/library/busybox/latest@sha256:445c45cc89fdeb64b915b77f042e74ab580559b8d0d5ef6950be1c0265834c33"}
|
||||||
|
}
|
||||||
|
h.AssertNil(t, encoding.WriteTOML(strings.TrimSuffix(pPath, ".placeholder"), analyzedMD))
|
||||||
|
}
|
||||||
|
}
|
|
@@ -0,0 +1,58 @@
//go:build acceptance

package acceptance

import (
	"path/filepath"
	"testing"

	"github.com/sclevine/spec"
	"github.com/sclevine/spec/report"

	"github.com/buildpacks/lifecycle/api"
	h "github.com/buildpacks/lifecycle/testhelpers"
)

var (
	rebaserTest  *PhaseTest
	rebaserPath  string
	rebaserImage string
)

func TestRebaser(t *testing.T) {
	testImageDockerContextFolder := filepath.Join("testdata", "rebaser")
	rebaserTest = NewPhaseTest(t, "rebaser", testImageDockerContextFolder)
	rebaserTest.Start(t, updateTOMLFixturesWithTestRegistry)
	defer rebaserTest.Stop(t)

	rebaserImage = rebaserTest.testImageRef
	rebaserPath = rebaserTest.containerBinaryPath

	for _, platformAPI := range api.Platform.Supported {
		spec.Run(t, "acceptance-rebaser/"+platformAPI.String(), testRebaser(platformAPI.String()), spec.Sequential(), spec.Report(report.Terminal{}))
	}
}

func testRebaser(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
	return func(t *testing.T, when spec.G, it spec.S) {
		when("called with insecure registry flag", func() {
			it.Before(func() {
				h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
			})
			it("should do an http request", func() {
				insecureRegistry := "host.docker.internal"
				rebaserOutputImageName := insecureRegistry + "/bar"
				_, _, err := h.DockerRunWithError(t,
					rebaserImage,
					h.WithFlags(
						"--env", "CNB_PLATFORM_API="+platformAPI,
						"--env", "CNB_INSECURE_REGISTRIES="+insecureRegistry,
					),
					h.WithArgs(ctrPath(rebaserPath), rebaserOutputImageName),
				)

				h.AssertStringContains(t, err.Error(), "http://host.docker.internal")
			})
		})
	}
}
@@ -0,0 +1,338 @@
|
||||||
|
//go:build acceptance
|
||||||
|
|
||||||
|
package acceptance
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/name"
|
||||||
|
"github.com/sclevine/spec"
|
||||||
|
"github.com/sclevine/spec/report"
|
||||||
|
|
||||||
|
"github.com/buildpacks/lifecycle/api"
|
||||||
|
"github.com/buildpacks/lifecycle/cmd"
|
||||||
|
"github.com/buildpacks/lifecycle/platform/files"
|
||||||
|
h "github.com/buildpacks/lifecycle/testhelpers"
|
||||||
|
)
|
||||||
|
|
||||||
|
const emptyImageSHA = "03cbce912ef1a8a658f73c660ab9c539d67188622f00b15c4f15b89b884f0e10"
|
||||||
|
|
||||||
|
var (
|
||||||
|
restoreImage string
|
||||||
|
restoreRegAuthConfig string
|
||||||
|
restoreRegNetwork string
|
||||||
|
restorerPath string
|
||||||
|
restoreDaemonFixtures *daemonImageFixtures
|
||||||
|
restoreRegFixtures *regImageFixtures
|
||||||
|
restoreTest *PhaseTest
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRestorer(t *testing.T) {
|
||||||
|
testImageDockerContext := filepath.Join("testdata", "restorer")
|
||||||
|
restoreTest = NewPhaseTest(t, "restorer", testImageDockerContext)
|
||||||
|
restoreTest.Start(t, updateTOMLFixturesWithTestRegistry)
|
||||||
|
defer restoreTest.Stop(t)
|
||||||
|
|
||||||
|
restoreImage = restoreTest.testImageRef
|
||||||
|
restorerPath = restoreTest.containerBinaryPath
|
||||||
|
restoreRegAuthConfig = restoreTest.targetRegistry.authConfig
|
||||||
|
restoreRegNetwork = restoreTest.targetRegistry.network
|
||||||
|
restoreDaemonFixtures = restoreTest.targetDaemon.fixtures
|
||||||
|
restoreRegFixtures = restoreTest.targetRegistry.fixtures
|
||||||
|
|
||||||
|
for _, platformAPI := range api.Platform.Supported {
|
||||||
|
spec.Run(t, "acceptance-restorer/"+platformAPI.String(), testRestorerFunc(platformAPI.String()), spec.Parallel(), spec.Report(report.Terminal{}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testRestorerFunc(platformAPI string) func(t *testing.T, when spec.G, it spec.S) {
|
||||||
|
return func(t *testing.T, when spec.G, it spec.S) {
|
||||||
|
var copyDir, containerName string
|
||||||
|
it.Before(func() {
|
||||||
|
containerName = "test-container-" + h.RandString(10)
|
||||||
|
var err error
|
||||||
|
copyDir, err = os.MkdirTemp("", "test-docker-copy-")
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.After(func() {
|
||||||
|
if h.DockerContainerExists(t, containerName) {
|
||||||
|
h.Run(t, exec.Command("docker", "rm", containerName))
|
||||||
|
}
|
||||||
|
_ = os.RemoveAll(copyDir)
|
||||||
|
})
|
||||||
|
|
||||||
|
when("called with arguments", func() {
|
||||||
|
it("errors", func() {
|
||||||
|
command := exec.Command("docker", "run", "--rm", "--env", "CNB_PLATFORM_API="+platformAPI, restoreImage, "some-arg")
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNotNil(t, err)
|
||||||
|
expected := "failed to parse arguments: received unexpected Args"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("called without any cache flag", func() {
|
||||||
|
it("outputs it will not restore cache layer data", func() {
|
||||||
|
command := exec.Command("docker", "run", "--rm", "--env", "CNB_PLATFORM_API="+platformAPI, restoreImage)
|
||||||
|
output, err := command.CombinedOutput()
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
expected := "No cached data will be used, no cache specified"
|
||||||
|
h.AssertStringContains(t, string(output), expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("analyzed.toml exists with app metadata", func() {
|
||||||
|
it("restores app metadata", func() {
|
||||||
|
output := h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
ctrPath("/layers"),
|
||||||
|
restoreImage,
|
||||||
|
h.WithFlags(append(
|
||||||
|
dockerSocketMount,
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
)...),
|
||||||
|
h.WithArgs(),
|
||||||
|
)
|
||||||
|
|
||||||
|
h.AssertStringContains(t, output, "Restoring metadata for \"some-buildpack-id:launch-layer\"")
|
||||||
|
})
|
||||||
|
|
||||||
|
when("restores app metadata using an insecure registry", func() {
|
||||||
|
it.Before(func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "")
|
||||||
|
})
|
||||||
|
it("does an http request ", func() {
|
||||||
|
insecureRegistry := "host.docker.internal"
|
||||||
|
|
||||||
|
_, _, err := h.DockerRunWithError(t,
|
||||||
|
restoreImage,
|
||||||
|
h.WithFlags(append(
|
||||||
|
dockerSocketMount,
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "CNB_INSECURE_REGISTRIES="+insecureRegistry,
|
||||||
|
"--env", "CNB_BUILD_IMAGE="+insecureRegistry+"/bar",
|
||||||
|
)...),
|
||||||
|
)
|
||||||
|
|
||||||
|
h.AssertStringContains(t, err.Error(), "http://host.docker.internal")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("using cache-dir", func() {
|
||||||
|
when("there is cache present from a previous build", func() {
|
||||||
|
it("restores cached layer data", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
"/layers",
|
||||||
|
restoreImage,
|
||||||
|
h.WithFlags("--env", "CNB_PLATFORM_API="+platformAPI),
|
||||||
|
h.WithArgs("-cache-dir", "/cache"),
|
||||||
|
)
|
||||||
|
|
||||||
|
// check restored cache file is present
|
||||||
|
cachedFile := filepath.Join(copyDir, "layers", "cacher_buildpack", "cached-layer", "data")
|
||||||
|
h.AssertPathExists(t, cachedFile)
|
||||||
|
|
||||||
|
// check restored cache file content is correct
|
||||||
|
contents, err := os.ReadFile(cachedFile)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, string(contents), "cached-data\n")
|
||||||
|
})
|
||||||
|
|
||||||
|
it("does not restore cache=true layers not in cache", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
"/layers",
|
||||||
|
restoreImage,
|
||||||
|
h.WithFlags("--env", "CNB_PLATFORM_API="+platformAPI),
|
||||||
|
h.WithArgs("-cache-dir", "/cache"),
|
||||||
|
)
|
||||||
|
|
||||||
|
// check uncached layer is not restored
|
||||||
|
uncachedFile := filepath.Join(copyDir, "layers", "cacher_buildpack", "uncached-layer")
|
||||||
|
h.AssertPathDoesNotExist(t, uncachedFile)
|
||||||
|
})
|
||||||
|
|
||||||
|
it("does not restore layer data from unused buildpacks", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
"/layers",
|
||||||
|
restoreImage,
|
||||||
|
h.WithFlags("--env", "CNB_PLATFORM_API="+platformAPI),
|
||||||
|
h.WithArgs("-cache-dir", "/cache"),
|
||||||
|
)
|
||||||
|
|
||||||
|
// check no content is not present from unused buildpack
|
||||||
|
unusedBpLayer := filepath.Join(copyDir, "layers", "unused_buildpack")
|
||||||
|
h.AssertPathDoesNotExist(t, unusedBpLayer)
|
||||||
|
})
|
||||||
|
|
||||||
|
it("does not restore corrupted layer data", func() {
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
"/layers",
|
||||||
|
restoreImage,
|
||||||
|
h.WithFlags("--env", "CNB_PLATFORM_API="+platformAPI),
|
||||||
|
h.WithArgs("-cache-dir", "/cache"),
|
||||||
|
)
|
||||||
|
|
||||||
|
// check corrupted layer is not restored
|
||||||
|
corruptedFile := filepath.Join(copyDir, "layers", "corrupted_buildpack", "corrupted-layer")
|
||||||
|
h.AssertPathDoesNotExist(t, corruptedFile)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("restoring builder image metadata for extensions", func() {
|
||||||
|
it("accepts -build-image and saves the metadata to /kaniko/cache", func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.10"), "Platform API < 0.10 does not restore builder image metadata")
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
"/",
|
||||||
|
restoreImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "DOCKER_CONFIG=/docker-config",
|
||||||
|
"--network", restoreRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithArgs("-build-image", restoreRegFixtures.SomeCacheImage), // some-cache-image simulates a builder image in a registry
|
||||||
|
)
|
||||||
|
t.Log("records builder image digest in analyzed.toml")
|
||||||
|
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "analyzed.toml"), cmd.DefaultLogger)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertStringContains(t, analyzedMD.BuildImage.Reference, restoreRegFixtures.SomeCacheImage+"@sha256:")
|
||||||
|
t.Log("writes builder manifest and config to the kaniko cache")
|
||||||
|
ref, err := name.ParseReference(analyzedMD.BuildImage.Reference)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
fis, err := os.ReadDir(filepath.Join(copyDir, "kaniko", "cache", "base"))
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, len(fis), 1)
|
||||||
|
h.AssertPathExists(t, filepath.Join(copyDir, "kaniko", "cache", "base", ref.Identifier(), "oci-layout"))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("restoring run image metadata for extensions", func() {
|
||||||
|
it("saves metadata to /kaniko/cache", func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not restore run image metadata")
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
"/",
|
||||||
|
restoreImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "DOCKER_CONFIG=/docker-config",
|
||||||
|
"--network", restoreRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithArgs(
|
||||||
|
"-analyzed", "/layers/some-extend-true-analyzed.toml",
|
||||||
|
"-log-level", "debug",
|
||||||
|
),
|
||||||
|
)
|
||||||
|
t.Log("updates run image reference in analyzed.toml to include digest and target data")
|
||||||
|
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "some-extend-true-analyzed.toml"), cmd.DefaultLogger)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertStringContains(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:")
|
||||||
|
h.AssertEq(t, analyzedMD.RunImage.Image, restoreRegFixtures.ReadOnlyRunImage)
|
||||||
|
h.AssertEq(t, analyzedMD.RunImage.TargetMetadata.OS, "linux")
|
||||||
|
t.Log("does not return the digest for an empty image")
|
||||||
|
h.AssertStringDoesNotContain(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:"+emptyImageSHA)
|
||||||
|
t.Log("writes run image manifest and config to the kaniko cache")
|
||||||
|
ref, err := name.ParseReference(analyzedMD.RunImage.Reference)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
fis, err := os.ReadDir(filepath.Join(copyDir, "kaniko", "cache", "base"))
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, len(fis), 1)
|
||||||
|
h.AssertPathExists(t, filepath.Join(copyDir, "kaniko", "cache", "base", ref.Identifier(), "oci-layout"))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
when("target data", func() {
|
||||||
|
it("updates run image reference in analyzed.toml to include digest and target data on newer platforms", func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.10"), "")
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
"/",
|
||||||
|
restoreImage,
|
||||||
|
h.WithFlags(
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "DOCKER_CONFIG=/docker-config",
|
||||||
|
"--network", restoreRegNetwork,
|
||||||
|
),
|
||||||
|
h.WithArgs(
|
||||||
|
"-analyzed", "/layers/some-extend-false-analyzed.toml",
|
||||||
|
"-log-level", "debug",
|
||||||
|
),
|
||||||
|
)
|
||||||
|
if api.MustParse(platformAPI).AtLeast("0.12") {
|
||||||
|
t.Log("updates run image reference in analyzed.toml to include digest and target data")
|
||||||
|
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "some-extend-false-analyzed.toml"), cmd.DefaultLogger)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertStringContains(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:")
|
||||||
|
h.AssertEq(t, analyzedMD.RunImage.Image, restoreRegFixtures.ReadOnlyRunImage)
|
||||||
|
h.AssertEq(t, analyzedMD.RunImage.TargetMetadata.OS, "linux")
|
||||||
|
t.Log("does not return the digest for an empty image")
|
||||||
|
h.AssertStringDoesNotContain(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:"+emptyImageSHA)
|
||||||
|
t.Log("does not write run image manifest and config to the kaniko cache")
|
||||||
|
fis, err := os.ReadDir(filepath.Join(copyDir, "kaniko"))
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, len(fis), 1) // .gitkeep
|
||||||
|
} else {
|
||||||
|
t.Log("updates run image reference in analyzed.toml to include digest only")
|
||||||
|
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "some-extend-false-analyzed.toml"), cmd.DefaultLogger)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertStringContains(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:")
|
||||||
|
h.AssertEq(t, analyzedMD.RunImage.Image, restoreRegFixtures.ReadOnlyRunImage)
|
||||||
|
h.AssertNil(t, analyzedMD.RunImage.TargetMetadata)
|
||||||
|
t.Log("does not return the digest for an empty image")
|
||||||
|
h.AssertStringDoesNotContain(t, analyzedMD.RunImage.Reference, restoreRegFixtures.ReadOnlyRunImage+"@sha256:"+emptyImageSHA)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
when("-daemon", func() {
|
||||||
|
it("updates run image reference in analyzed.toml to include digest and target data on newer platforms", func() {
|
||||||
|
h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not support -daemon flag")
|
||||||
|
h.DockerRunAndCopy(t,
|
||||||
|
containerName,
|
||||||
|
copyDir,
|
||||||
|
"/",
|
||||||
|
restoreImage,
|
||||||
|
h.WithFlags(append(
|
||||||
|
dockerSocketMount,
|
||||||
|
"--env", "CNB_PLATFORM_API="+platformAPI,
|
||||||
|
"--env", "DOCKER_CONFIG=/docker-config",
|
||||||
|
"--network", restoreRegNetwork,
|
||||||
|
)...),
|
||||||
|
h.WithArgs(
|
||||||
|
"-analyzed", "/layers/some-extend-false-analyzed.toml",
|
||||||
|
"-daemon",
|
||||||
|
"-log-level", "debug",
|
||||||
|
),
|
||||||
|
)
|
||||||
|
t.Log("updates run image reference in analyzed.toml to include digest and target data")
|
||||||
|
analyzedMD, err := files.Handler.ReadAnalyzed(filepath.Join(copyDir, "layers", "some-extend-false-analyzed.toml"), cmd.DefaultLogger)
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertStringDoesNotContain(t, analyzedMD.RunImage.Reference, "@sha256:") // daemon image ID
|
||||||
|
h.AssertEq(t, analyzedMD.RunImage.Image, restoreRegFixtures.ReadOnlyRunImage)
|
||||||
|
h.AssertEq(t, analyzedMD.RunImage.TargetMetadata.OS, "linux")
|
||||||
|
t.Log("does not write run image manifest and config to the kaniko cache")
|
||||||
|
fis, err := os.ReadDir(filepath.Join(copyDir, "kaniko"))
|
||||||
|
h.AssertNil(t, err)
|
||||||
|
h.AssertEq(t, len(fis), 1) // .gitkeep
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
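(Note, not part of the diff: the acceptance tests above are guarded by the `acceptance` build tag, so a plain `go test ./...` skips them. A minimal sketch of running just the restorer suite locally, assuming Docker is available and standard Go tooling; the repository's Makefile may wrap this differently or add extra flags:

    # Hypothetical invocation; -tags acceptance enables files guarded by //go:build acceptance,
    # and -run narrows execution to the TestRestorer suite defined above.
    go test -v -count=1 -tags acceptance -run TestRestorer ./acceptance/
)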
@@ -0,0 +1,21 @@
FROM ubuntu:bionic

RUN apt-get update && apt-get install -y ca-certificates

COPY container /

WORKDIR /layers

ENV CNB_USER_ID=2222

ENV CNB_GROUP_ID=3333

ARG cnb_platform_api
ENV CNB_PLATFORM_API=${cnb_platform_api}

RUN chown -R $CNB_USER_ID:$CNB_GROUP_ID /some-dir

RUN chown -R $CNB_USER_ID:$CNB_GROUP_ID /layers

# ensure docker config directory is root owned and NOT world readable
RUN chown -R root /docker-config; chmod -R 700 /docker-config
@@ -0,0 +1,5 @@
[[order]]

[[order.group]]
id = "simple_buildpack"
version = "simple_buildpack_version"

@@ -0,0 +1,2 @@
[run-image]
image = "some-run-image"

@@ -0,0 +1,5 @@
[[images]]
image = "some-run-image-from-run-toml"

[[images]]
image = "some-other-run-image"

@@ -0,0 +1,4 @@
[[group]]
id = "some-buildpack-id"
version = "some-buildpack-version"
api = "0.10"

@@ -0,0 +1,4 @@
[[group]]
id = "some-other-buildpack-id"
version = "some-other-buildpack-version"
api = "0.10"

@@ -0,0 +1,4 @@
[[group]]
id = "some-other-buildpack-id"
version = "some-other-buildpack-version"
api = "0.1"
@@ -0,0 +1 @@
{"architecture":"amd64","config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["sh"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"65aaed1d1f89cd3cd5aac9137c4786831e99a845ad823496c6008a22a725c780","container_config":{"Hostname":"65aaed1d1f89","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"sh\"]"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"created":"2022-11-18T01:19:29.442257773Z","docker_version":"20.10.12","history":[{"created":"2022-11-18T01:19:29.321465538Z","created_by":"/bin/sh -c #(nop) ADD file:36d9f497f679d56737ac1379d93f7b6a2e4c814e38e868a5a8e719c4b226ef6e in / "},{"created":"2022-11-18T01:19:29.442257773Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:40cf597a9181e86497f4121c604f9f0ab208950a98ca21db883f26b0a548a2eb"]}}

@@ -0,0 +1,16 @@
{
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
  "config": {
    "mediaType": "application/vnd.docker.container.image.v1+json",
    "size": 1457,
    "digest": "sha256:9d5226e6ce3fb6aee2822206a5ef85f38c303d2b37bfc894b419fca2c0501269"
  },
  "layers": [
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "size": 772999,
      "digest": "sha256:405fecb6a2fa4f29683f977e7e3b852bf6f8975a2aba647d234d2371894943da"
    }
  ]
}
14 acceptance/testdata/analyzer/container/layout-repo/index.docker.io/library/busybox/latest/index.json vendored Executable file
@@ -0,0 +1,14 @@
{
  "schemaVersion": 2,
  "manifests": [
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "size": 527,
      "digest": "sha256:f75f3d1a317fc82c793d567de94fc8df2bece37acd5f2bd364a0d91a0d1f3dab",
      "platform": {
        "architecture": "amd64",
        "os": "linux"
      }
    }
  ]
}

3 acceptance/testdata/analyzer/container/layout-repo/index.docker.io/library/busybox/latest/oci-layout vendored Executable file
@@ -0,0 +1,3 @@
{
  "imageLayoutVersion": "1.0.0"
}
@@ -0,0 +1,4 @@
[[group]]
id = "another-buildpack-id"
version = "another-buildpack-version"
api = "0.10"
@@ -0,0 +1,7 @@
ARG fromImage

FROM $fromImage

ARG metadata

LABEL io.buildpacks.lifecycle.metadata=$metadata
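(Note, not part of the diff: the Dockerfile above only takes a base image and a metadata string as build args and stamps the latter onto the io.buildpacks.lifecycle.metadata label, which is how the test harness fakes a previously exported app image. The harness that actually builds this fixture lives elsewhere in the test suite; the following is only an illustrative docker CLI invocation, with a hypothetical tag name and metadata file path:

    # Illustrative only: bake the JSON fixture below into the label.
    # "restorer-fixture-app-image" and "app_image_metadata.json" are hypothetical names.
    docker build \
      --build-arg fromImage=ubuntu:bionic \
      --build-arg metadata="$(cat app_image_metadata.json)" \
      -t restorer-fixture-app-image .
)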
@@ -0,0 +1,54 @@
{
  "buildpacks": [
    {
      "key": "some-buildpack-id",
      "layers": {
        "launch-layer": {
          "data": {
            "launch-key": "launch-value"
          },
          "launch": true,
          "sha": "launch-sha"
        },
        "launch-build-layer": {
          "build": true,
          "data": {
            "launch-build-key": "launch-build-value"
          },
          "launch": true,
          "sha": "launch-build-sha"
        },
        "launch-build-cache-layer": {
          "build": true,
          "cache": true,
          "data": {
            "launch-build-cache-key": "launch-build-cache-value"
          },
          "launch": true,
          "sha": "launch-build-cache-sha"
        },
        "launch-cache-layer": {
          "cache": true,
          "data": {
            "launch-cache-key": "launch-cache-value"
          },
          "launch": true,
          "sha": "launch-cache-sha"
        },
        "launch-false-layer": {
          "data": {
            "launch-false-key": "launch-false-value"
          },
          "sha": "launch-false-sha"
        }
      },
      "store": {
        "metadata": {
          "metadata-buildpack-store-data": {
            "store-key": "store-val"
          }
        }
      }
    }
  ]
}
@@ -0,0 +1,17 @@
FROM ubuntu:bionic

ARG cnb_uid=1234
ARG cnb_gid=1000

COPY ./container/ /

ENTRYPOINT ["/cnb/lifecycle/builder"]

RUN groupadd cnb --gid ${cnb_gid} && \
  useradd --uid ${cnb_uid} --gid ${cnb_gid} -m -s /bin/bash cnb

RUN chown -R "${cnb_uid}:${cnb_gid}" "/layers"

WORKDIR /layers

USER ${cnb_uid}:${cnb_gid}
35 acceptance/testdata/builder/container/cnb/buildpacks/hello_world/0.0.1/bin/build vendored Executable file
@@ -0,0 +1,35 @@
#!/usr/bin/env bash
set -eo pipefail

echo "---> Hello World buildpack"

# INPUT ARGUMENTS
platform_dir=$2
env_dir=${platform_dir}/env
layers_dir=$1
plan_path=$3

# CNB_APP_DIR
echo "CNB_APP_DIR: ${PWD}"

# PLATFORM DIR
echo "PLATFORM_DIR: ${platform_dir}"

# LAYERS
echo "LAYERS_DIR: ${layers_dir}"

# PLAN
echo "PLAN_PATH: ${plan_path}"
echo "plan contents:"
cat ${plan_path}
echo

# Set default start command
cat > "${layers_dir}/launch.toml" << EOL
[[processes]]
type = "hello"
command = ["echo world"]
args = ["arg1"]
EOL

echo "---> Done"
7 acceptance/testdata/builder/container/cnb/buildpacks/hello_world/0.0.1/buildpack.toml vendored Normal file
@@ -0,0 +1,7 @@
# Buildpack API version
api = "0.10"

# Buildpack ID and metadata
[buildpack]
id = "hello_world"
version = "0.0.1"
27 acceptance/testdata/builder/container/cnb/buildpacks/hello_world_2/0.0.2/bin/build vendored Executable file
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
set -eo pipefail

echo "---> Hello World 2 buildpack"

# INPUT ARGUMENTS
platform_dir=$2
env_dir=${platform_dir}/env
layers_dir=$1
plan_path=$3

# CNB_APP_DIR
echo "CNB_APP_DIR: ${PWD}"

# PLATFORM DIR
echo "PLATFORM_DIR: ${platform_dir}"

# LAYERS
echo "LAYERS_DIR: ${layers_dir}"

# PLAN
echo "PLAN_PATH: ${plan_path}"
echo "plan contents:"
cat ${plan_path}
echo

echo "---> Done"
7 acceptance/testdata/builder/container/cnb/buildpacks/hello_world_2/0.0.2/buildpack.toml vendored Normal file
@@ -0,0 +1,7 @@
# Buildpack API version
api = "0.10"

# Buildpack ID and metadata
[buildpack]
id = "hello_world_2"
version = "0.0.2"
33 acceptance/testdata/builder/container/cnb/buildpacks/hello_world_3/0.0.3/bin/build vendored Executable file
@@ -0,0 +1,33 @@
#!/usr/bin/env bash
set -eo pipefail

echo "---> Hello World 3 buildpack"

# INPUT ARGUMENTS
platform_dir=$2
env_dir=${platform_dir}/env
layers_dir=$1
plan_path=$3

# CNB_APP_DIR
echo "CNB_APP_DIR: ${PWD}"

# PLATFORM DIR
echo "PLATFORM_DIR: ${platform_dir}"

# LAYERS
echo "LAYERS_DIR: ${layers_dir}"

# PLAN
echo "PLAN_PATH: ${plan_path}"
echo "plan contents:"
cat ${plan_path}
echo

echo "CNB_TARGET_ARCH:" `printenv CNB_TARGET_ARCH`
echo "CNB_TARGET_ARCH_VARIANT:" `printenv CNB_TARGET_ARCH_VARIANT`
echo "CNB_TARGET_OS:" `printenv CNB_TARGET_OS`
echo "CNB_TARGET_DISTRO_NAME:" `printenv CNB_TARGET_DISTRO_NAME`
echo "CNB_TARGET_DISTRO_VERSION:" `printenv CNB_TARGET_DISTRO_VERSION`

echo "---> Done"
7 acceptance/testdata/builder/container/cnb/buildpacks/hello_world_3/0.0.3/buildpack.toml vendored Normal file
@@ -0,0 +1,7 @@
# Buildpack API version
api = "0.10"

# Buildpack ID and metadata
[buildpack]
id = "hello_world_3"
version = "0.0.3"
4 acceptance/testdata/builder/container/cnb/group_tomls/always_detect_group.toml vendored Normal file
@@ -0,0 +1,4 @@
[[group]]
api = "0.10"
id = "hello_world"
version = "0.0.1"

4 acceptance/testdata/builder/container/cnb/group_tomls/always_detect_group_buildpack2.toml vendored Normal file
@@ -0,0 +1,4 @@
[[group]]
api = "0.10"
id = "hello_world_2"
version = "0.0.2"
@@ -0,0 +1,9 @@
[[group]]
api = "0.10"
id = "hello_world"
version = "0.0.1"

[[group-extensions]]
api = "0.10"
id = "hello_world"
version = "0.0.1"
5 acceptance/testdata/builder/container/cnb/group_tomls/invalid_buildpack_api_group.toml vendored Normal file
@@ -0,0 +1,5 @@
[[group]]
api = "0.2wrongapiblabla"
id = "hello_world"
version = "0.0.1"
@@ -0,0 +1,3 @@
I am a group.toml file which not correct as context and syntax.

For more info please look at https://github.com/buildpacks/spec/blob/main/platform.md#grouptoml-toml
10 acceptance/testdata/builder/container/cnb/plan_tomls/always_detect_plan.toml vendored Normal file
@@ -0,0 +1,10 @@
[[entries]]

[[entries.providers]]
id = "hello_world"
version = "0.0.1"

[[entries.requires]]
name = "different_plan_from_env.toml_reqires_subset_content"
[entries.requires.metadata]
# arbitrary data describing the required dependency
10 acceptance/testdata/builder/container/cnb/plan_tomls/always_detect_plan_buildpack_2.toml vendored Normal file
@@ -0,0 +1,10 @@
[[entries]]

[[entries.providers]]
id = "hello_world_2"
version = "0.0.2"

[[entries.requires]]
name = "different_plan_from_env.toml_reqires_subset_content"
[entries.requires.metadata]
# arbitrary data describing the required dependency
10 acceptance/testdata/builder/container/cnb/plan_tomls/always_detect_plan_buildpack_3.toml vendored Normal file
@@ -0,0 +1,10 @@
[[entries]]

[[entries.providers]]
id = "hello_world_3"
version = "0.0.3"

[[entries.requires]]
name = "03_plan.toml_requires_subset_content_idk"
[entries.requires.metadata]
# arbitrary data describing the required dependency
11 acceptance/testdata/builder/container/cnb/plan_tomls/different_plan_from_env.toml vendored Normal file
@@ -0,0 +1,11 @@
[[entries]]

[[entries.providers]]
id = "hello_world"
version = "0.0.1"

[[entries.requires]]
name = "different_plan_from_env.toml_reqires_subset_content"
[entries.requires.metadata]
# arbitrary data describing the required dependency
@@ -0,0 +1,3 @@
I am a plan.toml file which not correct as context and syntax.

For more info please look at https://github.com/buildpacks/spec/blob/main/platform.md#plantoml-toml
13 acceptance/testdata/builder/container/env_folders/different_buildpack_dir_from_env/hello_world/0.0.1/bin/build vendored Executable file
@@ -0,0 +1,13 @@
#!/usr/bin/env bash
set -eo pipefail

echo "---> Hello World buildpack"

# INPUT ARGUMENTS
platform_dir=$2
env_dir=${platform_dir}/env
layers_dir=$1
plan_path=$3

# acceptance test
echo "CNB_BUILDPACK_DIR: ${CNB_BUILDPACK_DIR}"
@@ -0,0 +1,7 @@
# Buildpack API version
api = "0.10"

# Buildpack ID and metadata
[buildpack]
id = "hello_world"
version = "0.0.1"
0 acceptance/testdata/builder/container/env_folders/different_cnb_app_dir_from_env/.gitkeep vendored Normal file
0 acceptance/testdata/builder/container/env_folders/different_platform_dir_from_env/.gitkeep vendored Normal file
@@ -0,0 +1,9 @@
[run-image.target]
id = "my id"
os = "linux"
arch = "amd64"
arch-variant = "some-variant"
[run-image.target.distro]
name = "ubuntu"
version = "some-cute-version"
@@ -0,0 +1,4 @@
[[group]]
api = "0.10"
id = "hello_world_3"
version = "0.0.3"
@@ -0,0 +1,6 @@
[[entries]]

[[entries.providers]]
id = "hello_world_3"
version = "0.0.3"
@@ -0,0 +1,6 @@
[run-image]
[target]
id = "software"
os = "linux"
arch = "amd64"
4 acceptance/testdata/builder/container/layers/different_layer_dir_from_env/group.toml vendored Normal file
@@ -0,0 +1,4 @@
[[group]]
api = "0.10"
id = "hello_world_2"
version = "0.0.2"
6 acceptance/testdata/builder/container/layers/different_layer_dir_from_env/plan.toml vendored Normal file
@@ -0,0 +1,6 @@
[[entries]]

[[entries.providers]]
id = "hello_world_2"
version = "0.0.2"
40 acceptance/testdata/cache-dir/committed/io.buildpacks.lifecycle.cache.metadata vendored Normal file
@@ -0,0 +1,40 @@
{
  "buildpacks": [
    {
      "key": "some-buildpack-id",
      "layers": {
        "some-layer": {
          "cache": true,
          "data": {
            "some-layer-key": "some-layer-value"
          },
          "sha": "some-layer-sha"
        }
      }
    },
    {
      "key": "some-other-buildpack-id",
      "layers": {
        "some-layer": {
          "cache": true,
          "data": {
            "some-layer-key": "some-layer-value"
          },
          "sha": "some-layer-sha"
        }
      }
    },
    {
      "key": "another-buildpack-id",
      "layers": {
        "some-layer": {
          "cache": true,
          "data": {
            "some-layer-key": "some-layer-value"
          },
          "sha": "some-layer-sha"
        }
      }
    }
  ]
}
@@ -0,0 +1,7 @@
ARG fromImage

FROM $fromImage

ARG metadata

LABEL io.buildpacks.lifecycle.cache.metadata=$metadata
@@ -0,0 +1,16 @@
{
  "buildpacks": [
    {
      "key": "some-buildpack-id",
      "layers": {
        "some-layer": {
          "cache": true,
          "data": {
            "some-layer-key": "some-layer-value"
          },
          "sha": "some-layer-sha"
        }
      }
    }
  ]
}
@@ -0,0 +1,20 @@
FROM ubuntu:bionic

ARG cnb_uid=1234
ARG cnb_gid=1000

ENV CNB_USER_ID=${cnb_uid}
ENV CNB_GROUP_ID=${cnb_gid}

COPY ./container/ /

RUN groupadd cnb --gid ${cnb_gid} && \
  useradd --uid ${cnb_uid} --gid ${cnb_gid} -m -s /bin/bash cnb

# chown the directories so the tests do not have to run as root
RUN chown -R "${cnb_uid}:${cnb_gid}" "/layers"
RUN chown -R "${cnb_uid}:${cnb_gid}" "/layout-repo"

WORKDIR /layers

USER ${cnb_uid}:${cnb_gid}
106 acceptance/testdata/creator/container/cnb/buildpacks/samples_hello-world/0.0.1/bin/build vendored Executable file
@@ -0,0 +1,106 @@
#!/usr/bin/env bash
set -eo pipefail

echo "---> Hello World buildpack"

# INPUT ARGUMENTS
platform_dir=$2
env_dir=${platform_dir}/env
layers_dir=$1
plan_path=$3

if test -d /layers/sbom; then
  echo "/layers/sbom should not exist during buildpack builds"
  exit 1
fi

# LAYERS
echo " layers_dir: ${layers_dir}"

# launch=true layer
mkdir -p ${layers_dir}/some-layer/env

echo -n "some-val" > ${layers_dir}/some-layer/env/SOME_VAR

if test -f ${layers_dir}/some-layer.sbom.cdx.json; then
  echo "${layers_dir}/some-layer.sbom.cdx.json restored with content: $(cat ${layers_dir}/some-layer.sbom.cdx.json)"
fi

echo -n "{\"key\": \"some-launch-true-bom-content\"}" > ${layers_dir}/some-layer.sbom.cdx.json

if test -f ${layers_dir}/some-layer.toml; then
  # mimic not downloading new content
  echo "nop"
else
  # mimic downloading new content
  sleep 1
fi

cat <<EOF > ${layers_dir}/some-layer.toml
[types]
launch = true
EOF

# cache=true layer
mkdir -p ${layers_dir}/some-cache-layer

if test -f ${layers_dir}/some-cache-layer.sbom.cdx.json; then
  echo "${layers_dir}/some-cache-layer.sbom.cdx.json restored with content: $(cat ${layers_dir}/some-cache-layer.sbom.cdx.json)"
fi

echo -n "{\"key\": \"some-cache-true-bom-content\"}" > ${layers_dir}/some-cache-layer.sbom.cdx.json

cat <<EOF > ${layers_dir}/some-cache-layer.toml
[types]
cache = true
EOF

# launch=true cache=true layer
mkdir -p ${layers_dir}/some-launch-cache-layer

if test -f ${layers_dir}/some-launch-cache-layer.sbom.cdx.json; then
  echo "${layers_dir}/some-launch-cache-layer.sbom.cdx.json restored with content: $(cat ${layers_dir}/some-launch-cache-layer.sbom.cdx.json)"
fi

echo -n "{\"key\": \"some-launch-true-cache-true-bom-content\"}" > ${layers_dir}/some-launch-cache-layer.sbom.cdx.json

cat <<EOF > ${layers_dir}/some-launch-cache-layer.toml
[types]
launch = true
cache = true
EOF

# build=true layer
mkdir -p ${layers_dir}/some-build-layer

if test -f ${layers_dir}/some-build-layer.sbom.cdx.json; then
  echo "${layers_dir}/some-build-layer.sbom.cdx.json" should never be restored
  exit 1
fi

echo -n "{\"key\": \"some-bom-content\"}" > ${layers_dir}/some-build-layer.sbom.cdx.json

cat <<EOF > ${layers_dir}/some-build-layer.toml
[types]
build = true
EOF

# launch bom
if test -f ${layers_dir}/launch.sbom.cdx.json; then
  echo "${layers_dir}/launch.sbom.cdx.json should never be restored"
  exit 1
fi
echo -n "{\"key\": \"some-bom-content\"}" > ${layers_dir}/launch.sbom.cdx.json

# build bom
if test -f ${layers_dir}/build.sbom.cdx.json; then
  echo "${layers_dir}/build.sbom.cdx.json should never be restored"
  exit 1
fi
echo -n "{\"key\": \"some-bom-content\"}" > ${layers_dir}/build.sbom.cdx.json

# store.toml
if test -f ${layers_dir}/store.toml; then
  echo "${layers_dir}/store.toml restored with content: $(cat ${layers_dir}/store.toml)"
fi
printf "[metadata]\n\"some-key\" = \"some-value\"" > ${layers_dir}/store.toml
7 acceptance/testdata/creator/container/cnb/buildpacks/samples_hello-world/0.0.1/bin/detect vendored Executable file
@@ -0,0 +1,7 @@
#!/usr/bin/env bash
set -eo pipefail

# 1. GET ARGS
plan_path=$2

exit 0
13 acceptance/testdata/creator/container/cnb/buildpacks/samples_hello-world/0.0.1/buildpack.toml vendored Normal file
@@ -0,0 +1,13 @@
# Buildpack API version
api = "0.7"

# Buildpack ID and metadata
[buildpack]
id = "samples/hello-world"
version = "0.0.1"
name = "Hello World Buildpack"
sbom-formats = ["application/vnd.cyclonedx+json"]

# Stacks that the buildpack will work with
[[stacks]]
id = "*"
@@ -0,0 +1,8 @@
# Buildpack API version
api = "0.9"

# Extension ID and metadata
[extension]
id = "samples/hello-world"
version = "0.0.1"
name = "Hello World Extension"
@@ -0,0 +1,9 @@
[[order]]
[[order.group]]
id = "samples/hello-world"
version = "0.0.1"

[[order-extensions]]
[[order-extensions.group]]
id = "samples/hello-world"
version = "0.0.1"
@@ -0,0 +1,4 @@
[[order]]
[[order.group]]
id = "samples/hello-world"
version = "0.0.1"
@@ -0,0 +1,5 @@
[[images]]
image = "some-run-image-from-run-toml"

[[images]]
image = "some-other-run-image"
@@ -0,0 +1 @@
{"architecture":"amd64","config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["sh"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"65aaed1d1f89cd3cd5aac9137c4786831e99a845ad823496c6008a22a725c780","container_config":{"Hostname":"65aaed1d1f89","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"sh\"]"],"Image":"sha256:688db7a53b2e8d0358c0e1f309856290bb25ce7acabbf9938f580582e921833f","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"created":"2022-11-18T01:19:29.442257773Z","docker_version":"20.10.12","history":[{"created":"2022-11-18T01:19:29.321465538Z","created_by":"/bin/sh -c #(nop) ADD file:36d9f497f679d56737ac1379d93f7b6a2e4c814e38e868a5a8e719c4b226ef6e in / "},{"created":"2022-11-18T01:19:29.442257773Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:40cf597a9181e86497f4121c604f9f0ab208950a98ca21db883f26b0a548a2eb"]}}
Some files were not shown because too many files have changed in this diff.