Compare commits
1048 Commits
Author | SHA1 | Date
---|---|---
*(1048 commit rows, from b03bc242a7 through da81d2e2b4; only abbreviated SHAs were captured in this view — the author, date, and commit-message cells did not survive extraction)*
.cirrus.yml (149 changed lines)

@@ -9,6 +9,7 @@ env:
DEST_BRANCH: "main"
GOPATH: "/var/tmp/go"
GOSRC: "${GOPATH}/src/github.com/containers/buildah"
GOCACHE: "/tmp/go-build"
# Overrides default location (/tmp/cirrus) for repo clone
CIRRUS_WORKING_DIR: "${GOSRC}"
# Shell used to execute all script commands

@@ -21,18 +22,20 @@ env:
IN_PODMAN: 'false'
# root or rootless
PRIV_NAME: root
# default "mention the $BUILDAH_RUNTIME in the task alias, with initial whitespace" value
RUNTIME_N: ""

####
#### Cache-image names to test with
####
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
FEDORA_NAME: "fedora-39"
PRIOR_FEDORA_NAME: "fedora-38"
FEDORA_NAME: "fedora-42"
PRIOR_FEDORA_NAME: "fedora-41"
DEBIAN_NAME: "debian-13"

# Image identifiers
IMAGE_SUFFIX: "c20240222t143004z-f39f38d13"
IMAGE_SUFFIX: "c20250422t130822z-f42f41d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"

@@ -49,14 +52,14 @@ env:
gcp_credentials: ENCRYPTED[ae0bf7370f0b6e446bc61d0865a2c55d3e166b3fab9466eb0393e38e1c66a31ca4c71ddc7e0139d47d075c36dd6d3fd7]

# Default timeout for each task
timeout_in: 120m
timeout_in: 30m

# Default VM to use unless set or modified by task
gce_instance: &standardvm
image_project: "${IMAGE_PROJECT}"
zone: "us-central1-c" # Required by Cirrus for the time being
cpu: 2
memory: "4Gb"
memory: "4G"
disk: 200 # Gigabytes, do not set less than 200 per obscure GCE docs re: I/O performance
image_name: "${FEDORA_CACHE_IMAGE_NAME}"

@@ -69,7 +72,7 @@ meta_task:
container:
image: "quay.io/libpod/imgts:latest"
cpu: 1
memory: 1
memory: "1G"

env:
# Space-separated list of images used by this repository state

@@ -93,12 +96,13 @@ smoke_task:
name: "Smoke Test"

gce_instance:
memory: "12Gb"
memory: "12G"
cpu: 8

# Don't bother running on branches (including cron), or for tags.
only_if: $CIRRUS_PR != ''
skip: $CIRRUS_PR == ''

timeout_in: 30m
timeout_in: 10m

setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'

@@ -120,13 +124,14 @@ vendor_task:
# Runs within Cirrus's "community cluster"
container:
image: docker.io/library/golang:latest
image: docker.io/library/golang:1.23.3
cpu: 1
memory: 1

timeout_in: 5m

vendor_script:
- './hack/check_vendor_toolchain.sh Try updating the image used by the vendor_task in .cirrus.yml.'
- 'make vendor'
- './hack/tree_status.sh'

@@ -134,37 +139,32 @@ vendor_task:
# Confirm cross-compile ALL architectures on a Mac OS-X VM.
cross_build_task:
name: "Cross Compile"
gce_instance:
cpu: 8
memory: "24G"
alias: cross_build
only_if: >-
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'

osx_instance:
image: ghcr.io/cirruslabs/macos-ventura-base:latest

skip: >-
$CIRRUS_CHANGE_TITLE =~ '.*CI:DOCS.*'
env:
HOME: /root
script:
- brew update
- brew install go
- brew install go-md2man
- brew install gpgme
- go version
- make cross CGO_ENABLED=0
- make -j cross CGO_ENABLED=0
binary_artifacts:
path: ./bin/*

unit_task:
name: 'Unit tests w/ $STORAGE_DRIVER'
gce_instance:
cpu: 4
alias: unit
only_if: &not_build_docs >-
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*'
depends_on: &smoke_vendor_cross
skip: &not_build_docs >-
$CIRRUS_CHANGE_TITLE =~ '.*CI:DOCS.*' ||
$CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*'
depends_on: &smoke_vendor
- smoke
- vendor
- cross_build

timeout_in: 1h

matrix:
- env:

@@ -173,27 +173,23 @@ unit_task:
STORAGE_DRIVER: 'overlay'

setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
unit_test_script: '${SCRIPT_BASE}/test.sh unit |& ${_TIMESTAMP}'

binary_artifacts:
path: ./bin/*

conformance_task:
name: 'Build Conformance w/ $STORAGE_DRIVER'
name: 'Debian Conformance w/ $STORAGE_DRIVER'
alias: conformance
only_if: *not_build_docs
depends_on: *smoke_vendor_cross
skip: *not_build_docs
depends_on: *smoke_vendor

gce_instance:
cpu: 4
image_name: "${DEBIAN_CACHE_IMAGE_NAME}"

timeout_in: 65m

matrix:
- env:
STORAGE_DRIVER: 'vfs'
TMPDIR: '/var/tmp'
- env:
STORAGE_DRIVER: 'overlay'

@@ -202,10 +198,10 @@ conformance_task:
integration_task:
name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER"
name: "Integration $DISTRO_NV$RUNTIME_N w/ $STORAGE_DRIVER"
alias: integration
only_if: *not_build_docs
depends_on: *smoke_vendor_cross
skip: *not_build_docs
depends_on: *smoke_vendor

matrix:
# VFS

@@ -213,32 +209,64 @@ integration_task:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
CI_DESIRED_RUNTIME: runc
# OVERLAY
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
CI_DESIRED_RUNTIME: runc

gce_instance:
image_name: "$IMAGE_NAME"
cpu: 8
memory: "8G"

# Separate scripts for separate outputs, makes debugging easier.
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'

@@ -259,10 +287,10 @@ integration_task:
golang_version_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh golang'

integration_rootless_task:
name: "Integration rootless $DISTRO_NV w/ $STORAGE_DRIVER"
name: "Integration rootless $DISTRO_NV$RUNTIME_N w/ $STORAGE_DRIVER"
alias: integration_rootless
only_if: *not_build_docs
depends_on: *smoke_vendor_cross
skip: *not_build_docs
depends_on: *smoke_vendor

matrix:
# Running rootless tests on overlay

@@ -272,11 +300,29 @@ integration_rootless_task:
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"

@@ -285,6 +331,8 @@ integration_rootless_task:
gce_instance:
image_name: "$IMAGE_NAME"
cpu: 8
memory: "8G"

# Separate scripts for separate outputs, makes debugging easier.
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'

@@ -300,8 +348,12 @@ integration_rootless_task:
in_podman_task:
name: "Containerized Integration"
alias: in_podman
only_if: *not_build_docs
depends_on: *smoke_vendor_cross
skip: *not_build_docs
depends_on: *smoke_vendor

gce_instance:
cpu: 8
memory: "8G"

env:
# This is key, cause the scripts to re-execute themselves inside a container.

@@ -338,6 +390,7 @@ success_task:
- vendor
- cross_build
- integration
- integration_rootless
- in_podman

container:
@@ -0,0 +1,3 @@
[codespell]
skip = ./vendor,./.git,./go.sum,./docs/*.1,./docker/AUTHORS,./CHANGELOG.md,./changelog.txt,./tests/tools/vendor,./tests/tools/go.mod,./tests/tools/go.sum
ignore-words-list = fo,passt,secon,erro
@@ -0,0 +1 @@
1
@@ -1,71 +0,0 @@
<!--
If you are reporting a new issue, make sure that we do not have any duplicates
already open. You can ensure this by searching the issue list for this
repository. If there is a duplicate, please close your issue and add a comment
to the existing issue instead.

If you suspect your issue is a bug, please edit your issue description to
include the BUG REPORT INFORMATION shown below. If you fail to provide this
information within 7 days, we cannot debug your issue and will close it. We
will, however, reopen it if you later provide the information.

---------------------------------------------------
BUG REPORT INFORMATION
---------------------------------------------------
Use the commands below to provide key information from your environment:
You do NOT have to include this information if this is a FEATURE REQUEST
-->

**Description**

<!--
Briefly describe the problem you are having in a few paragraphs.
-->

**Steps to reproduce the issue:**
1.
2.
3.

**Describe the results you received:**

**Describe the results you expected:**

**Output of `rpm -q buildah` or `apt list buildah`:**

```
(paste your output here)
```

**Output of `buildah version`:**

```
(paste your output here)
```

**Output of `podman version` if reporting a `podman build` issue:**

```
(paste your output here)
```

**Output of `cat /etc/*release`:**

```
(paste your output here)
```

**Output of `uname -a`:**

```
(paste your output here)
```

**Output of `cat /etc/containers/storage.conf`:**

```
(paste your output here)
```
@@ -0,0 +1,99 @@
|
|||
---
|
||||
name: Buildah Bug Report
|
||||
description: File a Buildah bug report
|
||||
labels: ["kind/bug", "triage-needed"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to fill out this bug report!
|
||||
|
||||
**NOTE** A large number of issues reported against Buildah are often found to already be fixed in more current versions of the project.
|
||||
Before reporting an issue, please verify the version you are running with `buildah version` and compare it to the latest released version under
|
||||
[releases](https://github.com/containers/buildah/releases).
|
||||
If they differ, please update your version of Buildah to the latest possible and retry your command before creating an issue.
|
||||
|
||||
Commands you might need to run to create the issue
|
||||
$ `buildah version`
|
||||
$ `buildah info`
|
||||
$ `rpm -q buildah` or `apt list buildah`
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Issue Description
|
||||
description: Please explain your issue
|
||||
value: "Describe your issue"
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: reproducer
|
||||
attributes:
|
||||
label: Steps to reproduce the issue
|
||||
description: Please explain the steps to reproduce the issue
|
||||
value: "Steps to reproduce the issue\n1.\n2.\n3.\n"
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: received_results
|
||||
attributes:
|
||||
label: Describe the results you received
|
||||
description: Please explain the results you are noticing
|
||||
value: "Describe the results you received"
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: expected_results
|
||||
attributes:
|
||||
label: Describe the results you expected
|
||||
description: Please explain the results you are expecting
|
||||
value: "Describe the results you expected"
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: buildah_version
|
||||
attributes:
|
||||
label: buildah version output
|
||||
description: Please copy and paste `buildah version` output.
|
||||
value: If you are unable to run `buildah version` for any reason, please provide the output of `rpm -q buildah` or `apt list buildah`.
|
||||
render: yaml
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: buildah_info
|
||||
attributes:
|
||||
label: buildah info output
|
||||
description: Please copy and paste `buildah info` output.
|
||||
value: If you are unable to run `buildah info` for any reason, please provide the operating system and its version and the architecture you are running.
|
||||
render: yaml
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: storage_conf
|
||||
attributes:
|
||||
label: Provide your storage.conf
|
||||
description: "Please provide the relevant [storage.conf file](https://github.com/containers/storage/blob/main/docs/containers-storage.conf.5.md#files)"
|
||||
render: toml
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
id: upstream_latest
|
||||
attributes:
|
||||
label: Upstream Latest Release
|
||||
description: Have you tried running the [latest upstream release](https://github.com/containers/buildah/releases/latest)
|
||||
options:
|
||||
- "Yes"
|
||||
- "No"
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: additional_environment
|
||||
attributes:
|
||||
label: Additional environment details
|
||||
description: Please describe any additional environment details like (AWS, VirtualBox,...)
|
||||
value: "Additional environment details"
|
||||
- type: textarea
|
||||
id: additional_info
|
||||
attributes:
|
||||
label: Additional information
|
||||
description: Please explain the additional information you deem important
|
||||
value: "Additional information like issue happens only occasionally or issue happens with a particular architecture or on a particular setting"
|
|
@@ -0,0 +1,11 @@
---
blank_issues_enabled: true
contact_links:
- name: Ask a question
url: https://github.com/containers/buildah/discussions/new
about: Ask a question about Buildah
- name:
If your issue is a general Podman issue unrelated to `podman build`, please open an issue in the Podman repository.
If the issue is with the `podman build` command, please report it here.
url: https://github.com/containers/podman/issues
about: Please report issues with Podman here.
@@ -0,0 +1,133 @@
|
|||
---
|
||||
name: Podman Build Bug Report
|
||||
description: File a Podman build bug report
|
||||
labels: ["kind/bug", "triage-needed"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to fill out this bug report!
|
||||
|
||||
**NOTE** A large number of issues reported against Buildah are often found to already be fixed in more current versions of the project.
|
||||
Before reporting an issue, please verify the version you are running with `podman version` and compare it to the latest released version under
|
||||
[releases](https://github.com/containers/podman/releases).
|
||||
If they differ, please update your version of Podman to the latest possible and retry your command before creating an issue.
|
||||
|
||||
Commands you might need to run to create the issue
|
||||
$ `podman version`
|
||||
$ `podman info`
|
||||
$ `rpm -q podman` or `apt list podman`
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Issue Description
|
||||
description: Please explain your issue
|
||||
value: "Describe your issue"
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: reproducer
|
||||
attributes:
|
||||
label: Steps to reproduce the issue
|
||||
description: Please explain the steps to reproduce the issue
|
||||
value: "Steps to reproduce the issue\n1.\n2.\n3.\n"
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: received_results
|
||||
attributes:
|
||||
label: Describe the results you received
|
||||
description: Please explain the results you are noticing
|
||||
value: "Describe the results you received"
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: expected_results
|
||||
attributes:
|
||||
label: Describe the results you expected
|
||||
description: Please explain the results you are expecting
|
||||
value: "Describe the results you expected"
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: podman_version
|
||||
attributes:
|
||||
label: podman version output
|
||||
description: Please copy and paste `podman version` output.
|
||||
value: If you are unable to run `podman version` for any reason, please provide the output of `rpm -q podman` or `apt list podman`.
|
||||
render: yaml
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: podman_info
|
||||
attributes:
|
||||
label: podman info output
|
||||
description: Please copy and paste `podman info` output.
|
||||
value: If you are unable to run `podman info` for any reason, please provide the operating system and its version and the architecture you are running.
|
||||
render: yaml
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: storage_conf
|
||||
attributes:
|
||||
label: Provide your storage.conf
|
||||
description: "Please provide the relevant [storage.conf file](https://github.com/containers/storage/blob/main/docs/containers-storage.conf.5.md#files)"
|
||||
render: toml
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
id: podman_in_a_container
|
||||
attributes:
|
||||
label: Podman in a container
|
||||
description: Please select Yes if you are running Podman in a container
|
||||
options:
|
||||
- "No"
|
||||
- "Yes"
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
id: privileged_rootless
|
||||
attributes:
|
||||
label: Privileged Or Rootless
|
||||
description:
|
||||
Are you running the containers as privileged or non-root user? Note that using `su` or `sudo` does not establish a proper login session required for running
|
||||
Podman as a non-root user. Please refer to the [troubleshooting guide](https://github.com/containers/podman/blob/main/troubleshooting.md#solution-28) for alternatives.
|
||||
options:
|
||||
- Privileged
|
||||
- Rootless
|
||||
- type: dropdown
|
||||
id: upstream_latest
|
||||
attributes:
|
||||
label: Upstream Latest Release
|
||||
description: Have you tried running the [latest upstream release](https://github.com/containers/podman/releases/latest)
|
||||
options:
|
||||
- "Yes"
|
||||
- "No"
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
id: installation_source
|
||||
attributes:
|
||||
label: Installation Source
|
||||
description: What installion type did you use?
|
||||
multiple: false
|
||||
options:
|
||||
- Distribution package (DNF, apt, yay)
|
||||
- Brew
|
||||
- Offical Podman Installer (Mac)
|
||||
- Podman Desktop
|
||||
default: 0
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: additional_environment
|
||||
attributes:
|
||||
label: Additional environment details
|
||||
description: Please describe any additional environment details like (AWS, VirtualBox,...)
|
||||
value: "Additional environment details"
|
||||
- type: textarea
|
||||
id: additional_info
|
||||
attributes:
|
||||
label: Additional information
|
||||
description: Please explain the additional information you deem important
|
||||
value: "Additional information like issue happens only occasionally or issue happens with a particular architecture or on a particular setting"
|
|
@@ -0,0 +1,13 @@
# https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes#configuring-automatically-generated-release-notes

changelog:
categories:
- title: Notable changes
labels:
- '*'
exclude:
labels:
- dependencies
- title: Dependency updates
labels:
- dependencies
@@ -12,7 +12,7 @@

podman run -it \
-v ./.github/renovate.json5:/usr/src/app/renovate.json5:z \
docker.io/renovate/renovate:latest \
ghcr.io/renovatebot/renovate:latest \
renovate-config-validator
3. Commit.
@@ -42,10 +42,6 @@
"github>containers/automation//renovate/defaults.json5"
],

// Permit automatic rebasing when base-branch changes by more than
// one commit.
"rebaseWhen": "behind-base-branch",

/*************************************************
*** Repository-specific configuration options ***
*************************************************/

@@ -58,6 +54,5 @@
"**/docs/**",
"**/examples/**",
"**/tests/**"
],

]
}
@@ -17,4 +17,9 @@ jobs:
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
call_cron_failures:
uses: containers/podman/.github/workflows/check_cirrus_cron.yml@main
secrets: inherit
secrets:
SECRET_CIRRUS_API_KEY: ${{secrets.SECRET_CIRRUS_API_KEY}}
ACTION_MAIL_SERVER: ${{secrets.ACTION_MAIL_SERVER}}
ACTION_MAIL_USERNAME: ${{secrets.ACTION_MAIL_USERNAME}}
ACTION_MAIL_PASSWORD: ${{secrets.ACTION_MAIL_PASSWORD}}
ACTION_MAIL_SENDER: ${{secrets.ACTION_MAIL_SENDER}}
@@ -1,20 +0,0 @@
---

# See also:
# https://github.com/containers/podman/blob/main/.github/workflows/discussion_lock.yml

on:
schedule:
- cron: '0 0 * * *'
# Debug: Allow triggering job manually in github-actions WebUI
workflow_dispatch: {}

jobs:
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
closed_issue_discussion_lock:
uses: containers/podman/.github/workflows/discussion_lock.yml@main
secrets: inherit
permissions:
contents: read
issues: write
pull-requests: write
@@ -0,0 +1,20 @@
---

# See also:
# https://github.com/containers/podman/blob/main/.github/workflows/issue_pr_lock.yml

on:
schedule:
- cron: '0 0 * * *'
# Debug: Allow triggering job manually in github-actions WebUI
workflow_dispatch: {}

jobs:
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
closed_issue_discussion_lock:
uses: containers/podman/.github/workflows/issue_pr_lock.yml@main
secrets: inherit
permissions:
contents: read
issues: write
pull-requests: write
@@ -4,7 +4,7 @@ on:

jobs:
commit:
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
# Only check commits on pull requests.
if: github.event_name == 'pull_request'
steps:
@@ -1,19 +0,0 @@
---

# See also: https://github.com/containers/podman/blob/main/.github/workflows/rerun_cirrus_cron.yml

on:
# Note: This only applies to the default branch.
schedule:
# N/B: This should correspond to a period slightly after
# the last job finishes running. See job defs. at:
# https://cirrus-ci.com/settings/repository/6706677464432640
- cron: '01 01 * * 1-5'
# Debug: Allow triggering job manually in github-actions WebUI
workflow_dispatch: {}

jobs:
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
call_cron_rerun:
uses: containers/podman/.github/workflows/rerun_cirrus_cron.yml@main
secrets: inherit
@@ -10,5 +10,5 @@ Dockerfile*
!/tests/conformance/**/Dockerfile*
*.swp
/result/
internal/mkcw/embed/entrypoint.o
internal/mkcw/embed/entrypoint
internal/mkcw/embed/entrypoint_amd64.o
internal/mkcw/embed/entrypoint_amd64
@@ -1,13 +1,29 @@
---
version: "2"

run:
build-tags:
- apparmor
- seccomp
- selinux
# Don't exceed number of threads available when running under CI
concurrency: 4

formatters:
enable:
- gofumpt

linters:
enable:
- nolintlint
- revive
- unconvert
- unparam
- unused
- whitespace
exclusions:
presets:
- comments
- std-error-handling
settings:
staticcheck:
checks:
- all
- -QF1008 # https://staticcheck.dev/docs/checks/#QF1008 Omit embedded fields from selector expression.
.packit.yaml (135 changed lines)

@@ -2,53 +2,148 @@
# See the documentation for more information:
# https://packit.dev/docs/configuration/

specfile_path: rpm/buildah.spec
downstream_package_name: buildah
upstream_tag_template: v{version}

# These files get synced from upstream to downstream (Fedora / CentOS Stream) on every
# propose-downstream job. This is done so tests maintained upstream can be run
# downstream in Zuul CI and Bodhi.
# Ref: https://packit.dev/docs/configuration#files_to_sync
files_to_sync:
- src: rpm/gating.yaml
dest: gating.yaml
delete: true
- src: plans/
dest: plans/
delete: true
mkpath: true
- src: tests/tmt/
dest: tests/tmt/
delete: true
mkpath: true
- src: .fmf/
dest: .fmf/
delete: true
- .packit.yaml

packages:
buildah-fedora:
pkg_tool: fedpkg
specfile_path: rpm/buildah.spec
buildah-centos:
pkg_tool: centpkg
specfile_path: rpm/buildah.spec
buildah-eln:
specfile_path: rpm/buildah.spec

srpm_build_deps:
- make

jobs:
- job: copr_build
trigger: pull_request
notifications:
packages: [buildah-fedora]
notifications: &copr_build_failure_notification
failure_comment:
message: "Ephemeral COPR build failed. @containers/packit-build please check."
enable_net: true
targets:
# Fedora aliases documentation: https://packit.dev/docs/configuration#aliases
# python3-fedora-distro-aliases provides `resolve-fedora-aliases` command
targets: &fedora_copr_targets
- fedora-all-x86_64
- fedora-all-aarch64
- fedora-eln-x86_64
- fedora-eln-aarch64
- centos-stream+epel-next-8-x86_64
- centos-stream+epel-next-8-aarch64
- centos-stream+epel-next-9-x86_64
- centos-stream+epel-next-9-aarch64
additional_repos:
- "copr://rhcontainerbot/podman-next"
enable_net: true
# Disable osh diff scan until Go support is available
# Ref: https://github.com/openscanhub/known-false-positives/pull/30#issuecomment-2858698495
osh_diff_scan_after_copr_build: false

# Ignore until golang is updated in distro buildroot to 1.23.3+
- job: copr_build
trigger: ignore
packages: [buildah-eln]
notifications: *copr_build_failure_notification
targets:
fedora-eln-x86_64:
additional_repos:
- "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/x86_64/"
fedora-eln-aarch64:
additional_repos:
- "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/aarch64/"
enable_net: true

# Ignore until golang is updated in distro buildroot to 1.23.3+
- job: copr_build
trigger: ignore
packages: [buildah-centos]
notifications: *copr_build_failure_notification
targets: &centos_copr_targets
- centos-stream-9-x86_64
- centos-stream-9-aarch64
- centos-stream-10-x86_64
- centos-stream-10-aarch64
enable_net: true

# Run on commit to main branch
- job: copr_build
trigger: commit
packages: [buildah-fedora]
notifications:
failure_comment:
message: "podman-next COPR build failed. @containers/packit-build please check."
branch: main
owner: rhcontainerbot
project: podman-next
enable_net: true

# Tests on Fedora for main branch PRs
- job: tests
trigger: pull_request
packages: [buildah-fedora]
targets:
- fedora-all-x86_64
tf_extra_params:
environments:
- artifacts:
- type: repository-file
id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/fedora-$releasever/rhcontainerbot-podman-next-fedora-$releasever.repo

# Ignore until golang is updated in distro buildroot to 1.23.3+
# Tests on CentOS Stream for main branch PRs
- job: tests
trigger: ignore
packages: [buildah-centos]
targets:
- centos-stream-9-x86_64
- centos-stream-10-x86_64
tf_extra_params:
environments:
- artifacts:
- type: repository-file
id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/centos-stream-$releasever/rhcontainerbot-podman-next-centos-stream-$releasever.repo

# Sync to Fedora
- job: propose_downstream
trigger: release
packages: [buildah-fedora]
update_release: false
dist_git_branches: &fedora_targets
- fedora-all

# Sync to CentOS Stream
- job: propose_downstream
trigger: release
packages: [buildah-centos]
update_release: false
dist_git_branches:
- fedora-all
- c10s

# Fedora Koji build
- job: koji_build
trigger: commit
dist_git_branches:
- fedora-all

- job: bodhi_update
trigger: commit
dist_git_branches:
- fedora-branched # rawhide updates are created automatically
packages: [buildah-fedora]
sidetag_group: podman-releases
# Dependents are not rpm dependencies, but the package whose bodhi update
# should include this package.
# Ref: https://packit.dev/docs/fedora-releases-guide/releasing-multiple-packages
dependents:
- podman
dist_git_branches: *fedora_targets
CHANGELOG.md (472 changed lines)

@@ -2,6 +2,478 @@
|
|||
|
||||
# Changelog
|
||||
|
||||
## v1.40.0 (2025-04-17)
|
||||
|
||||
Bump c/storage to v1.58.0, c/image v5.35.0, c/common v0.63.0
|
||||
fix(deps): update module github.com/docker/docker to v28.1.0+incompatible
|
||||
fix(deps): update module github.com/containers/storage to v1.58.0
|
||||
cirrus: make Total Success wait for rootless integration
|
||||
chroot: use symbolic names when complaining about mount() errors
|
||||
cli: hide the `completion` command instead of disabling it outright
|
||||
Document rw and src options for --mount flag in buildah-run(1)
|
||||
fix(deps): update module github.com/moby/buildkit to v0.21.0
|
||||
build: add support for inherit-labels
|
||||
chore(deps): update dependency golangci/golangci-lint to v2.1.0
|
||||
.github: check_cirrus_cron work around github bug
|
||||
stage_executor,getCreatedBy: expand buildArgs for sources correctly
|
||||
Add a link to project governance and MAINTAINERS file
|
||||
fix(deps): update github.com/containers/storage digest to b1d1b45
|
||||
generateHostname: simplify
|
||||
Use maps.Copy
|
||||
Use slices.Concat
|
||||
Use slices.Clone
|
||||
Use slices.Contains
|
||||
Use for range over integers
|
||||
tests/testreport: don't copy os.Environ
|
||||
Use any instead of interface{}
|
||||
ci: add golangci-lint run with --tests=false
|
||||
ci: add nolintlint, fix found issues
|
||||
copier: rm nolint:unparam annotation
|
||||
.golangci.yml: add unused linter
|
||||
chroot: fix unused warnings
|
||||
copier: fix unused warnings
|
||||
tests/conformance: fix unused warning
|
||||
ci: switch to golangci-lint v2
|
||||
internal/mkcw: disable ST1003 warnings
|
||||
tests/conformance: do not double import (fix ST1019)
|
||||
cmd/buildah: don't double import (fix ST1019)
|
||||
Do not capitalize error strings
|
||||
cmd/buildah: do not capitalize error strings
|
||||
tests/conformance: fix QF1012 warnings
|
||||
tests/serve: fix QF1012 warning
|
||||
Use strings.ReplaceAll to fix QF1004 warnings
|
||||
Use switch to fix QF1003 warnings
|
||||
Apply De Morgan's law to fix QF1001 warnings
|
||||
Fix QF1007 staticcheck warnings
|
||||
imagebuildah: fix revive warning
|
||||
Rename max variable
|
||||
tests/tools: install lint from binary, use renovate
|
||||
fix(deps): update module github.com/containernetworking/cni to v1.3.0
|
||||
Update Buildah issue template to new version and support podman build
|
||||
fix(deps): update module golang.org/x/crypto to v0.37.0
|
||||
stage_executor: reset platform in systemcontext for stages
|
||||
fix(deps): update github.com/opencontainers/runtime-tools digest to 260e151
|
||||
cmd/buildah: rm unused containerOutputUsingTemplate
|
||||
cmd/buildah: rm unused getDateAndDigestAndSize
|
||||
build: return ExecErrorCodeGeneric when git operation fails
|
||||
add: report error while creating dir for URL source.
|
||||
createPlatformContainer: drop MS_REMOUNT|MS_BIND
|
||||
fix(deps): update module github.com/docker/docker to v28.0.3+incompatible
|
||||
fix: bats won't fail on ! without cleverness
|
||||
feat: use HistoryTimestamp, if set, for oci-archive entries
|
||||
Allow extendedGlob to work with Windows paths
|
||||
fix(deps): update module github.com/moby/buildkit to v0.20.2
|
||||
fix(deps): update github.com/openshift/imagebuilder digest to e87e4e1
|
||||
fix(deps): update module github.com/docker/docker to v28.0.2+incompatible
|
||||
fix(deps): update module tags.cncf.io/container-device-interface to v1.0.1
|
||||
chore(deps): update dependency containers/automation_images to v20250324
|
||||
vendor: update github.com/opencontainers/selinux to v1.12.0
|
||||
replace deprecated selinux/label calls
|
||||
vendor: bump c/common to dbeb17e40c80
|
||||
Use builtin arg defaults from imagebuilder
|
||||
linux: accept unmask paths as glob values
|
||||
vendor: update containers/common
|
||||
Add --parents option for COPY in Dockerfiles
|
||||
fix(deps): update module github.com/opencontainers/runc to v1.2.6
|
||||
update go.sum from the previous commit
|
||||
fix(deps): update module tags.cncf.io/container-device-interface to v1
|
||||
chore(deps): update module golang.org/x/net to v0.36.0 [security]
|
||||
packit: remove f40 from copr builds
|
||||
cirrus: update to go 1.23 image
|
||||
vendor bump to golang.org/x/crypto v0.36.0
|
||||
cirrus: update PRIOR_FEDORA comment
|
||||
github: remove cirrus rerun action
|
||||
fix(deps): update module github.com/containers/common to v0.62.2
|
||||
fix(deps): update module github.com/containers/image/v5 to v5.34.2
|
||||
fix: close files properly when BuildDockerfiles exits
|
||||
fix(deps): update module github.com/containers/storage to v1.57.2
|
||||
stage_executor: history should include heredoc summary correctly
|
||||
fix(deps): update module github.com/containers/common to v0.62.1
|
||||
github: disable cron rerun action
|
||||
fix(deps): update module github.com/moby/buildkit to v0.20.1
|
||||
internal/mkcw.Archive(): use github.com/containers/storage/pkg/ioutils
|
||||
[skip-ci] TMT: system tests
|
||||
buildah-build.1.md: secret examples
|
||||
fix(deps): update github.com/containers/luksy digest to 40bd943
|
||||
fix(deps): update module github.com/opencontainers/image-spec to v1.1.1
|
||||
fix(deps): update module github.com/containers/image/v5 to v5.34.1
|
||||
Use UnparsedInstance.Manifest instead of ImageSource.GetManifest
|
||||
fix(deps): update module github.com/opencontainers/runtime-spec to v1.2.1
|
||||
tests/conformance/testdata/Dockerfile.add: update some URLs
|
||||
Vendor imagebuilder
|
||||
Fix source of OS, architecture and variant
|
||||
chore(deps): update module github.com/go-jose/go-jose/v4 to v4.0.5 [security]
|
||||
fix(deps): update module tags.cncf.io/container-device-interface to v0.8.1
|
||||
fix(deps): update module github.com/moby/buildkit to v0.20.0
|
||||
chroot createPlatformContainer: use MS_REMOUNT
|
||||
conformance: make TestCommit and TestConformance parallel
|
||||
cirrus: reduce task timeout
|
||||
mkcw: mkcw_check_image use bats run_with_log
|
||||
test: use /tmp as TMPDIR
|
||||
heredoc: create temp subdirs for each build
|
||||
test: heredoc remove python dependency from test
|
||||
Support the containers.conf container_name_as_hostname option
|
||||
fix(deps): update module github.com/opencontainers/runc to v1.2.5
|
||||
fix(deps): update module github.com/spf13/cobra to v1.9.0
|
||||
.cirrus: use more cores for smoke
|
||||
Switch to the CNCF Code of Conduct
|
||||
.cirrus: bump ci resources
|
||||
fix(deps): update module golang.org/x/crypto to v0.33.0
|
||||
Distinguish --mount=type=cache locations by ownership, too
|
||||
fix(deps): update module golang.org/x/term to v0.29.0
|
||||
.cirrus: run -race only on non-PR branch
|
||||
unit: deparallelize some tests
|
||||
.cirrus: use multiple cpu for unit tests
|
||||
Makefile: use -parallel for go test
|
||||
unit_test: use Parallel test where possible
|
||||
Update module golang.org/x/sys to v0.30.0
|
||||
Update module golang.org/x/sync to v0.11.0
|
||||
Update dependency containers/automation_images to v20250131
|
||||
Bump to Buildah v1.40.0-dev
|
||||
|
||||
## v1.39.0 (2025-01-31)
|
||||
|
||||
Bump c/storage v1.57.1, c/image 5.34.0, c/common v0.62.0
|
||||
Update module github.com/containers/storage to v1.57.0
|
||||
CI, .cirrus: parallelize containerized integration
|
||||
ed's comment: cleanup
|
||||
use separate blobinfocache for flaky test
|
||||
bump CI VMs to 4 CPUs (was: 2) for integration tests
|
||||
cleanup, debug, and disable parallel in blobcache tests
|
||||
bats tests - parallelize
|
||||
pkg/overlay: cleanups
|
||||
RPM: include check section to silence rpmlint
|
||||
RPM: use default gobuild macro on RHEL
|
||||
tests: remove masked /sys/dev/block check
|
||||
vendor to latest c/{common,image,storage}
|
||||
build, run: record hash or digest in image history
|
||||
Accept image names as sources for cache mounts
|
||||
Run(): always clean up options.ExternalImageMounts
|
||||
refactor: replace golang.org/x/exp with stdlib
|
||||
Update to c/image @main
|
||||
fix broken doc link
|
||||
run_freebsd.go: only import runtime-spec once
|
||||
fix(deps): update module github.com/docker/docker to v27.5.1+incompatible
|
||||
bump github.com/vbatts/tar-split
|
||||
Add more checks to the --mount flag parsing logic
|
||||
chroot mount flags integration test: copy binaries
|
||||
fix(deps): update module github.com/moby/buildkit to v0.19.0
|
||||
relabel(): correct a misleading parameter name
|
||||
Fix TOCTOU error when bind and cache mounts use "src" values
|
||||
define.TempDirForURL(): always use an intermediate subdirectory
|
||||
internal/volume.GetBindMount(): discard writes in bind mounts
|
||||
pkg/overlay: add a MountLabel flag to Options
|
||||
pkg/overlay: add a ForceMount flag to Options
|
||||
Add internal/volumes.bindFromChroot()
|
||||
Add an internal/open package
|
||||
fix(deps): update module github.com/containers/common to v0.61.1
|
||||
fix(deps): update module github.com/containers/image/v5 to v5.33.1
|
||||
[CI:DOCS] Touch up changelogs
|
||||
fix(deps): update module github.com/docker/docker to v27.5.0+incompatible
|
||||
copy-preserving-extended-attributes: use a different base image
|
||||
fix(deps): update github.com/containers/luksy digest to a3a812d
|
||||
chore(deps): update module golang.org/x/net to v0.33.0 [security]
|
||||
fix(deps): update module golang.org/x/crypto to v0.32.0
|
||||
New VM Images
|
||||
fix(deps): update module github.com/opencontainers/runc to v1.2.4
|
||||
fix(deps): update module github.com/docker/docker to v27.4.1+incompatible
|
||||
fix(deps): update module github.com/containers/ocicrypt to v1.2.1
|
||||
Add support for --security-opt mask and unmask
|
||||
Allow cache mounts to be stages or additional build contexts
|
||||
[skip-ci] RPM: cleanup changelog conditionals
|
||||
fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.6
|
||||
fix(deps): update module github.com/moby/buildkit to v0.18.2
|
||||
Fix an error message in the chroot unit test
|
||||
copier: use .PAXRecords instead of .Xattrs
|
||||
chroot: on Linux, try to pivot_root before falling back to chroot
|
||||
manifest add: add --artifact-annotation
|
||||
Add context to an error message
|
||||
Update module golang.org/x/crypto to v0.31.0
|
||||
Update module github.com/opencontainers/runc to v1.2.3
|
||||
Update module github.com/docker/docker to v27.4.0+incompatible
|
||||
Update module github.com/cyphar/filepath-securejoin to v0.3.5
|
||||
CI: don't build a binary in the unit tests task
|
||||
CI: use /tmp for $GOCACHE
|
||||
CI: remove dependencies on the cross-build task
|
||||
CI: run cross-compile task with make -j
|
||||
Update module github.com/docker/docker to v27.4.0-rc.4+incompatible
|
||||
Update module github.com/moby/buildkit to v0.18.1
|
||||
Update module golang.org/x/crypto to v0.30.0
|
||||
Update golang.org/x/exp digest to 2d47ceb
|
||||
Update github.com/opencontainers/runtime-tools digest to f7e3563
|
||||
[skip-ci] Packit: remove rhel copr build jobs
|
||||
[skip-ci] Packit: switch to fedora-all for copr
|
||||
Update module github.com/stretchr/testify to v1.10.0
|
||||
Update module github.com/moby/buildkit to v0.17.2
|
||||
Makefile: use `find` to detect source files
|
||||
Tests: make _prefetch() parallel-safe
|
||||
Update module github.com/opencontainers/runc to v1.2.2
|
||||
executor: allow to specify --no-pivot-root
|
||||
Update module github.com/moby/sys/capability to v0.4.0
|
||||
Makefile: mv codespell config to .codespellrc
|
||||
Fix some codespell errors
|
||||
Makefile,install.md: rm gopath stuff
|
||||
Makefile: rm targets working on ..
|
||||
build: rm exclude_graphdriver_devicemapper tag
|
||||
Makefile: rm unused var
|
||||
Finish updating to go 1.22
|
||||
CI VMs: bump again
|
||||
Bump to Buildah v1.39.0-dev
|
||||
stage_executor: set avoidLookingCache only if mounting stage
|
||||
imagebuildah: additionalContext is not a local built stage
|
||||
|
||||
## v1.38.0 (2024-11-08)
|
||||
|
||||
Bump to c/common v0.61.0, c/image v5.33.0, c/storage v1.56.0
|
||||
fix(deps): update module golang.org/x/crypto to v0.29.0
|
||||
fix(deps): update module github.com/moby/buildkit to v0.17.1
|
||||
fix(deps): update module github.com/containers/storage to v1.56.0
|
||||
tests: skip two ulimit tests
|
||||
CI VMs: bump f40 -> f41
|
||||
tests/tools: rebuild tools when we change versions
|
||||
tests/tools: update golangci-lint to v1.61.0
|
||||
fix(deps): update module github.com/moby/buildkit to v0.17.0
|
||||
Handle RUN --mount with relative targets and no configured workdir
|
||||
tests: bud: make parallel-safe
|
||||
fix(deps): update module github.com/opencontainers/runc to v1.2.1
|
||||
fix(deps): update golang.org/x/exp digest to f66d83c
|
||||
fix(deps): update github.com/opencontainers/runtime-tools digest to 6c9570a
|
||||
tests: blobcache: use unique image name
|
||||
tests: sbom: never write to cwd
|
||||
tests: mkcw: bug fixes, refactor
|
||||
deps: bump runc to v1.2.0
|
||||
deps: switch to moby/sys/userns
|
||||
tests/test_runner.sh: remove some redundancies
|
||||
Integration tests: run git daemon on a random-but-bind()able port
|
||||
fix(deps): update module github.com/opencontainers/selinux to v1.11.1
|
||||
go.mod: remove unnecessary replace
|
||||
Document more buildah build --secret options
|
||||
Add support for COPY --exclude and ADD --exclude options
|
||||
fix(deps): update github.com/containers/luksy digest to e2530d6
|
||||
chore(deps): update dependency containers/automation_images to v20241010
|
||||
fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.4
|
||||
Properly validate cache IDs and sources
|
||||
[skip-ci] Packit: constrain koji job to fedora package to avoid dupes
|
||||
Audit and tidy OWNERS
|
||||
fix(deps): update module golang.org/x/crypto to v0.28.0
|
||||
tests: add quotes to names
|
||||
vendor: update c/common to latest
|
||||
CVE-2024-9407: validate "bind-propagation" flag settings
|
||||
vendor: switch to moby/sys/capability
|
||||
Don't set ambient capabilities
|
||||
Document that zstd:chunked is downgraded to zstd when encrypting
|
||||
fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.3
|
||||
buildah-manifest-create.1: Fix manpage section
|
||||
chore(deps): update dependency ubuntu to v24
|
||||
Make `buildah manifest push --all` true by default
|
||||
chroot: add newlines at the end of printed error messages
|
||||
Do not error on trying to write IMA xattr as rootless
|
||||
fix: remove duplicate conditions
|
||||
fix(deps): update module github.com/moby/buildkit to v0.16.0
|
||||
fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.2
|
||||
Document how entrypoint is configured in buildah config
|
||||
In a container, try to register binfmt_misc
|
||||
imagebuildah.StageExecutor: clean up volumes/volumeCache
|
||||
build: fall back to parsing a TARGETPLATFORM build-arg
|
||||
`manifest add --artifact`: handle multiple values
|
||||
Packit: split out ELN jobs and reuse fedora downstream targets
|
||||
Packit: Enable sidetags for bodhi updates
|
||||
fix(deps): update module github.com/docker/docker to v27.2.1+incompatible
|
||||
tests/bud.bats: add git source
|
||||
add: add support for git source
|
||||
Add support for the new c/common pasta options
|
||||
vendor latest c/common
|
||||
fix(deps): update module golang.org/x/term to v0.24.0
|
||||
fix(deps): update module github.com/fsouza/go-dockerclient to v1.12.0
|
||||
packit: update fedora and epel targets
|
||||
cirrus: disable f39 testing
|
||||
cirrus: fix fedora names
|
||||
update to go 1.22
|
||||
Vendor c/common:9d025e4cb348
|
||||
copier: handle globbing with "**" path components
|
||||
fix(deps): update golang.org/x/exp digest to 9b4947d
|
||||
fix(deps): update github.com/containers/luksy digest to 2e7307c
|
||||
imagebuildah: make scratch config handling toggleable
|
||||
fix(deps): update module github.com/docker/docker to v27.2.0+incompatible
|
||||
Add a validation script for Makefile $(SOURCES)
|
||||
fix(deps): update module github.com/openshift/imagebuilder to v1.2.15
|
||||
New VMs
|
||||
Update some godocs, use 0o to prefix an octal in a comment
|
||||
buildah-build.1.md: expand the --layer-label description
|
||||
fix(deps): update module github.com/containers/common to v0.60.2
|
||||
run: fix a nil pointer dereference on FreeBSD
|
||||
CI: enable the whitespace linter
|
||||
Fix some govet linter warnings
|
||||
Commit(): retry committing to local storage on storage.LayerUnknown
|
||||
CI: enable the gofumpt linter
|
||||
conformance: move weirdly-named files out of the repository
|
||||
fix(deps): update module github.com/docker/docker to v27.1.2+incompatible
|
||||
fix(deps): update module github.com/containers/common to v0.60.1
|
||||
*: use gofmt -s, add gofmt linter
|
||||
*: fix build tags
|
||||
fix(deps): update module github.com/containers/image/v5 to v5.32.1
|
||||
Add(): re-escape any globbed items that included escapes
|
||||
conformance tests: use mirror.gcr.io for most images
|
||||
unit tests: use test-specific policy.json and registries.conf
|
||||
fix(deps): update module golang.org/x/sys to v0.24.0
|
||||
Update to spun-out "github.com/containerd/platforms"
|
||||
Bump github.com/containerd/containerd
|
||||
test/tools/Makefile: duplicate the vendor-in-container target
|
||||
linters: unchecked error
|
||||
linters: don't end loop iterations with "else" when "then" would
|
||||
linters: unused arguments shouldn't have names
|
||||
linters: rename checkIdsGreaterThan5() to checkIDsGreaterThan5()
|
||||
linters: don't name variables "cap"
|
||||
`make lint`: use --timeout instead of --deadline
|
||||
Drop the e2e test suite
|
||||
fix(deps): update module golang.org/x/crypto to v0.26.0
|
||||
fix(deps): update module github.com/onsi/gomega to v1.34.1
|
||||
`make vendor-in-container`: use the caller's Go cache if it exists
|
||||
fix(deps): fix test/tools ginkgo typo
|
||||
fix(deps): update module github.com/onsi/ginkgo/v2 to v2.19.1
|
||||
Update to keep up with API changes in storage
|
||||
fix(deps): update github.com/containers/luksy digest to 1f482a9
|
||||
install: On Debian/Ubuntu, add installation of libbtrfs-dev
|
||||
fix(deps): update module golang.org/x/sys to v0.23.0
|
||||
fix(deps): update golang.org/x/exp digest to 8a7402a
|
||||
fix(deps): update module github.com/fsouza/go-dockerclient to v1.11.2
|
||||
Use Epoch: 2 and respect the epoch in dependencies.
|
||||
Bump to Buildah v1.38.0-dev
|
||||
AddAndCopyOptions: add CertPath, InsecureSkipTLSVerify, Retry fields
|
||||
Add PrependedLinkedLayers/AppendedLinkedLayers to CommitOptions
|
||||
integration tests: teach starthttpd() about TLS and pid files
|
||||
|
||||
## v1.37.0 (2024-07-26)
|
||||
|
||||
Bump c/storage, c/image, c/common for v1.37.0
|
||||
"build with basename resolving user arg" tests: correct ARG use
|
||||
bud-multiple-platform-no-run test: correct ARG use
|
||||
imagebuildah: always have default values for $TARGET... args ready
|
||||
bump github.com/openshift/imagebuilder to v1.2.14
|
||||
fix(deps): update module github.com/docker/docker to v27.1.1+incompatible
|
||||
fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.1
|
||||
fix(deps): update module github.com/docker/docker to v27.1.0+incompatible
|
||||
CI: use local registry, part 2 of 2
|
||||
CI: use local registry, part 1 of 2
|
||||
fix(deps): update module github.com/fsouza/go-dockerclient to v1.11.1
|
||||
Revert "fix(deps): update github.com/containers/image/v5 to v5.31.1"
|
||||
Replace libimage.LookupReferenceFunc with the manifests version
|
||||
conformance tests: enable testing CompatVolumes
|
||||
conformance tests: add a test that tries to chown a volume
|
||||
imagebuildah: make traditional volume handling not the default
|
||||
StageExecutor.prepare(): mark base image volumes for preservation
|
||||
fix(deps): update module github.com/containers/image/v5 to v5.31.1
|
||||
Vendor in latest containers/(common, storage, image)
|
||||
fix(deps): update module golang.org/x/term to v0.22.0
|
||||
fix(deps): update module golang.org/x/sys to v0.22.0
|
||||
fix(deps): update golang.org/x/exp digest to 7f521ea
|
||||
fix(deps): update github.com/containers/luksy digest to a8846e2
|
||||
imagebuildah.StageExecutor.Copy(): reject new flags for now
|
||||
bump github.com/openshift/imagebuilder to v1.2.11
|
||||
Rework parsing of --pull flags
|
||||
fix(deps): update module github.com/containers/image/v5 to v5.31.1
|
||||
imagebuildah.StageExecutor.prepare(): log the --platform flag
|
||||
CI VMs: bump
|
||||
buildah copy: preserve owner info with --from= a container or image
|
||||
conformance tests: enable testing CompatSetParent
|
||||
containerImageRef.NewImageSource(): move the FROM comment to first
|
||||
commit: set "parent" for docker format only when requested
|
||||
Update godoc for Builder.EnsureContainerPathAs
|
||||
fix(deps): update module github.com/spf13/cobra to v1.8.1
|
||||
fix(deps): update module github.com/containernetworking/cni to v1.2.0
|
||||
fix(deps): update module github.com/opencontainers/runc to v1.1.13
|
||||
Change default for podman build to --pull missing
|
||||
fix(deps): update module github.com/containers/common to v0.59.1
|
||||
Clarify definition of --pull options
|
||||
buildah: fix a nil pointer reference on FreeBSD
|
||||
Use /var/tmp for $TMPDIR for vfs conformance jobs
|
||||
Cirrus: run `df` during job setup
|
||||
conformance: use quay.io/libpod/centos:7 instead of centos:8
|
||||
Stop setting "parent" in docker format
|
||||
conformance: check if workdir trims path separator suffixes
|
||||
push integration test: pass password to docker login via stdin
|
||||
Re-enable the "copy with chown" conformance test
|
||||
healthcheck: Add support for `--start-interval`
|
||||
fix(deps): update module github.com/docker/docker to v26.1.4+incompatible
|
||||
fix(deps): update module github.com/containerd/containerd to v1.7.18
|
||||
tests: set _CONTAINERS_USERNS_CONFIGURED=done for libnetwork
|
||||
Cross-build on Fedora
|
||||
Drop copyStringSlice() and copyStringStringMap()
|
||||
fix(deps): update module golang.org/x/crypto to v0.24.0
|
||||
fix(deps): update module github.com/openshift/imagebuilder to v1.2.10
|
||||
Provide an uptime_netbsd.go
|
||||
Spell unix as "!windows"
|
||||
Add netbsd to lists-of-OSes
|
||||
fix(deps): update golang.org/x/exp digest to fd00a4e
|
||||
[skip-ci] Packit: enable c10s downstream sync
|
||||
CI VMs: bump, to debian with cgroups v2
|
||||
Document when BlobDirectory is overridden
|
||||
fix secret mounts for env vars when using chroot isolation
|
||||
Change to take a types.ImageReference arg
|
||||
imagebuildah: Support custom image reference lookup for cache push/pull
|
||||
fix(deps): update module github.com/onsi/ginkgo/v2 to v2.19.0
|
||||
Bump to v1.37.0-dev
|
||||
CI: Clarify Debian use for conformance tests
|
||||
|
||||
## v1.36.0 (2024-05-23)
|
||||
|
||||
build: be more selective about specifying the default OS
|
||||
Bump to c/common v0.59.0
|
||||
Fix buildah prune --help showing the same example twice
|
||||
fix(deps): update module github.com/onsi/ginkgo/v2 to v2.18.0
|
||||
fix(deps): update module github.com/containers/image/v5 to v5.31.0
|
||||
bud tests: fix breakage when vendoring into podman
|
||||
Integration tests: fake up a replacement for nixery.dev/shell
|
||||
copierWithSubprocess(): try to capture stderr on io.ErrClosedPipe
|
||||
Don't expand RUN heredocs ourselves, let the shell do it
|
||||
Don't leak temp files on failures
|
||||
Add release note template to split dependency chores
|
||||
fix CentOS/RHEL build - no BATS there
|
||||
fix(deps): update module github.com/containers/luksy to v0.0.0-20240506205542-84b50f50f3ee
|
||||
Address CVE-2024-3727
|
||||
chore(deps): update module github.com/opencontainers/runtime-spec to v1.2.0
|
||||
Builder.cdiSetupDevicesInSpecdefConfig(): use configured CDI dirs
|
||||
Setting --arch should set the TARGETARCH build arg
|
||||
fix(deps): update module golang.org/x/exp to v0.0.0-20240416160154-fe59bbe5cc7f
|
||||
[CI:DOCS] Add link to Buildah image page to README.md
|
||||
Don't set GOTOOLCHAIN=local
|
||||
fix(deps): update module github.com/cyphar/filepath-securejoin to v0.2.5
|
||||
Makefile: set GOTOOLCHAIN=local
|
||||
Integration tests: switch some base images
|
||||
containerImageRef.NewImageSource: merge the tar filters
|
||||
fix(deps): update module github.com/onsi/ginkgo/v2 to v2.17.2
|
||||
fix(deps): update module github.com/containers/luksy to v0.0.0-20240408185936-afd8e7619947
|
||||
Disable packit builds for centos-stream+epel-next-8
|
||||
Makefile: add missing files to $(SOURCES)
|
||||
CI VMs: bump to new versions with tmpfs /tmp
|
||||
chore(deps): update module golang.org/x/net to v0.23.0 [security]
|
||||
integration test: handle new labels in "bud and test --unsetlabel"
|
||||
Switch packit configuration to use epel-9-$arch ...
|
||||
Give unit tests a bit more time
|
||||
Integration tests: remove a couple of duplicated tests
|
||||
Integration tests: whitespace tweaks
|
||||
Integration tests: don't remove images at start or end of test
|
||||
Integration tests: use cached images more
|
||||
Integration tests _prefetch: use registry configs
|
||||
internal: use fileutils.(Le|E)xists
|
||||
pkg/parse: use fileutils.(Le|E)xists
|
||||
buildah: use fileutils.(Le|E)xists
|
||||
chroot: use fileutils.(Le|E)xists
|
||||
vendor: update containers/(common|storage)
|
||||
Fix issue/pr lock workflow
|
||||
[CI:DOCS] Add golang 1.21 update warning
|
||||
heredoc: honor inline COPY irrespective of ignorefiles
|
||||
Update install.md
|
||||
source-push: add support for --digestfile
|
||||
Fix caching when mounting a cached stage with COPY/ADD
|
||||
fix(deps): update github.com/containers/luksy digest to 3d2cf0e
|
||||
Makefile: softcode `strip`, use it from env var
|
||||
Man page updates
|
||||
Add support for passing CDI specs to --device
|
||||
Update comments on some API objects
|
||||
pkg/parse.DeviceFromPath(): dereference src symlinks
|
||||
fix(deps): update module github.com/onsi/ginkgo/v2 to v2.17.1
|
||||
|
||||
## v1.35.0 (2024-03-06)
|
||||
|
||||
fix(deps): update module github.com/stretchr/testify to v1.9.0
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
## The Buildah Project Community Code of Conduct
|
||||
|
||||
The Buildah Project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
|
||||
The Buildah Project, as part of Podman Container Tools, follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
|
||||
|
|
|
@ -173,5 +173,5 @@ Normally, a maintainer will only be removed if they are considered to be
|
|||
inactive for a long period of time or are viewed as disruptive to the community.
|
||||
|
||||
The current list of maintainers can be found in the
|
||||
[MAINTAINERS](MAINTAINERS) file.
|
||||
[MAINTAINERS](./MAINTAINERS.md) file.
|
||||
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
## The Buildah Project Community Governance
|
||||
|
||||
The Buildah project, as part of Podman Container Tools, follows the [Podman Project Governance](https://github.com/containers/podman/blob/main/GOVERNANCE.md)
|
||||
except for the sections found in this document, which override those found in Podman's Governance.
|
||||
|
||||
---
|
||||
|
||||
# Maintainers File
|
||||
|
||||
The definitive source of truth for maintainers of this repository is the local [MAINTAINERS.md](./MAINTAINERS.md) file. The [MAINTAINERS.md](https://github.com/containers/podman/blob/main/MAINTAINERS.md) file in the main Podman repository is used for project-spanning roles, including Core Maintainer and Community Manager. Some repositories in the project will also have a local [OWNERS](./OWNERS) file, which the CI system uses to map users to roles. Any change to the [OWNERS](./OWNERS) file must be accompanied by a corresponding change to the [MAINTAINERS.md](./MAINTAINERS.md) file to ensure that the file remains up to date. Most changes to [MAINTAINERS.md](./MAINTAINERS.md) will require a change to the repository’s [OWNERS](./OWNERS) file (e.g., adding a Reviewer), but some will not (e.g., promoting a Maintainer to a Core Maintainer, which comes with no additional CI-related privileges).
|
||||
|
||||
Any Core Maintainers listed in Podman’s [MAINTAINERS.md](https://github.com/containers/podman/blob/main/MAINTAINERS.md) file should also be added to the list of “approvers” in the local [OWNERS](./OWNERS) file and as a Core Maintainer in the list of “Maintainers” in the local [MAINTAINERS.md](./MAINTAINERS.md) file.
|
|
@ -1,4 +0,0 @@
|
|||
Dan Walsh <dwalsh@redhat.com> (@rhatdan)
|
||||
Nalin Dahyabhai <nalin@redhat.com> (@nalind)
|
||||
Tom Sweeney <tsweeney@redhat.com> (@tomsweeneyredhat)
|
||||
Urvashi Mohnani <umohnani@redhat.com> (@umohnani8)
|
|
@ -0,0 +1,35 @@
|
|||
# Buildah Maintainers
|
||||
|
||||
[GOVERNANCE.md](GOVERNANCE.md)
|
||||
describes the project's governance and the Project Roles used below.
|
||||
|
||||
## Maintainers
|
||||
|
||||
| Maintainer | GitHub ID | Project Roles | Affiliation |
|
||||
|-------------------|----------------------------------------------------------|----------------------------------|----------------------------------------------|
|
||||
| Brent Baude | [baude](https://github.com/baude) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
| Nalin Dahyabhai | [nalind](https://github.com/nalind) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
| Matthew Heon | [mheon](https://github.com/mheon) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
| Paul Holzinger | [Luap99](https://github.com/Luap99) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
| Giuseppe Scrivano | [giuseppe](https://github.com/giuseppe) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
| Miloslav Trmač | [mtrmac](https://github.com/mtrmac) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
| Neil Smith | [actionmancan](https://github.com/actionmancan) | Community Manager | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
| Tom Sweeney | [TomSweeneyRedHat](https://github.com/TomSweeneyRedHat/) | Maintainer and Community Manager | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
| Lokesh Mandvekar | [lsm5](https://github.com/lsm5) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
| Aditya Rajan | [flouthoc](https://github.com/flouthoc) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
| Dan Walsh | [rhatdan](https://github.com/rhatdan) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
| Ashley Cui | [ashley-cui](https://github.com/ashley-cui) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
| Jan Rodák | [Honny1](https://github.com/Honny1) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
| Valentin Rothberg | [vrothberg](https://github.com/vrothberg) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
|
||||
|
||||
## Alumni
|
||||
|
||||
None at present
|
||||
|
||||
## Credits
|
||||
|
||||
The structure of this document was based off of the equivalent one in the [CRI-O Project](https://github.com/cri-o/cri-o/blob/main/MAINTAINERS.md).
|
||||
|
||||
## Note
|
||||
|
||||
If there is a discrepancy between the [MAINTAINERS.md](https://github.com/containers/podman/blob/main/MAINTAINERS.md) file in the main Podman repository and this file regarding Core Maintainers or Community Managers, the file in the Podman Repository is considered the source of truth.
|
137
Makefile
|
@ -1,9 +1,9 @@
|
|||
export GOPROXY=https://proxy.golang.org
|
||||
|
||||
APPARMORTAG := $(shell hack/apparmor_tag.sh)
|
||||
STORAGETAGS := exclude_graphdriver_devicemapper $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./hack/libsubid_tag.sh)
|
||||
STORAGETAGS := $(shell ./btrfs_installed_tag.sh) $(shell ./hack/libsubid_tag.sh)
|
||||
SECURITYTAGS ?= seccomp $(APPARMORTAG)
|
||||
TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh)
|
||||
TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh) $(shell ./hack/sqlite_tag.sh)
|
||||
ifeq ($(shell uname -s),FreeBSD)
|
||||
# FreeBSD needs CNI until netavark is supported
|
||||
TAGS += cni
|
||||
|
@ -16,36 +16,35 @@ BUILDFLAGS := -tags "$(BUILDTAGS)"
|
|||
BUILDAH := buildah
|
||||
SELINUXOPT ?= $(shell test -x /usr/sbin/selinuxenabled && selinuxenabled && echo -Z)
|
||||
SELINUXTYPE=container_runtime_exec_t
|
||||
AS ?= as
|
||||
STRIP ?= strip
|
||||
|
||||
GO := go
|
||||
GO_LDFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-ldflags"; fi)
|
||||
GO_GCFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-gcflags"; fi)
|
||||
# test for go module support
|
||||
ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true)
|
||||
export GO_BUILD=GO111MODULE=on $(GO) build -mod=vendor
|
||||
export GO_TEST=GO111MODULE=on $(GO) test -mod=vendor
|
||||
else
|
||||
NPROCS := $(shell nproc)
|
||||
export GO_BUILD=$(GO) build
|
||||
export GO_TEST=$(GO) test
|
||||
endif
|
||||
RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race)
|
||||
export GO_TEST=$(GO) test -parallel=$(NPROCS)
|
||||
RACEFLAGS ?= $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race)
|
||||
|
||||
COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true)
|
||||
GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),${COMMIT_NO}-dirty,${COMMIT_NO})
|
||||
SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed"))
|
||||
STATIC_STORAGETAGS = "containers_image_openpgp $(STORAGE_TAGS)"
|
||||
|
||||
# we get GNU make 3.x in MacOS build envs, which wants # to be escaped in
|
||||
# strings, while the 4.x we have on Linux doesn't. this is the documented
|
||||
# workaround
|
||||
COMMENT := \#
|
||||
CNI_COMMIT := $(shell sed -n 's;^$(COMMENT) github.com/containernetworking/cni \([^ \n]*\).*$$;\1;p' vendor/modules.txt)
|
||||
RUNC_COMMIT := $(shell sed -n 's;^$(COMMENT) github.com/opencontainers/runc \([^ \n]*\).*$$;\1;p' vendor/modules.txt)
|
||||
LIBSECCOMP_COMMIT := release-2.3
|
||||
|
||||
EXTRA_LDFLAGS ?=
|
||||
BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)'
|
||||
SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/config/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/sbom/*.go internal/source/*.go internal/tmpdir/*.go internal/util/*.go internal/volumes/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go
|
||||
|
||||
# This isn't what we actually build; it's a superset, used for target
|
||||
# dependencies. Basically: all *.go and *.c files, except *_test.go,
|
||||
# and except anything in a dot subdirectory. If any of these files is
|
||||
# newer than our target (bin/buildah), a rebuild is triggered.
|
||||
SOURCES=$(shell find . -path './.*' -prune -o \( \( -name '*.go' -o -name '*.c' \) -a ! -name '*_test.go' \) -print)
|
||||
|
||||
LINTFLAGS ?=
|
||||
|
||||
|
@ -53,40 +52,27 @@ ifeq ($(BUILDDEBUG), 1)
|
|||
override GOGCFLAGS += -N -l
|
||||
endif
|
||||
|
||||
# Managed by renovate.
|
||||
export GOLANGCI_LINT_VERSION := 2.1.0
|
||||
|
||||
# make all BUILDDEBUG=1
|
||||
# Note: Uses the -N -l go compiler options to disable compiler optimizations
|
||||
# and inlining. Using these build options allows you to subsequently
|
||||
# use source debugging tools like delve.
|
||||
all: bin/buildah bin/imgtype bin/copy bin/tutorial docs
|
||||
all: bin/buildah bin/imgtype bin/copy bin/inet bin/tutorial bin/dumpspec bin/passwd docs
|
||||
|
||||
# Update nix/nixpkgs.json its latest stable commit
|
||||
.PHONY: nixpkgs
|
||||
nixpkgs:
|
||||
@nix run \
|
||||
-f channel:nixos-20.09 nix-prefetch-git \
|
||||
-c nix-prefetch-git \
|
||||
--no-deepClone \
|
||||
https://github.com/nixos/nixpkgs refs/heads/nixos-20.09 > nix/nixpkgs.json
|
||||
|
||||
# Build statically linked binary
|
||||
.PHONY: static
|
||||
static:
|
||||
@nix build -f nix/
|
||||
mkdir -p ./bin
|
||||
cp -rfp ./result/bin/* ./bin/
|
||||
|
||||
bin/buildah: $(SOURCES) cmd/buildah/*.go internal/mkcw/embed/entrypoint_amd64.gz
|
||||
bin/buildah: $(SOURCES) internal/mkcw/embed/entrypoint_amd64.gz
|
||||
$(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah
|
||||
test -z "${SELINUXOPT}" || chcon --verbose -t $(SELINUXTYPE) $@
|
||||
|
||||
ifneq ($(shell as --version | grep x86_64),)
|
||||
ifneq ($(shell $(AS) --version | grep x86_64),)
|
||||
internal/mkcw/embed/entrypoint_amd64.gz: internal/mkcw/embed/entrypoint_amd64
|
||||
gzip -k9nf $^
|
||||
|
||||
internal/mkcw/embed/entrypoint_amd64: internal/mkcw/embed/entrypoint_amd64.s
|
||||
$(AS) -o $(patsubst %.s,%.o,$^) $^
|
||||
$(LD) -o $@ $(patsubst %.s,%.o,$^)
|
||||
strip $@
|
||||
$(STRIP) $@
|
||||
endif
|
||||
|
||||
|
||||
|
@ -101,36 +87,39 @@ FREEBSD_CROSS_TARGETS := $(filter bin/buildah.freebsd.%,$(ALL_CROSS_TARGETS))
|
|||
.PHONY: cross
|
||||
cross: $(LINUX_CROSS_TARGETS) $(DARWIN_CROSS_TARGETS) $(WINDOWS_CROSS_TARGETS) $(FREEBSD_CROSS_TARGETS)
|
||||
|
||||
bin/buildah.%:
|
||||
bin/buildah.%: $(SOURCES)
|
||||
mkdir -p ./bin
|
||||
GOOS=$(word 2,$(subst ., ,$@)) GOARCH=$(word 3,$(subst ., ,$@)) $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ -tags "containers_image_openpgp" ./cmd/buildah
|
||||
|
||||
bin/imgtype: $(SOURCES) tests/imgtype/imgtype.go
|
||||
bin/dumpspec: $(SOURCES)
|
||||
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/dumpspec
|
||||
|
||||
bin/imgtype: $(SOURCES)
|
||||
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/imgtype/imgtype.go
|
||||
|
||||
bin/copy: $(SOURCES) tests/copy/copy.go
|
||||
bin/copy: $(SOURCES)
|
||||
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/copy/copy.go
|
||||
|
||||
bin/tutorial: $(SOURCES) tests/tutorial/tutorial.go
|
||||
bin/tutorial: $(SOURCES)
|
||||
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/tutorial/tutorial.go
|
||||
|
||||
bin/inet: tests/inet/inet.go
|
||||
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/inet/inet.go
|
||||
|
||||
bin/passwd: tests/passwd/passwd.go
|
||||
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/passwd/passwd.go
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
$(RM) -r bin tests/testreport/testreport
|
||||
$(RM) -r bin tests/testreport/testreport tests/conformance/testdata/mount-targets/true
|
||||
$(MAKE) -C docs clean
|
||||
|
||||
.PHONY: docs
|
||||
docs: install.tools ## build the docs on the host
|
||||
$(MAKE) -C docs
|
||||
|
||||
# For vendoring to work right, the checkout directory must be such that our top
|
||||
# level is at $GOPATH/src/github.com/containers/buildah.
|
||||
.PHONY: gopath
|
||||
gopath:
|
||||
test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd)
|
||||
|
||||
codespell:
|
||||
codespell -S Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L secon,passt,bu,uint,iff,od,erro -w
|
||||
codespell -w
|
||||
|
||||
.PHONY: validate
|
||||
validate: install.tools
|
||||
|
@ -142,25 +131,6 @@ validate: install.tools
|
|||
install.tools:
|
||||
$(MAKE) -C tests/tools
|
||||
|
||||
.PHONY: runc
|
||||
runc: gopath
|
||||
rm -rf ../../opencontainers/runc
|
||||
git clone https://github.com/opencontainers/runc ../../opencontainers/runc
|
||||
cd ../../opencontainers/runc && git checkout $(RUNC_COMMIT) && $(GO) build -tags "$(STORAGETAGS) $(SECURITYTAGS)"
|
||||
ln -sf ../../opencontainers/runc/runc
|
||||
|
||||
.PHONY: install.libseccomp.sudo
|
||||
install.libseccomp.sudo: gopath
|
||||
rm -rf ../../seccomp/libseccomp
|
||||
git clone https://github.com/seccomp/libseccomp ../../seccomp/libseccomp
|
||||
cd ../../seccomp/libseccomp && git checkout $(LIBSECCOMP_COMMIT) && ./autogen.sh && ./configure --prefix=/usr && make all && sudo make install
|
||||
|
||||
.PHONY: install.cni.sudo
|
||||
install.cni.sudo: gopath
|
||||
rm -rf ../../containernetworking/plugins
|
||||
git clone https://github.com/containernetworking/plugins ../../containernetworking/plugins
|
||||
cd ../../containernetworking/plugins && ./build_linux.sh && sudo install -D -v -m755 -t /opt/cni/bin/ bin/*
|
||||
|
||||
.PHONY: install
|
||||
install:
|
||||
install -d -m 755 $(DESTDIR)/$(BINDIR)
|
||||
|
@ -178,22 +148,20 @@ install.completions:
|
|||
install -m 755 -d $(DESTDIR)/$(BASHINSTALLDIR)
|
||||
install -m 644 contrib/completions/bash/buildah $(DESTDIR)/$(BASHINSTALLDIR)/buildah
|
||||
|
||||
.PHONY: install.runc
|
||||
install.runc:
|
||||
install -m 755 ../../opencontainers/runc/runc $(DESTDIR)/$(BINDIR)/
|
||||
|
||||
.PHONY: test-conformance
|
||||
test-conformance:
|
||||
test-conformance: tests/conformance/testdata/mount-targets/true
|
||||
$(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -timeout 60m ./tests/conformance
|
||||
|
||||
.PHONY: test-integration
|
||||
test-integration: install.tools
|
||||
./tests/tools/build/ginkgo $(BUILDFLAGS) -v tests/e2e/.
|
||||
cd tests; ./test_runner.sh
|
||||
|
||||
tests/testreport/testreport: tests/testreport/testreport.go
|
||||
$(GO_BUILD) $(GO_LDFLAGS) "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go
|
||||
|
||||
tests/conformance/testdata/mount-targets/true: tests/conformance/testdata/mount-targets/true.go
|
||||
$(GO_BUILD) $(GO_LDFLAGS) "-linkmode external -extldflags -static" -o tests/conformance/testdata/mount-targets/true tests/conformance/testdata/mount-targets/true.go
|
||||
|
||||
.PHONY: test-unit
|
||||
test-unit: tests/testreport/testreport
|
||||
$(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd | grep -v chroot | grep -v copier) -timeout 45m
|
||||
|
@ -203,20 +171,35 @@ test-unit: tests/testreport/testreport
|
|||
$(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf
|
||||
|
||||
vendor-in-container:
|
||||
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.21 make vendor
|
||||
goversion=$(shell sed -e '/^go /!d' -e '/^go /s,.* ,,g' go.mod) ; \
|
||||
if test -d `$(GO) env GOCACHE` && test -w `$(GO) env GOCACHE` ; then \
|
||||
podman run --privileged --rm --env HOME=/root -v `$(GO) env GOCACHE`:/root/.cache/go-build --env GOCACHE=/root/.cache/go-build -v `pwd`:/src -w /src docker.io/library/golang:$$goversion make vendor ; \
|
||||
else \
|
||||
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:$$goversion make vendor ; \
|
||||
fi
|
||||
|
||||
.PHONY: vendor
|
||||
vendor:
|
||||
GO111MODULE=on $(GO) mod tidy
|
||||
GO111MODULE=on $(GO) mod vendor
|
||||
GO111MODULE=on $(GO) mod verify
|
||||
$(GO) mod tidy
|
||||
$(GO) mod vendor
|
||||
$(GO) mod verify
|
||||
if test -n "$(strip $(shell $(GO) env GOTOOLCHAIN))"; then go mod edit -toolchain none ; fi
|
||||
|
||||
.PHONY: lint
|
||||
lint: install.tools
|
||||
./tests/tools/build/golangci-lint run $(LINTFLAGS)
|
||||
./tests/tools/build/golangci-lint run --tests=false $(LINTFLAGS)
|
||||
|
||||
# CAUTION: This is not a replacement for RPMs provided by your distro.
|
||||
# Only intended to build and test the latest unreleased changes.
|
||||
.PHONY: rpm
|
||||
rpm:
|
||||
rpkg local
|
||||
rpm: ## Build rpm packages
|
||||
$(MAKE) -C rpm
|
||||
|
||||
# Remember that rpms install the executable to /usr/bin/buildah while a `make install`
|
||||
# installs it to /usr/local/bin/buildah, which likely comes earlier in $PATH. Always use
|
||||
# a full path to test the installed buildah or you risk calling another executable.
|
||||
.PHONY: rpm-install
|
||||
rpm-install: package ## Install rpm packages
|
||||
$(call err_if_empty,PKG_MANAGER) -y install rpm/RPMS/*/*.rpm
|
||||
/usr/bin/buildah version
|
||||
|
|
23
OWNERS
|
@ -1,28 +1,25 @@
|
|||
approvers:
|
||||
- TomSweeneyRedHat
|
||||
- ashley-cui
|
||||
- cevich
|
||||
- baude
|
||||
- flouthoc
|
||||
- giuseppe
|
||||
- lsm5
|
||||
- Luap99
|
||||
- mheon
|
||||
- mtrmac
|
||||
- nalind
|
||||
- rhatdan
|
||||
- umohnani8
|
||||
- vrothberg
|
||||
reviewers:
|
||||
- QiWang19
|
||||
- TomSweeneyRedHat
|
||||
reviewers:
|
||||
- ashley-cui
|
||||
- baude
|
||||
- cevich
|
||||
- edsantiago
|
||||
- flouthoc
|
||||
- giuseppe
|
||||
- haircommander
|
||||
- jwhonce
|
||||
- Honny1
|
||||
- lsm5
|
||||
- Luap99
|
||||
- mheon
|
||||
- mrunalp
|
||||
- mtrmac
|
||||
- nalind
|
||||
- rhatdan
|
||||
- umohnani8
|
||||
- TomSweeneyRedHat
|
||||
- vrothberg
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container Initiative (OCI)](https://www.opencontainers.org/) container images
|
||||
|
||||
[Go Report Card](https://goreportcard.com/report/github.com/containers/buildah)
|
||||
[OpenSSF Best Practices](https://www.bestpractices.dev/projects/10579)
|
||||
|
||||
|
||||
The Buildah package provides a command line tool that can be used to
|
||||
|
@ -20,6 +21,8 @@ The Buildah package provides a command line tool that can be used to
|
|||
|
||||
For blogs, release announcements and more, please checkout the [buildah.io](https://buildah.io) website!
|
||||
|
||||
**[Buildah Container Images](https://github.com/containers/image_build/blob/main/buildah/README.md)**
|
||||
|
||||
**[Buildah Demos](demos)**
|
||||
|
||||
**[Changelog](CHANGELOG.md)**
|
||||
|
|
|
@ -0,0 +1,48 @@
|
|||

|
||||

|
||||
|
||||
# Buildah Roadmap
|
||||
|
||||
The Buildah development team reviews feature requests from its various stakeholders for consideration
|
||||
quarterly along with the Podman Development team. These features are then prioritized and the top
|
||||
features are assigned to one or more engineers.
|
||||
|
||||
|
||||
## Future feature considerations
|
||||
|
||||
The following features are of general importance to Buildah. While these features have no timeline
|
||||
associated with them yet, they will likely be on future quarterly milestones.
|
||||
|
||||
* Ongoing work around partial pull support (zstd:chunked)
|
||||
* Improved support for the BuildKit API.
|
||||
* Performance and stability improvements.
|
||||
* Reductions to the size of the Buildah binary.
|
||||
|
||||
## Milestones and commitments by quarter
|
||||
|
||||
This section is a historical account of what features were prioritized by quarter. Results of the prioritization will be added at the start of each quarter (Jan, Apr, July, Oct).
|
||||
|
||||
### 2025 Q2 ####
|
||||
|
||||
#### Releases ####
|
||||
- [ ] Buildah 1.40
|
||||
|
||||
#### Features ####
|
||||
- [ ] Reduce binary size of Buildah
|
||||
- [ ] Additional Containerfile command options
|
||||
|
||||
#### CNCF ####
|
||||
- [ ] Add and adhere to Governance model
|
||||
- [ ] Update Maintainers file
|
||||
|
||||
### 2025 Q1 ####
|
||||
|
||||
#### Releases ####
|
||||
- [x] Buildah 1.39
|
||||
|
||||
#### Features ####
|
||||
- [x] Artifact add --options
|
||||
|
||||
#### CNCF ####
|
||||
- [x] Create Governance documentation
|
||||
- [x] Create Maintainers file
|
383
add.go
|
@ -2,6 +2,8 @@ package buildah
|
|||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -10,6 +12,7 @@ import (
|
|||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
@ -18,18 +21,26 @@ import (
|
|||
|
||||
"github.com/containers/buildah/copier"
|
||||
"github.com/containers/buildah/define"
|
||||
"github.com/containers/buildah/internal/tmpdir"
|
||||
"github.com/containers/buildah/pkg/chrootuser"
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/pkg/tlsclientconfig"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/regexp"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/moby/sys/userns"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// AddAndCopyOptions holds options for add and copy commands.
|
||||
type AddAndCopyOptions struct {
|
||||
//Chmod sets the access permissions of the destination content.
|
||||
// Chmod sets the access permissions of the destination content.
|
||||
Chmod string
|
||||
// Chown is a spec for the user who should be given ownership over the
|
||||
// newly-added content, potentially overriding permissions which would
|
||||
|
@ -72,20 +83,77 @@ type AddAndCopyOptions struct {
|
|||
// Clear the sticky bit on items being copied. Has no effect on
|
||||
// archives being extracted, where the bit is always preserved.
|
||||
StripStickyBit bool
|
||||
// If not "", a directory containing a CA certificate (ending with
|
||||
// ".crt"), a client certificate (ending with ".cert") and a client
|
||||
// certificate key (ending with ".key") used when downloading sources
|
||||
// from locations protected with TLS.
|
||||
CertPath string
|
||||
// Allow downloading sources from HTTPS where TLS verification fails.
|
||||
InsecureSkipTLSVerify types.OptionalBool
|
||||
// MaxRetries is the maximum number of attempts we'll make to retrieve
|
||||
// contents from a remote location.
|
||||
MaxRetries int
|
||||
// RetryDelay is how long to wait before retrying attempts to retrieve
|
||||
// remote contents.
|
||||
RetryDelay time.Duration
|
||||
// Parents specifies that we should preserve either all of the parent
|
||||
// directories of source locations, or the ones which follow "/./" in
|
||||
// the source paths for source locations which include such a
|
||||
// component.
|
||||
Parents bool
|
||||
// Timestamp is a timestamp to override on all content as it is being read.
|
||||
Timestamp *time.Time
|
||||
// Link, when set to true, creates an independent layer containing the copied content
|
||||
// that sits on top of existing layers. This layer can be cached and reused
|
||||
// separately, and is not affected by filesystem changes from previous instructions.
|
||||
Link bool
|
||||
// BuildMetadata is consulted only when Link is true. Contains metadata used by
|
||||
// imagebuildah for cache evaluation of linked layers (inheritLabels, unsetAnnotations,
|
||||
// inheritAnnotations, newAnnotations). This field is internally managed and should
|
||||
// not be set by external API users.
|
||||
BuildMetadata string
|
||||
}
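The new fields above are easiest to read with a short usage sketch. The following is illustrative only and not part of this change: the field names come from the struct above, but the helper function, the builder value, and the URL/paths are assumptions, and the Add call signature is inferred from the Add hunk further down.

import (
	"time"

	"github.com/containers/buildah"
	"github.com/containers/image/v5/types"
)

// addRemote is a hypothetical helper: it assumes an already-created
// *buildah.Builder and fetches one TLS-protected remote source with retries.
func addRemote(b *buildah.Builder) error {
	opts := buildah.AddAndCopyOptions{
		// Directory containing *.crt, *.cert and *.key files for the source host.
		CertPath:              "/etc/containers/certs.d/example.com",
		InsecureSkipTLSVerify: types.OptionalBoolFalse,
		MaxRetries:            3,               // retry transient download failures
		RetryDelay:            2 * time.Second, // pause between attempts
	}
	// Destination and source URL are placeholders.
	return b.Add("/opt/app/", false, opts, "https://example.com/app.tar.gz")
}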
|
||||
|
||||
// sourceIsRemote returns true if "source" is a remote location.
|
||||
// gitURLFragmentSuffix matches fragments to use as Git reference and build
|
||||
// context from the Git repository e.g.
|
||||
//
|
||||
// github.com/containers/buildah.git
|
||||
// github.com/containers/buildah.git#main
|
||||
// github.com/containers/buildah.git#v1.35.0
|
||||
var gitURLFragmentSuffix = regexp.Delayed(`\.git(?:#.+)?$`)
|
||||
|
||||
// sourceIsGit returns true if "source" is a git location.
|
||||
func sourceIsGit(source string) bool {
|
||||
return isURL(source) && gitURLFragmentSuffix.MatchString(source)
|
||||
}
|
||||
|
||||
func isURL(url string) bool {
|
||||
return strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://")
|
||||
}
|
||||
|
||||
// sourceIsRemote returns true if "source" is a remote location
|
||||
// and *not* a git repo. Certain github urls such as raw.github.* are allowed.
|
||||
func sourceIsRemote(source string) bool {
|
||||
return strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://")
|
||||
return isURL(source) && !gitURLFragmentSuffix.MatchString(source)
|
||||
}
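To make the split concrete, here is an illustrative classification derived from the isURL prefix check and the gitURLFragmentSuffix regexp above; it assumes the same package and an fmt import, and is not part of the diff.

func classifySources() {
	fmt.Println(sourceIsGit("https://github.com/containers/buildah.git"))         // true
	fmt.Println(sourceIsGit("https://github.com/containers/buildah.git#v1.35.0")) // true
	fmt.Println(sourceIsRemote("https://github.com/containers/buildah.git#main")) // false: handled as a git source
	fmt.Println(sourceIsRemote("https://example.com/app.tar.gz"))                 // true: plain HTTP(S) download
	fmt.Println(sourceIsGit("git@github.com:containers/buildah.git"))             // false: no http(s):// prefix
}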
|
||||
|
||||
// getURL writes a tar archive containing the named content
|
||||
func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest) error {
|
||||
func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest, certPath string, insecureSkipTLSVerify types.OptionalBool, timestamp *time.Time) error {
|
||||
url, err := url.Parse(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response, err := http.Get(src)
|
||||
tlsClientConfig := &tls.Config{
|
||||
CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
|
||||
}
|
||||
if err := tlsclientconfig.SetupCertificates(certPath, tlsClientConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
tlsClientConfig.InsecureSkipVerify = insecureSkipTLSVerify == types.OptionalBoolTrue
|
||||
|
||||
tr := &http.Transport{TLSClientConfig: tlsClientConfig}
|
||||
httpClient := &http.Client{Transport: tr}
|
||||
response, err := httpClient.Get(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -101,15 +169,19 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
|
|||
name = path.Base(url.Path)
|
||||
}
|
||||
// If there's a date on the content, use it. If not, use the Unix epoch
|
||||
// for compatibility.
|
||||
// or a specified value for compatibility.
|
||||
date := time.Unix(0, 0).UTC()
|
||||
lastModified := response.Header.Get("Last-Modified")
|
||||
if lastModified != "" {
|
||||
d, err := time.Parse(time.RFC1123, lastModified)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing last-modified time: %w", err)
|
||||
if timestamp != nil {
|
||||
date = timestamp.UTC()
|
||||
} else {
|
||||
lastModified := response.Header.Get("Last-Modified")
|
||||
if lastModified != "" {
|
||||
d, err := time.Parse(time.RFC1123, lastModified)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing last-modified time %q: %w", lastModified, err)
|
||||
}
|
||||
date = d.UTC()
|
||||
}
|
||||
date = d
|
||||
}
|
||||
// Figure out the size of the content.
|
||||
size := response.ContentLength
|
||||
|
@ -147,7 +219,7 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
|
|||
uid = chown.UID
|
||||
gid = chown.GID
|
||||
}
|
||||
var mode int64 = 0600
|
||||
var mode int64 = 0o600
|
||||
if chmod != nil {
|
||||
mode = int64(*chmod)
|
||||
}
|
||||
|
@ -201,6 +273,37 @@ func includeDirectoryAnyway(path string, pm *fileutils.PatternMatcher) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// globbedToGlobbable takes a pathname which might include the '[', *, or ?
|
||||
// characters, and converts it into a glob pattern that matches itself by
|
||||
// marking the '[' characters as _not_ the beginning of match ranges and
|
||||
// escaping the * and ? characters.
|
||||
func globbedToGlobbable(glob string) string {
|
||||
result := glob
|
||||
result = strings.ReplaceAll(result, "[", "[[]")
|
||||
result = strings.ReplaceAll(result, "?", "\\?")
|
||||
result = strings.ReplaceAll(result, "*", "\\*")
|
||||
return result
|
||||
}
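A worked example of the escaping above (illustrative only, same package, fmt import assumed):

func exampleGlobbable() {
	// Glob metacharacters are neutralized so the result matches only itself.
	fmt.Println(globbedToGlobbable("dir/[a]/*.txt")) // dir/[[]a]/\*.txt
	fmt.Println(globbedToGlobbable("file?.log"))     // file\?.log
}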
|
||||
|
||||
// getParentsPrefixToRemoveAndParentsToSkip gets from the pattern the prefix before the "pivot point",
|
||||
// the location in the source path marked by the path component named "."
|
||||
// (i.e. where "/./" occurs in the path), along with the list of parents to skip.
|
||||
// If "/./" is not present, "/" is returned.
|
||||
func getParentsPrefixToRemoveAndParentsToSkip(pattern string, contextDir string) (string, []string) {
|
||||
prefix, _, found := strings.Cut(strings.TrimPrefix(pattern, contextDir), "/./")
|
||||
if !found {
|
||||
return string(filepath.Separator), []string{}
|
||||
}
|
||||
prefix = strings.TrimPrefix(filepath.Clean(string(filepath.Separator)+prefix), string(filepath.Separator))
|
||||
out := []string{}
|
||||
parentPath := prefix
|
||||
for parentPath != "/" && parentPath != "." {
|
||||
out = append(out, parentPath)
|
||||
parentPath = filepath.Dir(parentPath)
|
||||
}
|
||||
return prefix, out
|
||||
}
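A worked example for the pivot-point helper above, with output values obtained by tracing the code on Linux (illustrative only, same package, fmt import assumed):

func exampleParents() {
	// With a "/./" pivot, the prefix before the pivot is kept and its
	// parent chain is reported as directories to skip.
	prefix, skip := getParentsPrefixToRemoveAndParentsToSkip("/ctx/a/b/./c/app.go", "/ctx")
	fmt.Println(prefix) // a/b
	fmt.Println(skip)   // [a/b a]

	// Without "/./", everything stays relative to "/" and nothing is skipped.
	prefix, skip = getParentsPrefixToRemoveAndParentsToSkip("/ctx/a/b/c/app.go", "/ctx")
	fmt.Println(prefix) // /
	fmt.Println(skip)   // []
}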
|
||||
|
||||
// Add copies the contents of the specified sources into the container's root
|
||||
// filesystem, optionally extracting contents of local files that look like
|
||||
// non-empty archives.
|
||||
|
@ -233,18 +336,31 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
}
|
||||
|
||||
// Figure out what sorts of sources we have.
|
||||
var localSources, remoteSources []string
|
||||
var localSources, remoteSources, gitSources []string
|
||||
for i, src := range sources {
|
||||
if src == "" {
|
||||
return errors.New("empty source location")
|
||||
}
|
||||
if sourceIsRemote(src) {
|
||||
remoteSources = append(remoteSources, src)
|
||||
continue
|
||||
}
|
||||
if sourceIsGit(src) {
|
||||
gitSources = append(gitSources, src)
|
||||
continue
|
||||
}
|
||||
if !filepath.IsAbs(src) && options.ContextDir == "" {
|
||||
sources[i] = filepath.Join(currentDir, src)
|
||||
}
|
||||
localSources = append(localSources, sources[i])
|
||||
}
|
||||
|
||||
// Treat git sources as a subset of remote sources
|
||||
// differentiating only in how we fetch the two later on.
|
||||
if len(gitSources) > 0 {
|
||||
remoteSources = append(remoteSources, gitSources...)
|
||||
}
|
||||
|
||||
// Check how many items our local source specs matched. Each spec
|
||||
// should have matched at least one item, otherwise we consider it an
|
||||
// error.
|
||||
|
@ -276,7 +392,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
}
|
||||
numLocalSourceItems += len(localSourceStat.Globbed)
|
||||
}
|
||||
if numLocalSourceItems+len(remoteSources) == 0 {
|
||||
if numLocalSourceItems+len(remoteSources)+len(gitSources) == 0 {
|
||||
return fmt.Errorf("no sources %v found: %w", sources, syscall.ENOENT)
|
||||
}
|
||||
|
||||
|
@ -333,6 +449,9 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
destCanBeFile = true
|
||||
}
|
||||
}
|
||||
if len(gitSources) > 0 {
|
||||
destMustBeDirectory = true
|
||||
}
|
||||
}
|
||||
|
||||
// We care if the destination either doesn't exist, or exists and is a
|
||||
|
@ -354,10 +473,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
}
|
||||
|
||||
// if the destination is a directory that doesn't yet exist, let's copy it.
|
||||
newDestDirFound := false
|
||||
if (len(destStats) == 1 || len(destStats[0].Globbed) == 0) && destMustBeDirectory && !destCanBeFile {
|
||||
newDestDirFound = true
|
||||
}
|
||||
newDestDirFound := (len(destStats) == 1 || len(destStats[0].Globbed) == 0) && destMustBeDirectory && !destCanBeFile
|
||||
|
||||
if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular {
|
||||
if destMustBeDirectory {
|
||||
|
@ -389,14 +505,73 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
}
|
||||
destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
|
||||
|
||||
// Create the target directory if it doesn't exist yet.
|
||||
var putRoot, putDir, stagingDir string
|
||||
var createdDirs []string
|
||||
var latestTimestamp time.Time
|
||||
|
||||
mkdirOptions := copier.MkdirOptions{
|
||||
UIDMap: destUIDMap,
|
||||
GIDMap: destGIDMap,
|
||||
ChownNew: chownDirs,
|
||||
}
|
||||
if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil {
|
||||
return fmt.Errorf("ensuring target directory exists: %w", err)
|
||||
|
||||
// If --link is specified, we create a staging directory to hold the content
|
||||
// that will then become an independent layer
|
||||
if options.Link {
|
||||
containerDir, err := b.store.ContainerDirectory(b.ContainerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting container directory for %q: %w", b.ContainerID, err)
|
||||
}
|
||||
|
||||
stagingDir, err = os.MkdirTemp(containerDir, "link-stage-")
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating staging directory for link %q: %w", b.ContainerID, err)
|
||||
}
|
||||
|
||||
putRoot = stagingDir
|
||||
|
||||
cleanDest := filepath.Clean(destination)
|
||||
|
||||
if strings.Contains(cleanDest, "..") {
|
||||
return fmt.Errorf("invalid destination path %q: contains path traversal", destination)
|
||||
}
|
||||
|
||||
if renameTarget != "" {
|
||||
putDir = filepath.Dir(filepath.Join(stagingDir, cleanDest))
|
||||
} else {
|
||||
putDir = filepath.Join(stagingDir, cleanDest)
|
||||
}
|
||||
|
||||
putDirAbs, err := filepath.Abs(putDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to resolve absolute path: %w", err)
|
||||
}
|
||||
|
||||
stagingDirAbs, err := filepath.Abs(stagingDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to resolve staging directory absolute path: %w", err)
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(putDirAbs, stagingDirAbs+string(os.PathSeparator)) && putDirAbs != stagingDirAbs {
|
||||
return fmt.Errorf("destination path %q escapes staging directory", destination)
|
||||
}
|
||||
if err := copier.Mkdir(putRoot, putDirAbs, mkdirOptions); err != nil {
|
||||
return fmt.Errorf("ensuring target directory exists: %w", err)
|
||||
}
|
||||
tempPath := putDir
|
||||
for tempPath != stagingDir && tempPath != filepath.Dir(tempPath) {
|
||||
if _, err := os.Stat(tempPath); err == nil {
|
||||
createdDirs = append(createdDirs, tempPath)
|
||||
}
|
||||
tempPath = filepath.Dir(tempPath)
|
||||
}
|
||||
} else {
|
||||
if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil {
|
||||
return fmt.Errorf("ensuring target directory exists: %w", err)
|
||||
}
|
||||
|
||||
putRoot = extractDirectory
|
||||
putDir = extractDirectory
|
||||
}
|
||||
|
||||
// Copy each source in turn.
|
||||
|
@@ -404,7 +579,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
var multiErr *multierror.Error
var getErr, closeErr, renameErr, putErr error
var wg sync.WaitGroup
if sourceIsRemote(src) {
if sourceIsRemote(src) || sourceIsGit(src) {
pipeReader, pipeWriter := io.Pipe()
var srcDigest digest.Digest
if options.Checksum != "" {

@@ -413,12 +588,48 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
return fmt.Errorf("invalid checksum flag: %w", err)
}
}
wg.Add(1)
go func() {
getErr = getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest)
pipeWriter.Close()
wg.Done()
}()
if sourceIsGit(src) {
go func() {
defer wg.Done()
defer pipeWriter.Close()
var cloneDir, subdir string
cloneDir, subdir, getErr = define.TempDirForURL(tmpdir.GetTempDir(), "", src)
if getErr != nil {
return
}
getOptions := copier.GetOptions{
UIDMap: srcUIDMap,
GIDMap: srcGIDMap,
Excludes: options.Excludes,
ExpandArchives: extract,
ChownDirs: chownDirs,
ChmodDirs: chmodDirsFiles,
ChownFiles: chownFiles,
ChmodFiles: chmodDirsFiles,
StripSetuidBit: options.StripSetuidBit,
StripSetgidBit: options.StripSetgidBit,
StripStickyBit: options.StripStickyBit,
Timestamp: options.Timestamp,
}
writer := io.WriteCloser(pipeWriter)
repositoryDir := filepath.Join(cloneDir, subdir)
getErr = copier.Get(repositoryDir, repositoryDir, getOptions, []string{"."}, writer)
}()
} else {
go func() {
getErr = retry.IfNecessary(context.TODO(), func() error {
return getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest, options.CertPath, options.InsecureSkipTLSVerify, options.Timestamp)
}, &retry.Options{
MaxRetry: options.MaxRetries,
Delay: options.RetryDelay,
})
pipeWriter.Close()
wg.Done()
}()
}
wg.Add(1)
go func() {
b.ContentDigester.Start("")
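Both the URL and git branches above produce a tar stream into one end of an io.Pipe while a second goroutine (the copier.Put side) consumes the other end, with a WaitGroup keeping the shared error variables valid until both finish. A stripped-down sketch of that producer/consumer shape, stdlib only (produce/consume stand in for getURL/copier.Get and copier.Put):

```go
package pipecopy

import (
	"io"
	"sync"
)

// stream runs produce and consume concurrently over a pipe, collecting both
// errors, in the same shape as the goroutines in the hunks above.
func stream(produce func(io.Writer) error, consume func(io.Reader) error) (getErr, putErr error) {
	pr, pw := io.Pipe()
	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		defer wg.Done()
		defer pw.Close() // unblocks the reader even when produce fails
		getErr = produce(pw)
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		defer pr.Close()
		putErr = consume(pr)
	}()

	wg.Wait()
	return getErr, putErr
}
```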
@@ -437,9 +648,9 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
ChmodDirs: nil,
ChownFiles: nil,
ChmodFiles: nil,
IgnoreDevices: runningInUserNS(),
IgnoreDevices: userns.RunningInUserNS(),
}
putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
putErr = copier.Put(putRoot, putDir, putOptions, io.TeeReader(pipeReader, hasher))
}
hashCloser.Close()
pipeReader.Close()

@@ -477,30 +688,29 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if localSourceStat == nil {
continue
}
// Iterate through every item that matched the glob.
itemsCopied := 0
for _, glob := range localSourceStat.Globbed {
rel := glob
if filepath.IsAbs(glob) {
if rel, err = filepath.Rel(contextDir, glob); err != nil {
return fmt.Errorf("computing path of %q relative to %q: %w", glob, contextDir, err)
for _, globbed := range localSourceStat.Globbed {
rel := globbed
if filepath.IsAbs(globbed) {
if rel, err = filepath.Rel(contextDir, globbed); err != nil {
return fmt.Errorf("computing path of %q relative to %q: %w", globbed, contextDir, err)
}
}
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return fmt.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
return fmt.Errorf("possible escaping context directory error: %q is outside of %q", globbed, contextDir)
}
// Check for dockerignore-style exclusion of this item.
if rel != "." {
excluded, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
excluded, err := pm.Matches(filepath.ToSlash(rel)) //nolint:staticcheck
if err != nil {
return fmt.Errorf("checking if %q(%q) is excluded: %w", glob, rel, err)
return fmt.Errorf("checking if %q(%q) is excluded: %w", globbed, rel, err)
}
if excluded {
// non-directories that are excluded are excluded, no question, but
// directories can only be skipped if we don't have to allow for the
// possibility of finding things to include under them
globInfo := localSourceStat.Results[glob]
globInfo := localSourceStat.Results[globbed]
if !globInfo.IsDir || !includeDirectoryAnyway(rel, pm) {
continue
}
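Each globbed item above is first made relative to the build context and rejected if the relative path climbs out of it; only then is the ignore matcher consulted. A small stdlib-only sketch of that escape check (contextDir/globbed are illustrative parameter names):

```go
package contextcheck

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// relativeToContext mirrors the check above: absolute matches are rebased
// onto contextDir, and anything resolving outside of it is rejected.
func relativeToContext(contextDir, globbed string) (string, error) {
	rel := globbed
	if filepath.IsAbs(globbed) {
		var err error
		if rel, err = filepath.Rel(contextDir, globbed); err != nil {
			return "", fmt.Errorf("computing path of %q relative to %q: %w", globbed, contextDir, err)
		}
	}
	if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
		return "", fmt.Errorf("%q is outside of %q", globbed, contextDir)
	}
	return rel, nil
}
```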
@@ -517,7 +727,10 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
// due to potentially not having anything in the tarstream that we passed.
itemsCopied++
}
st := localSourceStat.Results[glob]
st := localSourceStat.Results[globbed]
if options.Link && st.ModTime.After(latestTimestamp) {
latestTimestamp = st.ModTime
}
pipeReader, pipeWriter := io.Pipe()
wg.Add(1)
go func() {

@@ -530,7 +743,26 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
return false, false, nil
})
}
writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
if options.Parents {
parentsPrefixToRemove, parentsToSkip := getParentsPrefixToRemoveAndParentsToSkip(src, options.ContextDir)
writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
if slices.Contains(parentsToSkip, hdr.Name) && hdr.Typeflag == tar.TypeDir {
return true, false, nil
}
hdr.Name = strings.TrimPrefix(hdr.Name, parentsPrefixToRemove)
hdr.Name = strings.TrimPrefix(hdr.Name, "/")
if hdr.Typeflag == tar.TypeLink {
hdr.Linkname = strings.TrimPrefix(hdr.Linkname, parentsPrefixToRemove)
hdr.Linkname = strings.TrimPrefix(hdr.Linkname, "/")
}
if hdr.Name == "" {
return true, false, nil
}
return false, false, nil
})
}
writer = newTarFilterer(writer, func(_ *tar.Header) (bool, bool, io.Reader) {
itemsCopied++
return false, false, nil
})
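With --parents the tar stream is rewritten in flight: directory entries that exist only as shared parents are dropped, and the common prefix is trimmed from names and hard-link targets. newTarFilterer is buildah-internal, so here is a stand-alone sketch of the same header rewrite operating on a raw stream with archive/tar (prefix and parentsToSkip are assumed inputs):

```go
package parentsfilter

import (
	"archive/tar"
	"io"
	"slices"
	"strings"
)

// stripParents copies a tar stream from r to w, dropping the listed parent
// directories and trimming prefix from entry names and hard-link targets,
// like the --parents filter above.
func stripParents(w io.Writer, r io.Reader, prefix string, parentsToSkip []string) error {
	tr := tar.NewReader(r)
	tw := tar.NewWriter(w)
	defer tw.Close()
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		if hdr.Typeflag == tar.TypeDir && slices.Contains(parentsToSkip, hdr.Name) {
			continue
		}
		hdr.Name = strings.TrimPrefix(strings.TrimPrefix(hdr.Name, prefix), "/")
		if hdr.Typeflag == tar.TypeLink {
			hdr.Linkname = strings.TrimPrefix(strings.TrimPrefix(hdr.Linkname, prefix), "/")
		}
		if hdr.Name == "" {
			continue
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, tr); err != nil {
			return err
		}
	}
	return nil
}
```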
@@ -546,8 +778,10 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
StripSetuidBit: options.StripSetuidBit,
StripSetgidBit: options.StripSetgidBit,
StripStickyBit: options.StripStickyBit,
Parents: options.Parents,
Timestamp: options.Timestamp,
}
getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer)
getErr = copier.Get(contextDir, contextDir, getOptions, []string{globbedToGlobbable(globbed)}, writer)
closeErr = writer.Close()
if renameTarget != "" && renamedItems > 1 {
renameErr = fmt.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems)

@@ -578,14 +812,15 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
ChmodDirs: nil,
ChownFiles: nil,
ChmodFiles: nil,
IgnoreDevices: runningInUserNS(),
IgnoreDevices: userns.RunningInUserNS(),
}
putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
putErr = copier.Put(putRoot, putDir, putOptions, io.TeeReader(pipeReader, hasher))
}
hashCloser.Close()
pipeReader.Close()
wg.Done()
}()
wg.Wait()
if getErr != nil {
getErr = fmt.Errorf("reading %q: %w", src, getErr)

@@ -615,6 +850,58 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
return fmt.Errorf("no items matching glob %q copied (%d filtered out%s): %w", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile, syscall.ENOENT)
}
}
if options.Link {
if !latestTimestamp.IsZero() {
for _, dir := range createdDirs {
if err := os.Chtimes(dir, latestTimestamp, latestTimestamp); err != nil {
logrus.Warnf("failed to set timestamp on directory %q: %v", dir, err)
}
}
}
var created time.Time
if options.Timestamp != nil {
created = *options.Timestamp
} else if !latestTimestamp.IsZero() {
created = latestTimestamp
} else {
created = time.Unix(0, 0).UTC()
}
command := "ADD"
if !extract {
command = "COPY"
}
contentType, digest := b.ContentDigester.Digest()
summary := contentType
if digest != "" {
if summary != "" {
summary = summary + ":"
}
summary = summary + digest.Encoded()
logrus.Debugf("added content from --link %s", summary)
}
createdBy := "/bin/sh -c #(nop) " + command + " --link " + summary + " in " + destination + " " + options.BuildMetadata
history := v1.History{
Created: &created,
CreatedBy: createdBy,
Comment: b.HistoryComment(),
}
linkedLayer := LinkedLayer{
History: history,
BlobPath: stagingDir,
}
b.AppendedLinkedLayers = append(b.AppendedLinkedLayers, linkedLayer)
if err := b.Save(); err != nil {
return fmt.Errorf("saving builder state after queuing linked layer: %w", err)
}
}
return nil
}
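For --link the layer's "created" time follows a simple precedence in the code above: an explicit options.Timestamp wins, otherwise the newest source mtime, otherwise the Unix epoch; the same time is stamped onto directories created inside the staging tree so the resulting layer is reproducible. A stdlib-only sketch of that precedence (not buildah's actual helpers):

```go
package linktime

import (
	"os"
	"time"
)

// createdTime picks the history timestamp the way the code above does.
func createdTime(explicit *time.Time, latest time.Time) time.Time {
	switch {
	case explicit != nil:
		return *explicit
	case !latest.IsZero():
		return latest
	default:
		return time.Unix(0, 0).UTC()
	}
}

// stampDirs applies the chosen time to every directory created for staging;
// failures are tolerated, matching the Warnf above.
func stampDirs(dirs []string, when time.Time) {
	for _, dir := range dirs {
		_ = os.Chtimes(dir, when, when)
	}
}
```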
@@ -640,7 +927,6 @@ func (b *Builder) userForRun(mountPoint string, userspec string) (specs.User, st
} else {
u.AdditionalGids = groups
}

}
return u, homeDir, err
}

@@ -691,8 +977,8 @@ func (b *Builder) userForCopy(mountPoint string, userspec string) (uint32, uint3
return owner.UID, owner.GID, nil
}

// EnsureContainerPathAs creates the specified directory owned by USER
// with the file mode set to MODE.
// EnsureContainerPathAs creates the specified directory if it doesn't exist,
// setting a newly-created directory's owner to USER and its permissions to MODE.
func (b *Builder) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
mountPoint, err := b.Mount(b.MountLabel)
if err != nil {

@@ -722,5 +1008,4 @@ func (b *Builder) EnsureContainerPathAs(path, user string, mode *os.FileMode) er
GIDMap: destGIDMap,
}
return copier.Mkdir(mountPoint, filepath.Join(mountPoint, path), opts)
}
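The rewritten godoc makes the contract explicit: EnsureContainerPathAs only applies USER and MODE to directories it has to create. A hypothetical call site, using only the exported API shown above (the path and owner values are made up for illustration):

```go
package ensurepath

import (
	"fmt"
	"os"

	"github.com/containers/buildah"
)

// ensureWorkdir shows the intended use: the directory is created only if it
// is missing, and the owner/mode apply only to what actually gets created.
func ensureWorkdir(b *buildah.Builder) error {
	mode := os.FileMode(0o755)
	if err := b.EnsureContainerPathAs("/workdir", "1000:1000", &mode); err != nil {
		return fmt.Errorf("preparing /workdir: %w", err)
	}
	return nil
}
```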
@@ -1,8 +0,0 @@
//go:build !linux
// +build !linux

package buildah

func runningInUserNS() bool {
return false
}

@@ -1,9 +0,0 @@
package buildah

import (
"github.com/opencontainers/runc/libcontainer/userns"
)

func runningInUserNS() bool {
return userns.RunningInUserNS()
}
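The two per-OS wrapper files above are deleted; the Add() hunks earlier in this diff now call userns.RunningInUserNS() directly. A minimal sketch of the direct call, using the import path the deleted Linux wrapper used (the project may have since moved to a different provider of the same helper, per the changelog):

```go
package usernscheck

import "github.com/opencontainers/runc/libcontainer/userns"

// ignoreDevices mirrors how the copier.PutOptions above use the check:
// device nodes can't be created inside a user namespace, so skip them.
func ignoreDevices() bool {
	return userns.RunningInUserNS()
}
```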
@@ -1,5 +1,4 @@
//go:build linux
// +build linux

package bind

@@ -8,6 +7,7 @@ import (
"fmt"
"os"
"path/filepath"
"slices"
"syscall"

"github.com/containers/buildah/util"

@@ -15,7 +15,6 @@ import (
"github.com/containers/storage/pkg/mount"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
"golang.org/x/sys/unix"
)

@@ -49,7 +48,7 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
if err != nil {
return nil, fmt.Errorf("checking permissions on %q: %w", bundlePath, err)
}
if err = os.Chmod(bundlePath, info.Mode()|0111); err != nil {
if err = os.Chmod(bundlePath, info.Mode()|0o111); err != nil {
return nil, fmt.Errorf("loosening permissions on %q: %w", bundlePath, err)
}

@@ -116,7 +115,7 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
// other unprivileged users outside of containers, shouldn't be able to
// access.
mnt := filepath.Join(bundlePath, "mnt")
if err = idtools.MkdirAndChown(mnt, 0100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil {
if err = idtools.MkdirAndChown(mnt, 0o100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil {
return unmountAll, fmt.Errorf("creating %q owned by the container's root user: %w", mnt, err)
}

@@ -129,7 +128,7 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
// Create a bind mount for the root filesystem and add it to the list.
rootfs := filepath.Join(mnt, "rootfs")
if err = os.Mkdir(rootfs, 0000); err != nil {
if err = os.Mkdir(rootfs, 0o000); err != nil {
return unmountAll, fmt.Errorf("creating directory %q: %w", rootfs, err)
}
if err = unix.Mount(rootPath, rootfs, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil {

@@ -160,13 +159,13 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
if info.IsDir() {
// If the source is a directory, make one to use as the
// mount target.
if err = os.Mkdir(stage, 0000); err != nil {
if err = os.Mkdir(stage, 0o000); err != nil {
return unmountAll, fmt.Errorf("creating directory %q: %w", stage, err)
}
} else {
// If the source is not a directory, create an empty
// file to use as the mount target.
file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0000)
file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0o000)
if err != nil {
return unmountAll, fmt.Errorf("creating file %q: %w", stage, err)
}
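The permission-literal changes in this file (0111 to 0o111, 0100 to 0o100, and so on) are purely cosmetic: both spellings are octal and compare equal, the 0o prefix just makes the base explicit. A trivial stdlib check:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Same values, different spelling of the octal literal.
	fmt.Println(os.FileMode(0111) == os.FileMode(0o111)) // true
	fmt.Println(os.FileMode(0600) == os.FileMode(0o600)) // true
}
```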
@@ -1,4 +1,4 @@
// +build !linux
//go:build !linux

package bind
@@ -1,8 +1,9 @@
package bind

import (
"slices"

"github.com/opencontainers/runtime-spec/specs-go"
"golang.org/x/exp/slices"
)

const (
@@ -1,7 +0,0 @@
#!/usr/bin/env bash
${CPP:-${CC:-cc} -E} ${CPPFLAGS} - > /dev/null 2> /dev/null << EOF
#include <btrfs/version.h>
EOF
if test $? -ne 0 ; then
echo btrfs_noversion
fi
buildah.go (62 changed lines)

@@ -91,7 +91,7 @@ type Builder struct {
// Logger is the logrus logger to write log messages with
Logger *logrus.Logger `json:"-"`

// Args define variables that users can pass at build-time to the builder
// Args define variables that users can pass at build-time to the builder.
Args map[string]string
// Type is used to help identify a build container's metadata. It
// should not be modified.

@@ -118,7 +118,7 @@ type Builder struct {
// MountPoint is the last location where the container's root
// filesystem was mounted. It should not be modified.
MountPoint string `json:"mountpoint,omitempty"`
// ProcessLabel is the SELinux process label associated with the container
// ProcessLabel is the SELinux process label to use during subsequent Run() calls.
ProcessLabel string `json:"process-label,omitempty"`
// MountLabel is the SELinux mount label associated with the container
MountLabel string `json:"mount-label,omitempty"`

@@ -139,7 +139,7 @@ type Builder struct {
// Isolation controls how we handle "RUN" statements and the Run() method.
Isolation define.Isolation
// NamespaceOptions controls how we set up the namespaces for processes that we run in the container.
// NamespaceOptions controls how we set up the namespaces for processes that we Run().
NamespaceOptions define.NamespaceOptions
// ConfigureNetwork controls whether or not network interfaces and
// routing are configured for a new network namespace (i.e., when not

@@ -157,11 +157,11 @@ type Builder struct {
// NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks.
NetworkInterface nettypes.ContainerNetwork `json:"-"`

// GroupAdd is a list of groups to add to the primary process within
// the container. 'keep-groups' allows container processes to use
// supplementary groups.
// GroupAdd is a list of groups to add to the primary process when Run() is
// called. The magic 'keep-groups' value indicates that the process should
// be allowed to inherit the current set of supplementary groups.
GroupAdd []string
// ID mapping options to use when running processes in the container with non-host user namespaces.
// ID mapping options to use when running processes with non-host user namespaces.
IDMappingOptions define.IDMappingOptions
// Capabilities is a list of capabilities to use when running commands in the container.
Capabilities []string

@@ -177,14 +177,28 @@ type Builder struct {
CommonBuildOpts *define.CommonBuildOptions
// TopLayer is the top layer of the image
TopLayer string
// Format for the build Image
// Format to use for a container image we eventually commit, when we do.
Format string
// TempVolumes are temporary mount points created during container runs
// TempVolumes are temporary mount points created during Run() calls.
// Deprecated: do not use.
TempVolumes map[string]bool
// ContentDigester counts the digest of all Add()ed content
// ContentDigester counts the digest of all Add()ed content since it was
// last restarted.
ContentDigester CompositeDigester
// Devices are the additional devices to add to the containers
// Devices are parsed additional devices to provide to Run() calls.
Devices define.ContainerDevices
// DeviceSpecs are unparsed additional devices to provide to Run() calls.
DeviceSpecs []string
// CDIConfigDir is the location of CDI configuration files, if the files in
// the default configuration locations shouldn't be used.
CDIConfigDir string
// PrependedLinkedLayers and AppendedLinkedLayers are combinations of
// history entries and locations of either directory trees (if
// directories, per os.Stat()) or uncompressed layer blobs which should
// be added to the image at commit-time. The order of these relative
// to PrependedEmptyLayers and AppendedEmptyLayers in the committed
// image is not guaranteed.
PrependedLinkedLayers, AppendedLinkedLayers []LinkedLayer
}

// BuilderInfo are used as objects to display container information

@@ -215,6 +229,8 @@ type BuilderInfo struct {
IDMappingOptions define.IDMappingOptions
History []v1.History
Devices define.ContainerDevices
DeviceSpecs []string
CDIConfigDir string
}

// GetBuildInfo gets a pointer to a Builder object and returns a BuilderInfo object from it.

@@ -251,6 +267,8 @@ func GetBuildInfo(b *Builder) BuilderInfo {
Capabilities: b.Capabilities,
History: history,
Devices: b.Devices,
DeviceSpecs: b.DeviceSpecs,
CDIConfigDir: b.CDIConfigDir,
}
}

@@ -328,13 +346,15 @@ type BuilderOptions struct {
// ID mapping options to use if we're setting up our own user namespace.
IDMappingOptions *define.IDMappingOptions
// Capabilities is a list of capabilities to use when
// running commands in the container.
// running commands for Run().
Capabilities []string
CommonBuildOpts *define.CommonBuildOptions
// Format for the container image
// Format to use for a container image we eventually commit, when we do.
Format string
// Devices are the additional devices to add to the containers
// Devices are additional parsed devices to provide for Run() calls.
Devices define.ContainerDevices
// DeviceSpecs are additional unparsed devices to provide for Run() calls.
DeviceSpecs []string
// DefaultEnv is deprecated and ignored.
DefaultEnv []string
// MaxPullRetries is the maximum number of attempts we'll make to pull

@@ -345,9 +365,9 @@ type BuilderOptions struct {
// OciDecryptConfig contains the config that can be used to decrypt an image if it is
// encrypted if non-nil. If nil, it does not attempt to decrypt an image.
OciDecryptConfig *encconfig.DecryptConfig
// ProcessLabel is the SELinux process label associated with the container
// ProcessLabel is the SELinux process label associated with commands we Run()
ProcessLabel string
// MountLabel is the SELinux mount label associated with the container
// MountLabel is the SELinux mount label associated with the working container
MountLabel string
// PreserveBaseImageAnns indicates that we should preserve base
// image information (Annotations) that are present in our base image,

@@ -355,6 +375,14 @@ type BuilderOptions struct {
// itself. Useful as an internal implementation detail of multistage
// builds, and does not need to be set by most callers.
PreserveBaseImageAnns bool
// CDIConfigDir is the location of CDI configuration files, if the files in
// the default configuration locations shouldn't be used.
CDIConfigDir string
// CompatScratchConfig controls whether a "scratch" image is created
// with a truly empty configuration, as would have happened in the past
// (when set to true), or with a minimal initial configuration which
// has a working directory set in it.
CompatScratchConfig types.OptionalBool
}

// ImportOptions are used to initialize a Builder from an existing container

@@ -548,7 +576,7 @@ func (b *Builder) Save() error {
if err != nil {
return err
}
if err = ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600); err != nil {
if err = ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0o600); err != nil {
return fmt.Errorf("saving builder state to %q: %w", filepath.Join(cdir, stateFile), err)
}
return nil
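Builder.Save() persists the builder state with an atomic write so a crash cannot leave a truncated state file; the hunk above only changes the permission literal to 0o600. A small sketch of the same pattern using the ioutils helper the code already imports (the file name here is illustrative, not necessarily buildah's actual stateFile):

```go
package statesave

import (
	"encoding/json"
	"path/filepath"

	"github.com/containers/storage/pkg/ioutils"
)

// saveState marshals state and writes it atomically, readable only by the
// owner, the same way Builder.Save() stores its state file.
func saveState(cdir string, state any) error {
	buf, err := json.Marshal(state)
	if err != nil {
		return err
	}
	return ioutils.AtomicWriteFile(filepath.Join(cdir, "state.json"), buf, 0o600)
}
```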
@@ -6,6 +6,7 @@ import (
"os"
"testing"

imagetypes "github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/types"
"github.com/sirupsen/logrus"

@@ -13,6 +14,11 @@ import (
"github.com/stretchr/testify/require"
)

var testSystemContext = imagetypes.SystemContext{
SignaturePolicyPath: "tests/policy.json",
SystemRegistriesConfPath: "tests/registries.conf",
}

func TestMain(m *testing.M) {
var logLevel string
debug := false

@@ -34,6 +40,12 @@ func TestMain(m *testing.M) {
}

func TestOpenBuilderCommonBuildOpts(t *testing.T) {
// This test cannot be parallelized as this uses NewBuilder()
// which eventually and indirectly accesses a global variable
// defined in `go-selinux`, this must be fixed at `go-selinux`
// or builder must enable sometime of locking mechanism i.e if
// routine is creating Builder other's must wait for it.
// Tracked here: https://github.com/containers/buildah/issues/5967
ctx := context.TODO()
store, err := storage.GetStore(types.StoreOptions{
RunRoot: t.TempDir(),

@@ -68,7 +80,8 @@ func TestOpenBuilderCommonBuildOpts(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, container)
b, err = ImportBuilder(ctx, store, ImportOptions{
Container: container.ID,
Container: container.ID,
SignaturePolicyPath: testSystemContext.SignaturePolicyPath,
})
require.NoError(t, err)
require.NotNil(t, b.CommonBuildOpts)
changelog.txt (467 changed lines)
|
@ -1,3 +1,470 @@
|
|||
- Changelog for v1.40.0 (2025-04-17)
|
||||
* Bump c/storage to v1.58.0, c/image v5.35.0, c/common v0.63.0
|
||||
* fix(deps): update module github.com/docker/docker to v28.1.0+incompatible
|
||||
* fix(deps): update module github.com/containers/storage to v1.58.0
|
||||
* cirrus: make Total Success wait for rootless integration
|
||||
* chroot: use symbolic names when complaining about mount() errors
|
||||
* cli: hide the `completion` command instead of disabling it outright
|
||||
* Document rw and src options for --mount flag in buildah-run(1)
|
||||
* fix(deps): update module github.com/moby/buildkit to v0.21.0
|
||||
* build: add support for inherit-labels
|
||||
* chore(deps): update dependency golangci/golangci-lint to v2.1.0
|
||||
* .github: check_cirrus_cron work around github bug
|
||||
* stage_executor,getCreatedBy: expand buildArgs for sources correctly
|
||||
* Add a link to project governance and MAINTAINERS file
|
||||
* fix(deps): update github.com/containers/storage digest to b1d1b45
|
||||
* generateHostname: simplify
|
||||
* Use maps.Copy
|
||||
* Use slices.Concat
|
||||
* Use slices.Clone
|
||||
* Use slices.Contains
|
||||
* Use for range over integers
|
||||
* tests/testreport: don't copy os.Environ
|
||||
* Use any instead of interface{}
|
||||
* ci: add golangci-lint run with --tests=false
|
||||
* ci: add nolintlint, fix found issues
|
||||
* copier: rm nolint:unparam annotation
|
||||
* .golangci.yml: add unused linter
|
||||
* chroot: fix unused warnings
|
||||
* copier: fix unused warnings
|
||||
* tests/conformance: fix unused warning
|
||||
* ci: switch to golangci-lint v2
|
||||
* internal/mkcw: disable ST1003 warnings
|
||||
* tests/conformance: do not double import (fix ST1019)
|
||||
* cmd/buildah: don't double import (fix ST1019)
|
||||
* Do not capitalize error strings
|
||||
* cmd/buildah: do not capitalize error strings
|
||||
* tests/conformance: fix QF1012 warnings
|
||||
* tests/serve: fix QF1012 warning
|
||||
* Use strings.ReplaceAll to fix QF1004 warnings
|
||||
* Use switch to fix QF1003 warnings
|
||||
* Apply De Morgan's law to fix QF1001 warnings
|
||||
* Fix QF1007 staticcheck warnings
|
||||
* imagebuildah: fix revive warning
|
||||
* Rename max variable
|
||||
* tests/tools: install lint from binary, use renovate
|
||||
* fix(deps): update module github.com/containernetworking/cni to v1.3.0
|
||||
* Update Buildah issue template to new version and support podman build
|
||||
* fix(deps): update module golang.org/x/crypto to v0.37.0
|
||||
* stage_executor: reset platform in systemcontext for stages
|
||||
* fix(deps): update github.com/opencontainers/runtime-tools digest to 260e151
|
||||
* cmd/buildah: rm unused containerOutputUsingTemplate
|
||||
* cmd/buildah: rm unused getDateAndDigestAndSize
|
||||
* build: return ExecErrorCodeGeneric when git operation fails
|
||||
* add: report error while creating dir for URL source.
|
||||
* createPlatformContainer: drop MS_REMOUNT|MS_BIND
|
||||
* fix(deps): update module github.com/docker/docker to v28.0.3+incompatible
|
||||
* fix: bats won't fail on ! without cleverness
|
||||
* feat: use HistoryTimestamp, if set, for oci-archive entries
|
||||
* Allow extendedGlob to work with Windows paths
|
||||
* fix(deps): update module github.com/moby/buildkit to v0.20.2
|
||||
* fix(deps): update github.com/openshift/imagebuilder digest to e87e4e1
|
||||
* fix(deps): update module github.com/docker/docker to v28.0.2+incompatible
|
||||
* fix(deps): update module tags.cncf.io/container-device-interface to v1.0.1
|
||||
* chore(deps): update dependency containers/automation_images to v20250324
|
||||
* vendor: update github.com/opencontainers/selinux to v1.12.0
|
||||
* replace deprecated selinux/label calls
|
||||
* vendor: bump c/common to dbeb17e40c80
|
||||
* Use builtin arg defaults from imagebuilder
|
||||
* linux: accept unmask paths as glob values
|
||||
* vendor: update containers/common
|
||||
* Add --parents option for COPY in Dockerfiles
|
||||
* fix(deps): update module github.com/opencontainers/runc to v1.2.6
|
||||
* update go.sum from the previous commit
|
||||
* fix(deps): update module tags.cncf.io/container-device-interface to v1
|
||||
* chore(deps): update module golang.org/x/net to v0.36.0 [security]
|
||||
* packit: remove f40 from copr builds
|
||||
* cirrus: update to go 1.23 image
|
||||
* vendor bump to golang.org/x/crypto v0.36.0
|
||||
* cirrus: update PRIOR_FEDORA comment
|
||||
* github: remove cirrus rerun action
|
||||
* fix(deps): update module github.com/containers/common to v0.62.2
|
||||
* fix(deps): update module github.com/containers/image/v5 to v5.34.2
|
||||
* fix: close files properly when BuildDockerfiles exits
|
||||
* fix(deps): update module github.com/containers/storage to v1.57.2
|
||||
* stage_executor: history should include heredoc summary correctly
|
||||
* fix(deps): update module github.com/containers/common to v0.62.1
|
||||
* github: disable cron rerun action
|
||||
* fix(deps): update module github.com/moby/buildkit to v0.20.1
|
||||
* internal/mkcw.Archive(): use github.com/containers/storage/pkg/ioutils
|
||||
* [skip-ci] TMT: system tests
|
||||
* buildah-build.1.md: secret examples
|
||||
* fix(deps): update github.com/containers/luksy digest to 40bd943
|
||||
* fix(deps): update module github.com/opencontainers/image-spec to v1.1.1
|
||||
* fix(deps): update module github.com/containers/image/v5 to v5.34.1
|
||||
* Use UnparsedInstance.Manifest instead of ImageSource.GetManifest
|
||||
* fix(deps): update module github.com/opencontainers/runtime-spec to v1.2.1
|
||||
* tests/conformance/testdata/Dockerfile.add: update some URLs
|
||||
* Vendor imagebuilder
|
||||
* Fix source of OS, architecture and variant
|
||||
* chore(deps): update module github.com/go-jose/go-jose/v4 to v4.0.5 [security]
|
||||
* fix(deps): update module tags.cncf.io/container-device-interface to v0.8.1
|
||||
* fix(deps): update module github.com/moby/buildkit to v0.20.0
|
||||
* chroot createPlatformContainer: use MS_REMOUNT
|
||||
* conformance: make TestCommit and TestConformance parallel
|
||||
* cirrus: reduce task timeout
|
||||
* mkcw: mkcw_check_image use bats run_with_log
|
||||
* test: use /tmp as TMPDIR
|
||||
* heredoc: create temp subdirs for each build
|
||||
* test: heredoc remove python dependency from test
|
||||
* Support the containers.conf container_name_as_hostname option
|
||||
* fix(deps): update module github.com/opencontainers/runc to v1.2.5
|
||||
* fix(deps): update module github.com/spf13/cobra to v1.9.0
|
||||
* .cirrus: use more cores for smoke
|
||||
* Switch to the CNCF Code of Conduct
|
||||
* .cirrus: bump ci resources
|
||||
* fix(deps): update module golang.org/x/crypto to v0.33.0
|
||||
* Distinguish --mount=type=cache locations by ownership, too
|
||||
* fix(deps): update module golang.org/x/term to v0.29.0
|
||||
* .cirrus: run -race only on non-PR branch
|
||||
* unit: deparallize some tests
|
||||
* .cirrus: use multiple cpu for unit tests
|
||||
* Makefile: use -parallel for go test
|
||||
* unit_test: use Parallel test where possible
|
||||
* Update module golang.org/x/sys to v0.30.0
|
||||
* Update module golang.org/x/sync to v0.11.0
|
||||
* Update dependency containers/automation_images to v20250131
|
||||
* Bump to Buildah v1.40.0-dev
|
||||
|
||||
- Changelog for v1.39.0 (2025-01-31)
|
||||
* Bump c/storage v1.57.1, c/image 5.34.0, c/common v0.62.0
|
||||
* Update module github.com/containers/storage to v1.57.0
|
||||
* CI, .cirrus: parallelize containerized integration
|
||||
* ed's comment: cleanup
|
||||
* use seperate blobinfocache for flaky test
|
||||
* bump CI VMs to 4 CPUs (was: 2) for integration tests
|
||||
* cleanup, debug, and disable parallel in blobcache tests
|
||||
* bats tests - parallelize
|
||||
* pkg/overlay: cleanups
|
||||
* RPM: include check section to silence rpmlint
|
||||
* RPM: use default gobuild macro on RHEL
|
||||
* tests: remove masked /sys/dev/block check
|
||||
* vendor to latest c/{common,image,storage}
|
||||
* build, run: record hash or digest in image history
|
||||
* Accept image names as sources for cache mounts
|
||||
* Run(): always clean up options.ExternalImageMounts
|
||||
* refactor: replace golang.org/x/exp with stdlib
|
||||
* Update to c/image @main
|
||||
* fix broken doc link
|
||||
* run_freebsd.go: only import runtime-spec once
|
||||
* fix(deps): update module github.com/docker/docker to v27.5.1+incompatible
|
||||
* bump github.com/vbatts/tar-split
|
||||
* Add more checks to the --mount flag parsing logic
|
||||
* chroot mount flags integration test: copy binaries
|
||||
* fix(deps): update module github.com/moby/buildkit to v0.19.0
|
||||
* relabel(): correct a misleading parameter name
|
||||
* Fix TOCTOU error when bind and cache mounts use "src" values
|
||||
* define.TempDirForURL(): always use an intermediate subdirectory
|
||||
* internal/volume.GetBindMount(): discard writes in bind mounts
|
||||
* pkg/overlay: add a MountLabel flag to Options
|
||||
* pkg/overlay: add a ForceMount flag to Options
|
||||
* Add internal/volumes.bindFromChroot()
|
||||
* Add an internal/open package
|
||||
* fix(deps): update module github.com/containers/common to v0.61.1
|
||||
* fix(deps): update module github.com/containers/image/v5 to v5.33.1
|
||||
* [CI:DOCS] Touch up changelogs
|
||||
* fix(deps): update module github.com/docker/docker to v27.5.0+incompatible
|
||||
* copy-preserving-extended-attributes: use a different base image
|
||||
* fix(deps): update github.com/containers/luksy digest to a3a812d
|
||||
* chore(deps): update module golang.org/x/net to v0.33.0 [security]
|
||||
* fix(deps): update module golang.org/x/crypto to v0.32.0
|
||||
* New VM Images
|
||||
* fix(deps): update module github.com/opencontainers/runc to v1.2.4
|
||||
* fix(deps): update module github.com/docker/docker to v27.4.1+incompatible
|
||||
* fix(deps): update module github.com/containers/ocicrypt to v1.2.1
|
||||
* Add support for --security-opt mask and unmask
|
||||
* Allow cache mounts to be stages or additional build contexts
|
||||
* [skip-ci] RPM: cleanup changelog conditionals
|
||||
* fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.6
|
||||
* fix(deps): update module github.com/moby/buildkit to v0.18.2
|
||||
* Fix an error message in the chroot unit test
|
||||
* copier: use .PAXRecords instead of .Xattrs
|
||||
* chroot: on Linux, try to pivot_root before falling back to chroot
|
||||
* manifest add: add --artifact-annotation
|
||||
* Add context to an error message
|
||||
* Update module golang.org/x/crypto to v0.31.0
|
||||
* Update module github.com/opencontainers/runc to v1.2.3
|
||||
* Update module github.com/docker/docker to v27.4.0+incompatible
|
||||
* Update module github.com/cyphar/filepath-securejoin to v0.3.5
|
||||
* CI: don't build a binary in the unit tests task
|
||||
* CI: use /tmp for $GOCACHE
|
||||
* CI: remove dependencies on the cross-build task
|
||||
* CI: run cross-compile task with make -j
|
||||
* Update module github.com/docker/docker to v27.4.0-rc.4+incompatible
|
||||
* Update module github.com/moby/buildkit to v0.18.1
|
||||
* Update module golang.org/x/crypto to v0.30.0
|
||||
* Update golang.org/x/exp digest to 2d47ceb
|
||||
* Update github.com/opencontainers/runtime-tools digest to f7e3563
|
||||
* [skip-ci] Packit: remove rhel copr build jobs
|
||||
* [skip-ci] Packit: switch to fedora-all for copr
|
||||
* Update module github.com/stretchr/testify to v1.10.0
|
||||
* Update module github.com/moby/buildkit to v0.17.2
|
||||
* Makefile: use `find` to detect source files
|
||||
* Tests: make _prefetch() parallel-safe
|
||||
* Update module github.com/opencontainers/runc to v1.2.2
|
||||
* executor: allow to specify --no-pivot-root
|
||||
* Update module github.com/moby/sys/capability to v0.4.0
|
||||
* Makefile: mv codespell config to .codespellrc
|
||||
* Fix some codespell errors
|
||||
* Makefile,install.md: rm gopath stuff
|
||||
* Makefile: rm targets working on ..
|
||||
* build: rm exclude_graphdriver_devicemapper tag
|
||||
* Makefile: rm unused var
|
||||
* Finish updating to go 1.22
|
||||
* CI VMs: bump again
|
||||
* Bump to Buidah v1.39.0-dev
|
||||
* stage_executor: set avoidLookingCache only if mounting stage
|
||||
* imagebuildah: additionalContext is not a local built stage
|
||||
|
||||
- Changelog for v1.38.0 (2024-11-08)
|
||||
* Bump to c/common v0.61.0, c/image v5.33.0, c/storage v1.56.0
|
||||
* fix(deps): update module golang.org/x/crypto to v0.29.0
|
||||
* fix(deps): update module github.com/moby/buildkit to v0.17.1
|
||||
* fix(deps): update module github.com/containers/storage to v1.56.0
|
||||
* tests: skip two ulimit tests
|
||||
* CI VMs: bump f40 -> f41
|
||||
* tests/tools: rebuild tools when we change versions
|
||||
* tests/tools: update golangci-lint to v1.61.0
|
||||
* fix(deps): update module github.com/moby/buildkit to v0.17.0
|
||||
* Handle RUN --mount with relative targets and no configured workdir
|
||||
* tests: bud: make parallel-safe
|
||||
* fix(deps): update module github.com/opencontainers/runc to v1.2.1
|
||||
* fix(deps): update golang.org/x/exp digest to f66d83c
|
||||
* fix(deps): update github.com/opencontainers/runtime-tools digest to 6c9570a
|
||||
* tests: blobcache: use unique image name
|
||||
* tests: sbom: never write to cwd
|
||||
* tests: mkcw: bug fixes, refactor
|
||||
* deps: bump runc to v1.2.0
|
||||
* deps: switch to moby/sys/userns
|
||||
* tests/test_runner.sh: remove some redundancies
|
||||
* Integration tests: run git daemon on a random-but-bind()able port
|
||||
* fix(deps): update module github.com/opencontainers/selinux to v1.11.1
|
||||
* go.mod: remove unnecessary replace
|
||||
* Document more buildah build --secret options
|
||||
* Add support for COPY --exclude and ADD --exclude options
|
||||
* fix(deps): update github.com/containers/luksy digest to e2530d6
|
||||
* chore(deps): update dependency containers/automation_images to v20241010
|
||||
* fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.4
|
||||
* Properly validate cache IDs and sources
|
||||
* [skip-ci] Packit: constrain koji job to fedora package to avoid dupes
|
||||
* Audit and tidy OWNERS
|
||||
* fix(deps): update module golang.org/x/crypto to v0.28.0
|
||||
* tests: add quotes to names
|
||||
* vendor: update c/common to latest
|
||||
* CVE-2024-9407: validate "bind-propagation" flag settings
|
||||
* vendor: switch to moby/sys/capability
|
||||
* Don't set ambient capabilities
|
||||
* Document that zstd:chunked is downgraded to zstd when encrypting
|
||||
* fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.3
|
||||
* buildah-manifest-create.1: Fix manpage section
|
||||
* chore(deps): update dependency ubuntu to v24
|
||||
* Make `buildah manifest push --all` true by default
|
||||
* chroot: add newlines at the end of printed error messages
|
||||
* Do not error on trying to write IMA xattr as rootless
|
||||
* fix: remove duplicate conditions
|
||||
* fix(deps): update module github.com/moby/buildkit to v0.16.0
|
||||
* fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.2
|
||||
* Document how entrypoint is configured in buildah config
|
||||
* In a container, try to register binfmt_misc
|
||||
* imagebuildah.StageExecutor: clean up volumes/volumeCache
|
||||
* build: fall back to parsing a TARGETPLATFORM build-arg
|
||||
* `manifest add --artifact`: handle multiple values
|
||||
* Packit: split out ELN jobs and reuse fedora downstream targets
|
||||
* Packit: Enable sidetags for bodhi updates
|
||||
* fix(deps): update module github.com/docker/docker to v27.2.1+incompatible
|
||||
* tests/bud.bats: add git source
|
||||
* add: add support for git source
|
||||
* Add support for the new c/common pasta options
|
||||
* vendor latest c/common
|
||||
* fix(deps): update module golang.org/x/term to v0.24.0
|
||||
* fix(deps): update module github.com/fsouza/go-dockerclient to v1.12.0
|
||||
* packit: update fedora and epel targets
|
||||
* cirrus: disable f39 testing
|
||||
* cirrus: fix fedora names
|
||||
* update to go 1.22
|
||||
* Vendor c/common:9d025e4cb348
|
||||
* copier: handle globbing with "**" path components
|
||||
* fix(deps): update golang.org/x/exp digest to 9b4947d
|
||||
* fix(deps): update github.com/containers/luksy digest to 2e7307c
|
||||
* imagebuildah: make scratch config handling toggleable
|
||||
* fix(deps): update module github.com/docker/docker to v27.2.0+incompatible
|
||||
* Add a validation script for Makefile $(SOURCES)
|
||||
* fix(deps): update module github.com/openshift/imagebuilder to v1.2.15
|
||||
* New VMs
|
||||
* Update some godocs, use 0o to prefix an octal in a comment
|
||||
* buildah-build.1.md: expand the --layer-label description
|
||||
* fix(deps): update module github.com/containers/common to v0.60.2
|
||||
* run: fix a nil pointer dereference on FreeBSD
|
||||
* CI: enable the whitespace linter
|
||||
* Fix some govet linter warnings
|
||||
* Commit(): retry committing to local storage on storage.LayerUnknown
|
||||
* CI: enable the gofumpt linter
|
||||
* conformance: move weirdly-named files out of the repository
|
||||
* fix(deps): update module github.com/docker/docker to v27.1.2+incompatible
|
||||
* fix(deps): update module github.com/containers/common to v0.60.1
|
||||
* *: use gofmt -s, add gofmt linter
|
||||
* *: fix build tags
|
||||
* fix(deps): update module github.com/containers/image/v5 to v5.32.1
|
||||
* Add(): re-escape any globbed items that included escapes
|
||||
* conformance tests: use mirror.gcr.io for most images
|
||||
* unit tests: use test-specific policy.json and registries.conf
|
||||
* fix(deps): update module golang.org/x/sys to v0.24.0
|
||||
* Update to spun-out "github.com/containerd/platforms"
|
||||
* Bump github.com/containerd/containerd
|
||||
* test/tools/Makefile: duplicate the vendor-in-container target
|
||||
* linters: unchecked error
|
||||
* linters: don't end loop iterations with "else" when "then" would
|
||||
* linters: unused arguments shouldn't have names
|
||||
* linters: rename checkIdsGreaterThan5() to checkIDsGreaterThan5()
|
||||
* linters: don't name variables "cap"
|
||||
* `make lint`: use --timeout instead of --deadline
|
||||
* Drop the e2e test suite
|
||||
* fix(deps): update module golang.org/x/crypto to v0.26.0
|
||||
* fix(deps): update module github.com/onsi/gomega to v1.34.1
|
||||
* `make vendor-in-container`: use the caller's Go cache if it exists
|
||||
* fix(deps): fix test/tools ginkgo typo
|
||||
* fix(deps): update module github.com/onsi/ginkgo/v2 to v2.19.1
|
||||
* Update to keep up with API changes in storage
|
||||
* fix(deps): update github.com/containers/luksy digest to 1f482a9
|
||||
* install: On Debian/Ubuntu, add installation of libbtrfs-dev
|
||||
* fix(deps): update module golang.org/x/sys to v0.23.0
|
||||
* fix(deps): update golang.org/x/exp digest to 8a7402a
|
||||
* fix(deps): update module github.com/fsouza/go-dockerclient to v1.11.2
|
||||
* Use Epoch: 2 and respect the epoch in dependencies.
|
||||
* Bump to Buildah v1.38.0-dev
|
||||
* AddAndCopyOptions: add CertPath, InsecureSkipTLSVerify, Retry fields
|
||||
* Add PrependedLinkedLayers/AppendedLinkedLayers to CommitOptions
|
||||
* integration tests: teach starthttpd() about TLS and pid files
|
||||
|
||||
- Changelog for v1.37.0 (2024-07-26)
|
||||
* Bump c/storage, c/image, c/common for v1.37.0
|
||||
* "build with basename resolving user arg" tests: correct ARG use
|
||||
* bud-multiple-platform-no-run test: correct ARG use
|
||||
* imagebuildah: always have default values for $TARGET... args ready
|
||||
* bump github.com/openshift/imagebuilder to v1.2.14
|
||||
* fix(deps): update module github.com/docker/docker to v27.1.1+incompatible
|
||||
* fix(deps): update module github.com/cyphar/filepath-securejoin to v0.3.1
|
||||
* fix(deps): update module github.com/docker/docker to v27.1.0+incompatible
|
||||
* CI: use local registry, part 2 of 2
|
||||
* CI: use local registry, part 1 of 2
|
||||
* fix(deps): update module github.com/fsouza/go-dockerclient to v1.11.1
|
||||
* Revert "fix(deps): update github.com/containers/image/v5 to v5.31.1"
|
||||
* Replace libimage.LookupReferenceFunc with the manifests version
|
||||
* conformance tests: enable testing CompatVolumes
|
||||
* conformance tests: add a test that tries to chown a volume
|
||||
* imagebuildah: make traditional volume handling not the default
|
||||
* StageExecutor.prepare(): mark base image volumes for preservation
|
||||
* fix(deps): update module github.com/containers/image/v5 to v5.31.1
|
||||
* Vendor in latest containers/(common, storage, image)
|
||||
* fix(deps): update module golang.org/x/term to v0.22.0
|
||||
* fix(deps): update module golang.org/x/sys to v0.22.0
|
||||
* fix(deps): update golang.org/x/exp digest to 7f521ea
|
||||
* fix(deps): update github.com/containers/luksy digest to a8846e2
|
||||
* imagebuildah.StageExecutor.Copy(): reject new flags for now
|
||||
* bump github.com/openshift/imagebuilder to v1.2.11
|
||||
* Rework parsing of --pull flags
|
||||
* fix(deps): update module github.com/containers/image/v5 to v5.31.1
|
||||
* imagebuildah.StageExecutor.prepare(): log the --platform flag
|
||||
* CI VMs: bump
|
||||
* buildah copy: preserve owner info with --from= a container or image
|
||||
* conformance tests: enable testing CompatSetParent
|
||||
* containerImageRef.NewImageSource(): move the FROM comment to first
|
||||
* commit: set "parent" for docker format only when requested
|
||||
* Update godoc for Builder.EnsureContainerPathAs
|
||||
* fix(deps): update module github.com/spf13/cobra to v1.8.1
|
||||
* fix(deps): update module github.com/containernetworking/cni to v1.2.0
|
||||
* fix(deps): update module github.com/opencontainers/runc to v1.1.13
|
||||
* Change default for podman build to --pull missing
|
||||
* fix(deps): update module github.com/containers/common to v0.59.1
|
||||
* Clarify definition of --pull options
|
||||
* buildah: fix a nil pointer reference on FreeBSD
|
||||
* Use /var/tmp for $TMPDIR for vfs conformance jobs
|
||||
* Cirrus: run `df` during job setup
|
||||
* conformance: use quay.io/libpod/centos:7 instead of centos:8
|
||||
* Stop setting "parent" in docker format
|
||||
* conformance: check if workdir trims path separator suffixes
|
||||
* push integration test: pass password to docker login via stdin
|
||||
* Re-enable the "copy with chown" conformance test
|
||||
* healthcheck: Add support for `--start-interval`
|
||||
* fix(deps): update module github.com/docker/docker to v26.1.4+incompatible
|
||||
* fix(deps): update module github.com/containerd/containerd to v1.7.18
|
||||
* tests: set _CONTAINERS_USERNS_CONFIGURED=done for libnetwork
|
||||
* Cross-build on Fedora
|
||||
* Drop copyStringSlice() and copyStringStringMap()
|
||||
* fix(deps): update module golang.org/x/crypto to v0.24.0
|
||||
* fix(deps): update module github.com/openshift/imagebuilder to v1.2.10
|
||||
* Provide an uptime_netbsd.go
|
||||
* Spell unix as "!windows"
|
||||
* Add netbsd to lists-of-OSes
|
||||
* fix(deps): update golang.org/x/exp digest to fd00a4e
|
||||
* [skip-ci] Packit: enable c10s downstream sync
|
||||
* CI VMs: bump, to debian with cgroups v2
|
||||
* Document when BlobDirectory is overridden
|
||||
* fix secret mounts for env vars when using chroot isolation
|
||||
* Change to take a types.ImageReference arg
|
||||
* imagebuildah: Support custom image reference lookup for cache push/pull
|
||||
* fix(deps): update module github.com/onsi/ginkgo/v2 to v2.19.0
|
||||
* Bump to v1.37.0-dev
|
||||
* CI: Clarify Debian use for conformance tests
|
||||
|
||||
- Changelog for v1.36.0 (2024-05-23)
|
||||
* build: be more selective about specifying the default OS
|
||||
* Bump to c/common v0.59.0
|
||||
* Fix buildah prune --help showing the same example twice
|
||||
* fix(deps): update module github.com/onsi/ginkgo/v2 to v2.18.0
|
||||
* fix(deps): update module github.com/containers/image/v5 to v5.31.0
|
||||
* bud tests: fix breakage when vendoring into podman
|
||||
* Integration tests: fake up a replacement for nixery.dev/shell
|
||||
* copierWithSubprocess(): try to capture stderr on io.ErrClosedPipe
|
||||
* Don't expand RUN heredocs ourselves, let the shell do it
|
||||
* Don't leak temp files on failures
|
||||
* Add release note template to split dependency chores
|
||||
* fix CentOS/RHEL build - no BATS there
|
||||
* fix(deps): update module github.com/containers/luksy to v0.0.0-20240506205542-84b50f50f3ee
|
||||
* Address CVE-2024-3727
|
||||
* chore(deps): update module github.com/opencontainers/runtime-spec to v1.2.0
|
||||
* Builder.cdiSetupDevicesInSpecdefConfig(): use configured CDI dirs
|
||||
* Setting --arch should set the TARGETARCH build arg
|
||||
* fix(deps): update module golang.org/x/exp to v0.0.0-20240416160154-fe59bbe5cc7f
|
||||
* [CI:DOCS] Add link to Buildah image page to README.md
|
||||
* Don't set GOTOOLCHAIN=local
|
||||
* fix(deps): update module github.com/cyphar/filepath-securejoin to v0.2.5
|
||||
* Makefile: set GOTOOLCHAIN=local
|
||||
* Integration tests: switch some base images
|
||||
* containerImageRef.NewImageSource: merge the tar filters
|
||||
* fix(deps): update module github.com/onsi/ginkgo/v2 to v2.17.2
|
||||
* fix(deps): update module github.com/containers/luksy to v0.0.0-20240408185936-afd8e7619947
|
||||
* Disable packit builds for centos-stream+epel-next-8
|
||||
* Makefile: add missing files to $(SOURCES)
|
||||
* CI VMs: bump to new versions with tmpfs /tmp
|
||||
* chore(deps): update module golang.org/x/net to v0.23.0 [security]
|
||||
* integration test: handle new labels in "bud and test --unsetlabel"
|
||||
* Switch packit configuration to use epel-9-$arch ...
|
||||
* Give unit tests a bit more time
|
||||
* Integration tests: remove a couple of duplicated tests
|
||||
* Integration tests: whitespace tweaks
|
||||
* Integration tests: don't remove images at start or end of test
|
||||
* Integration tests: use cached images more
|
||||
* Integration tests _prefetch: use registry configs
|
||||
* internal: use fileutils.(Le|E)xists
|
||||
* pkg/parse: use fileutils.(Le|E)xists
|
||||
* buildah: use fileutils.(Le|E)xists
|
||||
* chroot: use fileutils.(Le|E)xists
|
||||
* vendor: update containers/(common|storage)
|
||||
* Fix issue/pr lock workflow
|
||||
* [CI:DOCS] Add golang 1.21 update warning
|
||||
* heredoc: honor inline COPY irrespective of ignorefiles
|
||||
* Update install.md
|
||||
* source-push: add support for --digestfile
|
||||
* Fix caching when mounting a cached stage with COPY/ADD
|
||||
* fix(deps): update github.com/containers/luksy digest to 3d2cf0e
|
||||
* Makefile: softcode `strip`, use it from env var
|
||||
* Man page updates
|
||||
* Add support for passing CDI specs to --device
|
||||
* Update comments on some API objects
|
||||
* pkg/parse.DeviceFromPath(): dereference src symlinks
|
||||
* fix(deps): update module github.com/onsi/ginkgo/v2 to v2.17.1
|
||||
|
||||
- Changelog for v1.35.0 (2024-03-06)
|
||||
* fix(deps): update module github.com/stretchr/testify to v1.9.0
|
||||
* cgroups: reuse version check from c/common
|
||||
|
|
|
@@ -1,47 +0,0 @@
//go:build linux
// +build linux

package chroot

import (
"fmt"
"os"
"syscall"
"unsafe"

"golang.org/x/sys/unix"
)

// Open a PTY using the /dev/ptmx device. The main advantage of using
// this instead of posix_openpt is that it avoids cgo.
func getPtyDescriptors() (int, int, error) {
// Create a pseudo-terminal -- open a copy of the master side.
controlFd, err := unix.Open("/dev/ptmx", os.O_RDWR, 0600)
if err != nil {
return -1, -1, fmt.Errorf("opening PTY master using /dev/ptmx: %v", err)
}
// Set the kernel's lock to "unlocked".
locked := 0
if result, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(controlFd), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&locked))); int(result) == -1 {
return -1, -1, fmt.Errorf("unlocking PTY descriptor: %v", err)
}
// Get a handle for the other end.
ptyFd, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(controlFd), unix.TIOCGPTPEER, unix.O_RDWR|unix.O_NOCTTY)
if int(ptyFd) == -1 {
if errno, isErrno := err.(syscall.Errno); !isErrno || (errno != syscall.EINVAL && errno != syscall.ENOTTY) {
return -1, -1, fmt.Errorf("getting PTY descriptor: %v", err)
}
// EINVAL means the kernel's too old to understand TIOCGPTPEER. Try TIOCGPTN.
ptyN, err := unix.IoctlGetInt(controlFd, unix.TIOCGPTN)
if err != nil {
return -1, -1, fmt.Errorf("getting PTY number: %v", err)
}
ptyName := fmt.Sprintf("/dev/pts/%d", ptyN)
fd, err := unix.Open(ptyName, unix.O_RDWR|unix.O_NOCTTY, 0620)
if err != nil {
return -1, -1, fmt.Errorf("opening PTY %q: %v", ptyName, err)
}
ptyFd = uintptr(fd)
}
return controlFd, int(ptyFd), nil
}
@@ -1,13 +0,0 @@
//go:build !linux && !(freebsd && cgo)
// +build !linux
// +build !freebsd !cgo

package chroot

import (
"errors"
)

func getPtyDescriptors() (int, int, error) {
return -1, -1, errors.New("getPtyDescriptors not supported on this platform")
}
@@ -1,5 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd

package chroot

@@ -19,6 +18,7 @@ import (
"syscall"

"github.com/containers/buildah/bind"
"github.com/containers/buildah/internal/pty"
"github.com/containers/buildah/util"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/reexec"

@@ -49,12 +49,13 @@ func init() {
type runUsingChrootExecSubprocOptions struct {
Spec *specs.Spec
BundlePath string
NoPivot bool
}

// RunUsingChroot runs a chrooted process, using some of the settings from the
// passed-in spec, and using the specified bundlePath to hold temporary files,
// directories, and mountpoints.
func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reader, stdout, stderr io.Writer) (err error) {
func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reader, stdout, stderr io.Writer, noPivot bool) (err error) {
var confwg sync.WaitGroup
var homeFound bool
for _, env := range spec.Process.Env {

@@ -74,7 +75,7 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade
if err != nil {
return err
}
if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil {
if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0o600); err != nil {
return fmt.Errorf("storing runtime configuration: %w", err)
}
logrus.Debugf("config = %v", string(specbytes))

@@ -98,6 +99,7 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade
config, conferr := json.Marshal(runUsingChrootSubprocOptions{
Spec: spec,
BundlePath: bundlePath,
NoPivot: noPivot,
})
if conferr != nil {
return fmt.Errorf("encoding configuration for %q: %w", runUsingChrootCommand, conferr)

@@ -197,6 +199,7 @@ func runUsingChrootMain() {
fmt.Fprintf(os.Stderr, "invalid options spec in runUsingChrootMain\n")
os.Exit(1)
}
noPivot := options.NoPivot

// Prepare to shuttle stdio back and forth.
rootUID32, rootGID32, err := util.GetHostRootIDs(options.Spec)

@@ -215,7 +218,7 @@ func runUsingChrootMain() {
var stderr io.Writer
fdDesc := make(map[int]string)
if options.Spec.Process.Terminal {
ptyMasterFd, ptyFd, err := getPtyDescriptors()
ptyMasterFd, ptyFd, err := pty.GetPtyDescriptors()
if err != nil {
logrus.Errorf("error opening PTY descriptors: %v", err)
os.Exit(1)

@@ -266,7 +269,7 @@ func runUsingChrootMain() {
logrus.Warnf("error %s ownership of container PTY %sto %d/%d: %v", op, from, rootUID, rootGID, err)
}
// Set permissions on the PTY.
if err = ctty.Chmod(0620); err != nil {
if err = ctty.Chmod(0o620); err != nil {
logrus.Errorf("error setting permissions of container PTY: %v", err)
os.Exit(1)
}

@@ -443,7 +446,7 @@ func runUsingChrootMain() {
}()

// Set up mounts and namespaces, and run the parent subprocess.
status, err := runUsingChroot(options.Spec, options.BundlePath, ctty, stdin, stdout, stderr, closeOnceRunning)
status, err := runUsingChroot(options.Spec, options.BundlePath, ctty, stdin, stdout, stderr, noPivot, closeOnceRunning)
if err != nil {
fmt.Fprintf(os.Stderr, "error running subprocess: %v\n", err)
os.Exit(1)

@@ -464,7 +467,7 @@ func runUsingChrootMain() {
// runUsingChroot, still in the grandparent process, sets up various bind
// mounts and then runs the parent process in its own user namespace with the
// necessary ID mappings.
func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io.Reader, stdout, stderr io.Writer, closeOnceRunning []*os.File) (wstatus unix.WaitStatus, err error) {
func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io.Reader, stdout, stderr io.Writer, noPivot bool, closeOnceRunning []*os.File) (wstatus unix.WaitStatus, err error) {
var confwg sync.WaitGroup

// Create a new mount namespace for ourselves and bind mount everything to a new location.

@@ -497,9 +500,10 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io
config, conferr := json.Marshal(runUsingChrootExecSubprocOptions{
Spec: spec,
BundlePath: bundlePath,
NoPivot: noPivot,
})
if conferr != nil {
fmt.Fprintf(os.Stderr, "error re-encoding configuration for %q", runUsingChrootExecCommand)
fmt.Fprintf(os.Stderr, "error re-encoding configuration for %q\n", runUsingChrootExecCommand)
os.Exit(1)
}

@@ -526,7 +530,6 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io
cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
if err := setPlatformUnshareOptions(spec, cmd); err != nil {
return 1, fmt.Errorf("setting platform unshare options: %w", err)
}
interrupted := make(chan os.Signal, 100)
cmd.Hook = func(int) error {

@@ -569,7 +572,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io
}
}
}
fmt.Fprintf(os.Stderr, "process exited with error: %v", err)
fmt.Fprintf(os.Stderr, "process exited with error: %v\n", err)
os.Exit(1)
}
|
@ -621,8 +624,10 @@ func runUsingChrootExecMain() {
|
|||
// Try to chroot into the root. Do this before we potentially
|
||||
// block the syscall via the seccomp profile. Allow the
|
||||
// platform to override this - on FreeBSD, we use a simple
|
||||
// jail to set the hostname in the container
|
||||
// jail to set the hostname in the container, and on Linux
|
||||
// we attempt to pivot_root.
|
||||
if err := createPlatformContainer(options); err != nil {
|
||||
logrus.Debugf("createPlatformContainer: %v", err)
|
||||
var oldst, newst unix.Stat_t
|
||||
if err := unix.Stat(options.Spec.Root.Path, &oldst); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error stat()ing intended root directory %q: %v\n", options.Spec.Root.Path, err)
|
||||
|
@ -697,7 +702,7 @@ func runUsingChrootExecMain() {
|
|||
}
|
||||
logrus.Debugf("setting supplemental groups")
|
||||
if err = syscall.Setgroups(gids); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error setting supplemental groups list: %v", err)
|
||||
fmt.Fprintf(os.Stderr, "error setting supplemental groups list: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
|
@ -705,7 +710,7 @@ func runUsingChrootExecMain() {
|
|||
if strings.Trim(string(setgroups), "\n") != "deny" {
|
||||
logrus.Debugf("clearing supplemental groups")
|
||||
if err = syscall.Setgroups([]int{}); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error clearing supplemental groups list: %v", err)
|
||||
fmt.Fprintf(os.Stderr, "error clearing supplemental groups list: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
@ -713,7 +718,7 @@ func runUsingChrootExecMain() {
|
|||
|
||||
logrus.Debugf("setting gid")
|
||||
if err = unix.Setresgid(int(user.GID), int(user.GID), int(user.GID)); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error setting GID: %v", err)
|
||||
fmt.Fprintf(os.Stderr, "error setting GID: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
|
@ -734,7 +739,7 @@ func runUsingChrootExecMain() {
|
|||
|
||||
logrus.Debugf("setting uid")
|
||||
if err = unix.Setresuid(int(user.UID), int(user.UID), int(user.UID)); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error setting UID: %v", err)
|
||||
fmt.Fprintf(os.Stderr, "error setting UID: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
|
@ -747,7 +752,7 @@ func runUsingChrootExecMain() {
|
|||
logrus.Debugf("Running %#v (PATH = %q)", cmd, os.Getenv("PATH"))
|
||||
interrupted := make(chan os.Signal, 100)
|
||||
if err = cmd.Start(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "process failed to start with error: %v", err)
|
||||
fmt.Fprintf(os.Stderr, "process failed to start with error: %v\n", err)
|
||||
}
|
||||
go func() {
|
||||
for range interrupted {
|
||||
|
@ -774,7 +779,7 @@ func runUsingChrootExecMain() {
|
|||
}
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "process exited with error: %v", err)
|
||||
fmt.Fprintf(os.Stderr, "process exited with error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
//go:build freebsd
|
||||
// +build freebsd
|
||||
|
||||
package chroot
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
@ -13,6 +14,7 @@ import (
|
|||
"syscall"
|
||||
|
||||
"github.com/containers/buildah/pkg/jail"
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/containers/storage/pkg/mount"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
|
@ -39,6 +41,7 @@ var (
|
|||
type runUsingChrootSubprocOptions struct {
|
||||
Spec *specs.Spec
|
||||
BundlePath string
|
||||
NoPivot bool
|
||||
}
|
||||
|
||||
func setPlatformUnshareOptions(spec *specs.Spec, cmd *unshare.Cmd) error {
|
||||
|
@ -178,9 +181,9 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
}
|
||||
}
|
||||
target := filepath.Join(spec.Root.Path, m.Destination)
|
||||
if _, err := os.Stat(target); err != nil {
|
||||
if err := fileutils.Exists(target); err != nil {
|
||||
// If the target can't be stat()ted, check the error.
|
||||
if !os.IsNotExist(err) {
|
||||
if !errors.Is(err, fs.ErrNotExist) {
|
||||
return undoBinds, fmt.Errorf("examining %q for mounting in mount namespace: %w", target, err)
|
||||
}
|
||||
// The target isn't there yet, so create it, and make a
|
||||
|
@ -188,12 +191,12 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
// XXX: This was copied from the linux version which supports bind mounting files.
|
||||
// Leaving it here since I plan to add this to FreeBSD's nullfs.
|
||||
if m.Type != "nullfs" || srcinfo.IsDir() {
|
||||
if err = os.MkdirAll(target, 0111); err != nil {
|
||||
if err = os.MkdirAll(target, 0o111); err != nil {
|
||||
return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err)
|
||||
}
|
||||
removes = append(removes, target)
|
||||
} else {
|
||||
if err = os.MkdirAll(filepath.Dir(target), 0111); err != nil {
|
||||
if err = os.MkdirAll(filepath.Dir(target), 0o111); err != nil {
|
||||
return undoBinds, fmt.Errorf("ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err)
|
||||
}
|
||||
// Don't do this until we can support file mounts in nullfs
|
||||
|
@ -211,12 +214,12 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
// Do the bind mount.
|
||||
if !srcinfo.IsDir() {
|
||||
logrus.Debugf("emulating file mount %q on %q", m.Source, target)
|
||||
_, err := os.Stat(target)
|
||||
err := fileutils.Exists(target)
|
||||
if err == nil {
|
||||
save := saveDir(spec, target)
|
||||
if _, err := os.Stat(save); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
err = os.MkdirAll(save, 0111)
|
||||
if err := fileutils.Exists(save); err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
err = os.MkdirAll(save, 0o111)
|
||||
}
|
||||
if err != nil {
|
||||
return undoBinds, fmt.Errorf("creating file mount save directory %q: %w", save, err)
|
||||
|
@ -224,7 +227,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
removes = append(removes, save)
|
||||
}
|
||||
savePath := filepath.Join(save, filepath.Base(target))
|
||||
if _, err := os.Stat(target); err == nil {
|
||||
if err := fileutils.Exists(target); err == nil {
|
||||
logrus.Debugf("moving %q to %q", target, savePath)
|
||||
if err := os.Rename(target, savePath); err != nil {
|
||||
return undoBinds, fmt.Errorf("moving %q to %q: %w", target, savePath, err)
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package chroot
|
||||
|
||||
|
@ -9,6 +8,7 @@ import (
|
|||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
@ -16,10 +16,10 @@ import (
|
|||
"github.com/containers/buildah/copier"
|
||||
"github.com/containers/storage/pkg/mount"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
"github.com/moby/sys/capability"
|
||||
"github.com/opencontainers/runc/libcontainer/apparmor"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
|
@ -43,11 +43,86 @@ var (
|
|||
"RLIMIT_STACK": unix.RLIMIT_STACK,
|
||||
}
|
||||
rlimitsReverseMap = map[int]string{}
|
||||
mountFlagMap = map[int]string{
|
||||
unix.MS_ACTIVE: "MS_ACTIVE",
|
||||
unix.MS_BIND: "MS_BIND",
|
||||
unix.MS_BORN: "MS_BORN",
|
||||
unix.MS_DIRSYNC: "MS_DIRSYNC",
|
||||
unix.MS_KERNMOUNT: "MS_KERNMOUNT",
|
||||
unix.MS_LAZYTIME: "MS_LAZYTIME",
|
||||
unix.MS_MANDLOCK: "MS_MANDLOCK",
|
||||
unix.MS_MOVE: "MS_MOVE",
|
||||
unix.MS_NOATIME: "MS_NOATIME",
|
||||
unix.MS_NODEV: "MS_NODEV",
|
||||
unix.MS_NODIRATIME: "MS_NODIRATIME",
|
||||
unix.MS_NOEXEC: "MS_NOEXEC",
|
||||
unix.MS_NOREMOTELOCK: "MS_NOREMOTELOCK",
|
||||
unix.MS_NOSEC: "MS_NOSEC",
|
||||
unix.MS_NOSUID: "MS_NOSUID",
|
||||
unix.MS_NOSYMFOLLOW: "MS_NOSYMFOLLOW",
|
||||
unix.MS_NOUSER: "MS_NOUSER",
|
||||
unix.MS_POSIXACL: "MS_POSIXACL",
|
||||
unix.MS_PRIVATE: "MS_PRIVATE",
|
||||
unix.MS_RDONLY: "MS_RDONLY",
|
||||
unix.MS_REC: "MS_REC",
|
||||
unix.MS_RELATIME: "MS_RELATIME",
|
||||
unix.MS_REMOUNT: "MS_REMOUNT",
|
||||
unix.MS_SHARED: "MS_SHARED",
|
||||
unix.MS_SILENT: "MS_SILENT",
|
||||
unix.MS_SLAVE: "MS_SLAVE",
|
||||
unix.MS_STRICTATIME: "MS_STRICTATIME",
|
||||
unix.MS_SUBMOUNT: "MS_SUBMOUNT",
|
||||
unix.MS_SYNCHRONOUS: "MS_SYNCHRONOUS",
|
||||
unix.MS_UNBINDABLE: "MS_UNBINDABLE",
|
||||
}
|
||||
statFlagMap = map[int]string{
|
||||
unix.ST_MANDLOCK: "ST_MANDLOCK",
|
||||
unix.ST_NOATIME: "ST_NOATIME",
|
||||
unix.ST_NODEV: "ST_NODEV",
|
||||
unix.ST_NODIRATIME: "ST_NODIRATIME",
|
||||
unix.ST_NOEXEC: "ST_NOEXEC",
|
||||
unix.ST_NOSUID: "ST_NOSUID",
|
||||
unix.ST_RDONLY: "ST_RDONLY",
|
||||
unix.ST_RELATIME: "ST_RELATIME",
|
||||
unix.ST_SYNCHRONOUS: "ST_SYNCHRONOUS",
|
||||
}
|
||||
)
|
||||
|
||||
func mountFlagNames(flags uintptr) []string {
|
||||
var names []string
|
||||
for flag, name := range mountFlagMap {
|
||||
if int(flags)&flag == flag {
|
||||
names = append(names, name)
|
||||
flags = flags &^ (uintptr(flag))
|
||||
}
|
||||
}
|
||||
if flags != 0 { // got some unknown leftovers
|
||||
names = append(names, fmt.Sprintf("%#x", flags))
|
||||
}
|
||||
slices.Sort(names)
|
||||
return names
|
||||
}
|
||||
|
||||
func statFlagNames(flags uintptr) []string {
|
||||
var names []string
|
||||
flags = flags & ^uintptr(0x20) // mask off ST_VALID
|
||||
for flag, name := range statFlagMap {
|
||||
if int(flags)&flag == flag {
|
||||
names = append(names, name)
|
||||
flags = flags &^ (uintptr(flag))
|
||||
}
|
||||
}
|
||||
if flags != 0 { // got some unknown leftovers
|
||||
names = append(names, fmt.Sprintf("%#x", flags))
|
||||
}
|
||||
slices.Sort(names)
|
||||
return names
|
||||
}
|
||||
|
||||
type runUsingChrootSubprocOptions struct {
|
||||
Spec *specs.Spec
|
||||
BundlePath string
|
||||
NoPivot bool
|
||||
UIDMappings []syscall.SysProcIDMap
|
||||
GIDMappings []syscall.SysProcIDMap
|
||||
}
|
||||
|
@ -61,14 +136,14 @@ func setPlatformUnshareOptions(spec *specs.Spec, cmd *unshare.Cmd) error {
|
|||
uidmap, gidmap := spec.Linux.UIDMappings, spec.Linux.GIDMappings
|
||||
if len(uidmap) == 0 {
|
||||
// No UID mappings are configured for the container. Borrow our parent's mappings.
|
||||
uidmap = append([]specs.LinuxIDMapping{}, hostUidmap...)
|
||||
uidmap = slices.Clone(hostUidmap)
|
||||
for i := range uidmap {
|
||||
uidmap[i].HostID = uidmap[i].ContainerID
|
||||
}
|
||||
}
|
||||
if len(gidmap) == 0 {
|
||||
// No GID mappings are configured for the container. Borrow our parent's mappings.
|
||||
gidmap = append([]specs.LinuxIDMapping{}, hostGidmap...)
|
||||
gidmap = slices.Clone(hostGidmap)
|
||||
for i := range gidmap {
|
||||
gidmap[i].HostID = gidmap[i].ContainerID
|
||||
}
|
||||
|
@ -179,39 +254,39 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
|
|||
capMap := map[capability.CapType][]string{
|
||||
capability.BOUNDING: spec.Process.Capabilities.Bounding,
|
||||
capability.EFFECTIVE: spec.Process.Capabilities.Effective,
|
||||
capability.INHERITABLE: []string{},
|
||||
capability.INHERITABLE: {},
|
||||
capability.PERMITTED: spec.Process.Capabilities.Permitted,
|
||||
capability.AMBIENT: spec.Process.Capabilities.Ambient,
|
||||
capability.AMBIENT: {},
|
||||
}
|
||||
knownCaps := capability.List()
|
||||
knownCaps := capability.ListKnown()
|
||||
noCap := capability.Cap(-1)
|
||||
for capType, capList := range capMap {
|
||||
for _, capToSet := range capList {
|
||||
cap := noCap
|
||||
for _, capSpec := range capList {
|
||||
capToSet := noCap
|
||||
for _, c := range knownCaps {
|
||||
if strings.EqualFold("CAP_"+c.String(), capToSet) {
|
||||
cap = c
|
||||
if strings.EqualFold("CAP_"+c.String(), capSpec) {
|
||||
capToSet = c
|
||||
break
|
||||
}
|
||||
}
|
||||
if cap == noCap {
|
||||
return fmt.Errorf("mapping capability %q to a number", capToSet)
|
||||
if capToSet == noCap {
|
||||
return fmt.Errorf("mapping capability %q to a number", capSpec)
|
||||
}
|
||||
caps.Set(capType, cap)
|
||||
caps.Set(capType, capToSet)
|
||||
}
|
||||
for _, capToSet := range keepCaps {
|
||||
cap := noCap
|
||||
for _, capSpec := range keepCaps {
|
||||
capToSet := noCap
|
||||
for _, c := range knownCaps {
|
||||
if strings.EqualFold("CAP_"+c.String(), capToSet) {
|
||||
cap = c
|
||||
if strings.EqualFold("CAP_"+c.String(), capSpec) {
|
||||
capToSet = c
|
||||
break
|
||||
}
|
||||
}
|
||||
if cap == noCap {
|
||||
return fmt.Errorf("mapping capability %q to a number", capToSet)
|
||||
if capToSet == noCap {
|
||||
return fmt.Errorf("mapping capability %q to a number", capSpec)
|
||||
}
|
||||
if currentCaps.Get(capType, cap) {
|
||||
caps.Set(capType, cap)
|
||||
if currentCaps.Get(capType, capToSet) {
|
||||
caps.Set(capType, capToSet)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -226,7 +301,56 @@ func makeRlimit(limit specs.POSIXRlimit) unix.Rlimit {
|
|||
}
|
||||
|
||||
func createPlatformContainer(options runUsingChrootExecSubprocOptions) error {
|
||||
return errors.New("unsupported createPlatformContainer")
|
||||
if options.NoPivot {
|
||||
return errors.New("not using pivot_root()")
|
||||
}
|
||||
// borrowing a technique from runc, who credit the LXC maintainers for this
|
||||
// open descriptors for the old and new root directories so that we can use fchdir()
|
||||
oldRootFd, err := unix.Open("/", unix.O_DIRECTORY, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening host root directory: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := unix.Close(oldRootFd); err != nil {
|
||||
logrus.Warnf("closing host root directory: %v", err)
|
||||
}
|
||||
}()
|
||||
newRootFd, err := unix.Open(options.Spec.Root.Path, unix.O_DIRECTORY, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening container root directory: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := unix.Close(newRootFd); err != nil {
|
||||
logrus.Warnf("closing container root directory: %v", err)
|
||||
}
|
||||
}()
|
||||
// change to the new root directory
|
||||
if err := unix.Fchdir(newRootFd); err != nil {
|
||||
return fmt.Errorf("changing to container root directory: %w", err)
|
||||
}
|
||||
// this makes the current directory the root directory. not actually
|
||||
// sure what happens to the other one
|
||||
if err := unix.PivotRoot(".", "."); err != nil {
|
||||
return fmt.Errorf("pivot_root: %w", err)
|
||||
}
|
||||
// go back and clean up the old one
|
||||
if err := unix.Fchdir(oldRootFd); err != nil {
|
||||
return fmt.Errorf("changing to host root directory: %w", err)
|
||||
}
|
||||
// make sure we only unmount things under this tree
|
||||
if err := unix.Mount(".", ".", "", unix.MS_SLAVE|unix.MS_REC, ""); err != nil {
|
||||
return fmt.Errorf("tweaking mount flags on host root directory before unmounting from mount namespace: %w", err)
|
||||
}
|
||||
// detach this (unnamed?) old directory
|
||||
if err := unix.Unmount(".", unix.MNT_DETACH); err != nil {
|
||||
return fmt.Errorf("unmounting host root directory in mount namespace: %w", err)
|
||||
}
|
||||
// go back to a named root directory
|
||||
if err := unix.Fchdir(newRootFd); err != nil {
|
||||
return fmt.Errorf("changing to container root directory at last: %w", err)
|
||||
}
|
||||
logrus.Debugf("pivot_root()ed into %q", options.Spec.Root.Path)
|
||||
return nil
|
||||
}
|
||||
|
||||
func mountFlagsForFSFlags(fsFlags uintptr) uintptr {
|
||||
|
@ -302,7 +426,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
subDev := filepath.Join(spec.Root.Path, "/dev")
|
||||
if err := unix.Mount("/dev", subDev, "bind", devFlags, ""); err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
err = os.Mkdir(subDev, 0755)
|
||||
err = os.Mkdir(subDev, 0o755)
|
||||
if err == nil {
|
||||
err = unix.Mount("/dev", subDev, "bind", devFlags, "")
|
||||
}
|
||||
|
@ -326,7 +450,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
subProc := filepath.Join(spec.Root.Path, "/proc")
|
||||
if err := unix.Mount("/proc", subProc, "bind", procFlags, ""); err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
err = os.Mkdir(subProc, 0755)
|
||||
err = os.Mkdir(subProc, 0o755)
|
||||
if err == nil {
|
||||
err = unix.Mount("/proc", subProc, "bind", procFlags, "")
|
||||
}
|
||||
|
@ -341,7 +465,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
subSys := filepath.Join(spec.Root.Path, "/sys")
|
||||
if err := unix.Mount("/sys", subSys, "bind", sysFlags, ""); err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
err = os.Mkdir(subSys, 0755)
|
||||
err = os.Mkdir(subSys, 0o755)
|
||||
if err == nil {
|
||||
err = unix.Mount("/sys", subSys, "bind", sysFlags, "")
|
||||
}
|
||||
|
@ -364,9 +488,9 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
if err := unix.Mount(m.Mountpoint, subSys, "bind", sysFlags, ""); err != nil {
|
||||
msg := fmt.Sprintf("could not bind mount %q, skipping: %v", m.Mountpoint, err)
|
||||
if strings.HasPrefix(m.Mountpoint, "/sys") {
|
||||
logrus.Infof(msg)
|
||||
logrus.Info(msg)
|
||||
} else {
|
||||
logrus.Warningf(msg)
|
||||
logrus.Warning(msg)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
@ -433,15 +557,15 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
// The target isn't there yet, so create it. If the source is a directory,
|
||||
// we need a directory, otherwise we need a non-directory (i.e., a file).
|
||||
if srcinfo.IsDir() {
|
||||
if err = os.MkdirAll(target, 0755); err != nil {
|
||||
if err = os.MkdirAll(target, 0o755); err != nil {
|
||||
return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err)
|
||||
}
|
||||
} else {
|
||||
if err = os.MkdirAll(filepath.Dir(target), 0755); err != nil {
|
||||
if err = os.MkdirAll(filepath.Dir(target), 0o755); err != nil {
|
||||
return undoBinds, fmt.Errorf("ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err)
|
||||
}
|
||||
var file *os.File
|
||||
if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0755); err != nil {
|
||||
if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0o755); err != nil {
|
||||
return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err)
|
||||
}
|
||||
file.Close()
|
||||
|
@ -518,16 +642,21 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
if effectiveImportantFlags != expectedImportantFlags {
|
||||
// Do a remount to try to get the desired flags to stick.
|
||||
effectiveUnimportantFlags := uintptr(fs.Flags) & ^possibleImportantFlags
|
||||
if err = unix.Mount(target, target, m.Type, unix.MS_REMOUNT|bindFlags|requestFlags|mountFlagsForFSFlags(effectiveUnimportantFlags), ""); err != nil {
|
||||
return undoBinds, fmt.Errorf("remounting %q in mount namespace with flags %#x instead of %#x: %w", target, requestFlags, effectiveImportantFlags, err)
|
||||
remountFlags := unix.MS_REMOUNT | bindFlags | requestFlags | mountFlagsForFSFlags(effectiveUnimportantFlags)
|
||||
// If we are requesting a read-only mount, add any possibleImportantFlags present in fs.Flags to remountFlags.
|
||||
if requestFlags&unix.ST_RDONLY == unix.ST_RDONLY {
|
||||
remountFlags |= uintptr(fs.Flags) & possibleImportantFlags
|
||||
}
|
||||
if err = unix.Mount(target, target, m.Type, remountFlags, ""); err != nil {
|
||||
return undoBinds, fmt.Errorf("remounting %q in mount namespace with flags %v instead of %v: %w", target, mountFlagNames(requestFlags), statFlagNames(effectiveImportantFlags), err)
|
||||
}
|
||||
// Check if the desired flags stuck.
|
||||
if err = unix.Statfs(target, &fs); err != nil {
|
||||
return undoBinds, fmt.Errorf("checking if directory %q was remounted with requested flags %#x instead of %#x: %w", target, requestFlags, effectiveImportantFlags, err)
|
||||
return undoBinds, fmt.Errorf("checking if directory %q was remounted with requested flags %v instead of %v: %w", target, mountFlagNames(requestFlags), statFlagNames(effectiveImportantFlags), err)
|
||||
}
|
||||
newEffectiveImportantFlags := uintptr(fs.Flags) & importantFlags
|
||||
if newEffectiveImportantFlags != expectedImportantFlags {
|
||||
return undoBinds, fmt.Errorf("unable to remount %q with requested flags %#x instead of %#x, just got %#x back", target, requestFlags, effectiveImportantFlags, newEffectiveImportantFlags)
|
||||
return undoBinds, fmt.Errorf("unable to remount %q with requested flags %v instead of %v, just got %v back", target, mountFlagNames(requestFlags), statFlagNames(effectiveImportantFlags), statFlagNames(newEffectiveImportantFlags))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -589,7 +718,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
// Create an empty directory for to use for masking directories.
|
||||
roEmptyDir := filepath.Join(bundlePath, "empty")
|
||||
if len(spec.Linux.MaskedPaths) > 0 {
|
||||
if err := os.Mkdir(roEmptyDir, 0700); err != nil {
|
||||
if err := os.Mkdir(roEmptyDir, 0o700); err != nil {
|
||||
return undoBinds, fmt.Errorf("creating empty directory %q: %w", roEmptyDir, err)
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,32 @@
package chroot

import (
    "slices"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestStatFlagNames(t *testing.T) {
    var names []string
    var flags int
    for flag := range statFlagMap {
        flags |= flag
        names = append(names, statFlagMap[flag])
        assert.Equal(t, []string{statFlagMap[flag]}, statFlagNames(uintptr(flag)))
    }
    slices.Sort(names)
    assert.Equal(t, names, statFlagNames(uintptr(flags)))
}

func TestMountFlagNames(t *testing.T) {
    var names []string
    var flags int
    for flag := range mountFlagMap {
        flags |= flag
        names = append(names, mountFlagMap[flag])
        assert.Equal(t, []string{mountFlagMap[flag]}, mountFlagNames(uintptr(flag)))
    }
    slices.Sort(names)
    assert.Equal(t, names, mountFlagNames(uintptr(flags)))
}
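The two tests above exercise the mountFlagNames and statFlagNames helpers introduced earlier in this diff. As a hedged illustration (not code from the change), the sketch below shows how such helpers might be used to make remount logging readable; logRemountFlags is a hypothetical name, and the snippet assumes it lives in the same chroot package, where unix and logrus are already imported.

// logRemountFlags logs the requested mount flags and the flags the kernel
// reports for the target after the fact, using the name-decoding helpers.
func logRemountFlags(target string, requested uintptr) {
    logrus.Debugf("remounting %q with flags %v", target, mountFlagNames(requested))
    var fs unix.Statfs_t
    if err := unix.Statfs(target, &fs); err == nil {
        logrus.Debugf("effective flags on %q: %v", target, statFlagNames(uintptr(fs.Flags)))
    }
}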
@ -1,5 +1,4 @@
|
|||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package chroot
|
||||
|
||||
|
@ -10,6 +9,7 @@ import (
|
|||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -22,7 +22,6 @@ import (
|
|||
"github.com/opencontainers/runtime-tools/generate"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/exp/slices"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
|
@ -37,7 +36,7 @@ func TestMain(m *testing.M) {
|
|||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func testMinimal(t *testing.T, modify func(g *generate.Generator, rootDir, bundleDir string), verify func(t *testing.T, report *types.TestReport)) {
|
||||
func testMinimalWithPivot(t *testing.T, noPivot bool, modify func(g *generate.Generator, rootDir, bundleDir string), verify func(t *testing.T, report *types.TestReport)) {
|
||||
t.Helper()
|
||||
g, err := generate.New("linux")
|
||||
if err != nil {
|
||||
|
@ -50,17 +49,17 @@ func testMinimal(t *testing.T, modify func(g *generate.Generator, rootDir, bundl
|
|||
// t.TempDir returns /tmp/TestName/001.
|
||||
// /tmp/TestName/001 has permission 0777, but /tmp/TestName is 0700
|
||||
tempDir := t.TempDir()
|
||||
if err = os.Chmod(filepath.Dir(tempDir), 0711); err != nil {
|
||||
if err = os.Chmod(filepath.Dir(tempDir), 0o711); err != nil {
|
||||
t.Fatalf("error loosening permissions on %q: %v", tempDir, err)
|
||||
}
|
||||
|
||||
rootDir := filepath.Join(tempDir, "root")
|
||||
if err := os.Mkdir(rootDir, 0711); err != nil {
|
||||
if err := os.Mkdir(rootDir, 0o711); err != nil {
|
||||
t.Fatalf("os.Mkdir(%q): %v", rootDir, err)
|
||||
}
|
||||
|
||||
rootTmpDir := filepath.Join(rootDir, "tmp")
|
||||
if err := os.Mkdir(rootTmpDir, 01777); err != nil {
|
||||
if err := os.Mkdir(rootTmpDir, 0o1777); err != nil {
|
||||
t.Fatalf("os.Mkdir(%q): %v", rootTmpDir, err)
|
||||
}
|
||||
|
||||
|
@ -70,7 +69,7 @@ func testMinimal(t *testing.T, modify func(g *generate.Generator, rootDir, bundl
|
|||
t.Fatalf("open(%q): %v", specPath, err)
|
||||
}
|
||||
defer specBinarySource.Close()
|
||||
specBinary, err := os.OpenFile(filepath.Join(rootDir, reportCommand), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0711)
|
||||
specBinary, err := os.OpenFile(filepath.Join(rootDir, reportCommand), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o711)
|
||||
if err != nil {
|
||||
t.Fatalf("open(%q): %v", filepath.Join(rootDir, reportCommand), err)
|
||||
}
|
||||
|
@ -84,7 +83,7 @@ func testMinimal(t *testing.T, modify func(g *generate.Generator, rootDir, bundl
|
|||
g.SetProcessArgs([]string{"/" + reportCommand})
|
||||
|
||||
bundleDir := filepath.Join(tempDir, "bundle")
|
||||
if err := os.Mkdir(bundleDir, 0700); err != nil {
|
||||
if err := os.Mkdir(bundleDir, 0o700); err != nil {
|
||||
t.Fatalf("os.Mkdir(%q): %v", bundleDir, err)
|
||||
}
|
||||
|
||||
|
@ -101,8 +100,8 @@ func testMinimal(t *testing.T, modify func(g *generate.Generator, rootDir, bundl
|
|||
}
|
||||
|
||||
output := new(bytes.Buffer)
|
||||
if err := RunUsingChroot(g.Config, bundleDir, "/", new(bytes.Buffer), output, output); err != nil {
|
||||
t.Fatalf("run: %v: %s", err, output.String())
|
||||
if err := RunUsingChroot(g.Config, bundleDir, "/", new(bytes.Buffer), output, output, noPivot); err != nil {
|
||||
t.Fatalf("run(noPivot=%v): %v: %s", noPivot, err, output.String())
|
||||
}
|
||||
|
||||
var report types.TestReport
|
||||
|
@ -115,7 +114,16 @@ func testMinimal(t *testing.T, modify func(g *generate.Generator, rootDir, bundl
|
|||
}
|
||||
}
|
||||
|
||||
func testMinimal(t *testing.T, modify func(g *generate.Generator, rootDir, bundleDir string), verify func(t *testing.T, report *types.TestReport)) {
|
||||
for _, noPivot := range []bool{false, true} {
|
||||
t.Run(fmt.Sprintf("noPivot=%v", noPivot), func(t *testing.T) {
|
||||
testMinimalWithPivot(t, noPivot, modify, verify)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoop(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
|
@ -123,24 +131,26 @@ func TestNoop(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestMinimalSkeleton(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(_ *generate.Generator, _, _ string) {
|
||||
},
|
||||
func(t *testing.T, report *types.TestReport) {
|
||||
func(_ *testing.T, _ *types.TestReport) {
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestProcessTerminal(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
for _, terminal := range []bool{false, true} {
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
g.SetProcessTerminal(terminal)
|
||||
},
|
||||
func(t *testing.T, report *types.TestReport) {
|
||||
|
@ -153,12 +163,13 @@ func TestProcessTerminal(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestProcessConsoleSize(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
for _, size := range [][2]uint{{80, 25}, {132, 50}} {
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
g.SetProcessTerminal(true)
|
||||
g.SetProcessConsoleSize(size[0], size[1])
|
||||
},
|
||||
|
@ -175,12 +186,13 @@ func TestProcessConsoleSize(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestProcessUser(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
for _, id := range []uint32{0, 1000} {
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
g.SetProcessUID(id)
|
||||
g.SetProcessGID(id + 1)
|
||||
g.AddProcessAdditionalGid(id + 2)
|
||||
|
@ -198,20 +210,19 @@ func TestProcessUser(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestProcessEnv(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
e := fmt.Sprintf("PARENT_TEST_PID=%d", unix.Getpid())
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
g.ClearProcessEnv()
|
||||
g.AddProcessEnv("PARENT_TEST_PID", strconv.Itoa(unix.Getpid()))
|
||||
},
|
||||
func(t *testing.T, report *types.TestReport) {
|
||||
for _, ev := range report.Spec.Process.Env {
|
||||
if ev == e {
|
||||
return
|
||||
}
|
||||
if slices.Contains(report.Spec.Process.Env, e) {
|
||||
return
|
||||
}
|
||||
t.Fatalf("expected environment variable %q", e)
|
||||
},
|
||||
|
@ -219,12 +230,13 @@ func TestProcessEnv(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestProcessCwd(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
if err := os.Mkdir(filepath.Join(rootDir, "/no-such-directory"), 0700); err != nil {
|
||||
func(g *generate.Generator, rootDir, _ string) {
|
||||
if err := os.Mkdir(filepath.Join(rootDir, "/no-such-directory"), 0o700); err != nil {
|
||||
t.Fatalf("mkdir(%q): %v", filepath.Join(rootDir, "/no-such-directory"), err)
|
||||
}
|
||||
g.SetProcessCwd("/no-such-directory")
|
||||
|
@ -238,11 +250,12 @@ func TestProcessCwd(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestProcessCapabilities(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
g.ClearProcessCapabilities()
|
||||
},
|
||||
func(t *testing.T, report *types.TestReport) {
|
||||
|
@ -252,7 +265,7 @@ func TestProcessCapabilities(t *testing.T) {
|
|||
},
|
||||
)
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
g.ClearProcessCapabilities()
|
||||
if err := g.AddProcessCapabilityEffective("CAP_IPC_LOCK"); err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
|
@ -282,12 +295,13 @@ func TestProcessCapabilities(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestProcessRlimits(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
for _, limit := range []uint64{100 * 1024 * 1024 * 1024, 200 * 1024 * 1024 * 1024, unix.RLIM_INFINITY} {
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
g.ClearProcessRlimits()
|
||||
if limit != unix.RLIM_INFINITY {
|
||||
g.AddProcessRlimits("rlimit_as", limit, limit)
|
||||
|
@ -300,7 +314,7 @@ func TestProcessRlimits(t *testing.T) {
|
|||
rlim = &report.Spec.Process.Rlimits[i]
|
||||
}
|
||||
}
|
||||
if limit == unix.RLIM_INFINITY && !(rlim == nil || (rlim.Soft == unix.RLIM_INFINITY && rlim.Hard == unix.RLIM_INFINITY)) {
|
||||
if limit == unix.RLIM_INFINITY && rlim != nil && (rlim.Soft != unix.RLIM_INFINITY || rlim.Hard != unix.RLIM_INFINITY) {
|
||||
t.Fatalf("wasn't supposed to set limit on number of open files: %#v", rlim)
|
||||
}
|
||||
if limit != unix.RLIM_INFINITY && rlim == nil {
|
||||
|
@ -320,6 +334,7 @@ func TestProcessRlimits(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestProcessNoNewPrivileges(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
|
@ -328,7 +343,7 @@ func TestProcessNoNewPrivileges(t *testing.T) {
|
|||
}
|
||||
for _, nope := range []bool{false, true} {
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
g.SetProcessNoNewPrivileges(nope)
|
||||
},
|
||||
func(t *testing.T, report *types.TestReport) {
|
||||
|
@ -341,12 +356,13 @@ func TestProcessNoNewPrivileges(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestProcessOOMScoreAdj(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
for _, adj := range []int{0, 1, 2, 3} {
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
g.SetProcessOOMScoreAdj(adj)
|
||||
},
|
||||
func(t *testing.T, report *types.TestReport) {
|
||||
|
@ -363,12 +379,13 @@ func TestProcessOOMScoreAdj(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHostname(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
hostname := fmt.Sprintf("host%d", unix.Getpid())
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
g.SetHostname(hostname)
|
||||
},
|
||||
func(t *testing.T, report *types.TestReport) {
|
||||
|
@ -380,12 +397,13 @@ func TestHostname(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestMounts(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
t.Run("tmpfs", func(t *testing.T) {
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
g.AddMount(specs.Mount{
|
||||
Source: "tmpfs",
|
||||
Destination: "/was-not-there-before",
|
||||
|
@ -432,7 +450,8 @@ func TestMounts(t *testing.T) {
|
|||
name: "nosuid",
|
||||
destination: "/nosuid",
|
||||
options: []string{"nosuid"},
|
||||
reject: []string{"suid"}},
|
||||
reject: []string{"suid"},
|
||||
},
|
||||
{
|
||||
name: "nodev,noexec",
|
||||
destination: "/nodev,noexec",
|
||||
|
@ -485,7 +504,7 @@ func TestMounts(t *testing.T) {
|
|||
tmpfsFlags, tmpfsOptions := mount.ParseOptions(tmpfsOptions)
|
||||
require.NoErrorf(t, unix.Mount("none", tmpfsMount, "tmpfs", uintptr(tmpfsFlags), tmpfsOptions), "error mounting a tmpfs with flags=%#x,options=%q at %s", tmpfsFlags, tmpfsOptions, tmpfsMount)
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
fsType := bind.fsType
|
||||
if fsType == "" {
|
||||
fsType = "bind"
|
||||
|
@ -539,11 +558,12 @@ func TestMounts(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestLinuxIDMapping(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
g.ClearLinuxUIDMappings()
|
||||
g.ClearLinuxGIDMappings()
|
||||
g.AddLinuxUIDMapping(uint32(unix.Getuid()), 0, 1)
|
||||
|
@ -576,11 +596,12 @@ func TestLinuxIDMapping(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestLinuxIDMappingShift(t *testing.T) {
|
||||
t.Parallel()
|
||||
if unix.Getuid() != 0 {
|
||||
t.Skip("tests need to be run as root")
|
||||
}
|
||||
testMinimal(t,
|
||||
func(g *generate.Generator, rootDir, bundleDir string) {
|
||||
func(g *generate.Generator, _, _ string) {
|
||||
g.ClearLinuxUIDMappings()
|
||||
g.ClearLinuxGIDMappings()
|
||||
g.AddLinuxUIDMapping(uint32(unix.Getuid())+1, 0, 1)
|
||||
|
|
|
@@ -1,20 +1,15 @@
//go:build linux && seccomp
// +build linux,seccomp

package chroot

import (
    "fmt"
    "os"

    "github.com/containers/common/pkg/seccomp"
    specs "github.com/opencontainers/runtime-spec/specs-go"
    libseccomp "github.com/seccomp/libseccomp-golang"
    "github.com/sirupsen/logrus"
)

const seccompAvailable = true

// setSeccomp sets the seccomp filter for ourselves and any processes that we'll start.
func setSeccomp(spec *specs.Spec) error {
    logrus.Debugf("setting seccomp configuration")
@@ -179,27 +174,3 @@
    }
    return nil
}

func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
    switch seccompProfilePath {
    case "unconfined":
        spec.Linux.Seccomp = nil
    case "":
        seccompConfig, err := seccomp.GetDefaultProfile(spec)
        if err != nil {
            return fmt.Errorf("loading default seccomp profile failed: %w", err)
        }
        spec.Linux.Seccomp = seccompConfig
    default:
        seccompProfile, err := os.ReadFile(seccompProfilePath)
        if err != nil {
            return fmt.Errorf("opening seccomp profile failed: %w", err)
        }
        seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
        if err != nil {
            return fmt.Errorf("loading seccomp profile (%s) failed: %w", seccompProfilePath, err)
        }
        spec.Linux.Seccomp = seccompConfig
    }
    return nil
}
@@ -1,5 +1,4 @@
//go:build freebsd && seccomp
// +build freebsd,seccomp

package chroot

@@ -0,0 +1,37 @@
//go:build linux && seccomp

package chroot

import (
    "fmt"
    "os"

    "github.com/containers/common/pkg/seccomp"
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

const seccompAvailable = true

func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
    switch seccompProfilePath {
    case "unconfined":
        spec.Linux.Seccomp = nil
    case "":
        seccompConfig, err := seccomp.GetDefaultProfile(spec)
        if err != nil {
            return fmt.Errorf("loading default seccomp profile failed: %w", err)
        }
        spec.Linux.Seccomp = seccompConfig
    default:
        seccompProfile, err := os.ReadFile(seccompProfilePath)
        if err != nil {
            return fmt.Errorf("opening seccomp profile failed: %w", err)
        }
        seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
        if err != nil {
            return fmt.Errorf("loading seccomp profile (%s) failed: %w", seccompProfilePath, err)
        }
        spec.Linux.Seccomp = seccompConfig
    }
    return nil
}
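The new file above separates setupSeccomp (resolving "", "unconfined", or a profile path into spec.Linux.Seccomp) from setSeccomp (installing the filter). As a hedged sketch only, a caller in the same package might combine them as below; applySeccompProfile is a hypothetical helper and is not part of the diff.

// applySeccompProfile resolves the profile into the spec, then installs
// whatever setupSeccomp left in spec.Linux.Seccomp.
func applySeccompProfile(spec *specs.Spec, profilePath string) error {
    if err := setupSeccomp(spec, profilePath); err != nil {
        return err
    }
    return setSeccomp(spec)
}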
@@ -1,5 +1,4 @@
//go:build (!linux && !freebsd) || !seccomp
// +build !linux,!freebsd !seccomp

package chroot

@@ -9,19 +8,9 @@ import (
    "github.com/opencontainers/runtime-spec/specs-go"
)

const seccompAvailable = false

func setSeccomp(spec *specs.Spec) error {
    if spec.Linux.Seccomp != nil {
        return errors.New("configured a seccomp filter without seccomp support?")
    }
    return nil
}

func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
    if spec.Linux != nil {
        // runtime-tools may have supplied us with a default filter
        spec.Linux.Seccomp = nil
    }
    return nil
}
@@ -0,0 +1,17 @@
//go:build (!linux && !freebsd) || !seccomp

package chroot

import (
    "github.com/opencontainers/runtime-spec/specs-go"
)

const seccompAvailable = false

func setupSeccomp(spec *specs.Spec, _ string) error {
    if spec.Linux != nil {
        // runtime-tools may have supplied us with a default filter
        spec.Linux.Seccomp = nil
    }
    return nil
}
@@ -1,5 +1,4 @@
//go:build linux
// +build linux

package chroot

@@ -8,7 +7,6 @@ import (

    "github.com/opencontainers/runtime-spec/specs-go"
    selinux "github.com/opencontainers/selinux/go-selinux"
    "github.com/opencontainers/selinux/go-selinux/label"
    "github.com/sirupsen/logrus"
)

@@ -16,7 +14,7 @@
func setSelinuxLabel(spec *specs.Spec) error {
    logrus.Debugf("setting selinux label")
    if spec.Process.SelinuxLabel != "" && selinux.GetEnabled() {
        if err := label.SetProcessLabel(spec.Process.SelinuxLabel); err != nil {
        if err := selinux.SetExecLabel(spec.Process.SelinuxLabel); err != nil {
            return fmt.Errorf("setting process label to %q: %w", spec.Process.SelinuxLabel, err)
        }
    }
@@ -1,5 +1,4 @@
//go:build !linux && !freebsd
// +build !linux,!freebsd

package chroot

@@ -1,5 +1,4 @@
//go:build !linux && !freebsd
// +build !linux,!freebsd

package chroot

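Many of the hunks in this change do nothing more than delete the legacy // +build constraint lines. That is safe once a project no longer supports pre-Go 1.17 toolchains: from Go 1.17 on, the //go:build expression is authoritative and gofmt keeps it canonical, so the duplicated +build form can be dropped. The header below is a small illustration of the before/after shape and is not taken from the diff.

// Before (two constraint syntaxes that had to be kept in sync by gofmt):
//
//    //go:build !linux && !freebsd
//    // +build !linux,!freebsd
//
// After (Go 1.17+ only; the single expression is sufficient):
//
//    //go:build !linux && !freebsd
//
package chroot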
@ -5,6 +5,7 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
@ -37,6 +38,10 @@ type addCopyResults struct {
|
|||
certDir string
|
||||
retry int
|
||||
retryDelay string
|
||||
excludes []string
|
||||
parents bool
|
||||
timestamp string
|
||||
link bool
|
||||
}
|
||||
|
||||
func createCommand(addCopy string, desc string, short string, opts *addCopyResults) *cobra.Command {
|
||||
|
@ -48,7 +53,8 @@ func createCommand(addCopy string, desc string, short string, opts *addCopyResul
|
|||
return addAndCopyCmd(cmd, args, strings.ToUpper(addCopy), *opts)
|
||||
},
|
||||
Example: `buildah ` + addCopy + ` containerID '/myapp/app.conf'
|
||||
buildah ` + addCopy + ` containerID '/myapp/app.conf' '/myapp/app.conf'`,
|
||||
buildah ` + addCopy + ` containerID 'app.conf' '/myapp/app.conf'
|
||||
buildah ` + addCopy + ` containerID 'app.conf' 'drop-in.conf' '/myapp/app.conf.d/'`,
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
}
|
||||
}
|
||||
|
@ -64,14 +70,12 @@ func applyFlagVars(flags *pflag.FlagSet, opts *addCopyResults) {
|
|||
if err := flags.MarkHidden("blob-cache"); err != nil {
|
||||
panic(fmt.Sprintf("error marking blob-cache as hidden: %v", err))
|
||||
}
|
||||
flags.StringVar(&opts.certDir, "cert-dir", "", "use certificates at the specified path to access registries")
|
||||
if err := flags.MarkHidden("cert-dir"); err != nil {
|
||||
panic(fmt.Sprintf("error marking cert-dir as hidden: %v", err))
|
||||
}
|
||||
flags.StringVar(&opts.certDir, "cert-dir", "", "use certificates at the specified path to access registries and sources in HTTPS locations")
|
||||
flags.StringVar(&opts.checksum, "checksum", "", "checksum the HTTP source content")
|
||||
flags.StringVar(&opts.chown, "chown", "", "set the user and group ownership of the destination content")
|
||||
flags.StringVar(&opts.chmod, "chmod", "", "set the access permissions of the destination content")
|
||||
flags.StringVar(&opts.creds, "creds", "", "use `[username[:password]]` for accessing registries when pulling images")
|
||||
flags.BoolVar(&opts.link, "link", false, "enable layer caching for this operation (creates an independent layer)")
|
||||
if err := flags.MarkHidden("creds"); err != nil {
|
||||
panic(fmt.Sprintf("error marking creds as hidden: %v", err))
|
||||
}
|
||||
|
@ -80,15 +84,13 @@ func applyFlagVars(flags *pflag.FlagSet, opts *addCopyResults) {
|
|||
if err := flags.MarkHidden("decryption-key"); err != nil {
|
||||
panic(fmt.Sprintf("error marking decryption-key as hidden: %v", err))
|
||||
}
|
||||
flags.StringSliceVar(&opts.excludes, "exclude", nil, "exclude pattern when copying files")
|
||||
flags.StringVar(&opts.ignoreFile, "ignorefile", "", "path to .containerignore file")
|
||||
flags.StringVar(&opts.contextdir, "contextdir", "", "context directory path")
|
||||
flags.IntVar(&opts.retry, "retry", cli.MaxPullPushRetries, "number of times to retry in case of failure when performing pull")
|
||||
flags.StringVar(&opts.retryDelay, "retry-delay", cli.PullPushRetryDelay.String(), "delay between retries in case of pull failures")
|
||||
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "don't output a digest of the newly-added/copied content")
|
||||
flags.BoolVar(&opts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing registries when pulling images. TLS verification cannot be used when talking to an insecure registry.")
|
||||
if err := flags.MarkHidden("tls-verify"); err != nil {
|
||||
panic(fmt.Sprintf("error marking tls-verify as hidden: %v", err))
|
||||
}
|
||||
flags.BoolVar(&opts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing registries when pulling images, and when retrieving sources from HTTPS URLs. TLS verification cannot be used when talking to an insecure registry.")
|
||||
flags.BoolVarP(&opts.removeSignatures, "remove-signatures", "", false, "don't copy signatures when pulling image")
|
||||
if err := flags.MarkHidden("remove-signatures"); err != nil {
|
||||
panic(fmt.Sprintf("error marking remove-signatures as hidden: %v", err))
|
||||
|
@ -97,6 +99,7 @@ func applyFlagVars(flags *pflag.FlagSet, opts *addCopyResults) {
|
|||
if err := flags.MarkHidden("signature-policy"); err != nil {
|
||||
panic(fmt.Sprintf("error marking signature-policy as hidden: %v", err))
|
||||
}
|
||||
flags.StringVar(&opts.timestamp, "timestamp", "", "set timestamps on new content to `seconds` after the epoch")
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
@ -119,6 +122,7 @@ func init() {
|
|||
|
||||
copyFlags := copyCommand.Flags()
|
||||
applyFlagVars(copyFlags, ©Opts)
|
||||
copyFlags.BoolVar(©Opts.parents, "parents", false, "preserve leading directories in the paths of items being copied")
|
||||
|
||||
rootCmd.AddCommand(addCommand)
|
||||
rootCmd.AddCommand(copyCommand)
|
||||
|
@ -160,13 +164,14 @@ func addAndCopyCmd(c *cobra.Command, args []string, verb string, iopts addCopyRe
|
|||
return errors.New("--ignorefile option requires that you specify a context dir using --contextdir")
|
||||
}
|
||||
|
||||
systemContext, err := parse.SystemContextFromOptions(c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building system context: %w", err)
|
||||
}
|
||||
|
||||
var preserveOwnership bool
|
||||
if iopts.from != "" {
|
||||
if from, err = openBuilder(getContext(), store, iopts.from); err != nil && errors.Is(err, storage.ErrContainerUnknown) {
|
||||
systemContext, err2 := parse.SystemContextFromOptions(c)
|
||||
if err2 != nil {
|
||||
return fmt.Errorf("building system context: %w", err2)
|
||||
}
|
||||
|
||||
decryptConfig, err2 := cli.DecryptConfig(iopts.decryptionKeys)
|
||||
if err2 != nil {
|
||||
return fmt.Errorf("unable to obtain decrypt config: %w", err2)
|
||||
|
@ -221,6 +226,7 @@ func addAndCopyCmd(c *cobra.Command, args []string, verb string, iopts addCopyRe
|
|||
}
|
||||
}()
|
||||
idMappingOptions = &from.IDMappingOptions
|
||||
preserveOwnership = true
|
||||
contextdir = filepath.Join(fromMountPoint, iopts.contextdir)
|
||||
for i := range args {
|
||||
args[i] = filepath.Join(fromMountPoint, args[i])
|
||||
|
@ -234,12 +240,32 @@ func addAndCopyCmd(c *cobra.Command, args []string, verb string, iopts addCopyRe
|
|||
|
||||
builder.ContentDigester.Restart()
|
||||
|
||||
var timestamp *time.Time
|
||||
if iopts.timestamp != "" {
|
||||
u, err := strconv.ParseInt(iopts.timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing timestamp value %q: %w", iopts.timestamp, err)
|
||||
}
|
||||
t := time.Unix(u, 0).UTC()
|
||||
timestamp = &t
|
||||
}
|
||||
|
||||
options := buildah.AddAndCopyOptions{
|
||||
Chmod: iopts.chmod,
|
||||
Chown: iopts.chown,
|
||||
Checksum: iopts.checksum,
|
||||
ContextDir: contextdir,
|
||||
IDMappingOptions: idMappingOptions,
|
||||
Chmod: iopts.chmod,
|
||||
Chown: iopts.chown,
|
||||
PreserveOwnership: preserveOwnership,
|
||||
Checksum: iopts.checksum,
|
||||
ContextDir: contextdir,
|
||||
Excludes: iopts.excludes,
|
||||
IDMappingOptions: idMappingOptions,
|
||||
// These next two fields are set based on command line flags
|
||||
// with more generic-sounding names.
|
||||
CertPath: systemContext.DockerCertPath,
|
||||
InsecureSkipTLSVerify: systemContext.DockerInsecureSkipTLSVerify,
|
||||
MaxRetries: iopts.retry,
|
||||
Parents: iopts.parents,
|
||||
Timestamp: timestamp,
|
||||
Link: iopts.link,
|
||||
}
|
||||
if iopts.contextdir != "" {
|
||||
var excludes []string
|
||||
|
@ -248,7 +274,14 @@ func addAndCopyCmd(c *cobra.Command, args []string, verb string, iopts addCopyRe
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
options.Excludes = excludes
|
||||
options.Excludes = append(excludes, options.Excludes...)
|
||||
}
|
||||
if iopts.retryDelay != "" {
|
||||
retryDelay, err := time.ParseDuration(iopts.retryDelay)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to parse value provided %q as --retry-delay: %w", iopts.retryDelay, err)
|
||||
}
|
||||
options.RetryDelay = retryDelay
|
||||
}
|
||||
|
||||
extractLocalArchives := verb == "ADD"
|
||||
|
|
|
@@ -1,6 +1,7 @@
package main

import (
    "fmt"
    "os"

    "github.com/containers/buildah/imagebuildah"
@@ -71,9 +72,9 @@ func init() {

func buildCmd(c *cobra.Command, inputArgs []string, iopts buildahcli.BuildOptions) error {
    if c.Flag("logfile").Changed {
        logfile, err := os.OpenFile(iopts.Logfile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
        logfile, err := os.OpenFile(iopts.Logfile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o600)
        if err != nil {
            return err
            return fmt.Errorf("opening log file: %w", err)
        }
        iopts.Logwriter = logfile
        defer iopts.Logwriter.Close()
@ -5,11 +5,13 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containers/buildah"
|
||||
"github.com/containers/buildah/define"
|
||||
"github.com/containers/buildah/internal"
|
||||
"github.com/containers/buildah/pkg/cli"
|
||||
"github.com/containers/buildah/pkg/parse"
|
||||
"github.com/containers/buildah/util"
|
||||
|
@ -39,6 +41,8 @@ type commitInputOptions struct {
|
|||
manifest string
|
||||
omitTimestamp bool
|
||||
timestamp int64
|
||||
sourceDateEpoch string
|
||||
rewriteTimestamp bool
|
||||
quiet bool
|
||||
referenceTime string
|
||||
rm bool
|
||||
|
@ -62,6 +66,9 @@ type commitInputOptions struct {
|
|||
encryptLayers []int
|
||||
unsetenvs []string
|
||||
addFile []string
|
||||
unsetAnnotation []string
|
||||
annotation []string
|
||||
createdAnnotation bool
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
@ -83,7 +90,6 @@ func init() {
|
|||
commitCommand.SetUsageTemplate(UsageTemplate())
|
||||
commitListFlagSet(commitCommand, &opts)
|
||||
rootCmd.AddCommand(commitCommand)
|
||||
|
||||
}
|
||||
|
||||
func commitListFlagSet(cmd *cobra.Command, opts *commitInputOptions) {
|
||||
|
@ -118,14 +124,21 @@ func commitListFlagSet(cmd *cobra.Command, opts *commitInputOptions) {
|
|||
flags.StringVar(&opts.iidfile, "iidfile", "", "write the image ID to the file")
|
||||
_ = cmd.RegisterFlagCompletionFunc("iidfile", completion.AutocompleteDefault)
|
||||
flags.BoolVar(&opts.omitTimestamp, "omit-timestamp", false, "set created timestamp to epoch 0 to allow for deterministic builds")
|
||||
flags.Int64Var(&opts.timestamp, "timestamp", 0, "set created timestamp to epoch seconds to allow for deterministic builds, defaults to current time")
|
||||
sourceDateEpochUsageDefault := "current time"
|
||||
if v := os.Getenv(internal.SourceDateEpochName); v != "" {
|
||||
sourceDateEpochUsageDefault = fmt.Sprintf("%q", v)
|
||||
}
|
||||
flags.StringVar(&opts.sourceDateEpoch, "source-date-epoch", os.Getenv(internal.SourceDateEpochName), "set new timestamps in image info to `seconds` after the epoch, defaults to "+sourceDateEpochUsageDefault)
|
||||
_ = cmd.RegisterFlagCompletionFunc("source-date-epoch", completion.AutocompleteNone)
|
||||
flags.BoolVar(&opts.rewriteTimestamp, "rewrite-timestamp", false, "set timestamps in layer to no later than the value for --source-date-epoch")
|
||||
flags.Int64Var(&opts.timestamp, "timestamp", 0, "set new timestamps in image info and layer to `seconds` after the epoch, defaults to current times")
|
||||
_ = cmd.RegisterFlagCompletionFunc("timestamp", completion.AutocompleteNone)
|
||||
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "don't output progress information when writing images")
|
||||
flags.StringVar(&opts.referenceTime, "reference-time", "", "set the timestamp on the image to match the named `file`")
|
||||
_ = cmd.RegisterFlagCompletionFunc("reference-time", completion.AutocompleteNone)
|
||||
|
||||
flags.StringVar(&opts.pull, "pull", "true", "pull SBOM scanner images from the registry if newer or not present in store, if false, only pull SBOM scanner images if not present, if always, pull SBOM scanner images even if the named images are present in store, if never, only use images present in store if available")
|
||||
flags.Lookup("pull").NoOptDefVal = "true" //allow `--pull ` to be set to `true` as expected.
|
||||
flags.Lookup("pull").NoOptDefVal = "true" // allow `--pull ` to be set to `true` as expected.
|
||||
|
||||
flags.BoolVar(&opts.pullAlways, "pull-always", false, "pull the image even if the named image is present in store")
|
||||
if err := flags.MarkHidden("pull-always"); err != nil {
|
||||
|
@@ -177,6 +190,11 @@ func commitListFlagSet(cmd *cobra.Command, opts *commitInputOptions) {

flags.StringSliceVar(&opts.unsetenvs, "unsetenv", nil, "unset env from final image")
_ = cmd.RegisterFlagCompletionFunc("unsetenv", completion.AutocompleteNone)
flags.StringSliceVar(&opts.unsetAnnotation, "unsetannotation", nil, "unset annotation when inheriting annotations from base image")
_ = cmd.RegisterFlagCompletionFunc("unsetannotation", completion.AutocompleteNone)
flags.StringArrayVar(&opts.annotation, "annotation", []string{}, "set metadata for an image (default [])")
_ = cmd.RegisterFlagCompletionFunc("annotation", completion.AutocompleteNone)
flags.BoolVar(&opts.createdAnnotation, "created-annotation", true, `set an "org.opencontainers.image.created" annotation in the image`)
}

func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error {

@@ -245,11 +263,6 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error
}
}

// Add builder identity information.
if iopts.identityLabel {
builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
}

encConfig, encLayers, err := cli.EncryptConfig(iopts.encryptionKeys, iopts.encryptLayers)
if err != nil {
return fmt.Errorf("unable to obtain encryption config: %w", err)

@@ -306,6 +319,9 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error
OverrideChanges: iopts.changes,
OverrideConfig: overrideConfig,
ExtraImageContent: addFiles,
UnsetAnnotations: iopts.unsetAnnotation,
Annotations: iopts.annotation,
CreatedAnnotation: types.NewOptionalBool(iopts.createdAnnotation),
}
exclusiveFlags := 0
if c.Flag("reference-time").Changed {

@@ -318,6 +334,16 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error
timestamp := finfo.ModTime().UTC()
options.HistoryTimestamp = &timestamp
}
if iopts.sourceDateEpoch != "" {
exclusiveFlags++
sourceDateEpochVal, err := strconv.ParseInt(iopts.sourceDateEpoch, 10, 64)
if err != nil {
return fmt.Errorf("parsing source date epoch %q: %w", iopts.sourceDateEpoch, err)
}
sourceDateEpoch := time.Unix(sourceDateEpochVal, 0).UTC()
options.SourceDateEpoch = &sourceDateEpoch
}
options.RewriteTimestamp = iopts.rewriteTimestamp
if c.Flag("timestamp").Changed {
exclusiveFlags++
timestamp := time.Unix(iopts.timestamp, 0).UTC()

@@ -328,6 +354,25 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error
timestamp := time.Unix(0, 0).UTC()
options.HistoryTimestamp = &timestamp
}
if exclusiveFlags > 1 {
return errors.New("cannot use more then one timestamp option at at time")
}

// Add builder identity information.
var identityLabel types.OptionalBool
if c.Flag("identity-label").Changed {
identityLabel = types.NewOptionalBool(iopts.identityLabel)
}
switch identityLabel {
case types.OptionalBoolTrue:
builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
case types.OptionalBoolFalse:
// nothing - don't clear it if there's a value set in the base image
default:
if options.HistoryTimestamp == nil && options.SourceDateEpoch == nil {
builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
}
}

if iopts.cwOptions != "" {
confidentialWorkloadOptions, err := parse.GetConfidentialWorkloadOptions(iopts.cwOptions)

@@ -353,10 +398,6 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error
options.SBOMScanOptions = sbomOptions
}

if exclusiveFlags > 1 {
return errors.New("can not use more then one timestamp option at at time")
}

if !iopts.quiet {
options.ReportWriter = os.Stderr
}

@@ -5,12 +5,9 @@ import (
"errors"
"fmt"
"os"
"time"

"github.com/containers/buildah"
"github.com/containers/common/pkg/umask"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
"github.com/containers/storage"

@@ -20,10 +17,8 @@ import (
"github.com/spf13/pflag"
)

var (
// configuration, including customizations made in containers.conf
needToShutdownStore = false
)
// configuration, including customizations made in containers.conf
var needToShutdownStore = false

func getStore(c *cobra.Command) (storage.Store, error) {
if err := setXDGRuntimeDir(); err != nil {

@@ -156,46 +151,6 @@ func openImage(ctx context.Context, sc *types.SystemContext, store storage.Store
return builder, nil
}

func getDateAndDigestAndSize(ctx context.Context, sys *types.SystemContext, store storage.Store, storeImage storage.Image) (time.Time, string, int64, error) {
created := time.Time{}
is.Transport.SetStore(store)
storeRef, err := is.Transport.ParseStoreReference(store, storeImage.ID)
if err != nil {
return created, "", -1, err
}
img, err := storeRef.NewImageSource(ctx, nil)
if err != nil {
return created, "", -1, err
}
defer img.Close()
imgSize, sizeErr := store.ImageSize(storeImage.ID)
if sizeErr != nil {
imgSize = -1
}
manifestBytes, _, manifestErr := img.GetManifest(ctx, nil)
manifestDigest := ""
if manifestErr == nil && len(manifestBytes) > 0 {
mDigest, err := manifest.Digest(manifestBytes)
manifestErr = err
if manifestErr == nil {
manifestDigest = mDigest.String()
}
}
inspectable, inspectableErr := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(img, nil))
if inspectableErr == nil {
inspectInfo, inspectErr := inspectable.Inspect(ctx)
if inspectErr == nil && inspectInfo != nil && inspectInfo.Created != nil {
created = *inspectInfo.Created
}
}
if sizeErr != nil {
err = sizeErr
} else if manifestErr != nil {
err = manifestErr
}
return created, manifestDigest, imgSize, err
}

// getContext returns a context.TODO
func getContext() context.Context {
return context.TODO()

@ -7,8 +7,6 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/containers/buildah"
|
||||
"github.com/containers/buildah/define"
|
||||
is "github.com/containers/image/v5/storage"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
@ -18,7 +16,10 @@ import (
|
|||
var (
|
||||
signaturePolicyPath = ""
|
||||
storeOptions, _ = storage.DefaultStoreOptions()
|
||||
testSystemContext = types.SystemContext{}
|
||||
testSystemContext = types.SystemContext{
|
||||
SignaturePolicyPath: "../../tests/policy.json",
|
||||
SystemRegistriesConfPath: "../../tests/registries.conf",
|
||||
}
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
@ -49,7 +50,7 @@ func TestGetStore(t *testing.T) {
|
|||
failTestIfNotRoot(t)
|
||||
testCmd := &cobra.Command{
|
||||
Use: "test",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
_, err := getStore(cmd)
|
||||
return err
|
||||
},
|
||||
|
@ -74,31 +75,6 @@ func TestGetStore(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGetSize(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
// Pull an image so that we know we have at least one
|
||||
pullTestImage(t)
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
|
||||
_, _, _, err = getDateAndDigestAndSize(getContext(), &testSystemContext, store, images[0])
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func failTestIfNotRoot(t *testing.T) {
|
||||
u, err := user.Current()
|
||||
if err != nil {
|
||||
|
@ -107,30 +83,3 @@ func failTestIfNotRoot(t *testing.T) {
|
|||
t.Fatal("tests will fail unless run as root")
|
||||
}
|
||||
}
|
||||
|
||||
func pullTestImage(t *testing.T) string {
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
commonOpts := &define.CommonBuildOptions{
|
||||
LabelOpts: nil,
|
||||
}
|
||||
options := buildah.BuilderOptions{
|
||||
FromImage: "busybox:latest",
|
||||
SignaturePolicyPath: signaturePolicyPath,
|
||||
CommonBuildOpts: commonOpts,
|
||||
SystemContext: &testSystemContext,
|
||||
}
|
||||
|
||||
b, err := buildah.NewBuilder(getContext(), store, options)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id := b.FromImageID
|
||||
err = b.Delete()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
|
|
@@ -18,36 +18,38 @@ import (
)

type configResults struct {
addHistory bool
annotation []string
arch string
author string
cmd string
comment string
createdBy string
domainName string
entrypoint string
env []string
healthcheck string
healthcheckInterval string
healthcheckRetries int
healthcheckStartPeriod string
healthcheckTimeout string
historyComment string
hostname string
label []string
onbuild []string
os string
osfeature []string
osversion string
ports []string
shell string
stopSignal string
user string
variant string
volume []string
workingDir string
unsetLabels []string
addHistory bool
annotation []string
arch string
author string
cmd string
comment string
createdBy string
domainName string
entrypoint string
env []string
healthcheck string
healthcheckInterval string
healthcheckRetries int
healthcheckStartPeriod string
healthcheckStartInterval string
healthcheckTimeout string
historyComment string
hostname string
label []string
onbuild []string
os string
osfeature []string
osversion string
ports []string
shell string
stopSignal string
user string
variant string
volume []string
workingDir string
unsetLabels []string
unsetAnnotations []string
}

func init() {

@ -84,6 +86,7 @@ func init() {
|
|||
flags.StringVar(&opts.healthcheckInterval, "healthcheck-interval", "", "set the `interval` between runs of the `healthcheck` command for the target image")
|
||||
flags.IntVar(&opts.healthcheckRetries, "healthcheck-retries", 0, "set the `number` of times the `healthcheck` command has to fail")
|
||||
flags.StringVar(&opts.healthcheckStartPeriod, "healthcheck-start-period", "", "set the amount of `time` to wait after starting a container before a failed `healthcheck` command will count as a failure")
|
||||
flags.StringVar(&opts.healthcheckStartInterval, "healthcheck-start-interval", "", "set the time between health checks during the start period. Only available with format `docker`")
|
||||
flags.StringVar(&opts.healthcheckTimeout, "healthcheck-timeout", "", "set the maximum amount of `time` to wait for a `healthcheck` command for the target image")
|
||||
flags.StringVar(&opts.historyComment, "history-comment", "", "set a `comment` for the history of the target image")
|
||||
flags.StringVar(&opts.hostname, "hostname", "", "set a host`name` for containers based on image")
|
||||
|
@ -100,9 +103,9 @@ func init() {
|
|||
flags.StringSliceVarP(&opts.volume, "volume", "v", []string{}, "add default `volume` path to be created for containers based on image (default [])")
|
||||
flags.StringVar(&opts.workingDir, "workingdir", "", "set working `directory` for containers based on image")
|
||||
flags.StringSliceVar(&opts.unsetLabels, "unsetlabel", nil, "remove image configuration label")
|
||||
flags.StringSliceVar(&opts.unsetAnnotations, "unsetannotation", nil, "remove image configuration annotation")
|
||||
|
||||
rootCmd.AddCommand(configCommand)
|
||||
|
||||
}
|
||||
|
||||
func updateCmd(builder *buildah.Builder, cmd string) error {
|
||||
|
@ -153,7 +156,7 @@ func updateEntrypoint(builder *buildah.Builder, entrypoint string) {
|
|||
builder.SetEntrypoint(entrypointSpec)
|
||||
}
|
||||
|
||||
func conditionallyAddHistory(builder *buildah.Builder, c *cobra.Command, createdByFmt string, args ...interface{}) {
|
||||
func conditionallyAddHistory(builder *buildah.Builder, c *cobra.Command, createdByFmt string, args ...any) {
|
||||
history := buildahcli.DefaultHistory()
|
||||
if c.Flag("add-history").Changed {
|
||||
history, _ = c.Flags().GetBool("add-history")
|
||||
|
@ -308,6 +311,10 @@ func updateConfig(builder *buildah.Builder, c *cobra.Command, iopts configResult
|
|||
for _, key := range iopts.unsetLabels {
|
||||
builder.UnsetLabel(key)
|
||||
}
|
||||
// unset annotation if any
|
||||
for _, key := range iopts.unsetAnnotations {
|
||||
builder.UnsetAnnotation(key)
|
||||
}
|
||||
if c.Flag("workingdir").Changed {
|
||||
builder.SetWorkDir(iopts.workingDir)
|
||||
conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) WORKDIR %s", iopts.workingDir)
|
||||
|
@ -384,8 +391,7 @@ func updateHealthcheck(builder *buildah.Builder, c *cobra.Command, iopts configR
|
|||
if c.Flag("healthcheck-retries").Changed {
|
||||
healthcheck.Retries = iopts.healthcheckRetries
|
||||
args = args + "--retries=" + strconv.Itoa(iopts.healthcheckRetries) + " "
|
||||
//args = fmt.Sprintf("%s --retries=%d ", args, iopts.healthcheckRetries)
|
||||
|
||||
// args = fmt.Sprintf("%s --retries=%d ", args, iopts.healthcheckRetries)
|
||||
}
|
||||
if c.Flag("healthcheck-start-period").Changed {
|
||||
duration, err := time.ParseDuration(iopts.healthcheckStartPeriod)
|
||||
|
@ -395,6 +401,14 @@ func updateHealthcheck(builder *buildah.Builder, c *cobra.Command, iopts configR
|
|||
healthcheck.StartPeriod = duration
|
||||
args = args + "--start-period=" + iopts.healthcheckStartPeriod + " "
|
||||
}
|
||||
if c.Flag("healthcheck-start-interval").Changed {
|
||||
duration, err := time.ParseDuration(iopts.healthcheckStartInterval)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing --healthcheck-start-interval %q: %w", iopts.healthcheckStartInterval, err)
|
||||
}
|
||||
healthcheck.StartInterval = duration
|
||||
args = args + "--start-interval=" + iopts.healthcheckStartInterval + " "
|
||||
}
|
||||
if c.Flag("healthcheck-timeout").Changed {
|
||||
duration, err := time.ParseDuration(iopts.healthcheckTimeout)
|
||||
if err != nil {
|
||||
|
|
|
@ -4,15 +4,12 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/containers/buildah"
|
||||
"github.com/containers/buildah/define"
|
||||
"github.com/containers/buildah/pkg/formats"
|
||||
"github.com/containers/buildah/util"
|
||||
"github.com/containers/common/pkg/formats"
|
||||
"github.com/containers/storage"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@ -76,7 +73,7 @@ func init() {
|
|||
Aliases: []string{"list", "ls", "ps"},
|
||||
Short: "List working containers and their base images",
|
||||
Long: containersDescription,
|
||||
//Flags: sortFlags(containersFlags),
|
||||
// Flags: sortFlags(containersFlags),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return containersCmd(cmd, args, opts)
|
||||
},
|
||||
|
@ -168,11 +165,13 @@ func outputContainers(store storage.Store, opts containerOptions, params *contai
|
|||
continue
|
||||
}
|
||||
if opts.json {
|
||||
JSONContainers = append(JSONContainers, jsonContainer{ID: builder.ContainerID,
|
||||
JSONContainers = append(JSONContainers, jsonContainer{
|
||||
ID: builder.ContainerID,
|
||||
Builder: true,
|
||||
ImageID: builder.FromImageID,
|
||||
ImageName: image,
|
||||
ContainerName: builder.Container})
|
||||
ContainerName: builder.Container,
|
||||
})
|
||||
continue
|
||||
}
|
||||
output := containerOutputParams{
|
||||
|
@ -208,11 +207,13 @@ func outputContainers(store storage.Store, opts containerOptions, params *contai
|
|||
continue
|
||||
}
|
||||
if opts.json {
|
||||
JSONContainers = append(JSONContainers, jsonContainer{ID: container.ID,
|
||||
JSONContainers = append(JSONContainers, jsonContainer{
|
||||
ID: container.ID,
|
||||
Builder: ours,
|
||||
ImageID: container.ImageID,
|
||||
ImageName: imageNameForID(container.ImageID),
|
||||
ContainerName: name})
|
||||
ContainerName: name,
|
||||
})
|
||||
continue
|
||||
}
|
||||
output := containerOutputParams{
|
||||
|
@ -249,35 +250,15 @@ func outputContainers(store storage.Store, opts containerOptions, params *contai
|
|||
return nil
|
||||
}
|
||||
|
||||
func containersToGeneric(templParams []containerOutputParams) (genericParams []interface{}) {
|
||||
func containersToGeneric(templParams []containerOutputParams) (genericParams []any) {
|
||||
if len(templParams) > 0 {
|
||||
for _, v := range templParams {
|
||||
genericParams = append(genericParams, interface{}(v))
|
||||
genericParams = append(genericParams, any(v))
|
||||
}
|
||||
}
|
||||
return genericParams
|
||||
}
|
||||
|
||||
func containerOutputUsingTemplate(format string, params containerOutputParams) error {
|
||||
if matched, err := regexp.MatchString("{{.*}}", format); err != nil {
|
||||
return fmt.Errorf("validating format provided: %s: %w", format, err)
|
||||
} else if !matched {
|
||||
return fmt.Errorf("invalid format provided: %s", format)
|
||||
}
|
||||
|
||||
tmpl, err := template.New("container").Parse(format)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Template parsing error: %w", err)
|
||||
}
|
||||
|
||||
err = tmpl.Execute(os.Stdout, params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println()
|
||||
return nil
|
||||
}
|
||||
|
||||
func containerOutputUsingFormatString(truncate bool, params containerOutputParams) {
|
||||
if truncate {
|
||||
fmt.Printf("%-12.12s %-8s %-12.12s %-32s %s\n", params.ContainerID, params.Builder, params.ImageID, util.TruncateString(params.ImageName, 32), params.ContainerName)
|
||||
|
|
|
@ -5,66 +5,9 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestContainerTemplateOutputValidFormat(t *testing.T) {
|
||||
params := containerOutputParams{
|
||||
ContainerID: "e477836657bb",
|
||||
Builder: " ",
|
||||
ImageID: "f975c5035748",
|
||||
ImageName: "test/image:latest",
|
||||
ContainerName: "test-container",
|
||||
}
|
||||
|
||||
formatString := "Container ID: {{.ContainerID}}"
|
||||
expectedString := "Container ID: " + params.ContainerID
|
||||
|
||||
output, err := captureOutputWithError(func() error {
|
||||
return containerOutputUsingTemplate(formatString, params)
|
||||
})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if strings.TrimSpace(output) != expectedString {
|
||||
t.Errorf("Errorf with template output:\nExpected: %s\nReceived: %s\n", expectedString, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerTemplateOutputInvalidFormat(t *testing.T) {
|
||||
params := containerOutputParams{
|
||||
ContainerID: "e477836657bb",
|
||||
Builder: " ",
|
||||
ImageID: "f975c5035748",
|
||||
ImageName: "test/image:latest",
|
||||
ContainerName: "test-container",
|
||||
}
|
||||
|
||||
formatString := "ContainerID"
|
||||
|
||||
err := containerOutputUsingTemplate(formatString, params)
|
||||
if err == nil || err.Error() != "invalid format provided: ContainerID" {
|
||||
t.Fatalf("expected error invalid format")
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerTemplateOutputNonexistentField(t *testing.T) {
|
||||
params := containerOutputParams{
|
||||
ContainerID: "e477836657bb",
|
||||
Builder: " ",
|
||||
ImageID: "f975c5035748",
|
||||
ImageName: "test/image:latest",
|
||||
ContainerName: "test-container",
|
||||
}
|
||||
|
||||
formatString := "{{.ID}}"
|
||||
|
||||
err := containerOutputUsingTemplate(formatString, params)
|
||||
if err == nil || !strings.Contains(err.Error(), "can't evaluate field ID") {
|
||||
t.Fatalf("expected error nonexistent field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerFormatStringOutput(t *testing.T) {
|
||||
params := containerOutputParams{
|
||||
ContainerID: "e477836657bb",
|
||||
|
@ -110,25 +53,6 @@ func TestContainerHeaderOutput(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func captureOutputWithError(f func() error) (string, error) {
|
||||
old := os.Stdout
|
||||
r, w, err := os.Pipe()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
os.Stdout = w
|
||||
|
||||
if err := f(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
w.Close()
|
||||
os.Stdout = old
|
||||
var buf bytes.Buffer
|
||||
io.Copy(&buf, r) //nolint
|
||||
return buf.String(), err
|
||||
}
|
||||
|
||||
// Captures output so that it can be compared to expected values
|
||||
func captureOutput(f func()) string {
|
||||
old := os.Stdout
|
||||
|
|
|
@@ -21,8 +21,8 @@ var (
}
)

func dumpBoltCmd(c *cobra.Command, args []string) error {
db, err := bolt.Open(args[0], 0600, &bolt.Options{ReadOnly: true})
func dumpBoltCmd(_ *cobra.Command, args []string) error {
db, err := bolt.Open(args[0], 0o600, &bolt.Options{ReadOnly: true})
if err != nil {
return fmt.Errorf("opening database %q: %w", args[0], err)
}

@ -9,7 +9,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/containers/buildah"
|
||||
"github.com/containers/buildah/define"
|
||||
"github.com/containers/buildah/pkg/cli"
|
||||
"github.com/containers/buildah/pkg/parse"
|
||||
"github.com/containers/common/pkg/auth"
|
||||
|
@ -70,8 +69,13 @@ func init() {
|
|||
flags.StringVar(&opts.creds, "creds", "", "use `[username[:password]]` for accessing the registry")
|
||||
flags.StringVarP(&opts.format, "format", "f", defaultFormat(), "`format` of the image manifest and metadata")
|
||||
flags.StringVar(&opts.name, "name", "", "`name` for the working container")
|
||||
flags.StringVar(&opts.pull, "pull", "true", "pull images from the registry if newer or not present in store, if false, only pull images if not present, if always, pull images even if the named images are present in store, if never, only use images present in store if available")
|
||||
flags.Lookup("pull").NoOptDefVal = "true" //allow `--pull ` to be set to `true` as expected.
|
||||
flags.StringVar(&opts.pull, "pull", "missing", `pull images from the registry values:
|
||||
always: pull images even if the named images are present in store,
|
||||
missing: pull images if the named images are not present in store,
|
||||
never: only use images present in store if available,
|
||||
newer: only pull images when newer images exist on the registry than those in the store.`)
|
||||
|
||||
flags.Lookup("pull").NoOptDefVal = "true" // allow `--pull ` to be set to `true` as expected.
|
||||
|
||||
flags.BoolVar(&opts.pullAlways, "pull-always", false, "pull the image even if the named image is present in store")
|
||||
if err := flags.MarkHidden("pull-always"); err != nil {
|
||||
|
@ -248,14 +252,6 @@ func fromCmd(c *cobra.Command, args []string, iopts fromReply) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
devices := define.ContainerDevices{}
|
||||
for _, device := range append(defaultContainerConfig.Containers.Devices.Get(), iopts.Devices...) {
|
||||
dev, err := parse.DeviceFromPath(device)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
devices = append(devices, dev...)
|
||||
}
|
||||
|
||||
capabilities, err := defaultContainerConfig.Capabilities("", iopts.CapAdd, iopts.CapDrop)
|
||||
if err != nil {
|
||||
|
@ -288,9 +284,10 @@ func fromCmd(c *cobra.Command, args []string, iopts fromReply) error {
|
|||
CommonBuildOpts: commonOpts,
|
||||
Format: format,
|
||||
BlobDirectory: iopts.BlobCache,
|
||||
Devices: devices,
|
||||
DeviceSpecs: iopts.Devices,
|
||||
MaxPullRetries: iopts.Retry,
|
||||
OciDecryptConfig: decConfig,
|
||||
CDIConfigDir: iopts.CDIConfigDir,
|
||||
}
|
||||
|
||||
if iopts.RetryDelay != "" {
|
||||
|
@ -315,7 +312,7 @@ func fromCmd(c *cobra.Command, args []string, iopts fromReply) error {
|
|||
|
||||
if iopts.cidfile != "" {
|
||||
filePath := iopts.cidfile
|
||||
if err := os.WriteFile(filePath, []byte(builder.ContainerID), 0644); err != nil {
|
||||
if err := os.WriteFile(filePath, []byte(builder.ContainerID), 0o644); err != nil {
|
||||
return fmt.Errorf("failed to write container ID file %q: %w", filePath, err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -10,9 +10,9 @@ import (
|
|||
"time"
|
||||
|
||||
buildahcli "github.com/containers/buildah/pkg/cli"
|
||||
"github.com/containers/buildah/pkg/formats"
|
||||
"github.com/containers/buildah/pkg/parse"
|
||||
"github.com/containers/common/libimage"
|
||||
"github.com/containers/common/pkg/formats"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@ -142,11 +142,29 @@ func imagesCmd(c *cobra.Command, args []string, iopts *imageResults) error {
|
|||
options.Filters = append(options.Filters, "intermediate=false")
|
||||
}
|
||||
|
||||
images, err := runtime.ListImages(ctx, args, options)
|
||||
images, err := runtime.ListImages(ctx, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(args) > 0 {
|
||||
imagesMatchName, err := runtime.ListImagesByNames(args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
imagesIDs := map[string]struct{}{}
|
||||
for _, image := range imagesMatchName {
|
||||
imagesIDs[image.ID()] = struct{}{}
|
||||
}
|
||||
var imagesMatchNameAndFilter []*libimage.Image
|
||||
for _, image := range images {
|
||||
if _, ok := imagesIDs[image.ID()]; ok {
|
||||
imagesMatchNameAndFilter = append(imagesMatchNameAndFilter, image)
|
||||
}
|
||||
}
|
||||
images = imagesMatchNameAndFilter
|
||||
}
|
||||
|
||||
if iopts.quiet && iopts.format != "" {
|
||||
return errors.New("quiet and format are mutually exclusive")
|
||||
}
|
||||
|
@ -171,7 +189,7 @@ func imagesCmd(c *cobra.Command, args []string, iopts *imageResults) error {
|
|||
|
||||
func outputHeader(opts imageOptions) string {
|
||||
if opts.format != "" {
|
||||
return strings.Replace(opts.format, `\t`, "\t", -1)
|
||||
return strings.ReplaceAll(opts.format, `\t`, "\t")
|
||||
}
|
||||
if opts.quiet {
|
||||
return formats.IDString
|
||||
|
@ -309,10 +327,10 @@ func truncateID(id string, truncate bool) string {
|
|||
return id
|
||||
}
|
||||
|
||||
func imagesToGeneric(templParams []imageOutputParams) (genericParams []interface{}) {
|
||||
func imagesToGeneric(templParams []imageOutputParams) (genericParams []any) {
|
||||
if len(templParams) > 0 {
|
||||
for _, v := range templParams {
|
||||
genericParams = append(genericParams, interface{}(v))
|
||||
genericParams = append(genericParams, any(v))
|
||||
}
|
||||
}
|
||||
return genericParams
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
)
|
||||
|
||||
func TestSizeFormatting(t *testing.T) {
|
||||
t.Parallel()
|
||||
size := formattedSize(0)
|
||||
if size != "0 B" {
|
||||
t.Errorf("Error formatting size: expected '%s' got '%s'", "0 B", size)
|
||||
|
@ -22,6 +23,7 @@ func TestSizeFormatting(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestMatchWithTag(t *testing.T) {
|
||||
t.Parallel()
|
||||
isMatch := matchesReference("gcr.io/pause:latest", "pause:latest")
|
||||
if !isMatch {
|
||||
t.Error("expected match, got not match")
|
||||
|
@ -34,6 +36,7 @@ func TestMatchWithTag(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNoMatchesReferenceWithTag(t *testing.T) {
|
||||
t.Parallel()
|
||||
isMatch := matchesReference("gcr.io/pause:latest", "redis:latest")
|
||||
if isMatch {
|
||||
t.Error("expected no match, got match")
|
||||
|
@ -46,6 +49,7 @@ func TestNoMatchesReferenceWithTag(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestMatchesReferenceWithoutTag(t *testing.T) {
|
||||
t.Parallel()
|
||||
isMatch := matchesReference("gcr.io/pause:latest", "pause")
|
||||
if !isMatch {
|
||||
t.Error("expected match, got not match")
|
||||
|
@ -58,6 +62,7 @@ func TestMatchesReferenceWithoutTag(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNoMatchesReferenceWithoutTag(t *testing.T) {
|
||||
t.Parallel()
|
||||
isMatch := matchesReference("gcr.io/pause:latest", "redis")
|
||||
if isMatch {
|
||||
t.Error("expected no match, got match")
|
||||
|
|
|
@ -6,10 +6,10 @@ import (
|
|||
"os"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"text/template"
|
||||
|
||||
"github.com/containers/buildah"
|
||||
"github.com/containers/buildah/define"
|
||||
"github.com/containers/common/pkg/formats"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
@ -28,7 +28,7 @@ func init() {
|
|||
Use: "info",
|
||||
Short: "Display Buildah system information",
|
||||
Long: infoDescription,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
return infoCmd(cmd, opts)
|
||||
},
|
||||
Args: cobra.NoArgs,
|
||||
|
@ -43,7 +43,7 @@ func init() {
|
|||
}
|
||||
|
||||
func infoCmd(c *cobra.Command, iopts infoResults) error {
|
||||
info := map[string]interface{}{}
|
||||
info := map[string]any{}
|
||||
|
||||
store, err := getStore(c)
|
||||
if err != nil {
|
||||
|
@ -71,9 +71,9 @@ func infoCmd(c *cobra.Command, iopts infoResults) error {
|
|||
} else if !matched {
|
||||
return fmt.Errorf("invalid format provided: %s", format)
|
||||
}
|
||||
t, err := template.New("format").Parse(format)
|
||||
t, err := formats.NewParse("info", format)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Template parsing error: %w", err)
|
||||
return fmt.Errorf("template parsing error: %w", err)
|
||||
}
|
||||
if err = t.Execute(os.Stdout, info); err != nil {
|
||||
return err
|
||||
|
@ -92,8 +92,8 @@ func infoCmd(c *cobra.Command, iopts infoResults) error {
|
|||
}
|
||||
|
||||
// top-level "debug" info
|
||||
func debugInfo() map[string]interface{} {
|
||||
info := map[string]interface{}{}
|
||||
func debugInfo() map[string]any {
|
||||
info := map[string]any{}
|
||||
info["compiler"] = runtime.Compiler
|
||||
info["go version"] = runtime.Version()
|
||||
info["buildah version"] = define.Version
|
||||
|
|
|
@ -6,11 +6,11 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"text/template"
|
||||
|
||||
"github.com/containers/buildah"
|
||||
buildahcli "github.com/containers/buildah/pkg/cli"
|
||||
"github.com/containers/buildah/pkg/parse"
|
||||
"github.com/containers/common/pkg/formats"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
@ -113,9 +113,9 @@ func inspectCmd(c *cobra.Command, args []string, iopts inspectResults) error {
|
|||
} else if !matched {
|
||||
return fmt.Errorf("invalid format provided: %s", format)
|
||||
}
|
||||
t, err := template.New("format").Parse(format)
|
||||
t, err := formats.NewParse("inspect", format)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Template parsing error: %w", err)
|
||||
return fmt.Errorf("template parsing error: %w", err)
|
||||
}
|
||||
if err = t.Execute(os.Stdout, out); err != nil {
|
||||
return err
|
||||
|
|
|
@ -45,13 +45,13 @@ type globalFlags struct {
|
|||
var rootCmd = &cobra.Command{
|
||||
Use: "buildah",
|
||||
Long: "A tool that facilitates building OCI images",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
return cmd.Help()
|
||||
},
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
|
||||
return before(cmd)
|
||||
},
|
||||
PersistentPostRunE: func(cmd *cobra.Command, args []string) error {
|
||||
PersistentPostRunE: func(cmd *cobra.Command, _ []string) error {
|
||||
return after(cmd)
|
||||
},
|
||||
SilenceUsage: true,
|
||||
|
@ -65,14 +65,11 @@ var (
|
|||
)
|
||||
|
||||
func init() {
|
||||
var (
|
||||
defaultStoreDriverOptions []string
|
||||
)
|
||||
var defaultStoreDriverOptions []string
|
||||
storageOptions, err := storage.DefaultStoreOptions()
|
||||
if err != nil {
|
||||
logrus.Errorf(err.Error())
|
||||
logrus.Error(err.Error())
|
||||
os.Exit(1)
|
||||
|
||||
}
|
||||
|
||||
if len(storageOptions.GraphDriverOptions) > 0 {
|
||||
|
@ -82,15 +79,15 @@ func init() {
|
|||
|
||||
defaultContainerConfig, err = config.Default()
|
||||
if err != nil {
|
||||
logrus.Errorf(err.Error())
|
||||
logrus.Error(err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
defaultContainerConfig.CheckCgroupsAndAdjustConfig()
|
||||
|
||||
cobra.OnInitialize(initConfig)
|
||||
// Disable the implicit `completion` command in cobra.
|
||||
rootCmd.CompletionOptions.DisableDefaultCmd = true
|
||||
//rootCmd.TraverseChildren = true
|
||||
// Hide the implicit `completion` command in cobra.
|
||||
rootCmd.CompletionOptions.HiddenDefaultCmd = true
|
||||
// rootCmd.TraverseChildren = true
|
||||
rootCmd.Version = fmt.Sprintf("%s (image-spec %s, runtime-spec %s)", define.Version, ispecs.Version, rspecs.Version)
|
||||
rootCmd.PersistentFlags().BoolVar(&globalFlagResults.Debug, "debug", false, "print debugging information")
|
||||
// TODO Need to allow for environment variable
|
||||
|
|
|
@ -17,6 +17,7 @@ import (
|
|||
"github.com/containers/common/libimage/manifests"
|
||||
"github.com/containers/common/pkg/auth"
|
||||
cp "github.com/containers/image/v5/copy"
|
||||
"github.com/containers/image/v5/image"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
"github.com/containers/image/v5/transports"
|
||||
|
@ -26,7 +27,6 @@ import (
|
|||
"github.com/hashicorp/go-multierror"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@ -39,7 +39,7 @@ type manifestCreateOpts struct {
|
|||
|
||||
type manifestAddOpts struct {
|
||||
authfile, certDir, creds, os, arch, variant, osVersion string
|
||||
features, osFeatures, annotations []string
|
||||
features, osFeatures, annotations, artifactAnnotations []string
|
||||
tlsVerify, insecure, all bool
|
||||
artifact, artifactExcludeTitles bool
|
||||
artifactType, artifactLayerType string
|
||||
|
@ -147,8 +147,9 @@ func init() {
|
|||
flags.StringVar(&manifestAddOpts.artifactConfigType, "artifact-config-type", imgspecv1.DescriptorEmptyJSON.MediaType, "artifact config media type")
|
||||
flags.StringVar(&manifestAddOpts.artifactConfigFile, "artifact-config", "", "artifact config file")
|
||||
flags.StringVar(&manifestAddOpts.artifactLayerType, "artifact-layer-type", "", "artifact layer media type")
|
||||
flags.BoolVar(&manifestAddOpts.artifactExcludeTitles, "artifact-exclude-titles", false, fmt.Sprintf(`refrain from setting %q annotations on "layers"`, v1.AnnotationTitle))
|
||||
flags.BoolVar(&manifestAddOpts.artifactExcludeTitles, "artifact-exclude-titles", false, fmt.Sprintf(`refrain from setting %q annotations on "layers"`, imgspecv1.AnnotationTitle))
|
||||
flags.StringVar(&manifestAddOpts.artifactSubject, "artifact-subject", "", "artifact subject reference")
|
||||
flags.StringSliceVar(&manifestAddOpts.artifactAnnotations, "artifact-annotation", nil, "artifact annotation")
|
||||
flags.StringVar(&manifestAddOpts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
|
||||
flags.StringVar(&manifestAddOpts.certDir, "cert-dir", "", "use certificates at the specified path to access the registry")
|
||||
flags.StringVar(&manifestAddOpts.creds, "creds", "", "use `[username[:password]]` for accessing the registry")
|
||||
|
@ -246,7 +247,7 @@ func init() {
|
|||
manifestPushCommand.SetUsageTemplate(UsageTemplate())
|
||||
flags = manifestPushCommand.Flags()
|
||||
flags.BoolVar(&manifestPushOpts.rm, "rm", false, "remove the manifest list if push succeeds")
|
||||
flags.BoolVar(&manifestPushOpts.all, "all", false, "also push the images in the list")
|
||||
flags.BoolVar(&manifestPushOpts.all, "all", true, "also push the images in the list")
|
||||
flags.StringVar(&manifestPushOpts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
|
||||
flags.StringVar(&manifestPushOpts.certDir, "cert-dir", "", "use certificates at the specified path to access the registry")
|
||||
flags.StringVar(&manifestPushOpts.creds, "creds", "", "use `[username[:password]]` for accessing the registry")
|
||||
|
@ -289,7 +290,7 @@ func init() {
|
|||
|
||||
func manifestExistsCmd(c *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
return errors.New("At least a name must be specified for the list")
|
||||
return errors.New("at least a name must be specified for the list")
|
||||
}
|
||||
name := args[0]
|
||||
|
||||
|
@ -320,7 +321,7 @@ func manifestExistsCmd(c *cobra.Command, args []string) error {
|
|||
|
||||
func manifestCreateCmd(c *cobra.Command, args []string, opts manifestCreateOpts) error {
|
||||
if len(args) == 0 {
|
||||
return errors.New("At least a name must be specified for the list")
|
||||
return errors.New("at least a name must be specified for the list")
|
||||
}
|
||||
listImageSpec := args[0]
|
||||
imageSpecs := args[1:]
|
||||
|
@ -431,26 +432,23 @@ func manifestAddCmd(c *cobra.Command, args []string, opts manifestAddOpts) error
|
|||
artifactSpec := []string{}
|
||||
switch len(args) {
|
||||
case 0, 1:
|
||||
return errors.New("At least a list image and an image or artifact to add must be specified")
|
||||
case 2:
|
||||
return errors.New("at least a list image and an image or artifact to add must be specified")
|
||||
default:
|
||||
listImageSpec = args[0]
|
||||
if listImageSpec == "" {
|
||||
return fmt.Errorf(`Invalid image name "%s"`, args[0])
|
||||
return fmt.Errorf("invalid image name %q", args[0])
|
||||
}
|
||||
if opts.artifact {
|
||||
artifactSpec = args[1:]
|
||||
} else {
|
||||
if len(args) > 2 {
|
||||
return errors.New("too many arguments: expected list and image add to list")
|
||||
}
|
||||
imageSpec = args[1]
|
||||
if imageSpec == "" {
|
||||
return fmt.Errorf(`Invalid image name "%s"`, args[1])
|
||||
return fmt.Errorf("invalid image name %q", args[1])
|
||||
}
|
||||
}
|
||||
default:
|
||||
if opts.artifact {
|
||||
artifactSpec = args[1:]
|
||||
} else {
|
||||
return errors.New("Too many arguments: expected list and image add to list")
|
||||
}
|
||||
}
|
||||
|
||||
store, err := getStore(c)
|
||||
|
@ -526,6 +524,13 @@ func manifestAddCmd(c *cobra.Command, args []string, opts manifestAddOpts) error
|
|||
options.ConfigDescriptor.Size = -1
|
||||
options.ConfigFile = opts.artifactConfigFile
|
||||
}
|
||||
if len(opts.artifactAnnotations) > 0 {
|
||||
options.Annotations = make(map[string]string, len(opts.artifactAnnotations))
|
||||
for _, annotation := range opts.artifactAnnotations {
|
||||
k, v, _ := strings.Cut(annotation, "=")
|
||||
options.Annotations[k] = v
|
||||
}
|
||||
}
|
||||
options.ExcludeTitles = opts.artifactExcludeTitles
|
||||
instanceDigest, err = list.AddArtifact(getContext(), systemContext, options, artifactSpec...)
|
||||
if err != nil {
|
||||
|
@ -534,7 +539,7 @@ func manifestAddCmd(c *cobra.Command, args []string, opts manifestAddOpts) error
|
|||
}
|
||||
} else {
|
||||
var changedArtifactFlags []string
|
||||
for _, artifactOption := range []string{"artifact-type", "artifact-config", "artifact-config-type", "artifact-layer-type", "artifact-subject", "artifact-exclude-titles"} {
|
||||
for _, artifactOption := range []string{"artifact-type", "artifact-config", "artifact-config-type", "artifact-layer-type", "artifact-subject", "artifact-exclude-titles", "artifact-annotation"} {
|
||||
if c.Flags().Changed(artifactOption) {
|
||||
changedArtifactFlags = append(changedArtifactFlags, "--"+artifactOption)
|
||||
}
|
||||
|
@ -623,24 +628,24 @@ func manifestAddCmd(c *cobra.Command, args []string, opts manifestAddOpts) error
|
|||
return err
|
||||
}
|
||||
|
||||
func manifestRemoveCmd(c *cobra.Command, args []string, opts manifestRemoveOpts) error {
|
||||
func manifestRemoveCmd(c *cobra.Command, args []string, _ manifestRemoveOpts) error {
|
||||
listImageSpec := ""
|
||||
var instanceDigest digest.Digest
|
||||
var instanceSpec string
|
||||
switch len(args) {
|
||||
case 0, 1:
|
||||
return errors.New("At least a list image and one or more instance digests must be specified")
|
||||
return errors.New("at least a list image and one or more instance digests must be specified")
|
||||
case 2:
|
||||
listImageSpec = args[0]
|
||||
if listImageSpec == "" {
|
||||
return fmt.Errorf(`Invalid image name "%s"`, args[0])
|
||||
return fmt.Errorf(`invalid image name "%s"`, args[0])
|
||||
}
|
||||
instanceSpec = args[1]
|
||||
if instanceSpec == "" {
|
||||
return fmt.Errorf(`Invalid instance "%s"`, args[1])
|
||||
return fmt.Errorf(`invalid instance "%s"`, args[1])
|
||||
}
|
||||
default:
|
||||
return errors.New("At least two arguments are necessary: list and digest of instance to remove from list")
|
||||
return errors.New("at least two arguments are necessary: list and digest of instance to remove from list")
|
||||
}
|
||||
|
||||
store, err := getStore(c)
|
||||
|
@ -671,23 +676,23 @@ func manifestRemoveCmd(c *cobra.Command, args []string, opts manifestRemoveOpts)
|
|||
if err != nil {
|
||||
if instanceRef, err = alltransports.ParseImageName(util.DefaultTransport + instanceSpec); err != nil {
|
||||
if instanceRef, _, err = util.FindImage(store, "", systemContext, instanceSpec); err != nil {
|
||||
return fmt.Errorf(`Invalid instance "%s": %v`, instanceSpec, err)
|
||||
return fmt.Errorf(`invalid instance "%s": %v`, instanceSpec, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
ctx := getContext()
|
||||
instanceImg, err := instanceRef.NewImageSource(ctx, systemContext)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Reading image instance: %w", err)
|
||||
return fmt.Errorf("reading image instance: %w", err)
|
||||
}
|
||||
defer instanceImg.Close()
|
||||
manifestBytes, _, err := instanceImg.GetManifest(ctx, nil)
|
||||
manifestBytes, _, err := image.UnparsedInstance(instanceImg, nil).Manifest(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Reading image instance manifest: %w", err)
|
||||
return fmt.Errorf("reading image instance manifest: %w", err)
|
||||
}
|
||||
d, err = manifest.Digest(manifestBytes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Digesting image instance manifest: %w", err)
|
||||
return fmt.Errorf("digesting image instance manifest: %w", err)
|
||||
}
|
||||
}
|
||||
instanceDigest = d
|
||||
|
@ -746,29 +751,29 @@ func manifestAnnotateCmd(c *cobra.Command, args []string, opts manifestAnnotateO
|
|||
}
|
||||
switch len(args) {
|
||||
case 0:
|
||||
return errors.New("At least a list image must be specified")
|
||||
return errors.New("at least a list image must be specified")
|
||||
case 1:
|
||||
listImageSpec = args[0]
|
||||
if listImageSpec == "" {
|
||||
return fmt.Errorf(`Invalid image name "%s"`, args[0])
|
||||
return fmt.Errorf(`invalid image name "%s"`, args[0])
|
||||
}
|
||||
if !opts.index {
|
||||
return errors.New(`Expected an instance digest, image name, or artifact name`)
|
||||
return errors.New(`expected an instance digest, image name, or artifact name`)
|
||||
}
|
||||
case 2:
|
||||
listImageSpec = args[0]
|
||||
if listImageSpec == "" {
|
||||
return fmt.Errorf(`Invalid image name "%s"`, args[0])
|
||||
return fmt.Errorf(`invalid image name "%s"`, args[0])
|
||||
}
|
||||
if opts.index {
|
||||
return fmt.Errorf(`Did not expect image or artifact name "%s" when modifying the entire index`, args[1])
|
||||
return fmt.Errorf(`did not expect image or artifact name "%s" when modifying the entire index`, args[1])
|
||||
}
|
||||
instanceSpec = args[1]
|
||||
if instanceSpec == "" {
|
||||
return fmt.Errorf(`Invalid instance digest, image name, or artifact name "%s"`, instanceSpec)
|
||||
return fmt.Errorf(`invalid instance digest, image name, or artifact name "%s"`, instanceSpec)
|
||||
}
|
||||
default:
|
||||
return errors.New("Expected either a list name and --index or a list name and an image digest or image name or artifact name")
|
||||
return errors.New("expected either a list name and --index or a list name and an image digest or image name or artifact name")
|
||||
}
|
||||
|
||||
store, err := getStore(c)
|
||||
|
@ -811,23 +816,23 @@ func manifestAnnotateCmd(c *cobra.Command, args []string, opts manifestAnnotateO
|
|||
if instanceRef, err = alltransports.ParseImageName(util.DefaultTransport + instanceSpec); err != nil {
|
||||
// check if the local image exists
|
||||
if instanceRef, _, err = util.FindImage(store, "", systemContext, instanceSpec); err != nil {
|
||||
return fmt.Errorf(`Invalid instance "%s": %v`, instanceSpec, err)
|
||||
return fmt.Errorf(`invalid instance "%s": %v`, instanceSpec, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
ctx := getContext()
|
||||
instanceImg, err := instanceRef.NewImageSource(ctx, systemContext)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Reading image instance: %w", err)
|
||||
return fmt.Errorf("reading image instance: %w", err)
|
||||
}
|
||||
defer instanceImg.Close()
|
||||
manifestBytes, _, err := instanceImg.GetManifest(ctx, nil)
|
||||
manifestBytes, _, err := image.UnparsedInstance(instanceImg, nil).Manifest(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Reading image instance manifest: %w", err)
|
||||
return fmt.Errorf("reading image instance manifest: %w", err)
|
||||
}
|
||||
d, err = manifest.Digest(manifestBytes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Digesting image instance manifest: %w", err)
|
||||
return fmt.Errorf("digesting image instance manifest: %w", err)
|
||||
}
|
||||
}
|
||||
instance = d
|
||||
|
@ -917,7 +922,7 @@ func manifestAnnotateCmd(c *cobra.Command, args []string, opts manifestAnnotateO
|
|||
}
|
||||
defer src.Close()
|
||||
|
||||
manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
|
||||
manifestBytes, manifestType, err := image.UnparsedInstance(src, nil).Manifest(ctx)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error while trying to read artifact subject manifest: %v", err)
|
||||
return err
|
||||
|
@ -958,14 +963,14 @@ func manifestInspectCmd(c *cobra.Command, args []string, opts manifestInspectOpt
|
|||
imageSpec := ""
|
||||
switch len(args) {
|
||||
case 0:
|
||||
return errors.New("At least a source list ID must be specified")
|
||||
return errors.New("at least a source list ID must be specified")
|
||||
case 1:
|
||||
imageSpec = args[0]
|
||||
if imageSpec == "" {
|
||||
return fmt.Errorf(`Invalid image name "%s"`, imageSpec)
|
||||
return fmt.Errorf(`invalid image name "%s"`, imageSpec)
|
||||
}
|
||||
default:
|
||||
return errors.New("Only one argument is necessary for inspect: an image name")
|
||||
return errors.New("only one argument is necessary for inspect: an image name")
|
||||
}
|
||||
|
||||
store, err := getStore(c)
|
||||
|
@ -1058,7 +1063,7 @@ func manifestInspect(ctx context.Context, store storage.Store, systemContext *ty
|
|||
}
|
||||
defer src.Close()
|
||||
|
||||
manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
|
||||
manifestBytes, manifestType, err := image.UnparsedInstance(src, nil).Manifest(ctx)
|
||||
if err != nil {
|
||||
appendErr(fmt.Errorf("loading manifest %q: %w", transports.ImageName(ref), err))
|
||||
continue
|
||||
|
@ -1087,7 +1092,7 @@ func manifestPushCmd(c *cobra.Command, args []string, opts pushOptions) error {
|
|||
destSpec := ""
|
||||
switch len(args) {
|
||||
case 0:
|
||||
return errors.New("At least a source list ID must be specified")
|
||||
return errors.New("at least a source list ID must be specified")
|
||||
case 1:
|
||||
listImageSpec = args[0]
|
||||
destSpec = "docker://" + listImageSpec
|
||||
|
@ -1095,7 +1100,7 @@ func manifestPushCmd(c *cobra.Command, args []string, opts pushOptions) error {
|
|||
listImageSpec = args[0]
|
||||
destSpec = args[1]
|
||||
default:
|
||||
return errors.New("Only two arguments are necessary to push: source and destination")
|
||||
return errors.New("only two arguments are necessary to push: source and destination")
|
||||
}
|
||||
if listImageSpec == "" {
|
||||
return fmt.Errorf(`invalid image name "%s"`, listImageSpec)
|
||||
|
@ -1207,7 +1212,7 @@ func manifestPush(systemContext *types.SystemContext, store storage.Store, listI
|
|||
}
|
||||
|
||||
if opts.digestfile != "" {
|
||||
if err = os.WriteFile(opts.digestfile, []byte(digest.String()), 0644); err != nil {
|
||||
if err = os.WriteFile(opts.digestfile, []byte(digest.String()), 0o644); err != nil {
|
||||
return util.GetFailureCause(err, fmt.Errorf("failed to write digest to file %q: %w", opts.digestfile, err))
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1,34 +0,0 @@
package main

import (
"fmt"

"github.com/spf13/cobra"
"golang.org/x/crypto/bcrypt"
)

var (
passwdDescription = `Generate a password hash using golang.org/x/crypto/bcrypt.`
passwdCommand = &cobra.Command{
Use: "passwd",
Short: "Generate a password hash",
Long: passwdDescription,
RunE: passwdCmd,
Example: `buildah passwd testpassword`,
Args: cobra.ExactArgs(1),
Hidden: true,
}
)

func passwdCmd(c *cobra.Command, args []string) error {
passwd, err := bcrypt.GenerateFromPassword([]byte(args[0]), bcrypt.DefaultCost)
if err != nil {
return err
}
fmt.Println(string(passwd))
return nil
}

func init() {
rootCmd.AddCommand(passwdCommand)
}

@@ -31,7 +31,7 @@ Cleanup intermediate images as well as build and mount cache.`
return pruneCmd(cmd, args, opts)
},
Example: `buildah prune
buildah prune`,
buildah prune --force`,
}
pruneCommand.SetUsageTemplate(UsageTemplate())

@@ -1,13 +1,12 @@
package main

import (
"errors"
"fmt"
"os"
"strings"
"time"

"errors"

"github.com/containers/buildah"
"github.com/containers/buildah/define"
"github.com/containers/buildah/pkg/cli"

@ -136,7 +135,7 @@ func pushCmd(c *cobra.Command, args []string, iopts pushOptions) error {
|
|||
return fmt.Errorf(`invalid image name "%s"`, args[0])
|
||||
}
|
||||
default:
|
||||
return errors.New("Only two arguments are necessary to push: source and destination")
|
||||
return errors.New("only two arguments are necessary to push: source and destination")
|
||||
}
|
||||
|
||||
compress := define.Gzip
|
||||
|
@ -255,7 +254,7 @@ func pushCmd(c *cobra.Command, args []string, iopts pushOptions) error {
|
|||
logrus.Debugf("Successfully pushed %s with digest %s", transports.ImageName(dest), digest.String())
|
||||
|
||||
if iopts.digestfile != "" {
|
||||
if err = os.WriteFile(iopts.digestfile, []byte(digest.String()), 0644); err != nil {
|
||||
if err = os.WriteFile(iopts.digestfile, []byte(digest.String()), 0o644); err != nil {
|
||||
return util.GetFailureCause(err, fmt.Errorf("failed to write digest to file %q: %w", iopts.digestfile, err))
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -47,7 +47,7 @@ func renameCmd(c *cobra.Command, args []string) error {
}

if build, err := openBuilder(getContext(), store, newName); err == nil {
return fmt.Errorf("The container name %q is already in use by container %q", newName, build.ContainerID)
return fmt.Errorf("the container name %q is already in use by container %q", newName, build.ContainerID)
}

err = store.SetNames(builder.ContainerID, []string{newName})

@@ -86,7 +86,6 @@ func rmCmd(c *cobra.Command, args []string, iopts rmResults) error {
}
fmt.Printf("%s\n", id)
}

}
return lastError
}

@@ -7,32 +7,37 @@ import (
"strings"

"github.com/containers/buildah"
"github.com/containers/buildah/internal/tmpdir"
"github.com/containers/buildah/internal/volumes"
buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/containers/storage/pkg/mount"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

type runInputOptions struct {
addHistory bool
capAdd []string
capDrop []string
contextDir string
env []string
hostname string
isolation string
mounts []string
runtime string
runtimeFlag []string
noHostname bool
noHosts bool
noPivot bool
terminal bool
volumes []string
workingDir string
addHistory bool
capAdd []string
capDrop []string
cdiConfigDir string
contextDir string
devices []string
env []string
hostname string
isolation string
mounts []string
runtime string
runtimeFlag []string
noHostname bool
noHosts bool
noPivot bool
terminal bool
volumes []string
workingDir string
*buildahcli.NameSpaceResults
}

@ -51,7 +56,6 @@ func init() {
|
|||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
opts.NameSpaceResults = &namespaceResults
|
||||
return runCmd(cmd, args, opts)
|
||||
|
||||
},
|
||||
Example: `buildah run containerID -- ps -auxw
|
||||
buildah run --terminal containerID /bin/bash
|
||||
|
@ -64,7 +68,10 @@ func init() {
|
|||
flags.BoolVar(&opts.addHistory, "add-history", false, "add an entry for this operation to the image's history. Use BUILDAH_HISTORY environment variable to override. (default false)")
|
||||
flags.StringSliceVar(&opts.capAdd, "cap-add", []string{}, "add the specified capability (default [])")
|
||||
flags.StringSliceVar(&opts.capDrop, "cap-drop", []string{}, "drop the specified capability (default [])")
|
||||
flags.StringVar(&opts.cdiConfigDir, "cdi-config-dir", "", "`directory` of CDI configuration files")
|
||||
_ = flags.MarkHidden("cdi-config-dir")
|
||||
flags.StringVar(&opts.contextDir, "contextdir", "", "context directory path")
|
||||
flags.StringArrayVar(&opts.devices, "device", []string{}, "additional devices to provide")
|
||||
flags.StringArrayVarP(&opts.env, "env", "e", []string{}, "add environment variable to be set temporarily when running command (default [])")
|
||||
flags.StringVar(&opts.hostname, "hostname", "", "set the hostname inside of the container")
|
||||
flags.StringVar(&opts.isolation, "isolation", "", "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
|
||||
|
@ -103,6 +110,16 @@ func runCmd(c *cobra.Command, args []string, iopts runInputOptions) error {
|
|||
return errors.New("command must be specified")
|
||||
}
|
||||
|
||||
tmpDir, err := os.MkdirTemp(tmpdir.GetTempDir(), "buildahvolume")
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating temporary directory: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := os.Remove(tmpDir); err != nil {
|
||||
logrus.Debugf("removing should-be-empty temporary directory %q: %v", tmpDir, err)
|
||||
}
|
||||
}()
|
||||
|
||||
store, err := getStore(c)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -156,6 +173,8 @@ func runCmd(c *cobra.Command, args []string, iopts runInputOptions) error {
|
|||
AddCapabilities: iopts.capAdd,
|
||||
DropCapabilities: iopts.capDrop,
|
||||
WorkingDir: iopts.workingDir,
|
||||
DeviceSpecs: iopts.devices,
|
||||
CDIConfigDir: iopts.cdiConfigDir,
|
||||
}
|
||||
|
||||
if c.Flag("terminal").Changed {
|
||||
|
@ -172,14 +191,30 @@ func runCmd(c *cobra.Command, args []string, iopts runInputOptions) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("building system context: %w", err)
|
||||
}
|
||||
mounts, mountedImages, targetLocks, err := volumes.GetVolumes(systemContext, store, iopts.volumes, iopts.mounts, iopts.contextDir, iopts.workingDir)
|
||||
mounts, mountedImages, intermediateMounts, _, targetLocks, err := volumes.GetVolumes(systemContext, store, builder.MountLabel, iopts.volumes, iopts.mounts, iopts.contextDir, builder.IDMappingOptions, iopts.workingDir, tmpDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer volumes.UnlockLockArray(targetLocks)
|
||||
defer func() {
|
||||
if err := overlay.CleanupContent(tmpDir); err != nil {
|
||||
logrus.Debugf("unmounting overlay mounts under %q: %v", tmpDir, err)
|
||||
}
|
||||
for _, intermediateMount := range intermediateMounts {
|
||||
if err := mount.Unmount(intermediateMount); err != nil {
|
||||
logrus.Debugf("unmounting mount %q: %v", intermediateMount, err)
|
||||
}
|
||||
if err := os.Remove(intermediateMount); err != nil {
|
||||
logrus.Debugf("removing should-be-empty mount directory %q: %v", intermediateMount, err)
|
||||
}
|
||||
}
|
||||
for _, mountedImage := range mountedImages {
|
||||
if _, err := store.UnmountImage(mountedImage, false); err != nil {
|
||||
logrus.Debugf("unmounting image %q: %v", mountedImage, err)
|
||||
}
|
||||
}
|
||||
volumes.UnlockLockArray(targetLocks)
|
||||
}()
|
||||
options.Mounts = mounts
|
||||
// Run() will automatically clean them up.
|
||||
options.ExternalImageMounts = mountedImages
|
||||
options.CgroupManager = globalFlagResults.CgroupManager
|
||||
|
||||
runerr := builder.Run(args, options)
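The rewritten cleanup above tears down, in order, any overlay content staged under the temporary directory, each intermediate mount created while resolving --volume/--mount arguments, and any images that were mounted to provide mount sources, logging failures only at debug level since the command has already run. A minimal, self-contained sketch of the intermediate-mount part of that pattern; the function name and the caller-supplied slice are illustrative, not buildah API:

package runexample

import (
	"os"

	"github.com/containers/storage/pkg/mount"
	"github.com/sirupsen/logrus"
)

// cleanupIntermediateMounts unmounts each intermediate mount point and then
// removes its (should-be-empty) directory, logging failures at debug level
// instead of returning them, since this is best-effort teardown.
func cleanupIntermediateMounts(intermediateMounts []string) {
	for _, intermediateMount := range intermediateMounts {
		if err := mount.Unmount(intermediateMount); err != nil {
			logrus.Debugf("unmounting mount %q: %v", intermediateMount, err)
		}
		if err := os.Remove(intermediateMount); err != nil {
			logrus.Debugf("removing should-be-empty mount directory %q: %v", intermediateMount, err)
		}
	}
}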
|
||||
|
|
|
@ -17,7 +17,7 @@ var (
|
|||
Use: "source",
|
||||
Short: "Manage source containers",
|
||||
Long: sourceDescription,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(_ *cobra.Command, _ []string) error {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
@ -34,7 +34,7 @@ var (
|
|||
Short: "Create a source image",
|
||||
Long: sourceCreateDescription,
|
||||
Example: "buildah source create /tmp/fedora:latest-source",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(_ *cobra.Command, args []string) error {
|
||||
return source.Create(context.Background(), args[0], sourceCreateOptions)
|
||||
},
|
||||
}
|
||||
|
@ -51,7 +51,7 @@ var (
|
|||
Short: "Add a source artifact to a source image",
|
||||
Long: sourceAddDescription,
|
||||
Example: "buildah source add /tmp/fedora sources.tar.gz",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(_ *cobra.Command, args []string) error {
|
||||
return source.Add(context.Background(), args[0], args[1], sourceAddOptions)
|
||||
},
|
||||
}
|
||||
|
@ -68,7 +68,7 @@ var (
|
|||
Short: "Pull a source image from a registry to a specified path",
|
||||
Long: sourcePullDescription,
|
||||
Example: "buildah source pull quay.io/sourceimage/example:latest /tmp/sourceimage:latest",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(_ *cobra.Command, args []string) error {
|
||||
return source.Pull(context.Background(), args[0], args[1], sourcePullOptions)
|
||||
},
|
||||
}
|
||||
|
@ -85,7 +85,7 @@ var (
|
|||
Short: "Push a source image from a specified path to a registry",
|
||||
Long: sourcePushDescription,
|
||||
Example: "buildah source push /tmp/sourceimage:latest quay.io/sourceimage/example:latest",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(_ *cobra.Command, args []string) error {
|
||||
return source.Push(context.Background(), args[0], args[1], sourcePushOptions)
|
||||
},
|
||||
}
|
||||
|
@ -122,6 +122,7 @@ func init() {
|
|||
sourceCommand.AddCommand(sourcePushCommand)
|
||||
sourcePushFlags := sourcePushCommand.Flags()
|
||||
sourcePushFlags.StringVar(&sourcePushOptions.Credentials, "creds", "", "use `[username[:password]]` for accessing the registry")
|
||||
sourcePushFlags.StringVar(&sourcePushOptions.DigestFile, "digestfile", "", "after copying the artifact, write the digest of the resulting image to the file")
|
||||
sourcePushFlags.BoolVar(&sourcePushOptions.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
|
||||
sourcePushFlags.BoolVarP(&sourcePushOptions.Quiet, "quiet", "q", false, "don't output push progress information")
|
||||
}
|
||||
|
|
|
@ -30,10 +30,8 @@ func init() {
|
|||
}
|
||||
|
||||
func umountCmd(c *cobra.Command, args []string) error {
|
||||
umountAll := false
|
||||
if c.Flag("all").Changed {
|
||||
umountAll = true
|
||||
}
|
||||
umountAll := c.Flag("all").Changed
|
||||
|
||||
umountContainerErrStr := "error unmounting container"
|
||||
if len(args) == 0 && !umountAll {
|
||||
return errors.New("at least one container ID must be specified")
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package main
|
||||
|
||||
|
@ -12,9 +11,9 @@ import (
|
|||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
"github.com/moby/sys/capability"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -139,7 +138,7 @@ func debugCapabilities() {
|
|||
logrus.Errorf("error loading our current capabilities: %v", err)
|
||||
return
|
||||
}
|
||||
knownCaps := capability.List()
|
||||
knownCaps := capability.ListKnown()
|
||||
effective := make([]string, 0, len(knownCaps))
|
||||
for i := range knownCaps {
|
||||
have := pid.Get(capability.EFFECTIVE, knownCaps[i])
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package main
|
||||
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
|
|||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/platforms"
|
||||
cniversion "github.com/containernetworking/cni/pkg/version"
|
||||
"github.com/containers/buildah/define"
|
||||
iversion "github.com/containers/image/v5/version"
|
||||
|
@ -16,7 +16,7 @@ import (
|
|||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
//Overwritten at build time
|
||||
// Overwritten at build time
|
||||
var (
|
||||
GitCommit string
|
||||
buildInfo string
|
||||
|
@ -44,12 +44,12 @@ type versionOptions struct {
|
|||
func init() {
|
||||
var opts versionOptions
|
||||
|
||||
//cli command to print out the version info of buildah
|
||||
// cli command to print out the version info of buildah
|
||||
versionCommand := &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Display the Buildah version information",
|
||||
Long: "Displays Buildah version information.",
|
||||
RunE: func(c *cobra.Command, args []string) error {
|
||||
RunE: func(_ *cobra.Command, _ []string) error {
|
||||
return versionCmd(opts)
|
||||
},
|
||||
Args: cobra.NoArgs,
|
||||
|
@ -67,7 +67,7 @@ func versionCmd(opts versionOptions) error {
|
|||
var err error
|
||||
buildTime := int64(0)
|
||||
if buildInfo != "" {
|
||||
//converting unix time from string to int64
|
||||
// converting unix time from string to int64
|
||||
buildTime, err = strconv.ParseInt(buildInfo, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -106,7 +106,7 @@ func versionCmd(opts versionOptions) error {
|
|||
fmt.Println("image Version: ", version.ImageVersion)
|
||||
fmt.Println("Git Commit: ", version.GitCommit)
|
||||
|
||||
//Prints out the build time in readable format
|
||||
// Prints out the build time in readable format
|
||||
fmt.Println("Built: ", version.Built)
|
||||
fmt.Println("OS/Arch: ", version.OsArch)
|
||||
fmt.Println("BuildPlatform: ", version.BuildPlatform)
|
||||
|
|
128
commit.go
|
@ -5,6 +5,7 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -24,13 +25,15 @@ import (
|
|||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
// BuilderIdentityAnnotation is the name of the annotation key containing
|
||||
// the name and version of the producer of the image stored as an
|
||||
// annotation on commit.
|
||||
// BuilderIdentityAnnotation is the name of the label which will be set
|
||||
// to contain the name and version of the producer of the image at
|
||||
// commit-time. (N.B. yes, the constant's name includes "Annotation",
|
||||
// but it's added as a label.)
|
||||
BuilderIdentityAnnotation = "io.buildah.version"
|
||||
)
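As the updated comment stresses, the io.buildah.version value ends up in the image's configuration as a label rather than as an OCI annotation, despite the constant's name. A small sketch of how a consumer might read it back from an already-fetched OCI config; the helper name is illustrative:

package commitexample

import (
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// builderIdentity looks up the "io.buildah.version" value in an image's OCI
// configuration; because it is stored as a label, not a manifest annotation,
// it lives under Config.Labels.
func builderIdentity(config v1.Image) (string, bool) {
	version, ok := config.Config.Labels["io.buildah.version"]
	return version, ok
}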
|
||||
|
||||
|
@ -56,9 +59,20 @@ type CommitOptions struct {
|
|||
// ReportWriter is an io.Writer which will be used to log the writing
|
||||
// of the new image.
|
||||
ReportWriter io.Writer
|
||||
// HistoryTimestamp is the timestamp used when creating new items in the
|
||||
// image's history. If unset, the current time will be used.
|
||||
// HistoryTimestamp specifies a timestamp to use for the image's
|
||||
// created-on date, the corresponding field in new history entries, and
|
||||
// the timestamps to set on contents in new layer diffs. If left
|
||||
// unset, the current time is used for the configuration and manifest,
|
||||
// and timestamps of layer contents are used as-is.
|
||||
HistoryTimestamp *time.Time
|
||||
// SourceDateEpoch specifies a timestamp to use for the image's
|
||||
// created-on date and the corresponding field in new history entries.
|
||||
// If left unset, the current time is used for the configuration and
|
||||
// manifest.
|
||||
SourceDateEpoch *time.Time
|
||||
// RewriteTimestamp, if set, forces timestamps in generated layers to
|
||||
// not be later than the SourceDateEpoch, if it is set.
|
||||
RewriteTimestamp bool
|
||||
// github.com/containers/image/types SystemContext to hold credentials
|
||||
// and other authentication/authorization information.
|
||||
SystemContext *types.SystemContext
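The new SourceDateEpoch and RewriteTimestamp fields separate "pin the image's creation date and history" from "clamp timestamps inside the layer diffs", which HistoryTimestamp used to conflate. A hedged sketch of how a caller of this version of the library might fill them in from the conventional SOURCE_DATE_EPOCH environment variable; the helper functions are illustrative, not part of buildah:

package commitexample

import (
	"os"
	"strconv"
	"time"

	"github.com/containers/buildah"
)

// readSourceDateEpoch turns the conventional SOURCE_DATE_EPOCH environment
// variable (seconds since the Unix epoch) into a *time.Time, or nil if it is
// unset or malformed.
func readSourceDateEpoch() *time.Time {
	value := os.Getenv("SOURCE_DATE_EPOCH")
	if value == "" {
		return nil
	}
	seconds, err := strconv.ParseInt(value, 10, 64)
	if err != nil {
		return nil
	}
	t := time.Unix(seconds, 0).UTC()
	return &t
}

// commitOptionsForReproducibleBuild pins the image's created-on date and new
// history entries to SOURCE_DATE_EPOCH, and asks for layer timestamps to be
// rewritten so that none of them is later than that epoch.
func commitOptionsForReproducibleBuild() buildah.CommitOptions {
	return buildah.CommitOptions{
		SourceDateEpoch:  readSourceDateEpoch(),
		RewriteTimestamp: true,
	}
}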
|
||||
|
@ -80,9 +94,18 @@ type CommitOptions struct {
|
|||
// EmptyLayer tells the builder to omit the diff for the working
|
||||
// container.
|
||||
EmptyLayer bool
|
||||
// OmitLayerHistoryEntry tells the builder to omit the diff for the
|
||||
// working container and to not add an entry in the commit history. By
|
||||
// default, the rest of the image's history is preserved, subject to
|
||||
// the OmitHistory setting. N.B.: setting this flag, without any
|
||||
// PrependedEmptyLayers, AppendedEmptyLayers, PrependedLinkedLayers, or
|
||||
// AppendedLinkedLayers will more or less produce a copy of the base
|
||||
// image.
|
||||
OmitLayerHistoryEntry bool
|
||||
// OmitTimestamp forces epoch 0 as created timestamp to allow for
|
||||
// deterministic, content-addressable builds.
|
||||
// Deprecated use HistoryTimestamp instead.
|
||||
// Deprecated: use HistoryTimestamp or SourceDateEpoch (possibly with
|
||||
// RewriteTimestamp) instead.
|
||||
OmitTimestamp bool
|
||||
// SignBy is the fingerprint of a GPG key to use for signing the image.
|
||||
SignBy string
|
||||
|
@ -108,7 +131,8 @@ type CommitOptions struct {
|
|||
// contents of a rootfs.
|
||||
ConfidentialWorkloadOptions ConfidentialWorkloadOptions
|
||||
// UnsetEnvs is a list of environments to not add to final image.
|
||||
// Deprecated: use UnsetEnv() before committing instead.
|
||||
// Deprecated: use UnsetEnv() before committing, or set OverrideChanges
|
||||
// instead.
|
||||
UnsetEnvs []string
|
||||
// OverrideConfig is an optional Schema2Config which can override parts
|
||||
// of the working container's configuration for the image that is being
|
||||
|
@ -119,25 +143,59 @@ type CommitOptions struct {
|
|||
// OverrideConfig is applied.
|
||||
OverrideChanges []string
|
||||
// ExtraImageContent is a map which describes additional content to add
|
||||
// to the committed image. The map's keys are filesystem paths in the
|
||||
// image and the corresponding values are the paths of files whose
|
||||
// contents will be used in their place. The contents will be owned by
|
||||
// 0:0 and have mode 0644. Currently only accepts regular files.
|
||||
// to the new layer in the committed image. The map's keys are
|
||||
// filesystem paths in the image and the corresponding values are the
|
||||
// paths of files whose contents will be used in their place. The
|
||||
// contents will be owned by 0:0 and have mode 0o644. Currently only
|
||||
// accepts regular files.
|
||||
ExtraImageContent map[string]string
|
||||
// SBOMScanOptions encapsulates options which control whether or not we
|
||||
// run scanners on the rootfs that we're about to commit, and how.
|
||||
SBOMScanOptions []SBOMScanOptions
|
||||
// CompatSetParent causes the "parent" field to be set when committing
|
||||
// the image in Docker format. Newer BuildKit-based builds don't set
|
||||
// this field.
|
||||
CompatSetParent types.OptionalBool
|
||||
// CompatLayerOmissions causes the "/dev", "/proc", and "/sys"
|
||||
// directories to be omitted from the layer diff and related output, as
|
||||
// the classic builder did. Newer BuildKit-based builds include them
|
||||
// in the built image by default.
|
||||
CompatLayerOmissions types.OptionalBool
|
||||
// PrependedLinkedLayers and AppendedLinkedLayers are combinations of
|
||||
// history entries and locations of either directory trees (if
|
||||
// directories, per os.Stat()) or uncompressed layer blobs which should
|
||||
// be added to the image at commit-time. The order of these relative
|
||||
// to PrependedEmptyLayers and AppendedEmptyLayers, and relative to the
|
||||
// corresponding members in the Builder object, in the committed image
|
||||
// is not guaranteed.
|
||||
PrependedLinkedLayers, AppendedLinkedLayers []LinkedLayer
|
||||
// UnsetAnnotations is a list of annotations (names only) to withhold
|
||||
// from the image.
|
||||
UnsetAnnotations []string
|
||||
// Annotations is a list of annotations (in the form "key=value") to
|
||||
// add to the image.
|
||||
Annotations []string
|
||||
// CreatedAnnotation controls whether or not an "org.opencontainers.image.created"
|
||||
// annotation is present in the output image.
|
||||
CreatedAnnotation types.OptionalBool
|
||||
}
|
||||
|
||||
var (
|
||||
// storageAllowedPolicyScopes overrides the policy for local storage
|
||||
// to ensure that we can read images from it.
|
||||
storageAllowedPolicyScopes = signature.PolicyTransportScopes{
|
||||
"": []signature.PolicyRequirement{
|
||||
signature.NewPRInsecureAcceptAnything(),
|
||||
},
|
||||
}
|
||||
)
|
||||
// LinkedLayer combines a history entry with the location of either a directory
|
||||
// tree (if it's a directory, per os.Stat()) or an uncompressed layer blob
|
||||
// which should be added to the image at commit-time. The BlobPath and
|
||||
// History.EmptyLayer fields should be considered mutually-exclusive.
|
||||
type LinkedLayer struct {
|
||||
History v1.History // history entry to add
|
||||
BlobPath string // corresponding uncompressed blob file (layer as a tar archive), or directory tree to archive
|
||||
}
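LinkedLayer and ExtraImageContent give callers two ways to graft externally produced content into a commit: whole extra layers (uncompressed tar blobs or directory trees) with their own history entries, or individual files dropped into the layer created from the working container. A hedged sketch of assembling such options; the paths and the helper name are placeholders:

package commitexample

import (
	"time"

	"github.com/containers/buildah"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// linkedLayerCommitOptions appends one extra layer from an uncompressed tar
// blob on disk, records a history entry for it, and drops a single generated
// file into the layer created from the working container.
func linkedLayerCommitOptions(blobPath, sbomPath string) buildah.CommitOptions {
	created := time.Now().UTC()
	return buildah.CommitOptions{
		AppendedLinkedLayers: []buildah.LinkedLayer{{
			History: v1.History{
				Created:   &created,
				CreatedBy: "ADD prebuilt-content.tar /",
			},
			// an uncompressed layer tarball, or a directory tree to archive
			BlobPath: blobPath,
		}},
		// per the doc comment above, the file will be owned by 0:0 with mode 0o644
		ExtraImageContent: map[string]string{
			"/usr/share/sbom/sbom.json": sbomPath,
		},
	}
}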
|
||||
|
||||
// storageAllowedPolicyScopes overrides the policy for local storage
|
||||
// to ensure that we can read images from it.
|
||||
var storageAllowedPolicyScopes = signature.PolicyTransportScopes{
|
||||
"": []signature.PolicyRequirement{
|
||||
signature.NewPRInsecureAcceptAnything(),
|
||||
},
|
||||
}
|
||||
|
||||
// checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment
|
||||
// variable, if it's set. The contents are expected to be a JSON-encoded
|
||||
|
@ -252,8 +310,9 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe
|
|||
// if commit was successful and the image destination was local.
|
||||
func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, reference.Canonical, digest.Digest, error) {
|
||||
var (
|
||||
imgID string
|
||||
src types.ImageReference
|
||||
imgID string
|
||||
src types.ImageReference
|
||||
destinationTimestamp *time.Time
|
||||
)
|
||||
|
||||
// If we weren't given a name, build a destination reference using a
|
||||
|
@ -266,11 +325,15 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
// work twice.
|
||||
if options.OmitTimestamp {
|
||||
if options.HistoryTimestamp != nil {
|
||||
return imgID, nil, "", fmt.Errorf("OmitTimestamp ahd HistoryTimestamp can not be used together")
|
||||
return imgID, nil, "", fmt.Errorf("OmitTimestamp and HistoryTimestamp can not be used together")
|
||||
}
|
||||
timestamp := time.Unix(0, 0).UTC()
|
||||
options.HistoryTimestamp = ×tamp
|
||||
}
|
||||
destinationTimestamp = options.HistoryTimestamp
|
||||
if options.SourceDateEpoch != nil {
|
||||
destinationTimestamp = options.SourceDateEpoch
|
||||
}
|
||||
nameToRemove := ""
|
||||
if dest == nil {
|
||||
nameToRemove = stringid.GenerateRandomID() + "-tmp"
|
||||
|
@ -325,7 +388,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
logrus.Debugf("committing image with reference %q is allowed by policy", transports.ImageName(dest))
|
||||
|
||||
// If we need to scan the rootfs, do it now.
|
||||
options.ExtraImageContent = copyStringStringMap(options.ExtraImageContent)
|
||||
options.ExtraImageContent = maps.Clone(options.ExtraImageContent)
|
||||
var extraImageContent, extraLocalContent map[string]string
|
||||
if len(options.SBOMScanOptions) != 0 {
|
||||
var scansDirectory string
|
||||
|
@ -339,9 +402,15 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
}
|
||||
}()
|
||||
}
|
||||
for k, v := range extraImageContent {
|
||||
if _, set := options.ExtraImageContent[k]; !set {
|
||||
options.ExtraImageContent[k] = v
|
||||
if len(extraImageContent) > 0 {
|
||||
if options.ExtraImageContent == nil {
|
||||
options.ExtraImageContent = make(map[string]string, len(extraImageContent))
|
||||
}
|
||||
// merge in the scanner-generated content
|
||||
for k, v := range extraImageContent {
|
||||
if _, set := options.ExtraImageContent[k]; !set {
|
||||
options.ExtraImageContent[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -387,7 +456,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
}
|
||||
|
||||
var manifestBytes []byte
|
||||
if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
|
||||
if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil, destinationTimestamp), options.MaxRetries, options.RetryDelay); err != nil {
|
||||
return imgID, nil, "", fmt.Errorf("copying layers and metadata for container %q: %w", b.ContainerID, err)
|
||||
}
|
||||
// If we've got more names to attach, and we know how to do that for
|
||||
|
@ -428,7 +497,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
logrus.Debugf("removing %v from assigned names to image %q", nameToRemove, img.ID)
|
||||
}
|
||||
if options.IIDFile != "" {
|
||||
if err = os.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0644); err != nil {
|
||||
if err = os.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0o644); err != nil {
|
||||
return imgID, nil, "", err
|
||||
}
|
||||
}
|
||||
|
@ -477,7 +546,6 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
return imgID, nil, "", err
|
||||
}
|
||||
logrus.Debugf("added imgID %s to manifestID %s", imgID, manifestID)
|
||||
|
||||
}
|
||||
return imgID, ref, manifestDigest, nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,570 @@
|
|||
package buildah
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/manifest"
|
||||
ociLayout "github.com/containers/image/v5/oci/layout"
|
||||
imageStorage "github.com/containers/image/v5/storage"
|
||||
"github.com/containers/image/v5/transports"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
storageTypes "github.com/containers/storage/types"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func makeFile(t *testing.T, base string, size int64) string {
|
||||
t.Helper()
|
||||
fn := filepath.Join(t.TempDir(), base)
|
||||
f, err := os.Create(fn)
|
||||
require.NoError(t, err)
|
||||
defer f.Close()
|
||||
if size == 0 {
|
||||
size = 512
|
||||
}
|
||||
_, err = io.CopyN(f, rand.Reader, size)
|
||||
require.NoErrorf(t, err, "writing payload file %d", base)
|
||||
return f.Name()
|
||||
}
|
||||
|
||||
func TestCommitLinkedLayers(t *testing.T) {
|
||||
// This test cannot be parallelized, as it uses NewBuilder(),
// which eventually and indirectly accesses a global variable
// defined in `go-selinux`. This must be fixed in `go-selinux`,
// or the builder must provide some kind of locking mechanism, i.e.,
// if one goroutine is creating a Builder, the others must wait for it.
// Tracked here: https://github.com/containers/buildah/issues/5967
|
||||
ctx := context.TODO()
|
||||
now := time.Now()
|
||||
|
||||
graphDriverName := os.Getenv("STORAGE_DRIVER")
|
||||
if graphDriverName == "" {
|
||||
graphDriverName = "vfs"
|
||||
}
|
||||
t.Logf("using storage driver %q", graphDriverName)
|
||||
store, err := storage.GetStore(storageTypes.StoreOptions{
|
||||
RunRoot: t.TempDir(),
|
||||
GraphRoot: t.TempDir(),
|
||||
GraphDriverName: graphDriverName,
|
||||
})
|
||||
require.NoError(t, err, "initializing storage")
|
||||
t.Cleanup(func() { _, err := store.Shutdown(true); assert.NoError(t, err) })
|
||||
|
||||
imageName := func(i int) string { return fmt.Sprintf("image%d", i) }
|
||||
makeFile := func(base string, size int64) string {
|
||||
return makeFile(t, base, size)
|
||||
}
|
||||
makeArchive := func(base string, size int64) string {
|
||||
t.Helper()
|
||||
file := makeFile(base, size)
|
||||
archiveDir := t.TempDir()
|
||||
st, err := os.Stat(file)
|
||||
require.NoError(t, err)
|
||||
archiveName := filepath.Join(archiveDir, filepath.Base(file))
|
||||
f, err := os.Create(archiveName)
|
||||
require.NoError(t, err)
|
||||
defer f.Close()
|
||||
tw := tar.NewWriter(f)
|
||||
defer tw.Close()
|
||||
hdr, err := tar.FileInfoHeader(st, "")
|
||||
require.NoErrorf(t, err, "building tar header for %s", file)
|
||||
err = tw.WriteHeader(hdr)
|
||||
require.NoErrorf(t, err, "writing tar header for %s", file)
|
||||
f, err = os.Open(file)
|
||||
require.NoError(t, err)
|
||||
defer f.Close()
|
||||
_, err = io.Copy(tw, f)
|
||||
require.NoErrorf(t, err, "writing tar payload for %s", file)
|
||||
return archiveName
|
||||
}
|
||||
layerNumber := 0
|
||||
|
||||
// Build a from-scratch image with one layer.
|
||||
builderOptions := BuilderOptions{
|
||||
FromImage: "scratch",
|
||||
NamespaceOptions: []NamespaceOption{{
|
||||
Name: string(rspec.NetworkNamespace),
|
||||
Host: true,
|
||||
}},
|
||||
SystemContext: &testSystemContext,
|
||||
}
|
||||
b, err := NewBuilder(ctx, store, builderOptions)
|
||||
require.NoError(t, err, "creating builder")
|
||||
b.SetCreatedBy(imageName(layerNumber))
|
||||
firstFile := makeFile("file0", 0)
|
||||
err = b.Add("/", false, AddAndCopyOptions{}, firstFile)
|
||||
require.NoError(t, err, "adding", firstFile)
|
||||
commitOptions := CommitOptions{
|
||||
SystemContext: &testSystemContext,
|
||||
}
|
||||
ref, err := imageStorage.Transport.ParseStoreReference(store, imageName(layerNumber))
|
||||
require.NoError(t, err, "parsing reference for to-be-committed image", imageName(layerNumber))
|
||||
_, _, _, err = b.Commit(ctx, ref, commitOptions)
|
||||
require.NoError(t, err, "committing", imageName(layerNumber))
|
||||
|
||||
// Build another image based on the first with not much in its layer.
|
||||
builderOptions.FromImage = imageName(layerNumber)
|
||||
layerNumber++
|
||||
b, err = NewBuilder(ctx, store, builderOptions)
|
||||
require.NoError(t, err, "creating builder")
|
||||
b.SetCreatedBy(imageName(layerNumber))
|
||||
secondFile := makeFile("file1", 0)
|
||||
err = b.Add("/", false, AddAndCopyOptions{}, secondFile)
|
||||
require.NoError(t, err, "adding", secondFile)
|
||||
commitOptions = CommitOptions{
|
||||
SystemContext: &testSystemContext,
|
||||
}
|
||||
ref, err = imageStorage.Transport.ParseStoreReference(store, imageName(layerNumber))
|
||||
require.NoError(t, err, "parsing reference for to-be-committed image", imageName(layerNumber))
|
||||
_, _, _, err = b.Commit(ctx, ref, commitOptions)
|
||||
require.NoError(t, err, "committing", imageName(layerNumber))
|
||||
|
||||
// Build a third image with two layers on either side of its read-write layer.
|
||||
builderOptions.FromImage = imageName(layerNumber)
|
||||
layerNumber++
|
||||
b, err = NewBuilder(ctx, store, builderOptions)
|
||||
require.NoError(t, err, "creating builder")
|
||||
thirdFile := makeFile("file2", 0)
|
||||
fourthArchiveFile := makeArchive("file3", 0)
|
||||
fifthFile := makeFile("file4", 0)
|
||||
sixthFile := makeFile("file5", 0)
|
||||
seventhArchiveFile := makeArchive("file6", 0)
|
||||
eighthFile := makeFile("file7", 0)
|
||||
ninthArchiveFile := makeArchive("file8", 0)
|
||||
err = b.Add("/", false, AddAndCopyOptions{}, sixthFile)
|
||||
require.NoError(t, err, "adding", sixthFile)
|
||||
b.SetCreatedBy(imageName(layerNumber + 3))
|
||||
b.AddPrependedLinkedLayer(nil, imageName(layerNumber), "", "", filepath.Dir(thirdFile))
|
||||
commitOptions = CommitOptions{
|
||||
PrependedLinkedLayers: []LinkedLayer{
|
||||
{
|
||||
BlobPath: fourthArchiveFile,
|
||||
History: v1.History{
|
||||
Created: &now,
|
||||
CreatedBy: imageName(layerNumber + 1),
|
||||
},
|
||||
},
|
||||
{
|
||||
BlobPath: filepath.Dir(fifthFile),
|
||||
History: v1.History{
|
||||
Created: &now,
|
||||
CreatedBy: imageName(layerNumber + 2),
|
||||
},
|
||||
},
|
||||
},
|
||||
AppendedLinkedLayers: []LinkedLayer{
|
||||
{
|
||||
BlobPath: seventhArchiveFile,
|
||||
History: v1.History{
|
||||
Created: &now,
|
||||
CreatedBy: imageName(layerNumber + 4),
|
||||
},
|
||||
},
|
||||
{
|
||||
BlobPath: filepath.Dir(eighthFile),
|
||||
History: v1.History{
|
||||
Created: &now,
|
||||
CreatedBy: imageName(layerNumber + 5),
|
||||
},
|
||||
},
|
||||
},
|
||||
SystemContext: &testSystemContext,
|
||||
}
|
||||
b.AddAppendedLinkedLayer(nil, imageName(layerNumber+6), "", "", ninthArchiveFile)
|
||||
ref, err = imageStorage.Transport.ParseStoreReference(store, imageName(layerNumber))
|
||||
require.NoErrorf(t, err, "parsing reference for to-be-committed image %q", imageName(layerNumber))
|
||||
_, _, _, err = b.Commit(ctx, ref, commitOptions)
|
||||
require.NoErrorf(t, err, "committing %q", imageName(layerNumber))
|
||||
|
||||
// Build one last image based on the previous one.
|
||||
builderOptions.FromImage = imageName(layerNumber)
|
||||
layerNumber += 7
|
||||
b, err = NewBuilder(ctx, store, builderOptions)
|
||||
require.NoError(t, err, "creating builder")
|
||||
b.SetCreatedBy(imageName(layerNumber))
|
||||
tenthFile := makeFile("file9", 0)
|
||||
err = b.Add("/", false, AddAndCopyOptions{}, tenthFile)
|
||||
require.NoError(t, err, "adding", tenthFile)
|
||||
commitOptions = CommitOptions{
|
||||
SystemContext: &testSystemContext,
|
||||
}
|
||||
ref, err = imageStorage.Transport.ParseStoreReference(store, imageName(layerNumber))
|
||||
require.NoError(t, err, "parsing reference for to-be-committed image", imageName(layerNumber))
|
||||
_, _, _, err = b.Commit(ctx, ref, commitOptions)
|
||||
require.NoError(t, err, "committing", imageName(layerNumber))
|
||||
|
||||
// Get set to examine this image. At this point, each history entry
|
||||
// should just have "image%d" as its CreatedBy field, and each layer
|
||||
// should have the corresponding file (and nothing else) in it.
|
||||
src, err := ref.NewImageSource(ctx, &testSystemContext)
|
||||
require.NoError(t, err, "opening image source")
|
||||
defer src.Close()
|
||||
img, err := ref.NewImage(ctx, &testSystemContext)
|
||||
require.NoError(t, err, "opening image")
|
||||
defer img.Close()
|
||||
config, err := img.OCIConfig(ctx)
|
||||
require.NoError(t, err, "reading config in OCI format")
|
||||
require.Len(t, config.History, 10, "history length")
|
||||
for i := range config.History {
|
||||
require.Equal(t, fmt.Sprintf("image%d", i), config.History[i].CreatedBy, "history createdBy is off")
|
||||
}
|
||||
require.Len(t, config.RootFS.DiffIDs, 10, "diffID list")
|
||||
|
||||
layerContents := func(archive io.ReadCloser) []string {
|
||||
var contents []string
|
||||
defer archive.Close()
|
||||
tr := tar.NewReader(archive)
|
||||
entry, err := tr.Next()
|
||||
for entry != nil {
|
||||
contents = append(contents, entry.Name)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
entry, err = tr.Next()
|
||||
}
|
||||
require.ErrorIs(t, err, io.EOF)
|
||||
return contents
|
||||
}
|
||||
infos, err := img.LayerInfosForCopy(ctx)
|
||||
require.NoError(t, err, "getting layer infos")
|
||||
require.Len(t, infos, 10)
|
||||
for i, blobInfo := range infos {
|
||||
func() {
|
||||
t.Helper()
|
||||
rc, _, err := src.GetBlob(ctx, blobInfo, nil)
|
||||
require.NoError(t, err, "getting blob", i)
|
||||
defer rc.Close()
|
||||
contents := layerContents(rc)
|
||||
require.Len(t, contents, 1)
|
||||
require.Equal(t, fmt.Sprintf("file%d", i), contents[0])
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitCompression(t *testing.T) {
|
||||
// This test cannot be parallelized, as it uses NewBuilder(),
// which eventually and indirectly accesses a global variable
// defined in `go-selinux`. This must be fixed in `go-selinux`,
// or the builder must provide some kind of locking mechanism, i.e.,
// if one goroutine is creating a Builder, the others must wait for it.
// Tracked here: https://github.com/containers/buildah/issues/5967
|
||||
ctx := context.TODO()
|
||||
|
||||
graphDriverName := os.Getenv("STORAGE_DRIVER")
|
||||
if graphDriverName == "" {
|
||||
graphDriverName = "vfs"
|
||||
}
|
||||
t.Logf("using storage driver %q", graphDriverName)
|
||||
store, err := storage.GetStore(storageTypes.StoreOptions{
|
||||
RunRoot: t.TempDir(),
|
||||
GraphRoot: t.TempDir(),
|
||||
GraphDriverName: graphDriverName,
|
||||
})
|
||||
require.NoError(t, err, "initializing storage")
|
||||
t.Cleanup(func() { _, err := store.Shutdown(true); assert.NoError(t, err) })
|
||||
|
||||
builderOptions := BuilderOptions{
|
||||
FromImage: "scratch",
|
||||
NamespaceOptions: []NamespaceOption{{
|
||||
Name: string(rspec.NetworkNamespace),
|
||||
Host: true,
|
||||
}},
|
||||
SystemContext: &testSystemContext,
|
||||
}
|
||||
b, err := NewBuilder(ctx, store, builderOptions)
|
||||
require.NoError(t, err, "creating builder")
|
||||
payload := makeFile(t, "file0", 0)
|
||||
b.SetCreatedBy("ADD file0 in /")
|
||||
err = b.Add("/", false, AddAndCopyOptions{}, payload)
|
||||
require.NoError(t, err, "adding", payload)
|
||||
for _, compressor := range []struct {
|
||||
compression archive.Compression
|
||||
name string
|
||||
expectError bool
|
||||
layerMediaType string
|
||||
}{
|
||||
{archive.Uncompressed, "uncompressed", false, v1.MediaTypeImageLayer},
|
||||
{archive.Gzip, "gzip", false, v1.MediaTypeImageLayerGzip},
|
||||
{archive.Bzip2, "bz2", true, ""},
|
||||
{archive.Xz, "xz", true, ""},
|
||||
{archive.Zstd, "zstd", false, v1.MediaTypeImageLayerZstd},
|
||||
} {
|
||||
t.Run(compressor.name, func(t *testing.T) {
|
||||
var ref types.ImageReference
|
||||
commitOptions := CommitOptions{
|
||||
PreferredManifestType: v1.MediaTypeImageManifest,
|
||||
SystemContext: &testSystemContext,
|
||||
Compression: compressor.compression,
|
||||
}
|
||||
imageName := compressor.name
|
||||
ref, err := imageStorage.Transport.ParseStoreReference(store, imageName)
|
||||
require.NoErrorf(t, err, "parsing reference for to-be-committed local image %q", imageName)
|
||||
_, _, _, err = b.Commit(ctx, ref, commitOptions)
|
||||
if compressor.expectError {
|
||||
require.Errorf(t, err, "committing local image %q", imageName)
|
||||
} else {
|
||||
require.NoErrorf(t, err, "committing local image %q", imageName)
|
||||
}
|
||||
imageName = t.TempDir()
|
||||
ref, err = ociLayout.Transport.ParseReference(imageName)
|
||||
require.NoErrorf(t, err, "parsing reference for to-be-committed oci layout %q", imageName)
|
||||
_, _, _, err = b.Commit(ctx, ref, commitOptions)
|
||||
if compressor.expectError {
|
||||
require.Errorf(t, err, "committing oci layout %q", imageName)
|
||||
return
|
||||
}
|
||||
require.NoErrorf(t, err, "committing oci layout %q", imageName)
|
||||
src, err := ref.NewImageSource(ctx, &testSystemContext)
|
||||
require.NoErrorf(t, err, "reading oci layout %q", imageName)
|
||||
defer src.Close()
|
||||
manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
|
||||
require.NoErrorf(t, err, "reading manifest from oci layout %q", imageName)
|
||||
require.Equalf(t, v1.MediaTypeImageManifest, manifestType, "manifest type from oci layout %q looked wrong", imageName)
|
||||
parsedManifest, err := manifest.OCI1FromManifest(manifestBytes)
|
||||
require.NoErrorf(t, err, "parsing manifest from oci layout %q", imageName)
|
||||
require.Lenf(t, parsedManifest.Layers, 1, "expected exactly one layer in oci layout %q", imageName)
|
||||
require.Equalf(t, compressor.layerMediaType, parsedManifest.Layers[0].MediaType, "expected the layer media type to reflect compression in oci layout %q", imageName)
|
||||
blobReadCloser, _, err := src.GetBlob(ctx, types.BlobInfo{
|
||||
Digest: parsedManifest.Layers[0].Digest,
|
||||
MediaType: parsedManifest.Layers[0].MediaType,
|
||||
}, nil)
|
||||
require.NoErrorf(t, err, "reading the first layer from oci layout %q", imageName)
|
||||
defer blobReadCloser.Close()
|
||||
blob, err := io.ReadAll(blobReadCloser)
|
||||
require.NoErrorf(t, err, "consuming the first layer from oci layout %q", imageName)
|
||||
require.Equalf(t, compressor.compression, archive.DetectCompression(blob), "detected compression looks wrong for layer in oci layout %q", imageName)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitEmpty(t *testing.T) {
|
||||
// This test cannot be parallelized, as it uses NewBuilder(),
// which eventually and indirectly accesses a global variable
// defined in `go-selinux`. This must be fixed in `go-selinux`,
// or the builder must provide some kind of locking mechanism, i.e.,
// if one goroutine is creating a Builder, the others must wait for it.
// Tracked here: https://github.com/containers/buildah/issues/5967
|
||||
ctx := context.TODO()
|
||||
|
||||
graphDriverName := os.Getenv("STORAGE_DRIVER")
|
||||
if graphDriverName == "" {
|
||||
graphDriverName = "vfs"
|
||||
}
|
||||
t.Logf("using storage driver %q", graphDriverName)
|
||||
store, err := storage.GetStore(storageTypes.StoreOptions{
|
||||
RunRoot: t.TempDir(),
|
||||
GraphRoot: t.TempDir(),
|
||||
GraphDriverName: graphDriverName,
|
||||
})
|
||||
require.NoError(t, err, "initializing storage")
|
||||
t.Cleanup(func() { _, err := store.Shutdown(true); assert.NoError(t, err) })
|
||||
|
||||
builderOptions := BuilderOptions{
|
||||
FromImage: "scratch",
|
||||
NamespaceOptions: []NamespaceOption{{
|
||||
Name: string(rspec.NetworkNamespace),
|
||||
Host: true,
|
||||
}},
|
||||
SystemContext: &testSystemContext,
|
||||
}
|
||||
b, err := NewBuilder(ctx, store, builderOptions)
|
||||
require.NoError(t, err, "creating builder")
|
||||
|
||||
committedLayoutDir := t.TempDir()
|
||||
committedRef, err := ociLayout.ParseReference(committedLayoutDir)
|
||||
require.NoError(t, err, "parsing reference to where we're committing a basic image")
|
||||
_, _, _, err = b.Commit(ctx, committedRef, CommitOptions{})
|
||||
require.NoError(t, err, "committing with default settings")
|
||||
|
||||
committedImg, err := committedRef.NewImageSource(ctx, &testSystemContext)
|
||||
require.NoError(t, err, "preparing to read committed image")
|
||||
defer committedImg.Close()
|
||||
committedManifestBytes, committedManifestType, err := committedImg.GetManifest(ctx, nil)
|
||||
require.NoError(t, err, "reading manifest from committed image")
|
||||
require.Equalf(t, v1.MediaTypeImageManifest, committedManifestType, "unexpected manifest type")
|
||||
committedManifest, err := manifest.FromBlob(committedManifestBytes, committedManifestType)
|
||||
require.NoError(t, err, "parsing manifest from committed image")
|
||||
require.Equalf(t, 1, len(committedManifest.LayerInfos()), "expected one layer in manifest")
|
||||
configReadCloser, _, err := committedImg.GetBlob(ctx, committedManifest.ConfigInfo(), nil)
|
||||
require.NoError(t, err, "reading config blob from committed image")
|
||||
defer configReadCloser.Close()
|
||||
var committedImage v1.Image
|
||||
err = json.NewDecoder(configReadCloser).Decode(&committedImage)
|
||||
require.NoError(t, err, "parsing config blob from committed image")
|
||||
require.Equalf(t, 1, len(committedImage.History), "expected one history entry")
|
||||
require.Falsef(t, committedImage.History[0].EmptyLayer, "expected lone history entry to not be marked as an empty layer")
|
||||
require.Equalf(t, 1, len(committedImage.RootFS.DiffIDs), "expected one rootfs layer")
|
||||
|
||||
t.Run("emptylayer", func(t *testing.T) {
|
||||
options := CommitOptions{
|
||||
EmptyLayer: true,
|
||||
}
|
||||
layoutDir := t.TempDir()
|
||||
ref, err := ociLayout.ParseReference(layoutDir)
|
||||
require.NoError(t, err, "parsing reference to image we're going to commit with EmptyLayer")
|
||||
_, _, _, err = b.Commit(ctx, ref, options)
|
||||
require.NoError(t, err, "committing with EmptyLayer = true")
|
||||
img, err := ref.NewImageSource(ctx, &testSystemContext)
|
||||
require.NoError(t, err, "preparing to read committed image")
|
||||
defer img.Close()
|
||||
manifestBytes, manifestType, err := img.GetManifest(ctx, nil)
|
||||
require.NoError(t, err, "reading manifest from committed image")
|
||||
require.Equalf(t, v1.MediaTypeImageManifest, manifestType, "unexpected manifest type")
|
||||
parsedManifest, err := manifest.FromBlob(manifestBytes, manifestType)
|
||||
require.NoError(t, err, "parsing manifest from committed image")
|
||||
require.Zerof(t, len(parsedManifest.LayerInfos()), "expected no layers in manifest")
|
||||
configReadCloser, _, err := img.GetBlob(ctx, parsedManifest.ConfigInfo(), nil)
|
||||
require.NoError(t, err, "reading config blob from committed image")
|
||||
defer configReadCloser.Close()
|
||||
var image v1.Image
|
||||
err = json.NewDecoder(configReadCloser).Decode(&image)
|
||||
require.NoError(t, err, "parsing config blob from committed image")
|
||||
require.Equalf(t, 1, len(image.History), "expected one history entry")
|
||||
require.Truef(t, image.History[0].EmptyLayer, "expected lone history entry to be marked as an empty layer")
|
||||
})
|
||||
|
||||
t.Run("omitlayerhistoryentry", func(t *testing.T) {
|
||||
options := CommitOptions{
|
||||
OmitLayerHistoryEntry: true,
|
||||
}
|
||||
layoutDir := t.TempDir()
|
||||
ref, err := ociLayout.ParseReference(layoutDir)
|
||||
require.NoError(t, err, "parsing reference to image we're going to commit with OmitLayerHistoryEntry")
|
||||
_, _, _, err = b.Commit(ctx, ref, options)
|
||||
require.NoError(t, err, "committing with OmitLayerHistoryEntry = true")
|
||||
img, err := ref.NewImageSource(ctx, &testSystemContext)
|
||||
require.NoError(t, err, "preparing to read committed image")
|
||||
defer img.Close()
|
||||
manifestBytes, manifestType, err := img.GetManifest(ctx, nil)
|
||||
require.NoError(t, err, "reading manifest from committed image")
|
||||
require.Equalf(t, v1.MediaTypeImageManifest, manifestType, "unexpected manifest type")
|
||||
parsedManifest, err := manifest.FromBlob(manifestBytes, manifestType)
|
||||
require.NoError(t, err, "parsing manifest from committed image")
|
||||
require.Equalf(t, 0, len(parsedManifest.LayerInfos()), "expected no layers in manifest")
|
||||
configReadCloser, _, err := img.GetBlob(ctx, parsedManifest.ConfigInfo(), nil)
|
||||
require.NoError(t, err, "reading config blob from committed image")
|
||||
defer configReadCloser.Close()
|
||||
var image v1.Image
|
||||
err = json.NewDecoder(configReadCloser).Decode(&image)
|
||||
require.NoError(t, err, "parsing config blob from committed image")
|
||||
require.Equalf(t, 0, len(image.History), "expected no history entries")
|
||||
require.Equalf(t, 0, len(image.RootFS.DiffIDs), "expected no diff IDs")
|
||||
})
|
||||
|
||||
builderOptions.FromImage = transports.ImageName(committedRef)
|
||||
b, err = NewBuilder(ctx, store, builderOptions)
|
||||
require.NoError(t, err, "creating builder from committed base image")
|
||||
|
||||
t.Run("derived-emptylayer", func(t *testing.T) {
|
||||
options := CommitOptions{
|
||||
EmptyLayer: true,
|
||||
}
|
||||
layoutDir := t.TempDir()
|
||||
ref, err := ociLayout.ParseReference(layoutDir)
|
||||
require.NoError(t, err, "parsing reference to image we're going to commit with EmptyLayer")
|
||||
_, _, _, err = b.Commit(ctx, ref, options)
|
||||
require.NoError(t, err, "committing with EmptyLayer = true")
|
||||
img, err := ref.NewImageSource(ctx, &testSystemContext)
|
||||
require.NoError(t, err, "preparing to read committed image")
|
||||
defer img.Close()
|
||||
manifestBytes, manifestType, err := img.GetManifest(ctx, nil)
|
||||
require.NoError(t, err, "reading manifest from committed image")
|
||||
require.Equalf(t, v1.MediaTypeImageManifest, manifestType, "unexpected manifest type")
|
||||
parsedManifest, err := manifest.FromBlob(manifestBytes, manifestType)
|
||||
require.NoError(t, err, "parsing manifest from committed image")
|
||||
require.Equalf(t, len(committedManifest.LayerInfos()), len(parsedManifest.LayerInfos()), "expected no new layers in manifest")
|
||||
configReadCloser, _, err := img.GetBlob(ctx, parsedManifest.ConfigInfo(), nil)
|
||||
require.NoError(t, err, "reading config blob from committed image")
|
||||
defer configReadCloser.Close()
|
||||
var image v1.Image
|
||||
err = json.NewDecoder(configReadCloser).Decode(&image)
|
||||
require.NoError(t, err, "parsing config blob from committed image")
|
||||
require.Equalf(t, len(committedImage.History)+1, len(image.History), "expected one new history entry")
|
||||
require.Equalf(t, len(committedImage.RootFS.DiffIDs), len(image.RootFS.DiffIDs), "expected no new diff IDs")
|
||||
require.Truef(t, image.History[1].EmptyLayer, "expected new history entry to be marked as an empty layer")
|
||||
})
|
||||
|
||||
t.Run("derived-omitlayerhistoryentry", func(t *testing.T) {
|
||||
options := CommitOptions{
|
||||
OmitLayerHistoryEntry: true,
|
||||
}
|
||||
layoutDir := t.TempDir()
|
||||
ref, err := ociLayout.ParseReference(layoutDir)
|
||||
require.NoError(t, err, "parsing reference to image we're going to commit with OmitLayerHistoryEntry")
|
||||
_, _, _, err = b.Commit(ctx, ref, options)
|
||||
require.NoError(t, err, "committing with OmitLayerHistoryEntry = true")
|
||||
img, err := ref.NewImageSource(ctx, &testSystemContext)
|
||||
require.NoError(t, err, "preparing to read committed image")
|
||||
defer img.Close()
|
||||
manifestBytes, manifestType, err := img.GetManifest(ctx, nil)
|
||||
require.NoError(t, err, "reading manifest from committed image")
|
||||
require.Equalf(t, v1.MediaTypeImageManifest, manifestType, "unexpected manifest type")
|
||||
parsedManifest, err := manifest.FromBlob(manifestBytes, manifestType)
|
||||
require.NoError(t, err, "parsing manifest from committed image")
|
||||
require.Equalf(t, len(committedManifest.LayerInfos()), len(parsedManifest.LayerInfos()), "expected no new layers in manifest")
|
||||
configReadCloser, _, err := img.GetBlob(ctx, parsedManifest.ConfigInfo(), nil)
|
||||
require.NoError(t, err, "reading config blob from committed image")
|
||||
defer configReadCloser.Close()
|
||||
var image v1.Image
|
||||
err = json.NewDecoder(configReadCloser).Decode(&image)
|
||||
require.NoError(t, err, "parsing config blob from committed image")
|
||||
require.Equalf(t, len(committedImage.History), len(image.History), "expected no new history entry")
|
||||
require.Equalf(t, len(committedImage.RootFS.DiffIDs), len(image.RootFS.DiffIDs), "expected no new diff IDs")
|
||||
})
|
||||
|
||||
t.Run("derived-synthetic", func(t *testing.T) {
|
||||
randomDir := t.TempDir()
|
||||
randomFile, err := os.CreateTemp(randomDir, "file")
|
||||
require.NoError(t, err, "creating a temporary file")
|
||||
layerDigest := digest.Canonical.Digester()
|
||||
_, err = io.CopyN(io.MultiWriter(layerDigest.Hash(), randomFile), rand.Reader, 512)
|
||||
require.NoError(t, err, "writing a temporary file")
|
||||
require.NoError(t, randomFile.Close(), "closing temporary file")
|
||||
options := CommitOptions{
|
||||
OmitLayerHistoryEntry: true,
|
||||
AppendedLinkedLayers: []LinkedLayer{{
|
||||
History: v1.History{
|
||||
CreatedBy: "yolo",
|
||||
}, // history entry to add
|
||||
BlobPath: randomFile.Name(),
|
||||
}},
|
||||
}
|
||||
layoutDir := t.TempDir()
|
||||
ref, err := ociLayout.ParseReference(layoutDir)
|
||||
require.NoErrorf(t, err, "parsing reference for to-be-committed image with externally-controlled changes")
|
||||
_, _, _, err = b.Commit(ctx, ref, options)
|
||||
require.NoError(t, err, "committing with OmitLayerHistoryEntry = true")
|
||||
img, err := ref.NewImageSource(ctx, &testSystemContext)
|
||||
require.NoError(t, err, "preparing to read committed image")
|
||||
defer img.Close()
|
||||
manifestBytes, manifestType, err := img.GetManifest(ctx, nil)
|
||||
require.NoError(t, err, "reading manifest from committed image")
|
||||
require.Equalf(t, v1.MediaTypeImageManifest, manifestType, "unexpected manifest type")
|
||||
parsedManifest, err := manifest.FromBlob(manifestBytes, manifestType)
|
||||
require.NoError(t, err, "parsing manifest from committed image")
|
||||
require.Equalf(t, len(committedManifest.LayerInfos())+1, len(parsedManifest.LayerInfos()), "expected one new layer in manifest")
|
||||
configReadCloser, _, err := img.GetBlob(ctx, parsedManifest.ConfigInfo(), nil)
|
||||
require.NoError(t, err, "reading config blob from committed image")
|
||||
defer configReadCloser.Close()
|
||||
var image v1.Image
|
||||
err = json.NewDecoder(configReadCloser).Decode(&image)
|
||||
require.NoError(t, err, "decoding image config")
|
||||
require.Equalf(t, len(committedImage.History)+1, len(image.History), "expected one new history entry")
|
||||
require.Equalf(t, len(committedImage.RootFS.DiffIDs)+1, len(image.RootFS.DiffIDs), "expected one new diff ID")
|
||||
require.Equalf(t, layerDigest.Digest(), image.RootFS.DiffIDs[len(image.RootFS.DiffIDs)-1], "expected new diff ID to match the randomly-generated layer")
|
||||
})
|
||||
}
|
42
common.go
|
@ -2,8 +2,8 @@ package buildah
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
|
@ -12,9 +12,11 @@ import (
|
|||
cp "github.com/containers/image/v5/copy"
|
||||
"github.com/containers/image/v5/docker"
|
||||
"github.com/containers/image/v5/signature"
|
||||
is "github.com/containers/image/v5/storage"
|
||||
"github.com/containers/image/v5/types"
|
||||
encconfig "github.com/containers/ocicrypt/config"
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
)
|
||||
|
||||
|
@ -25,7 +27,7 @@ const (
|
|||
DOCKER = define.DOCKER
|
||||
)
|
||||
|
||||
func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string, removeSignatures bool, addSigner string, ociEncryptLayers *[]int, ociEncryptConfig *encconfig.EncryptConfig, ociDecryptConfig *encconfig.DecryptConfig) *cp.Options {
|
||||
func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string, removeSignatures bool, addSigner string, ociEncryptLayers *[]int, ociEncryptConfig *encconfig.EncryptConfig, ociDecryptConfig *encconfig.DecryptConfig, destinationTimestamp *time.Time) *cp.Options {
|
||||
sourceCtx := getSystemContext(store, nil, "")
|
||||
if sourceSystemContext != nil {
|
||||
*sourceCtx = *sourceSystemContext
|
||||
|
@ -45,6 +47,7 @@ func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceSystemCon
|
|||
OciEncryptConfig: ociEncryptConfig,
|
||||
OciDecryptConfig: ociDecryptConfig,
|
||||
OciEncryptLayers: ociEncryptLayers,
|
||||
DestinationTimestamp: destinationTimestamp,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -59,7 +62,7 @@ func getSystemContext(store storage.Store, defaults *types.SystemContext, signat
|
|||
if store != nil {
|
||||
if sc.SystemRegistriesConfPath == "" && unshare.IsRootless() {
|
||||
userRegistriesFile := filepath.Join(store.GraphRoot(), "registries.conf")
|
||||
if _, err := os.Stat(userRegistriesFile); err == nil {
|
||||
if err := fileutils.Exists(userRegistriesFile); err == nil {
|
||||
sc.SystemRegistriesConfPath = userRegistriesFile
|
||||
}
|
||||
}
|
||||
|
@ -67,22 +70,31 @@ func getSystemContext(store storage.Store, defaults *types.SystemContext, signat
|
|||
return sc
|
||||
}
|
||||
|
||||
func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, dest, src, registry types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) {
|
||||
func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, maybeWrappedDest, maybeWrappedSrc, directDest types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) {
|
||||
return retryCopyImageWithOptions(ctx, policyContext, maybeWrappedDest, maybeWrappedSrc, directDest, copyOptions, maxRetries, retryDelay, true)
|
||||
}
|
||||
|
||||
func retryCopyImageWithOptions(ctx context.Context, policyContext *signature.PolicyContext, maybeWrappedDest, maybeWrappedSrc, directDest types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration, retryOnLayerUnknown bool) ([]byte, error) {
|
||||
var (
|
||||
manifestBytes []byte
|
||||
err error
|
||||
lastErr error
|
||||
)
|
||||
err = retry.RetryIfNecessary(ctx, func() error {
|
||||
manifestBytes, err = cp.Image(ctx, policyContext, dest, src, copyOptions)
|
||||
if registry != nil && registry.Transport().Name() != docker.Transport.Name() {
|
||||
lastErr = err
|
||||
return nil
|
||||
}
|
||||
err = retry.IfNecessary(ctx, func() error {
|
||||
manifestBytes, err = cp.Image(ctx, policyContext, maybeWrappedDest, maybeWrappedSrc, copyOptions)
|
||||
return err
|
||||
}, &retry.RetryOptions{MaxRetry: maxRetries, Delay: retryDelay})
|
||||
if lastErr != nil {
|
||||
err = lastErr
|
||||
}
|
||||
}, &retry.RetryOptions{MaxRetry: maxRetries, Delay: retryDelay, IsErrorRetryable: func(err error) bool {
|
||||
if retryOnLayerUnknown && directDest.Transport().Name() == is.Transport.Name() && errors.Is(err, storage.ErrLayerUnknown) {
|
||||
// we were trying to reuse a layer that belonged to an
|
||||
// image that was deleted at just the right (worst
|
||||
// possible) time? yeah, try again
|
||||
return true
|
||||
}
|
||||
if directDest.Transport().Name() != docker.Transport.Name() {
|
||||
// if we're not talking to a registry, then nah
|
||||
return false
|
||||
}
|
||||
// hand it off to the default should-this-be-retried logic
|
||||
return retry.IsErrorRetryable(err)
|
||||
}})
|
||||
return manifestBytes, err
|
||||
}
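The rewritten helper drops the old "record the last error and stop retrying for non-registry destinations" workaround in favor of a custom IsErrorRetryable callback, which also retries the race where a layer we hoped to reuse disappears because its owning image was deleted mid-copy. A standalone sketch of that callback shape using the same packages; copyWithRetry and copyOnce are illustrative stand-ins for the real call into cp.Image:

package retryexample

import (
	"context"
	"errors"
	"time"

	"github.com/containers/common/pkg/retry"
	"github.com/containers/image/v5/docker"
	is "github.com/containers/image/v5/storage"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage"
)

// copyWithRetry retries when the destination is local storage and a reused
// layer vanished out from under us, defers to the library's default
// heuristics for registry destinations, and never retries otherwise.
func copyWithRetry(ctx context.Context, dest types.ImageReference, copyOnce func() error) error {
	return retry.IfNecessary(ctx, copyOnce, &retry.RetryOptions{
		MaxRetry: 3,
		Delay:    time.Second,
		IsErrorRetryable: func(err error) bool {
			if dest.Transport().Name() == is.Transport.Name() && errors.Is(err, storage.ErrLayerUnknown) {
				// a layer we hoped to reuse was deleted; try again
				return true
			}
			if dest.Transport().Name() != docker.Transport.Name() {
				// not talking to a registry, so retrying won't help
				return false
			}
			// hand it off to the default should-this-be-retried logic
			return retry.IsErrorRetryable(err)
		},
	})
}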
|
||||
|
|
|
@ -0,0 +1,169 @@
|
|||
package buildah
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
cp "github.com/containers/image/v5/copy"
|
||||
"github.com/containers/image/v5/signature"
|
||||
imageStorage "github.com/containers/image/v5/storage"
|
||||
"github.com/containers/image/v5/transports/alltransports"
|
||||
"github.com/containers/storage"
|
||||
storageTypes "github.com/containers/storage/types"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type testRetryCopyImageWrappedStore struct {
|
||||
phantomImageID string
|
||||
storage.Store
|
||||
}
|
||||
|
||||
func (ts *testRetryCopyImageWrappedStore) CreateImage(id string, names []string, layer, metadata string, options *storage.ImageOptions) (*storage.Image, error) {
|
||||
if id == ts.phantomImageID {
|
||||
if img, err := ts.Store.Image(id); img != nil && err == nil {
|
||||
// act as though another thread deleted this image (and its layers) out from under us
|
||||
if _, err := ts.Store.DeleteImage(id, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return ts.Store.CreateImage(id, names, layer, metadata, options)
|
||||
}
|
||||
|
||||
func TestRetryCopyImage(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := context.TODO()
|
||||
|
||||
graphDriverName := os.Getenv("STORAGE_DRIVER")
|
||||
if graphDriverName == "" {
|
||||
graphDriverName = "vfs"
|
||||
}
|
||||
store, err := storage.GetStore(storageTypes.StoreOptions{
|
||||
RunRoot: t.TempDir(),
|
||||
GraphRoot: t.TempDir(),
|
||||
GraphDriverName: graphDriverName,
|
||||
})
|
||||
require.NoError(t, err, "initializing storage")
|
||||
t.Cleanup(func() { _, err := store.Shutdown(true); assert.NoError(t, err) })
|
||||
|
||||
// construct an "image" that can be pulled into local storage
|
||||
var layerBuffer bytes.Buffer
|
||||
tw := tar.NewWriter(&layerBuffer)
|
||||
err = tw.WriteHeader(&tar.Header{
|
||||
Name: "rootfile",
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: 1234,
|
||||
})
|
||||
require.NoError(t, err, "writing header for archive")
|
||||
_, err = tw.Write(make([]byte, 1234))
|
||||
require.NoError(t, err, "writing empty file to archive")
|
||||
require.NoError(t, tw.Close(), "finishing layer")
|
||||
layerDigest := digest.Canonical.FromBytes(layerBuffer.Bytes())
|
||||
imageConfig := v1.Image{
|
||||
RootFS: v1.RootFS{
|
||||
Type: "layers",
|
||||
DiffIDs: []digest.Digest{layerDigest},
|
||||
},
|
||||
}
|
||||
imageConfigBytes, err := json.Marshal(&imageConfig)
|
||||
require.NoError(t, err, "marshalling image configuration blob")
|
||||
imageConfigDigest := digest.Canonical.FromBytes(imageConfigBytes)
|
||||
imageManifest := v1.Manifest{
|
||||
Versioned: ispec.Versioned{
|
||||
SchemaVersion: 2,
|
||||
},
|
||||
MediaType: v1.MediaTypeImageManifest,
|
||||
Config: v1.Descriptor{
|
||||
MediaType: v1.MediaTypeImageConfig,
|
||||
Size: int64(len(imageConfigBytes)),
|
||||
Digest: digest.FromBytes(imageConfigBytes),
|
||||
},
|
||||
Layers: []v1.Descriptor{
|
||||
{
|
||||
MediaType: v1.MediaTypeImageLayer,
|
||||
Size: int64(layerBuffer.Len()),
|
||||
Digest: layerDigest,
|
||||
},
|
||||
},
|
||||
}
|
||||
imageManifestBytes, err := json.Marshal(&imageManifest)
|
||||
require.NoError(t, err, "marshalling image manifest")
|
||||
imageManifestDigest := digest.Canonical.FromBytes(imageManifestBytes)
|
||||
|
||||
// write it to an oci layout
|
||||
ociDir := t.TempDir()
|
||||
blobbyDir := filepath.Join(ociDir, "blobs")
|
||||
require.NoError(t, os.Mkdir(blobbyDir, 0o700))
|
||||
blobDir := filepath.Join(blobbyDir, layerDigest.Algorithm().String())
|
||||
require.NoError(t, os.Mkdir(blobDir, 0o700))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(blobDir, layerDigest.Encoded()), layerBuffer.Bytes(), 0o600), "writing layer")
|
||||
require.NoError(t, os.WriteFile(filepath.Join(blobDir, imageConfigDigest.Encoded()), imageConfigBytes, 0o600), "writing image config")
|
||||
require.NoError(t, os.WriteFile(filepath.Join(blobDir, imageManifestDigest.Encoded()), imageManifestBytes, 0o600), "writing manifest")
|
||||
imageIndex := v1.Index{
|
||||
Versioned: ispec.Versioned{
|
||||
SchemaVersion: 2,
|
||||
},
|
||||
MediaType: v1.MediaTypeImageIndex,
|
||||
Manifests: []v1.Descriptor{
|
||||
{
|
||||
MediaType: v1.MediaTypeImageManifest,
|
||||
Digest: imageManifestDigest,
|
||||
Size: int64(len(imageManifestBytes)),
|
||||
},
|
||||
},
|
||||
}
|
||||
imageIndexBytes, err := json.Marshal(&imageIndex)
|
||||
require.NoError(t, err, "marshalling image index")
|
||||
require.NoError(t, os.WriteFile(filepath.Join(ociDir, v1.ImageIndexFile), imageIndexBytes, 0o600), "writing image index")
|
||||
imageLayout := v1.ImageLayout{
|
||||
Version: v1.ImageLayoutVersion,
|
||||
}
|
||||
imageLayoutBytes, err := json.Marshal(&imageLayout)
|
||||
require.NoError(t, err, "marshalling image layout")
|
||||
require.NoError(t, os.WriteFile(filepath.Join(ociDir, v1.ImageLayoutFile), imageLayoutBytes, 0o600), "writing image layout")
|
||||
|
||||
// pull the image, twice, just to make sure nothing weird happens
|
||||
srcRef, err := alltransports.ParseImageName("oci:" + ociDir)
|
||||
require.NoError(t, err, "building reference to image layout")
|
||||
destRef, err := imageStorage.Transport.NewStoreReference(store, nil, imageConfigDigest.Encoded())
|
||||
require.NoError(t, err, "building reference to image in store")
|
||||
policy, err := signature.NewPolicyFromFile("tests/policy.json")
|
||||
require.NoError(t, err, "reading signature policy")
|
||||
policyContext, err := signature.NewPolicyContext(policy)
|
||||
require.NoError(t, err, "building policy context")
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, policyContext.Destroy(), "destroying policy context")
|
||||
})
|
||||
_, err = retryCopyImage(ctx, policyContext, destRef, srcRef, destRef, &cp.Options{}, 3, 1*time.Second)
|
||||
require.NoError(t, err, "copying image")
|
||||
_, err = retryCopyImage(ctx, policyContext, destRef, srcRef, destRef, &cp.Options{}, 3, 1*time.Second)
|
||||
require.NoError(t, err, "copying image")
|
||||
|
||||
// now make something weird happen
|
||||
wrappedStore := &testRetryCopyImageWrappedStore{
|
||||
phantomImageID: imageConfigDigest.Encoded(),
|
||||
Store: store,
|
||||
}
|
||||
wrappedDestRef, err := imageStorage.Transport.NewStoreReference(wrappedStore, nil, imageConfigDigest.Encoded())
|
||||
require.NoError(t, err, "building wrapped reference")
|
||||
|
||||
// copy with retry-on-storage-layer-unknown = false: expect an error
|
||||
// (if it succeeds, either the test is broken, or we can remove this
|
||||
// case from the retry function)
|
||||
_, err = retryCopyImageWithOptions(ctx, policyContext, wrappedDestRef, srcRef, wrappedDestRef, &cp.Options{}, 3, 1*time.Second, false)
|
||||
require.ErrorIs(t, err, storage.ErrLayerUnknown, "copying image")
|
||||
|
||||
// copy with retry-on-storage-layer-unknown = true: expect no error
|
||||
_, err = retryCopyImageWithOptions(ctx, policyContext, wrappedDestRef, srcRef, wrappedDestRef, &cp.Options{}, 3, 1*time.Second, true)
|
||||
require.NoError(t, err, "copying image")
|
||||
}
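For orientation, here is a hedged sketch (not buildah's actual helper) of the retry behaviour the test above exercises: re-run containers/image's copy.Image when the destination store reports storage.ErrLayerUnknown, which is what the wrapped store provokes by deleting the image mid-copy. The helper name and parameters are illustrative only.

```
package retrysketch

import (
	"context"
	"errors"
	"time"

	cp "github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage"
)

// copyWithRetry is a hypothetical helper: it retries cp.Image when the
// destination storage reports ErrLayerUnknown, which can happen when another
// process deletes a shared image (and its layers) while a copy is in flight.
func copyWithRetry(ctx context.Context, pc *signature.PolicyContext, dest, src types.ImageReference, opts *cp.Options, attempts int, delay time.Duration) (manifestBytes []byte, err error) {
	for i := 0; i < attempts; i++ {
		manifestBytes, err = cp.Image(ctx, pc, dest, src, opts)
		if err == nil || !errors.Is(err, storage.ErrLayerUnknown) {
			return manifestBytes, err
		}
		time.Sleep(delay) // back off before retrying the transient storage error
	}
	return manifestBytes, err
}
```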
|
config.go (156 changed lines)
|
@ -4,11 +4,13 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"maps"
|
||||
"os"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/containers/buildah/define"
|
||||
"github.com/containers/buildah/docker"
|
||||
internalUtil "github.com/containers/buildah/internal/util"
|
||||
|
@ -19,13 +21,12 @@ import (
|
|||
"github.com/containers/storage/pkg/stringid"
|
||||
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// unmarshalConvertedConfig obtains the config blob of img valid for the wantedManifestMIMEType format
|
||||
// (either as it exists, or converting the image if necessary), and unmarshals it into dest.
|
||||
// NOTE: The MIME type is of the _manifest_, not of the _config_ that is returned.
|
||||
func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.Image, wantedManifestMIMEType string) error {
|
||||
func unmarshalConvertedConfig(ctx context.Context, dest any, img types.Image, wantedManifestMIMEType string) error {
|
||||
_, actualManifestMIMEType, err := img.Manifest(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting manifest MIME type for %q: %w", transports.ImageName(img.Reference()), err)
|
||||
|
@ -60,7 +61,7 @@ func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.I
|
|||
return nil
|
||||
}
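To make the comment above concrete, this is a minimal, hedged sketch of what a config-converting helper like this can look like, written only against public containers/image APIs (Manifest, UpdatedImage, ConfigBlob). The function name is hypothetical and this is not claimed to be the package's actual implementation.

```
package configsketch

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/containers/image/v5/types"
)

// unmarshalConfigFor is a hypothetical stand-in for the unexported helper
// above: if the image's manifest isn't already in wantedManifestMIMEType,
// convert the image first, then decode its (possibly converted) config blob
// into dest.
func unmarshalConfigFor(ctx context.Context, dest any, img types.Image, wantedManifestMIMEType string) error {
	_, actualMIMEType, err := img.Manifest(ctx)
	if err != nil {
		return fmt.Errorf("reading manifest MIME type: %w", err)
	}
	if actualMIMEType != wantedManifestMIMEType {
		img, err = img.UpdatedImage(ctx, types.ManifestUpdateOptions{ManifestMIMEType: wantedManifestMIMEType})
		if err != nil {
			return fmt.Errorf("converting manifest to %q: %w", wantedManifestMIMEType, err)
		}
	}
	configBlob, err := img.ConfigBlob(ctx)
	if err != nil {
		return fmt.Errorf("reading config blob: %w", err)
	}
	return json.Unmarshal(configBlob, dest)
}
```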
|
||||
|
||||
func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.SystemContext) error {
|
||||
func (b *Builder) initConfig(ctx context.Context, sys *types.SystemContext, img types.Image, options *BuilderOptions) error {
|
||||
if img != nil { // A pre-existing image, as opposed to a "FROM scratch" new one.
|
||||
rawManifest, manifestMIMEType, err := img.Manifest(ctx)
|
||||
if err != nil {
|
||||
|
@ -91,8 +92,26 @@ func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.Sy
|
|||
if err := json.Unmarshal(b.Manifest, &v1Manifest); err != nil {
|
||||
return fmt.Errorf("parsing OCI manifest %q: %w", string(b.Manifest), err)
|
||||
}
|
||||
for k, v := range v1Manifest.Annotations {
|
||||
b.ImageAnnotations[k] = v
|
||||
if len(v1Manifest.Annotations) > 0 {
|
||||
if b.ImageAnnotations == nil {
|
||||
b.ImageAnnotations = make(map[string]string, len(v1Manifest.Annotations))
|
||||
}
|
||||
maps.Copy(b.ImageAnnotations, v1Manifest.Annotations)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if options == nil || options.CompatScratchConfig != types.OptionalBoolTrue {
|
||||
b.Docker = docker.V2Image{
|
||||
V1Image: docker.V1Image{
|
||||
Config: &docker.Config{
|
||||
WorkingDir: "/",
|
||||
},
|
||||
},
|
||||
}
|
||||
b.OCIv1 = ociv1.Image{
|
||||
Config: ociv1.ImageConfig{
|
||||
WorkingDir: "/",
|
||||
},
|
||||
}
|
||||
}
|
||||
}
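As an aside, the nil-map guard plus maps.Copy pattern used above can be factored like this (a sketch; the helper name is hypothetical):

```
package annotationsketch

import "maps"

// mergeAnnotations mirrors the pattern used above: allocate the destination
// map only when there is something to merge, then copy with maps.Copy.
func mergeAnnotations(dst, src map[string]string) map[string]string {
	if len(src) > 0 {
		if dst == nil {
			dst = make(map[string]string, len(src))
		}
		maps.Copy(dst, src)
	}
	return dst
}
```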
|
||||
|
@ -116,27 +135,21 @@ func (b *Builder) fixupConfig(sys *types.SystemContext) {
|
|||
if b.OCIv1.Created == nil || b.OCIv1.Created.IsZero() {
|
||||
b.OCIv1.Created = &now
|
||||
}
|
||||
currentPlatformSpecification := platforms.DefaultSpec()
|
||||
if b.OS() == "" {
|
||||
if sys != nil && sys.OSChoice != "" {
|
||||
b.SetOS(sys.OSChoice)
|
||||
} else {
|
||||
b.SetOS(runtime.GOOS)
|
||||
b.SetOS(currentPlatformSpecification.OS)
|
||||
}
|
||||
}
|
||||
if b.Architecture() == "" {
|
||||
if sys != nil && sys.ArchitectureChoice != "" {
|
||||
b.SetArchitecture(sys.ArchitectureChoice)
|
||||
} else {
|
||||
b.SetArchitecture(runtime.GOARCH)
|
||||
}
|
||||
// in case the arch string we started with was shorthand for a known arch+variant pair, normalize it
|
||||
ps := internalUtil.NormalizePlatform(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()})
|
||||
b.SetArchitecture(ps.Architecture)
|
||||
b.SetVariant(ps.Variant)
|
||||
}
|
||||
if b.Variant() == "" {
|
||||
if sys != nil && sys.VariantChoice != "" {
|
||||
b.SetVariant(sys.VariantChoice)
|
||||
} else {
|
||||
b.SetArchitecture(currentPlatformSpecification.Architecture)
|
||||
b.SetVariant(currentPlatformSpecification.Variant)
|
||||
}
|
||||
// in case the arch string we started with was shorthand for a known arch+variant pair, normalize it
|
||||
ps := internalUtil.NormalizePlatform(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()})
|
||||
|
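For illustration, a small, hedged example of the platform normalization this hunk relies on, calling github.com/containerd/platforms directly; exact variant handling may differ from buildah's internal NormalizePlatform wrapper.

```
package main

import (
	"fmt"

	"github.com/containerd/platforms"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// The host's default platform, e.g. linux/amd64 on a typical builder.
	def := platforms.DefaultSpec()
	fmt.Printf("default: %s/%s variant=%q\n", def.OS, def.Architecture, def.Variant)

	// "armhf" is shorthand that the normalizer expands to arm/v7; exact
	// variant handling can differ between normalizers.
	p := platforms.Normalize(v1.Platform{OS: "linux", Architecture: "armhf"})
	fmt.Printf("normalized: %s/%s variant=%q\n", p.OS, p.Architecture, p.Variant)
}
```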
@ -158,7 +171,7 @@ func (b *Builder) setupLogger() {
|
|||
|
||||
// Annotations returns a set of key-value pairs from the image's manifest.
|
||||
func (b *Builder) Annotations() map[string]string {
|
||||
return copyStringStringMap(b.ImageAnnotations)
|
||||
return maps.Clone(b.ImageAnnotations)
|
||||
}
|
||||
|
||||
// SetAnnotation adds or overwrites a key's value from the image's manifest.
|
||||
|
@ -180,7 +193,7 @@ func (b *Builder) UnsetAnnotation(key string) {
|
|||
// ClearAnnotations removes all keys and their values from the image's
|
||||
// manifest.
|
||||
func (b *Builder) ClearAnnotations() {
|
||||
b.ImageAnnotations = map[string]string{}
|
||||
b.ImageAnnotations = nil
|
||||
}
|
||||
|
||||
// CreatedBy returns a description of how this image was built.
|
||||
|
@ -223,7 +236,7 @@ func (b *Builder) SetOSVersion(version string) {
|
|||
// OSFeatures returns a list of OS features which the container, or a container
|
||||
// built using an image built from this container, depends on the OS supplying.
|
||||
func (b *Builder) OSFeatures() []string {
|
||||
return copyStringSlice(b.OCIv1.OSFeatures)
|
||||
return slices.Clone(b.OCIv1.OSFeatures)
|
||||
}
|
||||
|
||||
// SetOSFeature adds a feature of the OS which the container, or a container
|
||||
|
@ -327,7 +340,7 @@ func (b *Builder) SetUser(spec string) {
|
|||
|
||||
// OnBuild returns the OnBuild value from the container.
|
||||
func (b *Builder) OnBuild() []string {
|
||||
return copyStringSlice(b.Docker.Config.OnBuild)
|
||||
return slices.Clone(b.Docker.Config.OnBuild)
|
||||
}
|
||||
|
||||
// ClearOnBuild removes all values from the OnBuild structure
|
||||
|
@ -363,7 +376,7 @@ func (b *Builder) SetWorkDir(there string) {
|
|||
// Shell returns the default shell for running commands in the
|
||||
// container, or in a container built using an image built from this container.
|
||||
func (b *Builder) Shell() []string {
|
||||
return copyStringSlice(b.Docker.Config.Shell)
|
||||
return slices.Clone(b.Docker.Config.Shell)
|
||||
}
|
||||
|
||||
// SetShell sets the default shell for running
|
||||
|
@ -376,13 +389,13 @@ func (b *Builder) SetShell(shell []string) {
|
|||
b.Logger.Warnf("SHELL is not supported for OCI image format, %s will be ignored. Must use `docker` format", shell)
|
||||
}
|
||||
|
||||
b.Docker.Config.Shell = copyStringSlice(shell)
|
||||
b.Docker.Config.Shell = slices.Clone(shell)
|
||||
}
|
||||
|
||||
// Env returns a list of key-value pairs to be set when running commands in the
|
||||
// container, or in a container built using an image built from this container.
|
||||
func (b *Builder) Env() []string {
|
||||
return copyStringSlice(b.OCIv1.Config.Env)
|
||||
return slices.Clone(b.OCIv1.Config.Env)
|
||||
}
|
||||
|
||||
// SetEnv adds or overwrites a value to the set of environment strings which
|
||||
|
@ -432,22 +445,22 @@ func (b *Builder) ClearEnv() {
|
|||
// set, to use when running a container built from an image built from this
|
||||
// container.
|
||||
func (b *Builder) Cmd() []string {
|
||||
return copyStringSlice(b.OCIv1.Config.Cmd)
|
||||
return slices.Clone(b.OCIv1.Config.Cmd)
|
||||
}
|
||||
|
||||
// SetCmd sets the default command, or command parameters if an Entrypoint is
|
||||
// set, to use when running a container built from an image built from this
|
||||
// container.
|
||||
func (b *Builder) SetCmd(cmd []string) {
|
||||
b.OCIv1.Config.Cmd = copyStringSlice(cmd)
|
||||
b.Docker.Config.Cmd = copyStringSlice(cmd)
|
||||
b.OCIv1.Config.Cmd = slices.Clone(cmd)
|
||||
b.Docker.Config.Cmd = slices.Clone(cmd)
|
||||
}
|
||||
|
||||
// Entrypoint returns the command to be run for containers built from images
|
||||
// built from this container.
|
||||
func (b *Builder) Entrypoint() []string {
|
||||
if len(b.OCIv1.Config.Entrypoint) > 0 {
|
||||
return copyStringSlice(b.OCIv1.Config.Entrypoint)
|
||||
return slices.Clone(b.OCIv1.Config.Entrypoint)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -455,14 +468,14 @@ func (b *Builder) Entrypoint() []string {
|
|||
// SetEntrypoint sets the command to be run in containers built from images
|
||||
// built from this container.
|
||||
func (b *Builder) SetEntrypoint(ep []string) {
|
||||
b.OCIv1.Config.Entrypoint = copyStringSlice(ep)
|
||||
b.Docker.Config.Entrypoint = copyStringSlice(ep)
|
||||
b.OCIv1.Config.Entrypoint = slices.Clone(ep)
|
||||
b.Docker.Config.Entrypoint = slices.Clone(ep)
|
||||
}
|
||||
|
||||
// Labels returns a set of key-value pairs from the image's runtime
|
||||
// configuration.
|
||||
func (b *Builder) Labels() map[string]string {
|
||||
return copyStringStringMap(b.OCIv1.Config.Labels)
|
||||
return maps.Clone(b.OCIv1.Config.Labels)
|
||||
}
|
||||
|
||||
// SetLabel adds or overwrites a key's value from the image's runtime
|
||||
|
@ -669,11 +682,12 @@ func (b *Builder) Healthcheck() *docker.HealthConfig {
|
|||
return nil
|
||||
}
|
||||
return &docker.HealthConfig{
|
||||
Test: copyStringSlice(b.Docker.Config.Healthcheck.Test),
|
||||
Interval: b.Docker.Config.Healthcheck.Interval,
|
||||
Timeout: b.Docker.Config.Healthcheck.Timeout,
|
||||
StartPeriod: b.Docker.Config.Healthcheck.StartPeriod,
|
||||
Retries: b.Docker.Config.Healthcheck.Retries,
|
||||
Test: slices.Clone(b.Docker.Config.Healthcheck.Test),
|
||||
Interval: b.Docker.Config.Healthcheck.Interval,
|
||||
Timeout: b.Docker.Config.Healthcheck.Timeout,
|
||||
StartPeriod: b.Docker.Config.Healthcheck.StartPeriod,
|
||||
StartInterval: b.Docker.Config.Healthcheck.StartInterval,
|
||||
Retries: b.Docker.Config.Healthcheck.Retries,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -690,11 +704,12 @@ func (b *Builder) SetHealthcheck(config *docker.HealthConfig) {
|
|||
b.Logger.Warnf("HEALTHCHECK is not supported for OCI image format and will be ignored. Must use `docker` format")
|
||||
}
|
||||
b.Docker.Config.Healthcheck = &docker.HealthConfig{
|
||||
Test: copyStringSlice(config.Test),
|
||||
Interval: config.Interval,
|
||||
Timeout: config.Timeout,
|
||||
StartPeriod: config.StartPeriod,
|
||||
Retries: config.Retries,
|
||||
Test: slices.Clone(config.Test),
|
||||
Interval: config.Interval,
|
||||
Timeout: config.Timeout,
|
||||
StartPeriod: config.StartPeriod,
|
||||
StartInterval: config.StartInterval,
|
||||
Retries: config.Retries,
|
||||
}
|
||||
}
|
||||
}
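A hedged usage sketch showing a healthcheck that includes the StartInterval field now preserved by Healthcheck() and SetHealthcheck(); the helper name and values are illustrative only.

```
package healthsketch

import (
	"time"

	"github.com/containers/buildah"
	"github.com/containers/buildah/docker"
)

// configureHealthcheck sets a docker-format healthcheck on an existing
// Builder, including the StartInterval field copied above.
func configureHealthcheck(b *buildah.Builder) {
	b.SetHealthcheck(&docker.HealthConfig{
		Test:          []string{"CMD-SHELL", "curl -fsS http://localhost:8080/healthz || exit 1"},
		Interval:      30 * time.Second,
		Timeout:       5 * time.Second,
		StartPeriod:   10 * time.Second,
		StartInterval: 2 * time.Second, // probe more frequently while the container starts up
		Retries:       3,
	})
}
```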
|
||||
|
@ -745,3 +760,62 @@ func (b *Builder) AddAppendedEmptyLayer(created *time.Time, createdBy, author, c
|
|||
func (b *Builder) ClearAppendedEmptyLayers() {
|
||||
b.AppendedEmptyLayers = nil
|
||||
}
|
||||
|
||||
// AddPrependedLinkedLayer adds an item to the history that we'll create when
|
||||
// committing the image, optionally with a layer, after any history we inherit
|
||||
// from the base image, but before the history item that we'll use to describe
|
||||
// the new layer that we're adding.
|
||||
// The blobPath can be either the location of an uncompressed archive, or a
|
||||
// directory whose contents will be archived to use as a layer blob. Leaving
|
||||
// blobPath empty is functionally similar to calling AddPrependedEmptyLayer().
|
||||
func (b *Builder) AddPrependedLinkedLayer(created *time.Time, createdBy, author, comment, blobPath string) {
|
||||
if created != nil {
|
||||
copiedTimestamp := *created
|
||||
created = &copiedTimestamp
|
||||
}
|
||||
b.PrependedLinkedLayers = append(b.PrependedLinkedLayers, LinkedLayer{
|
||||
BlobPath: blobPath,
|
||||
History: ociv1.History{
|
||||
Created: created,
|
||||
CreatedBy: createdBy,
|
||||
Author: author,
|
||||
Comment: comment,
|
||||
EmptyLayer: blobPath == "",
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// ClearPrependedLinkedLayers clears the list of history entries that we'll add
// to the committed image before the layer that we're adding (if we're adding it).
|
||||
func (b *Builder) ClearPrependedLinkedLayers() {
|
||||
b.PrependedLinkedLayers = nil
|
||||
}
|
||||
|
||||
// AddAppendedLinkedLayer adds an item to the history that we'll create when
|
||||
// committing the image, optionally with a layer, after the history item that
|
||||
// we'll use to describe the new layer that we're adding.
|
||||
// The blobPath can be either the location of an uncompressed archive, or a
|
||||
// directory whose contents will be archived to use as a layer blob. Leaving
|
||||
// blobPath empty is functionally similar to calling AddAppendedEmptyLayer().
|
||||
func (b *Builder) AddAppendedLinkedLayer(created *time.Time, createdBy, author, comment, blobPath string) {
|
||||
if created != nil {
|
||||
copiedTimestamp := *created
|
||||
created = &copiedTimestamp
|
||||
}
|
||||
b.AppendedLinkedLayers = append(b.AppendedLinkedLayers, LinkedLayer{
|
||||
BlobPath: blobPath,
|
||||
History: ociv1.History{
|
||||
Created: created,
|
||||
CreatedBy: createdBy,
|
||||
Author: author,
|
||||
Comment: comment,
|
||||
EmptyLayer: blobPath == "",
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// ClearAppendedLinkedLayers clears the list of linked layers that we'll add to
|
||||
// the committed image after the layer that we're adding (if we're adding it).
|
||||
func (b *Builder) ClearAppendedLinkedLayers() {
|
||||
b.AppendedLinkedLayers = nil
|
||||
}
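A brief, hedged usage sketch of the linked-layer helpers added above; the paths and history strings are illustrative only.

```
package layersketch

import (
	"time"

	"github.com/containers/buildah"
)

// addExtraLayers appends one layer built from a directory on disk and
// prepends a purely historical (empty) entry.
func addExtraLayers(b *buildah.Builder) {
	created := time.Now()

	// A directory blobPath: its contents will be archived and added as a
	// layer after the new layer that the commit itself creates.
	b.AddAppendedLinkedLayer(&created, "example: add license bundle", "", "third-party licenses", "/tmp/licenses")

	// An empty blobPath records history only, before the new layer,
	// much like AddPrependedEmptyLayer().
	b.AddPrependedLinkedLayer(&created, "example: provenance note", "", "no filesystem changes", "")
}
```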
|
||||
|
|
|
@ -1,113 +0,0 @@
|
|||
# [stable|testing|upstream]/Containerfile
|
||||
#
|
||||
# Build a Buildah container image from the latest version
|
||||
# of Fedora.
|
||||
#
|
||||
# FLAVOR defaults to stable if unset
|
||||
#
|
||||
# FLAVOR=stable acquires a stable version of Buildah
# from the Fedora Updates System.
# FLAVOR=testing acquires a testing version of Buildah
# from the Fedora Updates System.
# FLAVOR=upstream acquires a testing version of Buildah
# from the Fedora Copr Buildsystem.
|
||||
# https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/
|
||||
#
|
||||
# https://bodhi.fedoraproject.org/updates/?search=buildah
|
||||
#
|
||||
# This image can be used to create a secured container
|
||||
# that runs safely with privileges within the container.
|
||||
#
|
||||
|
||||
FROM registry.fedoraproject.org/fedora:latest
|
||||
ARG FLAVOR=stable
|
||||
|
||||
label "io.containers.capabilities"="CHOWN,DAC_OVERRIDE,FOWNER,FSETID,KILL,NET_BIND_SERVICE,SETFCAP,SETGID,SETPCAP,SETUID,CHOWN,DAC_OVERRIDE,FOWNER,FSETID,KILL,NET_BIND_SERVICE,SETFCAP,SETGID,SETPCAP,SETUID,SYS_CHROOT"
|
||||
|
||||
# When building for multiple-architectures in parallel using emulation
|
||||
# it's really easy for one/more dnf processes to timeout or mis-count
|
||||
# the minimum download rates. Bump both to be extremely forgiving of
|
||||
# an overworked host.
|
||||
RUN echo -e "\n\n# Added during image build" >> /etc/dnf/dnf.conf && \
|
||||
echo -e "minrate=100\ntimeout=60\n" >> /etc/dnf/dnf.conf
|
||||
|
||||
# Don't include container-selinux and remove
|
||||
# directories used by dnf that are just taking
|
||||
# up space.
|
||||
# TODO: rpm --setcaps... needed due to Fedora (base) image builds
|
||||
# being (maybe still?) affected by
|
||||
# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
|
||||
RUN dnf -y makecache && \
|
||||
dnf -y update && \
|
||||
rpm --setcaps shadow-utils 2>/dev/null && \
|
||||
case "${FLAVOR}" in \
|
||||
stable) \
|
||||
dnf -y install buildah fuse-overlayfs cpp --exclude container-selinux \
|
||||
;; \
|
||||
testing) \
|
||||
dnf -y install --enablerepo=updates-testing buildah fuse-overlayfs cpp \
|
||||
--exclude container-selinux \
|
||||
;; \
|
||||
upstream) \
|
||||
dnf -y install 'dnf-command(copr)' --enablerepo=updates-testing && \
|
||||
dnf -y copr enable rhcontainerbot/podman-next && \
|
||||
dnf -y install buildah fuse-overlayfs \
|
||||
--exclude container-selinux \
|
||||
--enablerepo=updates-testing \
|
||||
;; \
|
||||
*) \
|
||||
printf "\\nFLAVOR argument must be set and valid, currently: '${FLAVOR}'\\n\\n" 1>&2 && \
|
||||
exit 1 \
|
||||
;; \
|
||||
esac && \
|
||||
dnf -y clean all && \
|
||||
rm -rf /var/cache /var/log/dnf* /var/log/yum.*
|
||||
|
||||
ADD ./containers.conf /etc/containers/
|
||||
|
||||
# Setup internal Buildah to pass secrets/subscriptions down from host to internal container
|
||||
RUN printf '/run/secrets/etc-pki-entitlement:/run/secrets/etc-pki-entitlement\n/run/secrets/rhsm:/run/secrets/rhsm\n' > /etc/containers/mounts.conf
|
||||
|
||||
# Copy & modify the defaults to provide reference if runtime changes needed.
|
||||
# Changes here are required for running with fuse-overlay storage inside container.
|
||||
RUN sed -e 's|^#mount_program|mount_program|g' \
|
||||
-e '/additionalimage.*/a "/var/lib/shared",' \
|
||||
-e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \
|
||||
/usr/share/containers/storage.conf \
|
||||
> /etc/containers/storage.conf && \
|
||||
chmod 644 /etc/containers/storage.conf && \
|
||||
chmod 644 /etc/containers/containers.conf
|
||||
|
||||
RUN mkdir -p /var/lib/shared/overlay-images \
|
||||
/var/lib/shared/overlay-layers \
|
||||
/var/lib/shared/vfs-images \
|
||||
/var/lib/shared/vfs-layers && \
|
||||
touch /var/lib/shared/overlay-images/images.lock && \
|
||||
touch /var/lib/shared/overlay-layers/layers.lock && \
|
||||
touch /var/lib/shared/vfs-images/images.lock && \
|
||||
touch /var/lib/shared/vfs-layers/layers.lock
|
||||
|
||||
# Define uid/gid ranges for our user https://github.com/containers/buildah/issues/3053
|
||||
RUN useradd build && \
|
||||
echo -e "build:1:999\nbuild:1001:64535" > /etc/subuid && \
|
||||
echo -e "build:1:999\nbuild:1001:64535" > /etc/subgid && \
|
||||
mkdir -p /home/build/.local/share/containers && \
|
||||
mkdir -p /home/build/.config/containers && \
|
||||
chown -R build:build /home/build
|
||||
# See: https://github.com/containers/buildah/issues/4669
|
||||
# Copy & modify the config for the `build` user and remove the global
|
||||
# `runroot` and `graphroot` which current `build` user cannot access,
|
||||
# in such case storage will choose a runroot in `/var/tmp`.
|
||||
RUN sed -e 's|^#mount_program|mount_program|g' \
|
||||
-e 's|^graphroot|#graphroot|g' \
|
||||
-e 's|^runroot|#runroot|g' \
|
||||
/etc/containers/storage.conf \
|
||||
> /home/build/.config/containers/storage.conf && \
|
||||
chown build:build /home/build/.config/containers/storage.conf
|
||||
|
||||
VOLUME /var/lib/containers
|
||||
VOLUME /home/build/.local/share/containers
|
||||
|
||||
# Set an environment variable to default to chroot isolation for RUN
|
||||
# instructions and "buildah run".
|
||||
ENV BUILDAH_ISOLATION=chroot
|
|
@ -1,86 +1,2 @@
|
|||
[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
|
||||
[comment]: <> ()
|
||||
[comment]: <> (ANY changes made to this file, once committed/merged must)
|
||||
[comment]: <> (be manually copy/pasted -in markdown- into the description)
|
||||
[comment]: <> (field on Quay at the following locations:)
|
||||
[comment]: <> ()
|
||||
[comment]: <> (https://quay.io/repository/containers/buildah)
|
||||
[comment]: <> (https://quay.io/repository/buildah/stable)
|
||||
[comment]: <> (https://quay.io/repository/buildah/testing)
|
||||
[comment]: <> (https://quay.io/repository/buildah/upstream)
|
||||
[comment]: <> ()
|
||||
[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
|
||||
|
||||

|
||||
|
||||
# buildahimage
|
||||
|
||||
## Overview
|
||||
|
||||
This directory contains the Dockerfiles necessary to create the buildahimage container
|
||||
images that are housed on quay.io under the buildah account. All repositories where
|
||||
the images live are public and can be pulled without credentials. These container images are secured and the
|
||||
resulting containers can run safely with privileges within the container.
|
||||
|
||||
The container images are built using the latest Fedora and then Buildah is installed into them.
|
||||
The PATH in the container images is set to the default PATH provided by Fedora. Also, the
|
||||
ENTRYPOINT and the WORKDIR variables are not set within these container images, as such they
|
||||
default to `/`.
|
||||
|
||||
The container images are:
|
||||
|
||||
* `quay.io/containers/buildah:<version>` and `quay.io/buildah/stable:<version>` -
|
||||
These images are built daily. They are intended to contain an unchanging
|
||||
and stable version of buildah. For the most recent `<version>` tags (`vX`,
|
||||
`vX.Y`, and `vX.Y.Z`) the image contents will be updated daily to incorporate
|
||||
(especially) security upgrades. For build details, please [see the
|
||||
configuration file](stable/Dockerfile).
|
||||
* `quay.io/containers/buildah:latest` and `quay.io/buildah/stable:latest` -
|
||||
Built daily using the same Dockerfile as above. The buildah version
|
||||
will remain the "latest" available in Fedora, however the other image
|
||||
contents may vary compared to the version-tagged images.
|
||||
* `quay.io/buildah/testing:latest` - This image is built daily, using the
|
||||
latest version of Buildah that was in the Fedora `updates-testing` repository.
|
||||
The image is built with [the testing Dockerfile](testing/Dockerfile).
|
||||
* `quay.io/buildah/upstream:latest` - This image is built daily using the latest
|
||||
code found in this GitHub repository. Due to the image changing frequently,
|
||||
it's not guaranteed to be stable or even executable. The image is built with
|
||||
[the upstream Dockerfile](upstream/Dockerfile). Note: The actual compilation
|
||||
of upstream buildah [occurs continuously in
|
||||
COPR](https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/).
|
||||
|
||||
|
||||
## Sample Usage
|
||||
|
||||
Although not required, it is suggested that [Podman](https://github.com/containers/podman) be used with these container images.
|
||||
|
||||
```
|
||||
podman pull docker://quay.io/buildah/stable:latest
|
||||
|
||||
podman run stable buildah version
|
||||
|
||||
# Create a directory on the host to mount the container's
# /var/lib/containers directory to, so containers can be
# run within the container.
|
||||
mkdir /var/lib/mycontainer
|
||||
|
||||
# Run the image detached using the host's network in a container name
|
||||
# buildahctr, turn off label and seccomp confinement in the container
|
||||
# and then do a little shell hackery to keep the container up and running.
|
||||
podman run --detach --name=buildahctr --net=host --security-opt label=disable --security-opt seccomp=unconfined --device /dev/fuse:rw -v /var/lib/mycontainer:/var/lib/containers:Z stable sh -c 'while true ;do sleep 100000 ; done'
|
||||
|
||||
podman exec -it buildahctr /bin/sh
|
||||
|
||||
# Now inside of the container
|
||||
|
||||
buildah from alpine
|
||||
|
||||
buildah images
|
||||
|
||||
exit
|
||||
```
|
||||
|
||||
**Note:** If you encounter a `fuse: device not found` error when running the container image, it is likely that
|
||||
the fuse kernel module has not been loaded on your host system. Use the command `modprobe fuse` to load the
|
||||
module and then run the container image. To enable this automatically at boot time, you can add a configuration
|
||||
file to `/etc/modules-load.d`. See `man modules-load.d` for more details.
|
||||
The buildah container image build context and automation have been
|
||||
moved to [https://github.com/containers/image_build/tree/main/buildah](https://github.com/containers/image_build/tree/main/buildah)
|
||||
|
|
|
@ -1,2 +0,0 @@
[engine]
cgroup_manager = "cgroupfs"
|
|
@ -68,6 +68,10 @@ CIRRUS_BASE_SHA=${CIRRUS_BASE_SHA:-unknown$(date +%d)} # difficult to reliably
|
|||
CIRRUS_BUILD_ID=${CIRRUS_BUILD_ID:-unknown$(date +%s)} # must be short and unique enough
|
||||
CIRRUS_TASK_ID=${CIRRUS_BUILD_ID:-unknown$(date +%d)} # to prevent state thrashing when
|
||||
# debugging with `hack/get_ci_vm.sh`
|
||||
|
||||
# All CI jobs use a local registry
|
||||
export CI_USE_REGISTRY_CACHE=true
|
||||
|
||||
# Regex defining all CI-related env. vars. necessary for all possible
|
||||
# testing operations on all platforms and versions. This is necessary
|
||||
# to avoid needlessly passing through global/system values across
|
||||
|
@ -77,7 +81,7 @@ CIRRUS_TASK_ID=${CIRRUS_BUILD_ID:-unknown$(date +%d)} # to prevent state thras
|
|||
# N/B: Don't include BUILDAH_ISOLATION, STORAGE_DRIVER, or CGROUP_MANAGER
|
||||
# here because they will negatively affect execution of the rootless
|
||||
# integration tests.
|
||||
PASSTHROUGH_ENV_EXACT='DEST_BRANCH|DISTRO_NV|GOPATH|GOSRC|ROOTLESS_USER|SCRIPT_BASE|IN_PODMAN_IMAGE'
|
||||
PASSTHROUGH_ENV_EXACT='BUILDAH_RUNTIME|DEST_BRANCH|DISTRO_NV|GOPATH|GOSRC|ROOTLESS_USER|SCRIPT_BASE|IN_PODMAN_IMAGE'
|
||||
|
||||
# List of envariable patterns which must match AT THE BEGINNING of the name.
|
||||
PASSTHROUGH_ENV_ATSTART='CI|TEST'
|
||||
|
@ -92,8 +96,8 @@ PASSTHROUGH_ENV_RE="(^($PASSTHROUGH_ENV_EXACT)\$)|(^($PASSTHROUGH_ENV_ATSTART))|
|
|||
SECRET_ENV_RE='ACCOUNT|GC[EP]..|SSH|PASSWORD|SECRET|TOKEN'
|
||||
|
||||
# FQINs needed for testing
|
||||
REGISTRY_FQIN=${REGISTRY_FQIN:-docker.io/library/registry}
|
||||
ALPINE_FQIN=${ALPINE_FQIN:-docker.io/library/alpine}
|
||||
REGISTRY_FQIN=${REGISTRY_FQIN:-quay.io/libpod/registry:2.8.2}
|
||||
ALPINE_FQIN=${ALPINE_FQIN:-quay.io/libpod/alpine}
|
||||
|
||||
# for in-container testing
|
||||
IN_PODMAN_NAME="in_podman_$CIRRUS_TASK_ID"
|
||||
|
@ -189,7 +193,7 @@ in_podman() {
|
|||
done <<<"$(passthrough_envars)"
|
||||
|
||||
showrun podman run -i --name="$IN_PODMAN_NAME" \
|
||||
--net="container:registry" \
|
||||
--net=host \
|
||||
--privileged \
|
||||
--cgroupns=host \
|
||||
"${envargs[@]}" \
|
||||
|
@ -200,7 +204,7 @@ in_podman() {
|
|||
-e "CGROUP_MANAGER=cgroupfs" \
|
||||
-v "$HOME/auth:$HOME/auth:ro" \
|
||||
-v /sys/fs/cgroup:/sys/fs/cgroup:rw \
|
||||
-v /dev/fuse:/dev/fuse:rw \
|
||||
--device /dev/fuse:rwm \
|
||||
-v "$GOSRC:$GOSRC:z" \
|
||||
--workdir "$GOSRC" \
|
||||
"$@"
|
||||
|
@ -292,11 +296,22 @@ setup_rootless() {
|
|||
msg "************************************************************"
|
||||
cd $GOSRC || exit 1
|
||||
# Guarantee independence from specific values
|
||||
rootless_uid=$[RANDOM+1000]
|
||||
rootless_gid=$[RANDOM+1000]
|
||||
msg "creating $rootless_uid:$rootless_gid $ROOTLESS_USER user"
|
||||
rootless_uid=$((RANDOM+1000))
|
||||
rootless_gid=$((RANDOM+1000))
|
||||
rootless_supplemental_gid1=$((rootless_gid+1))
|
||||
rootless_supplemental_gid2=$((rootless_supplemental_gid1+1))
|
||||
rootless_supplemental_gid3=$((rootless_supplemental_gid2+1))
|
||||
msg "creating $rootless_uid:$rootless_gid,$rootless_supplemental_gid1,$rootless_supplemental_gid2,$rootless_supplemental_gid3 $ROOTLESS_USER user"
|
||||
groupadd -g $rootless_gid $ROOTLESS_USER
|
||||
useradd -g $rootless_gid -u $rootless_uid --no-user-group --create-home $ROOTLESS_USER
|
||||
groupadd -g $rootless_supplemental_gid1 ${ROOTLESS_USER}sg1
|
||||
groupadd -g $rootless_supplemental_gid2 ${ROOTLESS_USER}sg2
|
||||
groupadd -g $rootless_supplemental_gid3 ${ROOTLESS_USER}sg3
|
||||
useradd -g $rootless_gid -G ${ROOTLESS_USER}sg1,${ROOTLESS_USER}sg2,${ROOTLESS_USER}sg3 -u $rootless_uid --no-user-group --create-home $ROOTLESS_USER
|
||||
rootless_supplemental_gid4=$(awk 'BEGIN{FS=":"}/^rootlessuser:/{print $2+$3}' /etc/subgid)
|
||||
groupadd -g $rootless_supplemental_gid4 ${ROOTLESS_USER}sg4
|
||||
usermod -G ${ROOTLESS_USER}sg1,${ROOTLESS_USER}sg2,${ROOTLESS_USER}sg3,${ROOTLESS_USER}sg4 $ROOTLESS_USER
|
||||
msg "running id for $ROOTLESS_USER"
|
||||
id $ROOTLESS_USER
|
||||
|
||||
# We also set up rootless user for image-scp tests (running as root)
|
||||
if [[ $PRIV_NAME = "rootless" ]]; then
|
||||
|
|
|
@ -10,6 +10,9 @@ source $(dirname $0)/lib.sh
|
|||
|
||||
req_env_vars OS_RELEASE_ID OS_RELEASE_VER GOSRC IN_PODMAN_IMAGE CIRRUS_CHANGE_TITLE
|
||||
|
||||
msg "Running df."
|
||||
df -hT
|
||||
|
||||
msg "Disabling git repository owner-check system-wide."
|
||||
# Newer versions of git bark if repo. files are unexpectedly owned.
|
||||
# This mainly affects rootless and containerized testing. But
|
||||
|
@ -84,6 +87,18 @@ echo "Configuring /etc/containers/registries.conf"
|
|||
mkdir -p /etc/containers
|
||||
echo -e "[registries.search]\nregistries = ['docker.io', 'registry.fedoraproject.org', 'quay.io']" | tee /etc/containers/registries.conf
|
||||
|
||||
# As of July 2024, CI VMs come with a built-in registry.
|
||||
LCR=/var/cache/local-registry/local-cache-registry
|
||||
if [[ -x $LCR ]]; then
|
||||
# Images in cache registry are prepopulated at the time
|
||||
# VMs are built. If any PR adds a dependency on new images,
|
||||
# those must be fetched now, at VM start time. This should
|
||||
# be rare, and must be fixed in the next automation_images build.
|
||||
while read new_image; do
|
||||
$LCR cache $new_image
|
||||
done < <(grep '^[^#]' tests/NEW-IMAGES || true)
|
||||
fi
|
||||
|
||||
show_env_vars
|
||||
|
||||
if [[ -z "$CONTAINER" ]]; then
|
||||
|
|
|
@ -63,11 +63,17 @@ else
|
|||
export GITVALIDATE_EPOCH="$CIRRUS_LAST_GREEN_CHANGE"
|
||||
fi
|
||||
echo "Linting & Validating from ${GITVALIDATE_EPOCH:-default EPOCH}"
|
||||
showrun make lint LINTFLAGS="--deadline=20m --color=always -j1"
|
||||
showrun make lint LINTFLAGS="--timeout=20m --color=always -j1"
|
||||
showrun make validate
|
||||
;;
|
||||
unit)
|
||||
showrun make test-unit
|
||||
race=
|
||||
if [[ -z "$CIRRUS_PR" ]]; then
|
||||
# If not running on a PR then run unit tests
|
||||
# with appropriate `-race` flags.
|
||||
race="-race"
|
||||
fi
|
||||
showrun make test-unit RACEFLAGS=$race
|
||||
;;
|
||||
conformance)
|
||||
# Typically it's undesirable to install packages at runtime.
|
||||
|
|
|
@ -16,7 +16,6 @@ import (
|
|||
|
||||
"github.com/containers/buildah/internal/mkcw"
|
||||
mkcwtypes "github.com/containers/buildah/internal/mkcw/types"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -67,8 +66,13 @@ func (d *dummyAttestationHandler) ServeHTTP(rw http.ResponseWriter, req *http.Re
|
|||
}
|
||||
|
||||
func TestCWConvertImage(t *testing.T) {
|
||||
// This test cannot be parallelized: it uses NewBuilder(), which
// eventually and indirectly accesses a global variable defined in
// `go-selinux`. This must be fixed in `go-selinux`, or Builder must
// add some kind of locking, i.e. if one goroutine is creating a
// Builder, the others must wait for it.
// Tracked here: https://github.com/containers/buildah/issues/5967
|
||||
ctx := context.TODO()
|
||||
systemContext := &types.SystemContext{}
|
||||
for _, status := range []int{http.StatusOK, http.StatusInternalServerError} {
|
||||
for _, ignoreChainRetrievalErrors := range []bool{false, true} {
|
||||
for _, ignoreAttestationErrors := range []bool{false, true} {
|
||||
|
@ -110,8 +114,9 @@ func TestCWConvertImage(t *testing.T) {
|
|||
AttestationURL: "http://" + addr.String(),
|
||||
IgnoreAttestationErrors: ignoreAttestationErrors,
|
||||
Slop: "16MB",
|
||||
SignaturePolicyPath: testSystemContext.SignaturePolicyPath,
|
||||
}
|
||||
id, _, _, err := CWConvertImage(ctx, systemContext, store, options)
|
||||
id, _, _, err := CWConvertImage(ctx, &testSystemContext, store, options)
|
||||
if status != http.StatusOK && !ignoreAttestationErrors {
|
||||
assert.Error(t, err)
|
||||
return
|
||||
|
|
copier/copier.go (729 changed lines; diff suppressed because it is too large)
|
@ -13,9 +13,9 @@ import (
|
|||
|
||||
"github.com/containers/storage/pkg/mount"
|
||||
"github.com/containers/storage/pkg/reexec"
|
||||
"github.com/moby/sys/capability"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
|
@ -117,17 +117,17 @@ func TestGetPermissionErrorChroot(t *testing.T) {
|
|||
func testGetPermissionError(t *testing.T) {
|
||||
dropCaps := []capability.Cap{capability.CAP_DAC_OVERRIDE, capability.CAP_DAC_READ_SEARCH}
|
||||
tmp := t.TempDir()
|
||||
err := os.Mkdir(filepath.Join(tmp, "unreadable-directory"), 0000)
|
||||
err := os.Mkdir(filepath.Join(tmp, "unreadable-directory"), 0o000)
|
||||
require.NoError(t, err, "error creating an unreadable directory")
|
||||
err = os.Mkdir(filepath.Join(tmp, "readable-directory"), 0755)
|
||||
err = os.Mkdir(filepath.Join(tmp, "readable-directory"), 0o755)
|
||||
require.NoError(t, err, "error creating a readable directory")
|
||||
err = os.Mkdir(filepath.Join(tmp, "readable-directory", "unreadable-subdirectory"), 0000)
|
||||
err = os.Mkdir(filepath.Join(tmp, "readable-directory", "unreadable-subdirectory"), 0o000)
|
||||
require.NoError(t, err, "error creating an unreadable subdirectory")
|
||||
err = os.WriteFile(filepath.Join(tmp, "unreadable-file"), []byte("hi, i'm a file that you can't read"), 0000)
|
||||
err = os.WriteFile(filepath.Join(tmp, "unreadable-file"), []byte("hi, i'm a file that you can't read"), 0o000)
|
||||
require.NoError(t, err, "error creating an unreadable file")
|
||||
err = os.WriteFile(filepath.Join(tmp, "readable-file"), []byte("hi, i'm also a file, and you can read me"), 0644)
|
||||
err = os.WriteFile(filepath.Join(tmp, "readable-file"), []byte("hi, i'm also a file, and you can read me"), 0o644)
|
||||
require.NoError(t, err, "error creating a readable file")
|
||||
err = os.WriteFile(filepath.Join(tmp, "readable-directory", "unreadable-file"), []byte("hi, i'm also a file that you can't read"), 0000)
|
||||
err = os.WriteFile(filepath.Join(tmp, "readable-directory", "unreadable-file"), []byte("hi, i'm also a file that you can't read"), 0o000)
|
||||
require.NoError(t, err, "error creating an unreadable file in a readable directory")
|
||||
for _, ignore := range []bool{false, true} {
|
||||
t.Run(fmt.Sprintf("ignore=%v", ignore), func(t *testing.T) {
|
||||
|
@ -163,7 +163,7 @@ func TestGetNoCrossDevice(t *testing.T) {
|
|||
require.NoError(t, err, "error creating new mount namespace")
|
||||
|
||||
subdir := filepath.Join(tmpdir, "subdir")
|
||||
err = os.Mkdir(subdir, 0755)
|
||||
err = os.Mkdir(subdir, 0o755)
|
||||
require.NoErrorf(t, err, "error creating %q", subdir)
|
||||
|
||||
err = mount.Mount("tmpfs", subdir, "tmpfs", "rw")
|
||||
|
@ -174,7 +174,7 @@ func TestGetNoCrossDevice(t *testing.T) {
|
|||
}()
|
||||
|
||||
skipped := filepath.Join(subdir, "skipped.txt")
|
||||
err = os.WriteFile(skipped, []byte("this file should have been skipped\n"), 0644)
|
||||
err = os.WriteFile(skipped, []byte("this file should have been skipped\n"), 0o644)
|
||||
require.NoErrorf(t, err, "error writing file at %q", skipped)
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -48,7 +49,7 @@ func makeContents(length int64) io.ReadCloser {
|
|||
for count < length {
|
||||
if _, err := buffered.Write([]byte{"0123456789abcdef"[count%16]}); err != nil {
|
||||
buffered.Flush()
|
||||
pipeWriter.CloseWithError(err) // nolint:errcheck
|
||||
pipeWriter.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
count++
|
||||
|
@ -111,7 +112,7 @@ func makeArchive(headers []tar.Header, contents map[string][]byte) io.ReadCloser
|
|||
tw.Close()
|
||||
buffered.Flush()
|
||||
if err != nil {
|
||||
pipeWriter.CloseWithError(err) // nolint:errcheck
|
||||
pipeWriter.CloseWithError(err)
|
||||
} else {
|
||||
pipeWriter.Close()
|
||||
}
|
||||
|
@ -176,14 +177,15 @@ type enumeratedFile struct {
|
|||
}
|
||||
|
||||
var (
|
||||
testDate = time.Unix(1485449953, 0)
|
||||
testDate = time.Unix(1485449953, 0)
|
||||
secondTestDate = time.Unix(1485449953*2, 0)
|
||||
|
||||
uid = os.Getuid()
|
||||
|
||||
testArchiveSlice = makeArchiveSlice([]tar.Header{
|
||||
{Name: "item-0", Typeflag: tar.TypeReg, Size: 123, Mode: 0600, ModTime: testDate},
|
||||
{Name: "item-1", Typeflag: tar.TypeReg, Size: 456, Mode: 0600, ModTime: testDate},
|
||||
{Name: "item-2", Typeflag: tar.TypeReg, Size: 789, Mode: 0600, ModTime: testDate},
|
||||
{Name: "item-0", Typeflag: tar.TypeReg, Size: 123, Mode: 0o600, ModTime: testDate},
|
||||
{Name: "item-1", Typeflag: tar.TypeReg, Size: 456, Mode: 0o600, ModTime: testDate},
|
||||
{Name: "item-2", Typeflag: tar.TypeReg, Size: 789, Mode: 0o600, ModTime: testDate},
|
||||
})
|
||||
|
||||
testArchives = []struct {
|
||||
|
@ -204,38 +206,38 @@ var (
|
|||
name: "regular",
|
||||
rootOnly: false,
|
||||
headers: []tar.Header{
|
||||
{Name: "file-0", Typeflag: tar.TypeReg, Size: 123456789, Mode: 0600, ModTime: testDate},
|
||||
{Name: "file-a", Typeflag: tar.TypeReg, Size: 23, Mode: 0600, ModTime: testDate},
|
||||
{Name: "file-b", Typeflag: tar.TypeReg, Size: 23, Mode: 0600, ModTime: testDate},
|
||||
{Name: "file-c", Typeflag: tar.TypeLink, Linkname: "file-a", Mode: 0600, ModTime: testDate},
|
||||
{Name: "file-u", Typeflag: tar.TypeReg, Size: 23, Mode: cISUID | 0755, ModTime: testDate},
|
||||
{Name: "file-g", Typeflag: tar.TypeReg, Size: 23, Mode: cISGID | 0755, ModTime: testDate},
|
||||
{Name: "file-t", Typeflag: tar.TypeReg, Size: 23, Mode: cISVTX | 0755, ModTime: testDate},
|
||||
{Name: "link-0", Typeflag: tar.TypeSymlink, Linkname: "../file-0", Size: 123456789, Mode: 0777, ModTime: testDate},
|
||||
{Name: "link-a", Typeflag: tar.TypeSymlink, Linkname: "file-a", Size: 23, Mode: 0777, ModTime: testDate},
|
||||
{Name: "link-b", Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0777, ModTime: testDate},
|
||||
{Name: "hlink-0", Typeflag: tar.TypeLink, Linkname: "file-0", Size: 123456789, Mode: 0600, ModTime: testDate},
|
||||
{Name: "hlink-a", Typeflag: tar.TypeLink, Linkname: "/file-a", Size: 23, Mode: 0600, ModTime: testDate},
|
||||
{Name: "hlink-b", Typeflag: tar.TypeLink, Linkname: "../file-b", Size: 23, Mode: 0600, ModTime: testDate},
|
||||
{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
|
||||
{Name: "subdir-a/file-n", Typeflag: tar.TypeReg, Size: 108, Mode: 0660, ModTime: testDate},
|
||||
{Name: "subdir-a/file-o", Typeflag: tar.TypeReg, Size: 34, Mode: 0660, ModTime: testDate},
|
||||
{Name: "subdir-a/file-a", Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0777, ModTime: testDate},
|
||||
{Name: "subdir-a/file-b", Typeflag: tar.TypeSymlink, Linkname: "../../file-b", Size: 23, Mode: 0777, ModTime: testDate},
|
||||
{Name: "subdir-a/file-c", Typeflag: tar.TypeSymlink, Linkname: "/file-c", Size: 23, Mode: 0777, ModTime: testDate},
|
||||
{Name: "subdir-b", Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
|
||||
{Name: "subdir-b/file-n", Typeflag: tar.TypeReg, Size: 216, Mode: 0660, ModTime: testDate},
|
||||
{Name: "subdir-b/file-o", Typeflag: tar.TypeReg, Size: 45, Mode: 0660, ModTime: testDate},
|
||||
{Name: "subdir-c", Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
|
||||
{Name: "subdir-c/file-n", Typeflag: tar.TypeReg, Size: 432, Mode: 0666, ModTime: testDate},
|
||||
{Name: "subdir-c/file-o", Typeflag: tar.TypeReg, Size: 56, Mode: 0666, ModTime: testDate},
|
||||
{Name: "subdir-d", Typeflag: tar.TypeDir, Mode: 0700, ModTime: testDate},
|
||||
{Name: "subdir-d/hlink-0", Typeflag: tar.TypeLink, Linkname: "../file-0", Size: 123456789, Mode: 0600, ModTime: testDate},
|
||||
{Name: "subdir-d/hlink-a", Typeflag: tar.TypeLink, Linkname: "/file-a", Size: 23, Mode: 0600, ModTime: testDate},
|
||||
{Name: "subdir-d/hlink-b", Typeflag: tar.TypeLink, Linkname: "../../file-b", Size: 23, Mode: 0600, ModTime: testDate},
|
||||
{Name: "archive-a", Typeflag: tar.TypeReg, Size: 0, Mode: 0600, ModTime: testDate},
|
||||
{Name: "subdir-e", Typeflag: tar.TypeDir, Mode: 0500, ModTime: testDate},
|
||||
{Name: "subdir-e/file-p", Typeflag: tar.TypeReg, Size: 890, Mode: 0600, ModTime: testDate},
|
||||
{Name: "file-0", Typeflag: tar.TypeReg, Size: 123456789, Mode: 0o600, ModTime: testDate},
|
||||
{Name: "file-a", Typeflag: tar.TypeReg, Size: 23, Mode: 0o600, ModTime: testDate},
|
||||
{Name: "file-b", Typeflag: tar.TypeReg, Size: 23, Mode: 0o600, ModTime: testDate},
|
||||
{Name: "file-c", Typeflag: tar.TypeLink, Linkname: "file-a", Mode: 0o600, ModTime: testDate},
|
||||
{Name: "file-u", Typeflag: tar.TypeReg, Size: 23, Mode: cISUID | 0o755, ModTime: testDate},
|
||||
{Name: "file-g", Typeflag: tar.TypeReg, Size: 23, Mode: cISGID | 0o755, ModTime: testDate},
|
||||
{Name: "file-t", Typeflag: tar.TypeReg, Size: 23, Mode: cISVTX | 0o755, ModTime: testDate},
|
||||
{Name: "link-0", Typeflag: tar.TypeSymlink, Linkname: "../file-0", Size: 123456789, Mode: 0o777, ModTime: testDate},
|
||||
{Name: "link-a", Typeflag: tar.TypeSymlink, Linkname: "file-a", Size: 23, Mode: 0o777, ModTime: testDate},
|
||||
{Name: "link-b", Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0o777, ModTime: testDate},
|
||||
{Name: "hlink-0", Typeflag: tar.TypeLink, Linkname: "file-0", Size: 123456789, Mode: 0o600, ModTime: testDate},
|
||||
{Name: "hlink-a", Typeflag: tar.TypeLink, Linkname: "/file-a", Size: 23, Mode: 0o600, ModTime: testDate},
|
||||
{Name: "hlink-b", Typeflag: tar.TypeLink, Linkname: "../file-b", Size: 23, Mode: 0o600, ModTime: testDate},
|
||||
{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0o700, ModTime: testDate},
|
||||
{Name: "subdir-a/file-n", Typeflag: tar.TypeReg, Size: 108, Mode: 0o660, ModTime: testDate},
|
||||
{Name: "subdir-a/file-o", Typeflag: tar.TypeReg, Size: 34, Mode: 0o660, ModTime: testDate},
|
||||
{Name: "subdir-a/file-a", Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0o777, ModTime: testDate},
|
||||
{Name: "subdir-a/file-b", Typeflag: tar.TypeSymlink, Linkname: "../../file-b", Size: 23, Mode: 0o777, ModTime: testDate},
|
||||
{Name: "subdir-a/file-c", Typeflag: tar.TypeSymlink, Linkname: "/file-c", Size: 23, Mode: 0o777, ModTime: testDate},
|
||||
{Name: "subdir-b", Typeflag: tar.TypeDir, Mode: 0o700, ModTime: testDate},
|
||||
{Name: "subdir-b/file-n", Typeflag: tar.TypeReg, Size: 216, Mode: 0o660, ModTime: testDate},
|
||||
{Name: "subdir-b/file-o", Typeflag: tar.TypeReg, Size: 45, Mode: 0o660, ModTime: testDate},
|
||||
{Name: "subdir-c", Typeflag: tar.TypeDir, Mode: 0o700, ModTime: testDate},
|
||||
{Name: "subdir-c/file-n", Typeflag: tar.TypeReg, Size: 432, Mode: 0o666, ModTime: testDate},
|
||||
{Name: "subdir-c/file-o", Typeflag: tar.TypeReg, Size: 56, Mode: 0o666, ModTime: testDate},
|
||||
{Name: "subdir-d", Typeflag: tar.TypeDir, Mode: 0o700, ModTime: testDate},
|
||||
{Name: "subdir-d/hlink-0", Typeflag: tar.TypeLink, Linkname: "../file-0", Size: 123456789, Mode: 0o600, ModTime: testDate},
|
||||
{Name: "subdir-d/hlink-a", Typeflag: tar.TypeLink, Linkname: "/file-a", Size: 23, Mode: 0o600, ModTime: testDate},
|
||||
{Name: "subdir-d/hlink-b", Typeflag: tar.TypeLink, Linkname: "../../file-b", Size: 23, Mode: 0o600, ModTime: testDate},
|
||||
{Name: "archive-a", Typeflag: tar.TypeReg, Size: 0, Mode: 0o600, ModTime: testDate},
|
||||
{Name: "subdir-e", Typeflag: tar.TypeDir, Mode: 0o500, ModTime: testDate},
|
||||
{Name: "subdir-e/file-p", Typeflag: tar.TypeReg, Size: 890, Mode: 0o600, ModTime: testDate},
|
||||
},
|
||||
contents: map[string][]byte{
|
||||
"archive-a": testArchiveSlice,
|
||||
|
@ -412,8 +414,8 @@ var (
|
|||
name: "devices",
|
||||
rootOnly: true,
|
||||
headers: []tar.Header{
|
||||
{Name: "char-dev", Typeflag: tar.TypeChar, Devmajor: 0, Devminor: 0, Mode: 0600, ModTime: testDate},
|
||||
{Name: "blk-dev", Typeflag: tar.TypeBlock, Devmajor: 0, Devminor: 0, Mode: 0600, ModTime: testDate},
|
||||
{Name: "char-dev", Typeflag: tar.TypeChar, Devmajor: 0, Devminor: 0, Mode: 0o600, ModTime: testDate},
|
||||
{Name: "blk-dev", Typeflag: tar.TypeBlock, Devmajor: 0, Devminor: 0, Mode: 0o600, ModTime: testDate},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -505,7 +507,7 @@ func testPut(t *testing.T) {
|
|||
require.NoErrorf(t, err, "error extracting archive %q to directory %q", testArchives[i].name, tmp)
|
||||
|
||||
var found []string
|
||||
err = filepath.WalkDir(tmp, func(path string, d fs.DirEntry, err error) error {
|
||||
err = filepath.WalkDir(tmp, func(path string, _ fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -534,13 +536,13 @@ func testPut(t *testing.T) {
|
|||
for _, typeFlag := range []byte{tar.TypeReg, tar.TypeLink, tar.TypeSymlink, tar.TypeChar, tar.TypeBlock, tar.TypeFifo} {
|
||||
t.Run(fmt.Sprintf("overwrite (dir)=%v,type=%c", overwrite, typeFlag), func(t *testing.T) {
|
||||
archive := makeArchiveSlice([]tar.Header{
|
||||
{Name: "target", Typeflag: tar.TypeSymlink, Mode: 0755, Linkname: "target", ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeSymlink, Mode: 0755, Linkname: "target", ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeReg, Size: 123, Mode: 0755, ModTime: testDate},
|
||||
{Name: "test", Typeflag: tar.TypeDir, Size: 0, Mode: 0755, ModTime: testDate},
|
||||
{Name: "test/content", Typeflag: tar.TypeReg, Size: 0, Mode: 0755, ModTime: testDate},
|
||||
{Name: "test", Typeflag: typeFlag, Size: 0, Mode: 0755, Linkname: "target", ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeSymlink, Mode: 0o755, Linkname: "target", ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeDir, Mode: 0o755, ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeSymlink, Mode: 0o755, Linkname: "target", ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeReg, Size: 123, Mode: 0o755, ModTime: testDate},
|
||||
{Name: "test", Typeflag: tar.TypeDir, Size: 0, Mode: 0o755, ModTime: testDate},
|
||||
{Name: "test/content", Typeflag: tar.TypeReg, Size: 0, Mode: 0o755, ModTime: testDate},
|
||||
{Name: "test", Typeflag: typeFlag, Size: 0, Mode: 0o755, Linkname: "target", ModTime: testDate},
|
||||
})
|
||||
tmp := t.TempDir()
|
||||
err := Put(tmp, tmp, PutOptions{UIDMap: uidMap, GIDMap: gidMap, NoOverwriteDirNonDir: !overwrite}, bytes.NewReader(archive))
|
||||
|
@ -560,13 +562,13 @@ func testPut(t *testing.T) {
|
|||
for _, typeFlag := range []byte{tar.TypeReg, tar.TypeLink, tar.TypeSymlink, tar.TypeChar, tar.TypeBlock, tar.TypeFifo} {
|
||||
t.Run(fmt.Sprintf("overwrite (non-dir)=%v,type=%c", overwrite, typeFlag), func(t *testing.T) {
|
||||
archive := makeArchiveSlice([]tar.Header{
|
||||
{Name: "target", Typeflag: tar.TypeSymlink, Mode: 0755, Linkname: "target", ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeReg, Mode: 0755, ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeSymlink, Mode: 0755, Linkname: "target", ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeReg, Size: 123, Mode: 0755, ModTime: testDate},
|
||||
{Name: "test", Typeflag: typeFlag, Size: 0, Mode: 0755, Linkname: "target", ModTime: testDate},
|
||||
{Name: "test", Typeflag: tar.TypeDir, Size: 0, Mode: 0755, ModTime: testDate},
|
||||
{Name: "test/content", Typeflag: tar.TypeReg, Size: 0, Mode: 0755, ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeSymlink, Mode: 0o755, Linkname: "target", ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeReg, Mode: 0o755, ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeSymlink, Mode: 0o755, Linkname: "target", ModTime: testDate},
|
||||
{Name: "target", Typeflag: tar.TypeReg, Size: 123, Mode: 0o755, ModTime: testDate},
|
||||
{Name: "test", Typeflag: typeFlag, Size: 0, Mode: 0o755, Linkname: "target", ModTime: testDate},
|
||||
{Name: "test", Typeflag: tar.TypeDir, Size: 0, Mode: 0o755, ModTime: testDate},
|
||||
{Name: "test/content", Typeflag: tar.TypeReg, Size: 0, Mode: 0o755, ModTime: testDate},
|
||||
})
|
||||
tmp := t.TempDir()
|
||||
err := Put(tmp, tmp, PutOptions{UIDMap: uidMap, GIDMap: gidMap, NoOverwriteNonDirDir: !overwrite}, bytes.NewReader(archive))
|
||||
|
@ -587,9 +589,9 @@ func testPut(t *testing.T) {
|
|||
t.Skip("can only test !IgnoreDevices with root privileges, skipping")
|
||||
}
|
||||
archive := makeArchiveSlice([]tar.Header{
|
||||
{Name: "test", Typeflag: typeFlag, Size: 0, Mode: 0600, ModTime: testDate, Devmajor: 0, Devminor: 0},
|
||||
{Name: "link", Typeflag: tar.TypeLink, Size: 0, Mode: 0600, ModTime: testDate, Linkname: "test"},
|
||||
{Name: "unrelated", Typeflag: tar.TypeReg, Size: 0, Mode: 0600, ModTime: testDate},
|
||||
{Name: "test", Typeflag: typeFlag, Size: 0, Mode: 0o600, ModTime: testDate, Devmajor: 0, Devminor: 0},
|
||||
{Name: "link", Typeflag: tar.TypeLink, Size: 0, Mode: 0o600, ModTime: testDate, Linkname: "test"},
|
||||
{Name: "unrelated", Typeflag: tar.TypeReg, Size: 0, Mode: 0o600, ModTime: testDate},
|
||||
})
|
||||
tmp := t.TempDir()
|
||||
err := Put(tmp, tmp, PutOptions{UIDMap: uidMap, GIDMap: gidMap, IgnoreDevices: ignoreDevices}, bytes.NewReader(archive))
|
||||
|
@ -888,8 +890,10 @@ func testGetMultiple(t *testing.T) {
|
|||
keepDirectoryNames bool
|
||||
renames map[string]string
|
||||
noDerefSymlinks bool
|
||||
parents bool
|
||||
timestamp *time.Time
|
||||
}
|
||||
var getTestArchives = []struct {
|
||||
getTestArchives := []struct {
|
||||
name string
|
||||
headers []tar.Header
|
||||
contents map[string][]byte
|
||||
|
@ -899,32 +903,32 @@ func testGetMultiple(t *testing.T) {
|
|||
{
|
||||
name: "regular",
|
||||
headers: []tar.Header{
|
||||
{Name: "file-0", Typeflag: tar.TypeReg, Size: 123456789, Mode: 0600},
|
||||
{Name: "file-a", Typeflag: tar.TypeReg, Size: 23, Mode: 0600},
|
||||
{Name: "file-b", Typeflag: tar.TypeReg, Size: 23, Mode: 0600},
|
||||
{Name: "link-a", Typeflag: tar.TypeSymlink, Linkname: "file-a", Size: 23, Mode: 0600},
|
||||
{Name: "link-c", Typeflag: tar.TypeSymlink, Linkname: "subdir-c", Mode: 0700, ModTime: testDate},
|
||||
{Name: "archive-a", Typeflag: tar.TypeReg, Size: 0, Mode: 0600},
|
||||
{Name: "non-archive-a", Typeflag: tar.TypeReg, Size: 1199, Mode: 0600},
|
||||
{Name: "hlink-0", Typeflag: tar.TypeLink, Linkname: "file-0", Size: 123456789, Mode: 0600},
|
||||
{Name: "something-a", Typeflag: tar.TypeReg, Size: 34, Mode: 0600},
|
||||
{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0700},
|
||||
{Name: "subdir-a/file-n", Typeflag: tar.TypeReg, Size: 108, Mode: 0660},
|
||||
{Name: "subdir-a/file-o", Typeflag: tar.TypeReg, Size: 45, Mode: 0660},
|
||||
{Name: "subdir-a/file-a", Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0600},
|
||||
{Name: "subdir-a/file-b", Typeflag: tar.TypeSymlink, Linkname: "../../file-b", Size: 23, Mode: 0600},
|
||||
{Name: "subdir-a/file-c", Typeflag: tar.TypeReg, Size: 56, Mode: 0600},
|
||||
{Name: "subdir-b", Typeflag: tar.TypeDir, Mode: 0700},
|
||||
{Name: "subdir-b/file-n", Typeflag: tar.TypeReg, Size: 216, Mode: 0660},
|
||||
{Name: "subdir-b/file-o", Typeflag: tar.TypeReg, Size: 67, Mode: 0660},
|
||||
{Name: "subdir-c", Typeflag: tar.TypeDir, Mode: 0700},
|
||||
{Name: "subdir-c/file-p", Typeflag: tar.TypeReg, Size: 432, Mode: 0666},
|
||||
{Name: "subdir-c/file-q", Typeflag: tar.TypeReg, Size: 78, Mode: 0666},
|
||||
{Name: "subdir-d", Typeflag: tar.TypeDir, Mode: 0700},
|
||||
{Name: "subdir-d/hlink-0", Typeflag: tar.TypeLink, Linkname: "../file-0", Size: 123456789, Mode: 0600},
|
||||
{Name: "subdir-e", Typeflag: tar.TypeDir, Mode: 0700},
|
||||
{Name: "subdir-e/subdir-f", Typeflag: tar.TypeDir, Mode: 0700},
|
||||
{Name: "subdir-e/subdir-f/hlink-b", Typeflag: tar.TypeLink, Linkname: "../../file-b", Size: 23, Mode: 0600},
|
||||
{Name: "file-0", Typeflag: tar.TypeReg, Size: 123456789, Mode: 0o600},
|
||||
{Name: "file-a", Typeflag: tar.TypeReg, Size: 23, Mode: 0o600},
|
||||
{Name: "file-b", Typeflag: tar.TypeReg, Size: 23, Mode: 0o600},
|
||||
{Name: "link-a", Typeflag: tar.TypeSymlink, Linkname: "file-a", Size: 23, Mode: 0o600},
|
||||
{Name: "link-c", Typeflag: tar.TypeSymlink, Linkname: "subdir-c", Mode: 0o700, ModTime: testDate},
|
||||
{Name: "archive-a", Typeflag: tar.TypeReg, Size: 0, Mode: 0o600},
|
||||
{Name: "non-archive-a", Typeflag: tar.TypeReg, Size: 1199, Mode: 0o600},
|
||||
{Name: "hlink-0", Typeflag: tar.TypeLink, Linkname: "file-0", Size: 123456789, Mode: 0o600},
|
||||
{Name: "something-a", Typeflag: tar.TypeReg, Size: 34, Mode: 0o600},
|
||||
{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0o700},
|
||||
{Name: "subdir-a/file-n", Typeflag: tar.TypeReg, Size: 108, Mode: 0o660},
|
||||
{Name: "subdir-a/file-o", Typeflag: tar.TypeReg, Size: 45, Mode: 0o660},
|
||||
{Name: "subdir-a/file-a", Typeflag: tar.TypeSymlink, Linkname: "../file-a", Size: 23, Mode: 0o600},
|
||||
{Name: "subdir-a/file-b", Typeflag: tar.TypeSymlink, Linkname: "../../file-b", Size: 23, Mode: 0o600},
|
||||
{Name: "subdir-a/file-c", Typeflag: tar.TypeReg, Size: 56, Mode: 0o600},
|
||||
{Name: "subdir-b", Typeflag: tar.TypeDir, Mode: 0o700},
|
||||
{Name: "subdir-b/file-n", Typeflag: tar.TypeReg, Size: 216, Mode: 0o660},
|
||||
{Name: "subdir-b/file-o", Typeflag: tar.TypeReg, Size: 67, Mode: 0o660},
|
||||
{Name: "subdir-c", Typeflag: tar.TypeDir, Mode: 0o700},
|
||||
{Name: "subdir-c/file-p", Typeflag: tar.TypeReg, Size: 432, Mode: 0o666},
|
||||
{Name: "subdir-c/file-q", Typeflag: tar.TypeReg, Size: 78, Mode: 0o666},
|
||||
{Name: "subdir-d", Typeflag: tar.TypeDir, Mode: 0o700},
|
||||
{Name: "subdir-d/hlink-0", Typeflag: tar.TypeLink, Linkname: "../file-0", Size: 123456789, Mode: 0o600},
|
||||
{Name: "subdir-e", Typeflag: tar.TypeDir, Mode: 0o700},
|
||||
{Name: "subdir-e/subdir-f", Typeflag: tar.TypeDir, Mode: 0o700},
|
||||
{Name: "subdir-e/subdir-f/hlink-b", Typeflag: tar.TypeLink, Linkname: "../../file-b", Size: 23, Mode: 0o600},
|
||||
},
|
||||
contents: map[string][]byte{
|
||||
"archive-a": testArchiveSlice,
|
||||
|
@@ -995,6 +999,16 @@ func testGetMultiple(t *testing.T) {
"subdir-f/hlink-b", // from subdir-e
},
},
{
name: "timestamped",
pattern: "file*",
items: []string{
"file-0",
"file-a",
"file-b",
},
timestamp: &secondTestDate,
},
{
name: "dot-with-wildcard-includes-and-excludes",
pattern: ".",
@@ -1364,6 +1378,124 @@ func testGetMultiple(t *testing.T) {
"file-q", // from link-c -> subdir-c
},
},
{
name: "wildcard and parents",
pattern: "*",
parents: true,
items: []string{
"file-0",
"file-a",
"file-b",
"link-a",
"hlink-0",
"something-a",
"archive-a",
"non-archive-a",
"subdir-a",
"subdir-b",
"subdir-c",
"subdir-d",
"subdir-e",
"subdir-a/file-n",
"subdir-a/file-o",
"subdir-a/file-a",
"subdir-a/file-b",
"subdir-a/file-c",
"subdir-b/file-n",
"subdir-b/file-o",
"subdir-c/file-p",
"subdir-c/file-p",
"subdir-c/file-q",
"subdir-c/file-q",
"subdir-d/hlink-0",
"subdir-e/subdir-f",
"subdir-e/subdir-f/hlink-b",
},
},
{
name: "everything-with-wildcard-includes-and-excludes-parents",
pattern: "*",
parents: true,
exclude: []string{"**/*-a", "!**/*-c"},
items: []string{
"file-0",
"file-b",
"subdir-a",
"subdir-b",
"subdir-c",
"subdir-d",
"subdir-e",
"subdir-a/file-c",
"subdir-b/file-n",
"subdir-b/file-o",
"subdir-c/file-p",
"subdir-c/file-p",
"subdir-c/file-q",
"subdir-c/file-q",
"hlink-0",
"subdir-d/hlink-0",
"subdir-e/subdir-f",
"subdir-e/subdir-f/hlink-b",
},
},
{
name: "file-and-dir-wildcard-parents",
pattern: "*-a",
parents: true,
items: []string{
"file-a",
"link-a",
"something-a",
"archive-a",
"non-archive-a",
"subdir-a",
"subdir-a/file-n",
"subdir-a/file-o",
"subdir-a/file-a",
"subdir-a/file-b",
"subdir-a/file-c",
},
},
{
name: "root-wildcard-parents",
pattern: "/subdir-b/*",
parents: true,
items: []string{
"subdir-b",
"subdir-b/file-n",
"subdir-b/file-o",
},
},
{
name: "dotdot-wildcard-parents",
pattern: "../../subdir-b/*",
parents: true,
items: []string{
"subdir-b",
"subdir-b/file-n",
"subdir-b/file-o",
},
},
{
name: "dir-with-parents",
pattern: "subdir-e/subdir-f",
parents: true,
items: []string{
"subdir-e",
"subdir-e/subdir-f",
"subdir-e/subdir-f/hlink-b",
},
},
{
name: "hlink-with-parents",
pattern: "subdir-e/subdir-f/hlink-b",
parents: true,
items: []string{
"subdir-e",
"subdir-e/subdir-f",
"subdir-e/subdir-f/hlink-b",
},
},
},
},
}
@@ -1399,6 +1531,8 @@ func testGetMultiple(t *testing.T) {
KeepDirectoryNames: testCase.keepDirectoryNames,
Rename: testCase.renames,
NoDerefSymlinks: testCase.noDerefSymlinks,
Parents: testCase.parents,
Timestamp: testCase.timestamp,
}

t.Run(fmt.Sprintf("topdir=%s,archive=%s,case=%s,pattern=%s", topdir, testArchive.name, testCase.name, testCase.pattern), func(t *testing.T) {
@@ -1414,15 +1548,18 @@ func testGetMultiple(t *testing.T) {
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
getErr = Get(root, topdir, getOptions, []string{testCase.pattern}, pipeWriter)
pipeWriter.Close()
wg.Done()
}()
tr := tar.NewReader(pipeReader)
hdr, err := tr.Next()
actualContents := []string{}
for err == nil {
actualContents = append(actualContents, filepath.FromSlash(hdr.Name))
if testCase.timestamp != nil {
assert.Truef(t, testCase.timestamp.Equal(hdr.ModTime), "timestamp was supposed to be forced for %q", hdr.Name)
}
hdr, err = tr.Next()
}
pipeReader.Close()
@@ -1518,9 +1655,9 @@ func testMkdir(t *testing.T) {
{
name: "regular",
headers: []tar.Header{
{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
{Name: "subdir-a/subdir-b", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
{Name: "subdir-a/subdir-b/subdir-c", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0o755, ModTime: testDate},
{Name: "subdir-a/subdir-b", Typeflag: tar.TypeDir, Mode: 0o755, ModTime: testDate},
{Name: "subdir-a/subdir-b/subdir-c", Typeflag: tar.TypeDir, Mode: 0o755, ModTime: testDate},
{Name: "subdir-a/subdir-b/dangle1", Typeflag: tar.TypeSymlink, Linkname: "dangle1-target", ModTime: testDate},
{Name: "subdir-a/subdir-b/dangle2", Typeflag: tar.TypeSymlink, Linkname: "../dangle2-target", ModTime: testDate},
{Name: "subdir-a/subdir-b/dangle3", Typeflag: tar.TypeSymlink, Linkname: "../../dangle3-target", ModTime: testDate},
@@ -1607,7 +1744,7 @@ func testMkdir(t *testing.T) {
root := dir
options := MkdirOptions{ChownNew: &idtools.IDPair{UID: os.Getuid(), GID: os.Getgid()}}
var beforeNames, afterNames []string
err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
err = filepath.WalkDir(dir, func(path string, _ fs.DirEntry, err error) error {
if err != nil {
return err
}
@@ -1621,7 +1758,7 @@ func testMkdir(t *testing.T) {
require.NoErrorf(t, err, "error walking directory to catalog pre-Mkdir contents: %v", err)
err = Mkdir(root, testCase.create, options)
require.NoErrorf(t, err, "error creating directory %q under %q with Mkdir: %v", testCase.create, root, err)
err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
err = filepath.WalkDir(dir, func(path string, _ fs.DirEntry, err error) error {
if err != nil {
return err
}
@@ -1633,7 +1770,7 @@ func testMkdir(t *testing.T) {
return nil
})
require.NoErrorf(t, err, "error walking directory to catalog post-Mkdir contents: %v", err)
expected := append([]string{}, beforeNames...)
expected := slices.Clone(beforeNames)
for _, expect := range testCase.expect {
expected = append(expected, filepath.FromSlash(expect))
}
@@ -1713,17 +1850,17 @@ func testRemove(t *testing.T) {
{
name: "regular",
headers: []tar.Header{
{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
{Name: "subdir-a/file-a", Typeflag: tar.TypeReg, Mode: 0755, ModTime: testDate},
{Name: "subdir-a/file-b", Typeflag: tar.TypeReg, Mode: 0755, ModTime: testDate},
{Name: "subdir-a/subdir-b", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
{Name: "subdir-a/subdir-b/subdir-c", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
{Name: "subdir-a", Typeflag: tar.TypeDir, Mode: 0o755, ModTime: testDate},
{Name: "subdir-a/file-a", Typeflag: tar.TypeReg, Mode: 0o755, ModTime: testDate},
{Name: "subdir-a/file-b", Typeflag: tar.TypeReg, Mode: 0o755, ModTime: testDate},
{Name: "subdir-a/subdir-b", Typeflag: tar.TypeDir, Mode: 0o755, ModTime: testDate},
{Name: "subdir-a/subdir-b/subdir-c", Typeflag: tar.TypeDir, Mode: 0o755, ModTime: testDate},
{Name: "subdir-a/subdir-b/subdir-c/parent", Typeflag: tar.TypeSymlink, Linkname: "..", ModTime: testDate},
{Name: "subdir-a/subdir-b/subdir-c/link-b", Typeflag: tar.TypeSymlink, Linkname: "../../file-b", ModTime: testDate},
{Name: "subdir-a/subdir-b/subdir-c/root", Typeflag: tar.TypeSymlink, Linkname: "/", ModTime: testDate},
{Name: "subdir-a/subdir-d", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
{Name: "subdir-a/subdir-e", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
{Name: "subdir-a/subdir-e/subdir-f", Typeflag: tar.TypeDir, Mode: 0755, ModTime: testDate},
{Name: "subdir-a/subdir-d", Typeflag: tar.TypeDir, Mode: 0o755, ModTime: testDate},
{Name: "subdir-a/subdir-e", Typeflag: tar.TypeDir, Mode: 0o755, ModTime: testDate},
{Name: "subdir-a/subdir-e/subdir-f", Typeflag: tar.TypeDir, Mode: 0o755, ModTime: testDate},
},
testCases: []testCase{
{
@@ -1822,7 +1959,7 @@ func testRemove(t *testing.T) {
root := dir
options := RemoveOptions{All: testCase.all}
beforeNames := make(map[string]struct{})
err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
err = filepath.WalkDir(dir, func(path string, _ fs.DirEntry, err error) error {
if err != nil {
return err
}
@@ -1841,7 +1978,7 @@ func testRemove(t *testing.T) {
}
require.NoErrorf(t, err, "error removing item %q under %q with Remove: %v", testCase.remove, root, err)
afterNames := make(map[string]struct{})
err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
err = filepath.WalkDir(dir, func(path string, _ fs.DirEntry, err error) error {
if err != nil {
return err
}
@@ -1871,3 +2008,351 @@ func testRemove(t *testing.T) {
})
}
}

func TestExtendedGlob(t *testing.T) {
tmpdir := t.TempDir()
buf := []byte("buffer")
var expected1, expected2 []string
require.NoError(t, os.Mkdir(filepath.Join(tmpdir, "a"), 0o700))
require.NoError(t, os.Mkdir(filepath.Join(tmpdir, "a", "b"), 0o700))
require.NoError(t, os.WriteFile(filepath.Join(tmpdir, "a", "b", "a.dat"), buf, 0o600))
expected1 = append(expected1, filepath.Join(tmpdir, "a", "b", "a.dat"))
require.NoError(t, os.Mkdir(filepath.Join(tmpdir, "b"), 0o700))
require.NoError(t, os.Mkdir(filepath.Join(tmpdir, "b", "c"), 0o700))
require.NoError(t, os.Mkdir(filepath.Join(tmpdir, "c"), 0o700))
require.NoError(t, os.Mkdir(filepath.Join(tmpdir, "c", "d"), 0o700))
require.NoError(t, os.WriteFile(filepath.Join(tmpdir, "c", "d", "c.dat"), buf, 0o600))
expected1 = append(expected1, filepath.Join(tmpdir, "c", "d", "c.dat"))
expected2 = append(expected2, filepath.Join(tmpdir, "c", "d", "c.dat"))
require.NoError(t, os.Mkdir(filepath.Join(tmpdir, "d"), 0o700))
require.NoError(t, os.WriteFile(filepath.Join(tmpdir, "d", "d.dat"), buf, 0o600))
expected1 = append(expected1, filepath.Join(tmpdir, "d", "d.dat"))
expected2 = append(expected2, filepath.Join(tmpdir, "d", "d.dat"))
matched, err := extendedGlob(filepath.Join(tmpdir, "**", "*.dat"))
require.NoError(t, err, "globbing")
require.ElementsMatchf(t, expected1, matched, "**/*.dat")
matched, err = extendedGlob(filepath.Join(tmpdir, "**", "d", "*.dat"))
require.NoError(t, err, "globbing")
require.ElementsMatch(t, expected2, matched, "**/d/*.dat")
matched, err = extendedGlob(filepath.Join(tmpdir, "**", "**", "d", "*.dat"))
require.NoError(t, err, "globbing")
require.ElementsMatch(t, expected2, matched, "**/**/d/*.dat")
matched, err = extendedGlob(filepath.Join(tmpdir, "**", "d", "**", "*.dat"))
require.NoError(t, err, "globbing")
require.ElementsMatch(t, expected2, matched, "**/d/**/*.dat")
}

func testEnsure(t *testing.T) {
zero := time.Unix(0, 0)
worldReadable := os.FileMode(0o644)

testCases := []struct {
description string
subdir string
options EnsureOptions
expected []string
}{
{
description: "base",
subdir: "subdir",
options: EnsureOptions{
Paths: []EnsurePath{
{
Path: filepath.Join(string(os.PathSeparator), "a", "b", "a"),
Typeflag: tar.TypeReg,
Chmod: &worldReadable,
},
{
Path: filepath.Join("a", "b", "b"),
Typeflag: tar.TypeReg,
ModTime: &zero,
},
{
Path: filepath.Join(string(os.PathSeparator), "a", "b", "c"),
Typeflag: tar.TypeDir,
ModTime: &zero,
},
{
Path: filepath.Join("a", "b", "d"),
Typeflag: tar.TypeDir,
},
},
},
expected: []string{
"subdir",
"subdir/a",
"subdir/a/b",
"subdir/a/b/a",
"subdir/a/b/b",
"subdir/a/b/c",
"subdir/a/b/d",
},
},
{
description: "nosubdir",
options: EnsureOptions{
Paths: []EnsurePath{
{
Path: filepath.Join(string(os.PathSeparator), "a", "b", "c"),
Typeflag: tar.TypeDir,
ModTime: &zero,
},
{
Path: filepath.Join("a", "b", "d"),
Typeflag: tar.TypeDir,
},
},
},
expected: []string{
"a",
"a/b",
"a/b/c",
"a/b/d",
},
},
}
for i := range testCases {
t.Run(testCases[i].description, func(t *testing.T) {
testStarted := time.Now()
tmpdir := t.TempDir()
created, err := Ensure(tmpdir, testCases[i].subdir, testCases[i].options)
require.NoError(t, err, "unexpected error ensuring")
require.EqualValues(t, testCases[i].expected, created, "did not expect these")
for _, item := range testCases[i].options.Paths {
target := filepath.Join(tmpdir, testCases[i].subdir, item.Path)
st, err := os.Stat(target)
require.NoError(t, err, "we supposedly created %q", item.Path)
if item.Chmod != nil {
assert.Equalf(t, *item.Chmod, st.Mode().Perm(), "permissions look wrong on %q", item.Path)
}
if item.Chown != nil {
uid, gid, err := owner(st)
require.NoErrorf(t, err, "expected to be able to read uid:gid for %q", item.Path)
assert.Equalf(t, item.Chown.UID, uid, "user looks wrong on %q", item.Path)
assert.Equalf(t, item.Chown.GID, gid, "group looks wrong on %q", item.Path)
}
if item.ModTime != nil {
assert.Equalf(t, item.ModTime.Unix(), st.ModTime().Unix(), "datestamp looks wrong on %q", item.Path)
} else {
assert.Truef(t, !testStarted.After(st.ModTime()), "datestamp is too old on %q: %v < %v", item.Path, st.ModTime(), testStarted)
}
}
})
}
}

func TestEnsureNoChroot(t *testing.T) {
couldChroot := canChroot
canChroot = false
testEnsure(t)
canChroot = couldChroot
}

func testConditionalRemove(t *testing.T) {
mode, mismatchedMode := os.FileMode(0o751), os.FileMode(0o755)
now := time.Now()
then := time.Unix(now.Unix()/2, 0)
type create struct {
path string
typeFlag byte
mtime *time.Time
mode *os.FileMode
}
testCases := []struct {
description string
subdir string
create []create
remove ConditionalRemoveOptions
expectedRemoved []string
expectedRemain []string
}{
{
description: "withoutsubdir",
create: []create{
{path: "/a", typeFlag: tar.TypeDir},
{path: "b", typeFlag: tar.TypeReg},
{path: "c/d", typeFlag: tar.TypeReg},
{path: "c/e", typeFlag: tar.TypeReg},
},
remove: ConditionalRemoveOptions{
Paths: []ConditionalRemovePath{
{Path: "a"},
{Path: "b"},
{Path: "c"},
{Path: "c/e"},
},
},
expectedRemoved: []string{"a", "b", "c/e"},
expectedRemain: []string{"c/d", "c"},
},
{
description: "withsubdir",
subdir: "subdir",
create: []create{
{path: "/a", typeFlag: tar.TypeDir},
{path: "b", typeFlag: tar.TypeReg},
{path: "c/d", typeFlag: tar.TypeReg},
{path: "c/e", typeFlag: tar.TypeReg},
},
remove: ConditionalRemoveOptions{
Paths: []ConditionalRemovePath{
{Path: "a"},
{Path: "b"},
{Path: "c"},
{Path: "c/e"},
},
},
expectedRemoved: []string{"a", "b", "c/e"},
expectedRemain: []string{"c/d", "c"},
},
{
description: "withsubdir",
subdir: "subdir",
create: []create{
{path: "/a", typeFlag: tar.TypeDir},
{path: "b", typeFlag: tar.TypeReg},
{path: "c/d", typeFlag: tar.TypeReg},
{path: "c/e", typeFlag: tar.TypeReg},
},
remove: ConditionalRemoveOptions{
Paths: []ConditionalRemovePath{
{Path: "a"},
{Path: "b"},
{Path: "c"},
{Path: "c/e"},
},
},
expectedRemoved: []string{"a", "b", "c/e"},
expectedRemain: []string{"c/d", "c"},
},
{
description: "unconditional",
create: []create{
{path: "/a", typeFlag: tar.TypeDir, mtime: &then, mode: &mode},
{path: "b", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
{path: "c/d", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
{path: "c/e", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
},
remove: ConditionalRemoveOptions{
Paths: []ConditionalRemovePath{
{Path: "a"},
{Path: "b"},
{Path: "c"},
{Path: "c/e"},
},
},
expectedRemoved: []string{"a", "b", "c/e"},
expectedRemain: []string{"c/d", "c"},
},
{
description: "conditions-not-met",
create: []create{
{path: "/a", typeFlag: tar.TypeDir, mtime: &then, mode: &mode},
{path: "b", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
{path: "c/d", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
{path: "c/e", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
},
remove: ConditionalRemoveOptions{
Paths: []ConditionalRemovePath{
{Path: "a", Mode: &mismatchedMode},
{Path: "b", Mode: &mismatchedMode},
{Path: "c", Mode: &mismatchedMode},
{Path: "c/e", Mode: &mismatchedMode},
{Path: "a", ModTime: &now},
{Path: "b", ModTime: &now},
{Path: "c", ModTime: &now},
{Path: "c/e", ModTime: &now},
},
},
expectedRemain: []string{"a", "b", "c/e", "c/d", "c"},
},
{
description: "conditions-met",
create: []create{
{path: "/a", typeFlag: tar.TypeDir, mtime: &then, mode: &mode},
{path: "b", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
{path: "c/d", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
{path: "c/e", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
},
remove: ConditionalRemoveOptions{
Paths: []ConditionalRemovePath{
{Path: "a", ModTime: &then, Mode: &mode},
{Path: "b", ModTime: &then, Mode: &mode},
{Path: "c"},
{Path: "c/d", ModTime: &then, Mode: &mode},
},
},
expectedRemoved: []string{"a", "b", "c/d"},
expectedRemain: []string{"c", "c/e"},
},
}
for i := range testCases {
t.Run(testCases[i].description, func(t *testing.T) {
tmpdir := t.TempDir()
var create EnsureOptions
for _, what := range testCases[i].create {
create.Paths = append(create.Paths, EnsurePath{
Path: what.path,
Typeflag: what.typeFlag,
ModTime: what.mtime,
Chmod: what.mode,
})
}
created, err := Ensure(tmpdir, testCases[i].subdir, create)
require.NoErrorf(t, err, "unexpected error creating %#v", create)
remove := testCases[i].remove
for _, what := range created {
remove.Paths = append(remove.Paths, ConditionalRemovePath{
Path: what,
})
}
removed, err := ConditionalRemove(tmpdir, testCases[i].subdir, testCases[i].remove)
require.NoError(t, err, "unexpected error removing")
expectedRemoved := slices.Clone(testCases[i].expectedRemoved)
slices.Sort(expectedRemoved)
require.EqualValues(t, expectedRemoved, removed, "did not expect these to be removed")
var remain []string
err = filepath.Walk(filepath.Join(tmpdir, testCases[i].subdir), func(path string, _ fs.FileInfo, err error) error {
if err != nil {
return err
}
rel, err := filepath.Rel(filepath.Join(tmpdir, testCases[i].subdir), path)
if err != nil {
return fmt.Errorf("computing path of %q relative to %q: %w", path, filepath.Join(tmpdir, testCases[i].subdir), err)
}
if rel != "" && rel == "." {
return nil
}
remain = append(remain, rel)
return nil
})
slices.Sort(remain)
expectedRemain := slices.Clone(testCases[i].expectedRemain)
slices.Sort(expectedRemain)
require.NoError(t, err, "unexpected error checking what's left")
require.EqualValues(t, expectedRemain, remain, "did not expect these to be left behind")
})
}
}

func TestConditionalRemoveNoChroot(t *testing.T) {
couldChroot := canChroot
canChroot = false
testConditionalRemove(t)
canChroot = couldChroot
}

func TestSortedExtendedGlob(t *testing.T) {
tmpdir := t.TempDir()
buf := []byte("buffer")
expect := []string{}
for _, name := range []string{"z", "y", "x", "a", "b", "c", "d", "e", "f"} {
require.NoError(t, os.WriteFile(filepath.Join(tmpdir, name), buf, 0o600))
expect = append(expect, filepath.Join(tmpdir, name))
}
sort.Strings(expect)

matched, err := extendedGlob(filepath.Join(tmpdir, "*"))
require.NoError(t, err, "globbing")
require.ElementsMatch(t, expect, matched, "sorted globbing")
}

@@ -1,11 +1,17 @@
// +build !windows
//go:build !windows

package copier

import (
"os"
"testing"
)

const (
testModeMask = int64(os.ModePerm)
testIgnoreSymlinkDates = false
)

func TestPutChroot(t *testing.T) {
if uid != 0 {
t.Skip("chroot() requires root privileges, skipping")
@@ -75,3 +81,23 @@ func TestRemoveChroot(t *testing.T) {
testRemove(t)
canChroot = couldChroot
}

func TestEnsureChroot(t *testing.T) {
if uid != 0 {
t.Skip("chroot() requires root privileges, skipping")
}
couldChroot := canChroot
canChroot = true
testEnsure(t)
canChroot = couldChroot
}

func TestConditionalRemoveChroot(t *testing.T) {
if uid != 0 {
t.Skip("chroot() requires root privileges, skipping")
}
couldChroot := canChroot
canChroot = true
testConditionalRemove(t)
canChroot = couldChroot
}

@@ -0,0 +1,8 @@
//go:build windows

package copier

const (
testModeMask = int64(0o600)
testIgnoreSymlinkDates = true
)

@@ -1,5 +1,4 @@
//go:build darwin || (linux && mips) || (linux && mipsle) || (linux && mips64) || (linux && mips64le)
// +build darwin linux,mips linux,mipsle linux,mips64 linux,mips64le

package copier

@@ -1,5 +1,4 @@
//go:build (linux && !mips && !mipsle && !mips64 && !mips64le) || freebsd
// +build linux,!mips,!mipsle,!mips64,!mips64le freebsd
//go:build (linux && !mips && !mipsle && !mips64 && !mips64le) || freebsd || netbsd

package copier

@@ -1,5 +1,4 @@
//go:build linux || darwin || freebsd
// +build linux darwin freebsd
//go:build !windows

package copier

@@ -25,6 +24,7 @@ func (h *hardlinkChecker) Check(fi os.FileInfo) string {
}
return ""
}

func (h *hardlinkChecker) Add(fi os.FileInfo, name string) {
if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 {
h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name)

@@ -1,5 +1,4 @@
//go:build !linux && !darwin
// +build !linux,!darwin

package copier

@@ -7,11 +6,11 @@ import (
"os"
)

type hardlinkChecker struct {
}
type hardlinkChecker struct{}

func (h *hardlinkChecker) Check(fi os.FileInfo) string {
return ""
}

func (h *hardlinkChecker) Add(fi os.FileInfo, name string) {
}