Compare commits

...

40 Commits

Author SHA1 Message Date
Tim Zhang e4551b1013 shim-protos: replace ttrpc-codegen to the released crate version
The new version v0.6.0 has been released.

Signed-off-by: Tim Zhang <tim@hyper.sh>
2025-07-15 17:54:05 +00:00
jinda.ljd 0e4ff0c5da truncate backing file path exceeding 64 bytes in LoopInfo
When the backing file path exceeds 64 bytes, an 'out of range' error occurs due to the limitation of the `file_name` field in `LoopInfo`. This commit truncates the file path to ensure it does not exceed the maximum supported length, preventing the error while maintaining usability.

Signed-off-by: jinda.ljd <jinda.ljd@alibaba-inc.com>
2025-07-15 02:56:39 +00:00
zzzzzzzzzy9 afab3c8eba add some test for mount_rootfs and umount_recursive and setup_loop
Signed-off-by: zzzzzzzzzy9 <zhang.yu58@zte.com.cn>
2025-07-07 04:51:59 +00:00
zzzzzzzzzy9 11e97809b8 support loop-dev mount in mount_rootfs
Signed-off-by: zzzzzzzzzy9 <zhang.yu58@zte.com.cn>
2025-07-07 04:51:59 +00:00
zzzzzzzzzy9 15fbabcf8e Unmount the mount point of the container when the container is deleted.
Signed-off-by: zzzzzzzzzy9 <zhang.yu58@zte.com.cn>
2025-07-07 04:51:59 +00:00
zzzzzzzzzy9 6aa801807e move mount to mount_linux and mount_other
Signed-off-by: zzzzzzzzzy9 <zhang.yu58@zte.com.cn>
2025-07-07 04:51:59 +00:00
Maksym Pavlenko 220d6d6a65
Fix runc dependency
Signed-off-by: Maksym Pavlenko <pavlenko.maksym@gmail.com>
2025-06-30 10:09:42 -07:00
Maksym Pavlenko b8107d6101
Bump runc crate version
Signed-off-by: Maksym Pavlenko <pavlenko.maksym@gmail.com>
2025-06-30 09:54:17 -07:00
Bryant Biggs c0b92c4e96 chore: Update protobuf definitions using latest containerd version v2.1.1 2025-06-11 00:15:32 +00:00
Bryant Biggs 8469d6d7a8 chore: Bump ttrpc-codegen as well 2025-06-11 00:13:54 +00:00
Bryant Biggs ad6d647960 chore: Update protobuf dependency to latest 2025-06-11 00:13:54 +00:00
jokemanfire 055e49bf6f fix ut test
Signed-off-by: jokemanfire <hu.dingyang@zte.com.cn>
2025-06-11 00:13:30 +00:00
jokemanfire 0a07bdde72 runc: split the lib's trait from lib to async and sync
Signed-off-by: jokemanfire <hu.dingyang@zte.com.cn>
2025-06-11 00:13:30 +00:00
Bryant Biggs 468fbc0b4c chore: Move tokio dependency in `snapshots` to dev dependency 2025-06-11 00:09:51 +00:00
Bryant Biggs cd5a84d8a7 chore: Update tonic dependencies to latest 2025-06-11 00:07:41 +00:00
jokemanfire cea5523d20 fix(update): avoid update resource while the process is zombie
add a check in zombie init process

Signed-off-by: jokemanfire <hu.dingyang@zte.com.cn>
2025-05-09 01:42:38 +00:00
dependabot[bot] 9b5727a28e build(deps): bump bnjbvr/cargo-machete from 0.7.0 to 0.8.0
Bumps [bnjbvr/cargo-machete](https://github.com/bnjbvr/cargo-machete) from 0.7.0 to 0.8.0.
- [Release notes](https://github.com/bnjbvr/cargo-machete/releases)
- [Changelog](https://github.com/bnjbvr/cargo-machete/blob/main/CHANGELOG.md)
- [Commits](https://github.com/bnjbvr/cargo-machete/compare/v0.7.0...v0.8.0)

---
updated-dependencies:
- dependency-name: bnjbvr/cargo-machete
  dependency-version: 0.8.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-08 19:31:20 +00:00
Maksym Pavlenko 4744fafcd6 Remove Ubuntu 20.04 runners and bump containerd versions
Signed-off-by: Maksym Pavlenko <pavlenko.maksym@gmail.com>
2025-05-08 18:40:27 +00:00
jokemanfire 3b42040af2 optimize: use a more user-friendly interface in ttrpc
Signed-off-by: jokemanfire <hu.dingyang@zte.com.cn>
2025-04-15 03:41:12 +00:00
Jorge Prendes 8130e41939 remove dependency on signal-hook-tokio
Signed-off-by: Jorge Prendes <jorge.prendes@gmail.com>
2025-04-10 16:27:31 +00:00
Jorge Prendes 2a098d9f69 re-enable cargo machete
Signed-off-by: Jorge Prendes <jorge.prendes@gmail.com>
2025-04-09 14:23:23 +00:00
Jorge Prendes 59589df20f ping grcov version
Signed-off-by: Jorge Prendes <jorge.prendes@gmail.com>
2025-04-09 13:47:06 +00:00
Jorge Prendes 18fcf6bc52 remove unused dependencies
Signed-off-by: Jorge Prendes <jorge.prendes@gmail.com>
2025-03-19 23:27:16 +00:00
Jorge Prendes 277a1a65f2 use windows activation strategy on async code
Signed-off-by: Jorge Prendes <jorge.prendes@gmail.com>
2025-03-19 23:27:16 +00:00
Jorge Prendes df1b8f05dd use windows activation strategy on all platforms in sync code
Signed-off-by: Jorge Prendes <jorge.prendes@gmail.com>
2025-03-19 23:27:16 +00:00
Jorge Prendes 35980682a6 Consolidate the NamedPipeLogger into the FifoLogger
Signed-off-by: Jorge Prendes <jorge.prendes@gmail.com>
2025-03-19 23:25:14 +00:00
Jorge Prendes f4fdddc5e5 ignore advisory with proc-macro-error and protobuf to unblock CI
Signed-off-by: Jorge Prendes <jorge.prendes@gmail.com>
2025-03-19 22:54:47 +00:00
Jiaxiao (mossaka) Zhou 8fba47295b feat(shim): make logger module public
Downstream shim implementations can use this module to setup logger by
themselves. One example is runwasi's container process needs to use this
module to setup logger so that logs from the container process can be
populated to containerd.

Signed-off-by: Jiaxiao (mossaka) Zhou <duibao55328@gmail.com>
2025-02-25 23:17:12 +00:00
jokemanfire 906bd0f466 Update publisher err deal.
ref:https://github.com/containerd/ttrpc-rust/pull/259
Due to the inclusion of the latest version of ttrpc, this part of the code has been updated

Signed-off-by: jokemanfire <hu.dingyang@zte.com.cn>
2025-02-24 18:54:43 +00:00
Kyle Kosic 559cc576b9 add experimental flag 2025-02-20 18:20:05 +00:00
Phil Estes 9d9cc05d18 Add example using the transfer service
Pulls an image to the containerd content store

Signed-off-by: Phil Estes <estesp@amazon.com>
2025-02-01 06:55:48 +00:00
Maksym Pavlenko 822a065062 Add Jiaxiao Zhou to reviewers
Signed-off-by: Maksym Pavlenko <pavlenko.maksym@gmail.com>
2025-01-31 19:05:21 +00:00
Maksym Pavlenko cd926a2d89
Remove --files-with-diff
Signed-off-by: Maksym Pavlenko <pavlenko.maksym@gmail.com>
2025-01-31 11:09:25 -08:00
Maksym Pavlenko ae876e3a33
Fix shim dependency
Signed-off-by: Maksym Pavlenko <pavlenko.maksym@gmail.com>
2025-01-28 12:06:30 -08:00
Maksym Pavlenko c016d933ed
Bump shim crate version to 0.8
Signed-off-by: Maksym Pavlenko <pavlenko.maksym@gmail.com>
2025-01-28 11:59:33 -08:00
Maksym Pavlenko 2d999680fe
Fix publish?
Signed-off-by: Maksym Pavlenko <pavlenko.maksym@gmail.com>
2025-01-28 11:49:18 -08:00
Maksym Pavlenko ef81c80577
Bump shim protos to 0.8
Signed-off-by: Maksym Pavlenko <pavlenko.maksym@gmail.com>
2025-01-28 11:30:33 -08:00
jokemanfire e5db241747 Fix complie error
1. update ttrpc version
2. proto3 syntax  "To define 'optional' fields in Proto3, simply remove the 'optional' label, as fields are 'optional' by default"
3. compile fail because "required for the cast from `Arc<std::boxed::Box<<T as asynchronous::Shim>::T>>` to `Arc<(dyn containerd_shim_protos::shim_async::Task + std::marker::Send + Sync + 'static)>`"

Signed-off-by: jokemanfire <hu.dingyang@zte.com.cn>
2025-01-27 20:08:08 +00:00
dependabot[bot] 26b783c0b4 build(deps): bump actions/stale from 9.0.0 to 9.1.0
Bumps [actions/stale](https://github.com/actions/stale) from 9.0.0 to 9.1.0.
- [Release notes](https://github.com/actions/stale/releases)
- [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md)
- [Commits](28ca103628...5bef64f19d)

---
updated-dependencies:
- dependency-name: actions/stale
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-27 20:06:30 +00:00
jokemanfire 545b0e789e Add info for get runtimeinfo
This interface is already in goshim.
Use containerd-shim-runc-v2 --info.

Signed-off-by: jokemanfire <hu.dingyang@zte.com.cn>
2025-01-27 20:06:12 +00:00
44 changed files with 2393 additions and 1555 deletions

View File

@ -23,7 +23,7 @@ jobs:
shell: bash
- run: rustup toolchain install nightly --component rustfmt
- run: cargo +nightly fmt --all -- --check --files-with-diff
- run: cargo +nightly fmt --all -- --check
# the "runc" and "containerd-shim" crates have `sync` code that is not covered by the workspace
- run: cargo check -p runc --all-targets
@ -40,10 +40,11 @@ jobs:
- run: cargo doc --no-deps --features docs
env:
RUSTDOCFLAGS: -Dwarnings
# See https://github.com/containerd/rust-extensions/issues/348
# - name: check unused dependencies
# uses: bnjbvr/cargo-machete@v0.7.0
- name: check unused dependencies
uses: bnjbvr/cargo-machete@v0.8.0
env:
RUSTUP_TOOLCHAIN: "stable"
# TODO: Merge this with the checks job above
windows-checks:
@ -58,7 +59,7 @@ jobs:
- run: cargo check --examples --tests -p containerd-shim -p containerd-shim-protos -p containerd-client
- run: rustup toolchain install nightly --component rustfmt
- run: cargo +nightly fmt -p containerd-shim -p containerd-shim-protos -p containerd-client -- --check --files-with-diff
- run: cargo +nightly fmt -p containerd-shim -p containerd-shim-protos -p containerd-client -- --check
- run: cargo clippy -p containerd-shim -p containerd-shim-protos -- -D warnings
- run: cargo doc --no-deps -p containerd-shim -p containerd-shim-protos -p containerd-client
@ -118,7 +119,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: EmbarkStudios/cargo-deny-action@v1
- uses: EmbarkStudios/cargo-deny-action@v2
linux-integration:
name: Linux Integration
@ -127,8 +128,8 @@ jobs:
strategy:
matrix:
os: [ubuntu-20.04, ubuntu-22.04]
containerd: [v1.6.36, v1.7.24, v2.0.1]
os: [ubuntu-latest]
containerd: [v1.6.38, v1.7.27, v2.1.1]
steps:
- name: Checkout extensions

View File

@ -22,7 +22,7 @@ jobs:
- name: Install grcov
run: |
cargo install --locked grcov
cargo install --locked grcov@0.8.24
grcov --version
- name: Tests

View File

@ -17,7 +17,7 @@ jobs:
pull-requests: write
steps:
- uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
# All stale bot options: https://github.com/actions/stale#all-options
with:
# Idle number of days before marking issues/PRs stale

View File

@ -45,7 +45,7 @@ tempfile = "3.6"
thiserror = "2.0"
time = { version = "0.3.29", features = ["serde", "std", "formatting"] }
tokio = "1.26"
tonic = "0.12"
tonic-build = "0.12"
tonic = "0.13"
tonic-build = "0.13"
tower = "0.5"
uuid = { version = "1.0", features = ["v4"] }

View File

@ -8,3 +8,5 @@
# GitHub ID, Name, Email address
"Burning1020","Zhang Tianyang","burning9699@gmail.com"
"jsturtevant","James Sturtevant","jstur@microsoft.com"
"mossaka","Jiaxiao Zhou","jiazho@microsoft.com"

View File

@ -67,6 +67,7 @@ const FIXUP_MODULES: &[&str] = &[
fn main() {
let mut config = prost_build::Config::new();
config.protoc_arg("--experimental_allow_proto3_optional");
config.enable_type_names();
tonic_build::configure()

View File

@ -0,0 +1,91 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
use std::env::consts;
use client::{
services::v1::{transfer_client::TransferClient, TransferOptions, TransferRequest},
to_any,
types::{
transfer::{ImageStore, OciRegistry, UnpackConfiguration},
Platform,
},
with_namespace,
};
use containerd_client as client;
use tonic::Request;
const IMAGE: &str = "docker.io/library/alpine:latest";
const NAMESPACE: &str = "default";
/// Make sure you run containerd before running this example.
/// NOTE: to run this example, you must prepare a rootfs.
///
/// Pulls `IMAGE` through containerd's transfer service: builds an
/// `OciRegistry` source and an `ImageStore` destination, then issues a
/// `TransferRequest` over the containerd gRPC socket.
#[tokio::main(flavor = "current_thread")]
async fn main() {
    // Map Rust's arch names to the GOARCH-style names used by OCI platforms;
    // anything else is passed through unchanged.
    let arch = match consts::ARCH {
        "x86_64" => "amd64",
        "aarch64" => "arm64",
        _ => consts::ARCH,
    };
    let channel = client::connect("/run/containerd/containerd.sock")
        .await
        .expect("Connect Failed");
    let mut client = TransferClient::new(channel.clone());
    // Create the source (OCIRegistry)
    let source = OciRegistry {
        reference: IMAGE.to_string(),
        resolver: Default::default(),
    };
    // Restrict the pull to a single linux/<arch> platform.
    let platform = Platform {
        os: "linux".to_string(),
        architecture: arch.to_string(),
        variant: "".to_string(),
        os_version: "".to_string(),
    };
    // Create the destination (ImageStore)
    let destination = ImageStore {
        name: IMAGE.to_string(),
        platforms: vec![platform.clone()],
        // Ask containerd to unpack the image for the selected platform
        // (presumably into the default snapshotter — confirm against daemon config).
        unpacks: vec![UnpackConfiguration {
            platform: Some(platform),
            ..Default::default()
        }],
        ..Default::default()
    };
    // The transfer API carries source/destination as protobuf `Any` values.
    let anys = to_any(&source);
    let anyd = to_any(&destination);
    println!("Pulling image for linux/{} from source: {:?}", arch, source);
    // Create the transfer request
    let request = TransferRequest {
        source: Some(anys),
        destination: Some(anyd),
        options: Some(TransferOptions {
            ..Default::default()
        }),
    };
    // Execute the transfer (pull)
    client
        .transfer(with_namespace!(request, NAMESPACE))
        .await
        .expect("unable to transfer image");
}

View File

@ -23,6 +23,11 @@ import "github.com/containerd/containerd/api/types/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/events;events";
option (containerd.types.fieldpath_all) = true;
message ContentCreate {
string digest = 1;
int64 size = 2;
}
message ContentDelete {
string digest = 1;
}

View File

@ -27,6 +27,16 @@ message OCIRegistry {
RegistryResolver resolver = 2;
}
enum HTTPDebug {
DISABLED = 0;
// Enable HTTP debugging
DEBUG = 1;
// Enable HTTP requests tracing
TRACE = 2;
// Enable both HTTP debugging and requests tracing
BOTH = 3;
}
message RegistryResolver {
// auth_stream is used to refer to a stream which auth callbacks may be
// made on.
@ -40,6 +50,13 @@ message RegistryResolver {
string default_scheme = 4;
// Force skip verify
// CA callback? Client TLS callback?
// Whether to debug/trace HTTP requests to OCI registry.
HTTPDebug http_debug = 5;
// Stream ID to use for HTTP logs (when logs are streamed to client).
// When empty, logs are written to containerd logs.
string logs_stream = 6;
}
// AuthRequest is sent as a callback on a stream

View File

@ -23,6 +23,11 @@ import "github.com/containerd/containerd/api/types/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/events;events";
option (containerd.types.fieldpath_all) = true;
message ContentCreate {
string digest = 1;
int64 size = 2;
}
message ContentDelete {
string digest = 1;
}

View File

@ -27,6 +27,16 @@ message OCIRegistry {
RegistryResolver resolver = 2;
}
enum HTTPDebug {
DISABLED = 0;
// Enable HTTP debugging
DEBUG = 1;
// Enable HTTP requests tracing
TRACE = 2;
// Enable both HTTP debugging and requests tracing
BOTH = 3;
}
message RegistryResolver {
// auth_stream is used to refer to a stream which auth callbacks may be
// made on.
@ -40,6 +50,13 @@ message RegistryResolver {
string default_scheme = 4;
// Force skip verify
// CA callback? Client TLS callback?
// Whether to debug/trace HTTP requests to OCI registry.
HTTPDebug http_debug = 5;
// Stream ID to use for HTTP logs (when logs are streamed to client).
// When empty, logs are written to containerd logs.
string logs_stream = 6;
}
// AuthRequest is sent as a callback on a stream

View File

@ -25,18 +25,17 @@ path = "src/main.rs"
doc = false
[dependencies]
containerd-shim = { path = "../shim", version = "0.7.1", features = ["async"] }
containerd-shim = { path = "../shim", version = "0.8.0", features = ["async"] }
libc.workspace = true
log.workspace = true
nix = { workspace = true, features = ["socket", "uio", "term"] }
oci-spec.workspace = true
prctl.workspace = true
runc = { path = "../runc", version = "0.2.0", features = ["async"] }
runc = { path = "../runc", version = "0.3.0", features = ["async"] }
serde.workspace = true
serde_json.workspace = true
time.workspace = true
uuid.workspace = true
# Async dependencies
async-trait.workspace = true
tokio = { workspace = true, features = ["full"] }

View File

@ -14,9 +14,14 @@
limitations under the License.
*/
use std::env;
use std::{env, io::Write};
use containerd_shim::{asynchronous::run, parse};
use containerd_shim::{
asynchronous::run,
parse,
protos::protobuf::{well_known_types::any::Any, Message},
run_info,
};
mod cgroup_memory;
mod common;
@ -47,6 +52,30 @@ fn parse_version() {
std::process::exit(0);
}
if flags.info {
let r = run_info();
match r {
Ok(rinfo) => {
let mut info = Any::new();
info.type_url = "io.containerd.runc.v2.Info".to_string();
info.value = match rinfo.write_to_bytes() {
Ok(bytes) => bytes,
Err(e) => {
eprintln!("Failed to write runtime info to bytes: {}", e);
std::process::exit(1);
}
};
std::io::stdout()
.write_all(info.write_to_bytes().unwrap().as_slice())
.expect("Failed to write to stdout");
}
Err(_) => {
eprintln!("Failed to get runtime info");
std::process::exit(1);
}
}
std::process::exit(0);
}
}
#[tokio::main]

View File

@ -34,6 +34,7 @@ use containerd_shim::{
asynchronous::monitor::{monitor_subscribe, monitor_unsubscribe, Subscription},
io_error,
monitor::{ExitEvent, Subject, Topic},
mount::umount_recursive,
other, other_error,
protos::{
api::ProcessInfo,
@ -65,6 +66,19 @@ use crate::{
io::Stdio,
};
/// Check whether the process with the given pid is a zombie.
///
/// Reads `/proc/<pid>/status` and reports `true` when the `State:` line
/// contains `Z`. Any failure to read the status file (e.g. the process
/// does not exist) is treated as "not a zombie" and yields `false`.
#[cfg(target_os = "linux")]
fn is_zombie_process(pid: i32) -> bool {
    let status_path = format!("/proc/{}/status", pid);
    std::fs::read_to_string(status_path)
        .map(|contents| {
            contents
                .lines()
                .any(|line| line.starts_with("State:") && line.contains('Z'))
        })
        .unwrap_or(false)
}
pub type ExecProcess = ProcessTemplate<RuncExecLifecycle>;
pub type InitProcess = ProcessTemplate<RuncInitLifecycle>;
@ -299,6 +313,7 @@ impl ProcessLifecycle<InitProcess> for RuncInitLifecycle {
);
}
}
umount_recursive(Path::new(&self.bundle).join("rootfs").to_str(), 0)?;
self.exit_signal.signal();
Ok(())
}
@ -311,6 +326,15 @@ impl ProcessLifecycle<InitProcess> for RuncInitLifecycle {
p.pid
));
}
// check the process is zombie
if is_zombie_process(p.pid) {
return Err(other!(
"failed to update resources because process {} is a zombie",
p.pid
));
}
containerd_shim::cgroup::update_resources(p.pid as u32, resources)
}
@ -327,6 +351,15 @@ impl ProcessLifecycle<InitProcess> for RuncInitLifecycle {
p.pid
));
}
// check the process is zombie
if is_zombie_process(p.pid) {
return Err(other!(
"failed to collect metrics because process {} is a zombie",
p.pid
));
}
containerd_shim::cgroup::collect_metrics(p.pid as u32)
}

View File

@ -14,7 +14,7 @@
limitations under the License.
*/
use std::{env::current_dir, sync::Arc};
use std::{env::current_dir, sync::Arc, time::Duration};
use ::runc::options::DeleteOpts;
use async_trait::async_trait;
@ -27,7 +27,8 @@ use containerd_shim::{
event::Event,
io_error,
monitor::{Subject, Topic},
protos::{events::task::TaskExit, protobuf::MessageDyn, ttrpc::context::with_timeout},
mount::umount_recursive,
protos::{events::task::TaskExit, protobuf::MessageDyn, ttrpc::context::with_duration},
util::{
convert_to_timestamp, read_options, read_pid_from_file, read_runtime, read_spec, timestamp,
write_str_to_file,
@ -124,6 +125,8 @@ impl Shim for Service {
runc.delete(&self.id, Some(&DeleteOpts { force: true }))
.await
.unwrap_or_else(|e| warn!("failed to remove runc container: {}", e));
umount_recursive(bundle.join("rootfs").to_str(), 0)
.unwrap_or_else(|e| warn!("failed to umount recursive rootfs: {}", e));
let mut resp = DeleteResponse::new();
// sigkill
resp.set_exit_status(137);
@ -228,7 +231,7 @@ async fn forward(
// Prevent event reporting from taking too long time.
// Learnd from goshim's containerd/runtime/v2/shim/publisher.go
publisher
.publish(with_timeout(5000000000), &topic, &ns, e)
.publish(with_duration(Duration::from_secs(5)), &topic, &ns, e)
.await
.unwrap_or_else(|e| warn!("publish {} to containerd: {}", topic, e));
}

View File

@ -1,6 +1,6 @@
[package]
name = "runc"
version = "0.2.0"
version = "0.3.0"
authors = ["Yuna Tomida <ytomida.mmm@gmail.com>", "The containerd Authors"]
description = "A crate for consuming the runc binary in your Rust applications"
keywords = ["containerd", "containers", "runc"]

View File

@ -15,11 +15,13 @@
*/
pub mod io;
mod pipe;
mod runc;
use std::{fmt::Debug, io::Result, os::fd::AsRawFd};
use async_trait::async_trait;
use log::debug;
pub use pipe::Pipe;
pub use runc::{DefaultExecutor, Spawner};
use tokio::io::{AsyncRead, AsyncWrite};
use crate::Command;

View File

@ -0,0 +1,525 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
use std::{fmt::Debug, path::Path, process::ExitStatus};
use async_trait::async_trait;
use log::debug;
use oci_spec::runtime::{LinuxResources, Process};
use crate::{
container::Container,
error::Error,
events,
options::*,
utils::{self, write_value_to_temp_file},
Command, Response, Result, Runc,
};
// Helper macro that cleans up the temp file named `$filename` on error.
//
// There is no async `Drop` in async Rust, so `remove_file` has to be called
// explicitly every time a temp file has been created, before returning from
// the function. This macro unwraps `$b` on `Ok`; on `Err` it removes the
// file (best effort — the removal result is ignored) and propagates the
// error, so the match boilerplate does not have to be repeated at every
// call site.
macro_rules! tc {
    ($b:expr, $filename: expr) => {
        match $b {
            Ok(r) => r,
            Err(e) => {
                let _ = tokio::fs::remove_file($filename).await;
                return Err(e);
            }
        }
    };
}
/// Async implementation for [Runc].
///
/// Note that you MUST use this client on tokio runtime, as this client internally use [`tokio::process::Command`]
/// and some other utilities.
impl Runc {
    /// Spawn the prepared runc command and collect its result.
    ///
    /// When `combined_output` is true, stdout and stderr are concatenated
    /// into `Response::output`; otherwise only stdout is returned.
    /// A non-zero exit status is mapped to `Error::CommandFailed` carrying
    /// the captured stdout/stderr.
    pub(crate) async fn launch(&self, mut cmd: Command, combined_output: bool) -> Result<Response> {
        debug!("Execute command {:?}", cmd);
        unsafe {
            // Runs in the forked child before exec: optionally disable
            // transparent huge pages when the THP_DISABLED env var parses
            // as a bool. Errors are only logged, never fatal.
            cmd.pre_exec(move || {
                #[cfg(target_os = "linux")]
                if let Ok(thp) = std::env::var("THP_DISABLED") {
                    if let Ok(thp_disabled) = thp.parse::<bool>() {
                        if let Err(e) = prctl::set_thp_disable(thp_disabled) {
                            debug!("set_thp_disable err: {}", e);
                        };
                    }
                }
                Ok(())
            });
        }
        let (status, pid, stdout, stderr) = self.spawner.execute(cmd).await?;
        if status.success() {
            let output = if combined_output {
                stdout + stderr.as_str()
            } else {
                stdout
            };
            Ok(Response {
                pid,
                status,
                output,
            })
        } else {
            Err(Error::CommandFailed {
                status,
                stdout,
                stderr,
            })
        }
    }

    /// Create a new container
    pub async fn create<P>(
        &self,
        id: &str,
        bundle: P,
        opts: Option<&CreateOpts>,
    ) -> Result<Response>
    where
        P: AsRef<Path>,
    {
        let mut args = vec![
            "create".to_string(),
            "--bundle".to_string(),
            utils::abs_string(bundle)?,
        ];
        if let Some(opts) = opts {
            args.append(&mut opts.args()?);
        }
        args.push(id.to_string());
        let mut cmd = self.command(&args)?;
        match opts {
            // When the caller supplied an IO set, wire it into the command
            // before launch and close our copies once runc has started.
            Some(CreateOpts { io: Some(io), .. }) => {
                io.set(&mut cmd).await.map_err(Error::UnavailableIO)?;
                let res = self.launch(cmd, true).await?;
                io.close_after_start().await;
                Ok(res)
            }
            _ => self.launch(cmd, true).await,
        }
    }

    /// Delete a container
    pub async fn delete(&self, id: &str, opts: Option<&DeleteOpts>) -> Result<()> {
        let mut args = vec!["delete".to_string()];
        if let Some(opts) = opts {
            args.append(&mut opts.args());
        }
        args.push(id.to_string());
        let _ = self.launch(self.command(&args)?, true).await?;
        Ok(())
    }

    /// Return an event stream of container notifications
    ///
    /// Not implemented yet; always returns `Error::Unimplemented`.
    pub async fn events(&self, _id: &str, _interval: &std::time::Duration) -> Result<()> {
        Err(Error::Unimplemented("events".to_string()))
    }

    /// Execute an additional process inside the container
    ///
    /// The process spec is serialized to a temp file passed via `--process`;
    /// the `tc!` macro guarantees the temp file is removed on every error path.
    pub async fn exec(&self, id: &str, spec: &Process, opts: Option<&ExecOpts>) -> Result<()> {
        let f = write_value_to_temp_file(spec).await?;
        let mut args = vec!["exec".to_string(), "--process".to_string(), f.clone()];
        if let Some(opts) = opts {
            args.append(&mut tc!(opts.args(), &f));
        }
        args.push(id.to_string());
        let mut cmd = self.command(&args)?;
        match opts {
            Some(ExecOpts { io: Some(io), .. }) => {
                tc!(
                    io.set(&mut cmd)
                        .await
                        .map_err(|e| Error::IoSet(e.to_string())),
                    &f
                );
                tc!(self.launch(cmd, true).await, &f);
                io.close_after_start().await;
            }
            _ => {
                tc!(self.launch(cmd, true).await, &f);
            }
        }
        let _ = tokio::fs::remove_file(&f).await;
        Ok(())
    }

    /// Send the specified signal to processes inside the container
    pub async fn kill(&self, id: &str, sig: u32, opts: Option<&KillOpts>) -> Result<()> {
        let mut args = vec!["kill".to_string()];
        if let Some(opts) = opts {
            args.append(&mut opts.args());
        }
        args.push(id.to_string());
        args.push(sig.to_string());
        let _ = self.launch(self.command(&args)?, true).await?;
        Ok(())
    }

    /// List all containers associated with this runc instance
    pub async fn list(&self) -> Result<Vec<Container>> {
        let args = ["list".to_string(), "--format=json".to_string()];
        let res = self.launch(self.command(&args)?, true).await?;
        let output = res.output.trim();
        // Ugly hack to work around golang: go-runc prints the literal
        // string "null" instead of an empty JSON array.
        Ok(if output == "null" {
            Vec::new()
        } else {
            serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)?
        })
    }

    /// Pause a container
    pub async fn pause(&self, id: &str) -> Result<()> {
        let args = ["pause".to_string(), id.to_string()];
        let _ = self.launch(self.command(&args)?, true).await?;
        Ok(())
    }

    /// Resume a container
    pub async fn resume(&self, id: &str) -> Result<()> {
        let args = ["resume".to_string(), id.to_string()];
        let _ = self.launch(self.command(&args)?, true).await?;
        Ok(())
    }

    /// Checkpoint a container. Not implemented yet.
    pub async fn checkpoint(&self) -> Result<()> {
        Err(Error::Unimplemented("checkpoint".to_string()))
    }

    /// Restore a container. Not implemented yet.
    pub async fn restore(&self) -> Result<()> {
        Err(Error::Unimplemented("restore".to_string()))
    }

    /// List all the processes inside the container, returning their pids
    pub async fn ps(&self, id: &str) -> Result<Vec<usize>> {
        let args = [
            "ps".to_string(),
            "--format=json".to_string(),
            id.to_string(),
        ];
        let res = self.launch(self.command(&args)?, true).await?;
        let output = res.output.trim();
        // Ugly hack to work around golang: "null" stands in for an empty list.
        Ok(if output == "null" {
            Vec::new()
        } else {
            serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)?
        })
    }

    /// Run the create, start, delete lifecycle of the container and return its exit status
    pub async fn run<P>(&self, id: &str, bundle: P, opts: Option<&CreateOpts>) -> Result<()>
    where
        P: AsRef<Path>,
    {
        let mut args = vec![
            "run".to_string(),
            "--bundle".to_string(),
            utils::abs_string(bundle)?,
        ];
        if let Some(opts) = opts {
            args.append(&mut opts.args()?);
        }
        args.push(id.to_string());
        let mut cmd = self.command(&args)?;
        if let Some(CreateOpts { io: Some(io), .. }) = opts {
            io.set(&mut cmd)
                .await
                .map_err(|e| Error::IoSet(e.to_string()))?;
        };
        let _ = self.launch(cmd, true).await?;
        Ok(())
    }

    /// Start an already created container
    pub async fn start(&self, id: &str) -> Result<()> {
        let args = vec!["start".to_string(), id.to_string()];
        let _ = self.launch(self.command(&args)?, true).await?;
        Ok(())
    }

    /// Return the state of a container
    pub async fn state(&self, id: &str) -> Result<Container> {
        let args = vec!["state".to_string(), id.to_string()];
        let res = self.launch(self.command(&args)?, true).await?;
        serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)
    }

    /// Return the latest statistics for a container
    ///
    /// Uses `runc events --stats`, which emits a single event containing
    /// the stats snapshot; a missing stats payload is an error.
    pub async fn stats(&self, id: &str) -> Result<events::Stats> {
        let args = vec!["events".to_string(), "--stats".to_string(), id.to_string()];
        let res = self.launch(self.command(&args)?, true).await?;
        let event: events::Event =
            serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)?;
        if let Some(stats) = event.stats {
            Ok(stats)
        } else {
            Err(Error::MissingContainerStats)
        }
    }

    /// Update a container with the provided resource spec
    ///
    /// The resources are serialized to a temp file passed via `--resources`;
    /// the file is removed on both the success and the error path.
    pub async fn update(&self, id: &str, resources: &LinuxResources) -> Result<()> {
        let f = write_value_to_temp_file(resources).await?;
        let args = [
            "update".to_string(),
            "--resources".to_string(),
            f.to_string(),
            id.to_string(),
        ];
        let _ = tc!(self.launch(self.command(&args)?, true).await, &f);
        let _ = tokio::fs::remove_file(&f).await;
        Ok(())
    }
}
/// Abstraction over how the runc command is actually spawned.
///
/// Implementors run `cmd` to completion and report
/// `(exit status, pid, stdout, stderr)`.
#[async_trait]
pub trait Spawner: Debug {
    /// Spawn `cmd`, wait for it to exit, and return its exit status, pid,
    /// and captured stdout/stderr.
    async fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)>;
}

/// Default [`Spawner`]: spawns the command directly and captures its output.
#[derive(Debug)]
pub struct DefaultExecutor {}

#[async_trait]
impl Spawner for DefaultExecutor {
    async fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)> {
        let mut cmd = cmd;
        let child = cmd.spawn().map_err(Error::ProcessSpawnFailed)?;
        // `id()` is Some for a freshly spawned child that has not been
        // waited on yet, so the unwrap here cannot fire.
        let pid = child.id().unwrap();
        let result = child
            .wait_with_output()
            .await
            .map_err(Error::InvalidCommand)?;
        let status = result.status;
        // Lossy conversion: runc output is expected to be UTF-8, but invalid
        // bytes are replaced rather than failing the whole invocation.
        let stdout = String::from_utf8_lossy(&result.stdout).to_string();
        let stderr = String::from_utf8_lossy(&result.stderr).to_string();
        Ok((status, pid, stdout, stderr))
    }
}
#[cfg(test)]
#[cfg(target_os = "linux")]
mod tests {
use std::sync::Arc;
use crate::{
error::Error,
io::{InheritedStdIo, PipedStdIo},
options::{CreateOpts, DeleteOpts, GlobalOpts},
Runc,
};
fn ok_client() -> Runc {
GlobalOpts::new()
.command("/bin/true")
.build()
.expect("unable to create runc instance")
}
fn fail_client() -> Runc {
GlobalOpts::new()
.command("/bin/false")
.build()
.expect("unable to create runc instance")
}
fn echo_client() -> Runc {
GlobalOpts::new()
.command("/bin/echo")
.build()
.expect("unable to create runc instance")
}
#[tokio::test]
async fn test_async_create() {
let opts = CreateOpts::new();
let ok_runc = ok_client();
let ok_task = tokio::spawn(async move {
let response = ok_runc
.create("fake-id", "fake-bundle", Some(&opts))
.await
.expect("true failed.");
assert_ne!(response.pid, 0);
assert!(response.status.success());
assert!(response.output.is_empty());
});
let opts = CreateOpts::new();
let fail_runc = fail_client();
let fail_task = tokio::spawn(async move {
match fail_runc
.create("fake-id", "fake-bundle", Some(&opts))
.await
{
Ok(_) => panic!("fail_runc returned exit status 0."),
Err(Error::CommandFailed {
status,
stdout,
stderr,
}) => {
if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {
eprintln!("fail_runc succeeded.");
} else {
panic!("unexpected outputs from fail_runc.")
}
}
Err(e) => panic!("unexpected error from fail_runc: {:?}", e),
}
});
ok_task.await.expect("ok_task failed.");
fail_task.await.expect("fail_task unexpectedly succeeded.");
}
#[tokio::test]
async fn test_async_start() {
let ok_runc = ok_client();
let ok_task = tokio::spawn(async move {
ok_runc.start("fake-id").await.expect("true failed.");
eprintln!("ok_runc succeeded.");
});
let fail_runc = fail_client();
let fail_task = tokio::spawn(async move {
match fail_runc.start("fake-id").await {
Ok(_) => panic!("fail_runc returned exit status 0."),
Err(Error::CommandFailed {
status,
stdout,
stderr,
}) => {
if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {
eprintln!("fail_runc succeeded.");
} else {
panic!("unexpected outputs from fail_runc.")
}
}
Err(e) => panic!("unexpected error from fail_runc: {:?}", e),
}
});
ok_task.await.expect("ok_task failed.");
fail_task.await.expect("fail_task unexpectedly succeeded.");
}
#[tokio::test]
async fn test_async_run() {
let opts = CreateOpts::new();
let ok_runc = ok_client();
tokio::spawn(async move {
ok_runc
.create("fake-id", "fake-bundle", Some(&opts))
.await
.expect("true failed.");
eprintln!("ok_runc succeeded.");
});
let opts = CreateOpts::new();
let fail_runc = fail_client();
tokio::spawn(async move {
match fail_runc
.create("fake-id", "fake-bundle", Some(&opts))
.await
{
Ok(_) => panic!("fail_runc returned exit status 0."),
Err(Error::CommandFailed {
status,
stdout,
stderr,
}) => {
if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {
eprintln!("fail_runc succeeded.");
} else {
panic!("unexpected outputs from fail_runc.")
}
}
Err(e) => panic!("unexpected error from fail_runc: {:?}", e),
}
})
.await
.expect("tokio spawn falied.");
}
#[tokio::test]
async fn test_async_delete() {
    let opts = DeleteOpts::new();
    let ok_runc = ok_client();
    // Keep the join handle: a discarded handle silently swallows any
    // assertion panic raised inside the spawned task.
    let ok_task = tokio::spawn(async move {
        ok_runc
            .delete("fake-id", Some(&opts))
            .await
            .expect("true failed.");
        eprintln!("ok_runc succeeded.");
    });
    let opts = DeleteOpts::new();
    let fail_runc = fail_client();
    // The failing client must report exit code 1 with empty stdout/stderr.
    let fail_task = tokio::spawn(async move {
        match fail_runc.delete("fake-id", Some(&opts)).await {
            Ok(_) => panic!("fail_runc returned exit status 0."),
            Err(Error::CommandFailed {
                status,
                stdout,
                stderr,
            }) => {
                if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {
                    eprintln!("fail_runc succeeded.");
                } else {
                    panic!("unexpected outputs from fail_runc.")
                }
            }
            Err(e) => panic!("unexpected error from fail_runc: {:?}", e),
        }
    });
    // Join both tasks so panics in either are surfaced; fixes the dropped
    // ok-task handle and the "falied" typo in the original expect message.
    ok_task.await.expect("ok_task failed.");
    fail_task.await.expect("fail_task failed.");
}
#[tokio::test]
async fn test_async_output() {
    // test create cmd with inherit Io, expect empty cmd output
    let mut opts = CreateOpts::new();
    opts.io = Some(Arc::new(InheritedStdIo::new().unwrap()));
    let echo_runc = echo_client();
    // Expect message fixed: was "echo failed:" (stray colon), inconsistent
    // with the "echo failed." wording used by the synchronous tests.
    let response = echo_runc
        .create("fake-id", "fake-bundle", Some(&opts))
        .await
        .expect("echo failed.");
    assert_ne!(response.pid, 0);
    assert!(response.status.success());
    assert!(response.output.is_empty());
    // test create cmd with pipe Io, expect nonempty cmd output
    let mut opts = CreateOpts::new();
    opts.io = Some(Arc::new(PipedStdIo::new().unwrap()));
    let response = echo_runc
        .create("fake-id", "fake-bundle", Some(&opts))
        .await
        .expect("echo failed.");
    assert_ne!(response.pid, 0);
    assert!(response.status.success());
    // Piped IO captures the child's stdout, so echo's output must be visible.
    assert!(!response.output.is_empty());
}
}

View File

@ -39,22 +39,15 @@
//! [go-runc](https://github.com/containerd/go-runc) for Go.
use std::{
fmt::{self, Debug, Display},
path::{Path, PathBuf},
path::PathBuf,
process::{ExitStatus, Stdio},
sync::Arc,
};
#[cfg(feature = "async")]
use async_trait::async_trait;
#[cfg(feature = "async")]
use log::debug;
use oci_spec::runtime::{LinuxResources, Process};
#[cfg(feature = "async")]
pub use crate::asynchronous::*;
#[cfg(not(feature = "async"))]
pub use crate::synchronous::*;
use crate::{container::Container, error::Error, options::*, utils::write_value_to_temp_file};
#[cfg(feature = "async")]
pub mod asynchronous;
@ -69,6 +62,9 @@ pub mod monitor;
pub mod options;
pub mod utils;
const JSON: &str = "json";
const TEXT: &str = "text";
pub type Result<T> = std::result::Result<T, crate::error::Error>;
/// Response is for (pid, exit status, outputs).
@ -131,914 +127,3 @@ impl Runc {
Ok(cmd)
}
}
#[cfg(not(feature = "async"))]
impl Runc {
fn launch(&self, cmd: Command, combined_output: bool) -> Result<Response> {
let (status, pid, stdout, stderr) = self.spawner.execute(cmd)?;
if status.success() {
let output = if combined_output {
stdout + stderr.as_str()
} else {
stdout
};
Ok(Response {
pid,
status,
output,
})
} else {
Err(Error::CommandFailed {
status,
stdout,
stderr,
})
}
}
/// Create a new container
pub fn create<P>(&self, id: &str, bundle: P, opts: Option<&CreateOpts>) -> Result<Response>
where
P: AsRef<Path>,
{
let mut args = vec![
"create".to_string(),
"--bundle".to_string(),
utils::abs_string(bundle)?,
];
if let Some(opts) = opts {
args.append(&mut opts.args()?);
}
args.push(id.to_string());
let mut cmd = self.command(&args)?;
match opts {
Some(CreateOpts { io: Some(io), .. }) => {
io.set(&mut cmd).map_err(|e| Error::IoSet(e.to_string()))?;
let res = self.launch(cmd, true)?;
io.close_after_start();
Ok(res)
}
_ => self.launch(cmd, true),
}
}
/// Delete a container
pub fn delete(&self, id: &str, opts: Option<&DeleteOpts>) -> Result<()> {
let mut args = vec!["delete".to_string()];
if let Some(opts) = opts {
args.append(&mut opts.args());
}
args.push(id.to_string());
self.launch(self.command(&args)?, true)?;
Ok(())
}
/// Execute an additional process inside the container
pub fn exec(&self, id: &str, spec: &Process, opts: Option<&ExecOpts>) -> Result<()> {
let (_temp_file, filename) = write_value_to_temp_file(spec)?;
let mut args = vec!["exec".to_string(), "--process".to_string(), filename];
if let Some(opts) = opts {
args.append(&mut opts.args()?);
}
args.push(id.to_string());
let mut cmd = self.command(&args)?;
match opts {
Some(ExecOpts { io: Some(io), .. }) => {
io.set(&mut cmd).map_err(|e| Error::IoSet(e.to_string()))?;
self.launch(cmd, true)?;
io.close_after_start();
}
_ => {
self.launch(cmd, true)?;
}
}
Ok(())
}
/// Send the specified signal to processes inside the container
pub fn kill(&self, id: &str, sig: u32, opts: Option<&KillOpts>) -> Result<()> {
let mut args = vec!["kill".to_string()];
if let Some(opts) = opts {
args.append(&mut opts.args());
}
args.push(id.to_string());
args.push(sig.to_string());
let _ = self.launch(self.command(&args)?, true)?;
Ok(())
}
/// List all containers associated with this runc instance
pub fn list(&self) -> Result<Vec<Container>> {
let args = ["list".to_string(), "--format=json".to_string()];
let res = self.launch(self.command(&args)?, true)?;
let output = res.output.trim();
// Ugly hack to work around golang
Ok(if output == "null" {
Vec::new()
} else {
serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)?
})
}
/// Pause a container
pub fn pause(&self, id: &str) -> Result<()> {
let args = ["pause".to_string(), id.to_string()];
let _ = self.launch(self.command(&args)?, true)?;
Ok(())
}
/// Resume a container
pub fn resume(&self, id: &str) -> Result<()> {
let args = ["resume".to_string(), id.to_string()];
let _ = self.launch(self.command(&args)?, true)?;
Ok(())
}
pub fn checkpoint(&self) -> Result<()> {
Err(Error::Unimplemented("checkpoint".to_string()))
}
pub fn restore(&self) -> Result<()> {
Err(Error::Unimplemented("restore".to_string()))
}
/// List all the processes inside the container, returning their pids
pub fn ps(&self, id: &str) -> Result<Vec<usize>> {
let args = [
"ps".to_string(),
"--format=json".to_string(),
id.to_string(),
];
let res = self.launch(self.command(&args)?, false)?;
let output = res.output.trim();
// Ugly hack to work around golang
Ok(if output == "null" {
Vec::new()
} else {
serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)?
})
}
/// Run the create, start, delete lifecycle of the container and return its exit status
pub fn run<P>(&self, id: &str, bundle: P, opts: Option<&CreateOpts>) -> Result<Response>
where
P: AsRef<Path>,
{
let mut args = vec![
"run".to_string(),
"--bundle".to_string(),
utils::abs_string(bundle)?,
];
if let Some(opts) = opts {
args.append(&mut opts.args()?);
}
args.push(id.to_string());
let mut cmd = self.command(&args)?;
if let Some(CreateOpts { io: Some(io), .. }) = opts {
io.set(&mut cmd).map_err(|e| Error::IoSet(e.to_string()))?;
};
self.launch(cmd, true)
}
/// Start an already created container
pub fn start(&self, id: &str) -> Result<Response> {
let args = ["start".to_string(), id.to_string()];
self.launch(self.command(&args)?, true)
}
/// Return the state of a container
pub fn state(&self, id: &str) -> Result<Container> {
let args = ["state".to_string(), id.to_string()];
let res = self.launch(self.command(&args)?, true)?;
serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)
}
/// Return the latest statistics for a container
pub fn stats(&self, id: &str) -> Result<events::Stats> {
let args = vec!["events".to_string(), "--stats".to_string(), id.to_string()];
let res = self.launch(self.command(&args)?, true)?;
let event: events::Event =
serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)?;
if let Some(stats) = event.stats {
Ok(stats)
} else {
Err(Error::MissingContainerStats)
}
}
/// Update a container with the provided resource spec
pub fn update(&self, id: &str, resources: &LinuxResources) -> Result<()> {
let (_temp_file, filename) = write_value_to_temp_file(resources)?;
let args = [
"update".to_string(),
"--resources".to_string(),
filename,
id.to_string(),
];
self.launch(self.command(&args)?, true)?;
Ok(())
}
}
// a macro tool to cleanup the file with name $filename,
// there is no async drop in async rust, so we have to call remove_file everytime
// after a temp file created, before return of a function.
// with this macro we don't have to write the match case codes everytime.
#[cfg(feature = "async")]
macro_rules! tc {
($b:expr, $filename: expr) => {
match $b {
Ok(r) => r,
Err(e) => {
let _ = tokio::fs::remove_file($filename).await;
return Err(e);
}
}
};
}
#[cfg(not(feature = "async"))]
pub trait Spawner: Debug {
fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)>;
}
#[cfg(feature = "async")]
#[async_trait]
pub trait Spawner: Debug {
async fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)>;
}
/// Async implementation for [Runc].
///
/// Note that you MUST use this client on tokio runtime, as this client internally use [`tokio::process::Command`]
/// and some other utilities.
#[cfg(feature = "async")]
impl Runc {
async fn launch(&self, mut cmd: Command, combined_output: bool) -> Result<Response> {
debug!("Execute command {:?}", cmd);
unsafe {
cmd.pre_exec(move || {
#[cfg(target_os = "linux")]
if let Ok(thp) = std::env::var("THP_DISABLED") {
if let Ok(thp_disabled) = thp.parse::<bool>() {
if let Err(e) = prctl::set_thp_disable(thp_disabled) {
debug!("set_thp_disable err: {}", e);
};
}
}
Ok(())
});
}
let (status, pid, stdout, stderr) = self.spawner.execute(cmd).await?;
if status.success() {
let output = if combined_output {
stdout + stderr.as_str()
} else {
stdout
};
Ok(Response {
pid,
status,
output,
})
} else {
Err(Error::CommandFailed {
status,
stdout,
stderr,
})
}
}
/// Create a new container
pub async fn create<P>(
&self,
id: &str,
bundle: P,
opts: Option<&CreateOpts>,
) -> Result<Response>
where
P: AsRef<Path>,
{
let mut args = vec![
"create".to_string(),
"--bundle".to_string(),
utils::abs_string(bundle)?,
];
if let Some(opts) = opts {
args.append(&mut opts.args()?);
}
args.push(id.to_string());
let mut cmd = self.command(&args)?;
match opts {
Some(CreateOpts { io: Some(io), .. }) => {
io.set(&mut cmd).await.map_err(Error::UnavailableIO)?;
let res = self.launch(cmd, true).await?;
io.close_after_start().await;
Ok(res)
}
_ => self.launch(cmd, true).await,
}
}
/// Delete a container
pub async fn delete(&self, id: &str, opts: Option<&DeleteOpts>) -> Result<()> {
let mut args = vec!["delete".to_string()];
if let Some(opts) = opts {
args.append(&mut opts.args());
}
args.push(id.to_string());
let _ = self.launch(self.command(&args)?, true).await?;
Ok(())
}
/// Return an event stream of container notifications
pub async fn events(&self, _id: &str, _interval: &std::time::Duration) -> Result<()> {
Err(Error::Unimplemented("events".to_string()))
}
/// Execute an additional process inside the container
pub async fn exec(&self, id: &str, spec: &Process, opts: Option<&ExecOpts>) -> Result<()> {
let f = write_value_to_temp_file(spec).await?;
let mut args = vec!["exec".to_string(), "--process".to_string(), f.clone()];
if let Some(opts) = opts {
args.append(&mut tc!(opts.args(), &f));
}
args.push(id.to_string());
let mut cmd = self.command(&args)?;
match opts {
Some(ExecOpts { io: Some(io), .. }) => {
tc!(
io.set(&mut cmd)
.await
.map_err(|e| Error::IoSet(e.to_string())),
&f
);
tc!(self.launch(cmd, true).await, &f);
io.close_after_start().await;
}
_ => {
tc!(self.launch(cmd, true).await, &f);
}
}
let _ = tokio::fs::remove_file(&f).await;
Ok(())
}
/// Send the specified signal to processes inside the container
pub async fn kill(&self, id: &str, sig: u32, opts: Option<&KillOpts>) -> Result<()> {
let mut args = vec!["kill".to_string()];
if let Some(opts) = opts {
args.append(&mut opts.args());
}
args.push(id.to_string());
args.push(sig.to_string());
let _ = self.launch(self.command(&args)?, true).await?;
Ok(())
}
/// List all containers associated with this runc instance
pub async fn list(&self) -> Result<Vec<Container>> {
let args = ["list".to_string(), "--format=json".to_string()];
let res = self.launch(self.command(&args)?, true).await?;
let output = res.output.trim();
// Ugly hack to work around golang
Ok(if output == "null" {
Vec::new()
} else {
serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)?
})
}
/// Pause a container
pub async fn pause(&self, id: &str) -> Result<()> {
let args = ["pause".to_string(), id.to_string()];
let _ = self.launch(self.command(&args)?, true).await?;
Ok(())
}
/// Resume a container
pub async fn resume(&self, id: &str) -> Result<()> {
let args = ["resume".to_string(), id.to_string()];
let _ = self.launch(self.command(&args)?, true).await?;
Ok(())
}
pub async fn checkpoint(&self) -> Result<()> {
Err(Error::Unimplemented("checkpoint".to_string()))
}
pub async fn restore(&self) -> Result<()> {
Err(Error::Unimplemented("restore".to_string()))
}
/// List all the processes inside the container, returning their pids
pub async fn ps(&self, id: &str) -> Result<Vec<usize>> {
let args = [
"ps".to_string(),
"--format=json".to_string(),
id.to_string(),
];
let res = self.launch(self.command(&args)?, true).await?;
let output = res.output.trim();
// Ugly hack to work around golang
Ok(if output == "null" {
Vec::new()
} else {
serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)?
})
}
/// Run the create, start, delete lifecycle of the container and return its exit status
pub async fn run<P>(&self, id: &str, bundle: P, opts: Option<&CreateOpts>) -> Result<()>
where
P: AsRef<Path>,
{
let mut args = vec![
"run".to_string(),
"--bundle".to_string(),
utils::abs_string(bundle)?,
];
if let Some(opts) = opts {
args.append(&mut opts.args()?);
}
args.push(id.to_string());
let mut cmd = self.command(&args)?;
if let Some(CreateOpts { io: Some(io), .. }) = opts {
io.set(&mut cmd)
.await
.map_err(|e| Error::IoSet(e.to_string()))?;
};
let _ = self.launch(cmd, true).await?;
Ok(())
}
/// Start an already created container
pub async fn start(&self, id: &str) -> Result<()> {
let args = vec!["start".to_string(), id.to_string()];
let _ = self.launch(self.command(&args)?, true).await?;
Ok(())
}
/// Return the state of a container
pub async fn state(&self, id: &str) -> Result<Container> {
let args = vec!["state".to_string(), id.to_string()];
let res = self.launch(self.command(&args)?, true).await?;
serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)
}
/// Return the latest statistics for a container
pub async fn stats(&self, id: &str) -> Result<events::Stats> {
let args = vec!["events".to_string(), "--stats".to_string(), id.to_string()];
let res = self.launch(self.command(&args)?, true).await?;
let event: events::Event =
serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)?;
if let Some(stats) = event.stats {
Ok(stats)
} else {
Err(Error::MissingContainerStats)
}
}
/// Update a container with the provided resource spec
pub async fn update(&self, id: &str, resources: &LinuxResources) -> Result<()> {
let f = write_value_to_temp_file(resources).await?;
let args = [
"update".to_string(),
"--resources".to_string(),
f.to_string(),
id.to_string(),
];
let _ = tc!(self.launch(self.command(&args)?, true).await, &f);
let _ = tokio::fs::remove_file(&f).await;
Ok(())
}
}
#[derive(Debug)]
pub struct DefaultExecutor {}
#[cfg(feature = "async")]
#[async_trait]
impl Spawner for DefaultExecutor {
async fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)> {
let mut cmd = cmd;
let child = cmd.spawn().map_err(Error::ProcessSpawnFailed)?;
let pid = child.id().unwrap();
let result = child
.wait_with_output()
.await
.map_err(Error::InvalidCommand)?;
let status = result.status;
let stdout = String::from_utf8_lossy(&result.stdout).to_string();
let stderr = String::from_utf8_lossy(&result.stderr).to_string();
Ok((status, pid, stdout, stderr))
}
}
#[cfg(not(feature = "async"))]
impl Spawner for DefaultExecutor {
fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)> {
let mut cmd = cmd;
let child = cmd.spawn().map_err(Error::ProcessSpawnFailed)?;
let pid = child.id();
let result = child.wait_with_output().map_err(Error::InvalidCommand)?;
let status = result.status;
let stdout = String::from_utf8_lossy(&result.stdout).to_string();
let stderr = String::from_utf8_lossy(&result.stderr).to_string();
Ok((status, pid, stdout, stderr))
}
}
#[cfg(test)]
#[cfg(all(target_os = "linux", not(feature = "async")))]
mod tests {
use std::sync::Arc;
use crate::{
io::{InheritedStdIo, PipedStdIo},
*,
};
fn ok_client() -> Runc {
GlobalOpts::new()
.command("/bin/true")
.build()
.expect("unable to create runc instance")
}
fn fail_client() -> Runc {
GlobalOpts::new()
.command("/bin/false")
.build()
.expect("unable to create runc instance")
}
fn echo_client() -> Runc {
GlobalOpts::new()
.command("/bin/echo")
.build()
.expect("unable to create runc instance")
}
fn dummy_process() -> Process {
serde_json::from_str(
"
{
\"user\": {
\"uid\": 1000,
\"gid\": 1000
},
\"cwd\": \"/path/to/dir\"
}",
)
.unwrap()
}
#[test]
fn test_create() {
let opts = CreateOpts::new();
let ok_runc = ok_client();
let response = ok_runc
.create("fake-id", "fake-bundle", Some(&opts))
.expect("true failed.");
assert_ne!(response.pid, 0);
assert!(response.status.success());
assert!(response.output.is_empty());
let fail_runc = fail_client();
match fail_runc.create("fake-id", "fake-bundle", Some(&opts)) {
Ok(_) => panic!("fail_runc returned exit status 0."),
Err(Error::CommandFailed {
status,
stdout,
stderr,
}) => {
if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {
eprintln!("fail_runc succeeded.");
} else {
panic!("unexpected outputs from fail_runc.")
}
}
Err(e) => panic!("unexpected error from fail_runc: {:?}", e),
}
}
#[test]
fn test_run() {
let opts = CreateOpts::new();
let ok_runc = ok_client();
let response = ok_runc
.run("fake-id", "fake-bundle", Some(&opts))
.expect("true failed.");
assert_ne!(response.pid, 0);
assert!(response.status.success());
assert!(response.output.is_empty());
let fail_runc = fail_client();
match fail_runc.run("fake-id", "fake-bundle", Some(&opts)) {
Ok(_) => panic!("fail_runc returned exit status 0."),
Err(Error::CommandFailed {
status,
stdout,
stderr,
}) => {
if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {
eprintln!("fail_runc succeeded.");
} else {
panic!("unexpected outputs from fail_runc.")
}
}
Err(e) => panic!("unexpected error from fail_runc: {:?}", e),
}
}
#[test]
fn test_exec() {
let opts = ExecOpts::new();
let ok_runc = ok_client();
let proc = dummy_process();
ok_runc
.exec("fake-id", &proc, Some(&opts))
.expect("true failed.");
eprintln!("ok_runc succeeded.");
let fail_runc = fail_client();
match fail_runc.exec("fake-id", &proc, Some(&opts)) {
Ok(_) => panic!("fail_runc returned exit status 0."),
Err(Error::CommandFailed {
status,
stdout,
stderr,
}) => {
if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {
eprintln!("fail_runc succeeded.");
} else {
panic!("unexpected outputs from fail_runc.")
}
}
Err(e) => panic!("unexpected error from fail_runc: {:?}", e),
}
}
#[test]
fn test_delete() {
let opts = DeleteOpts::new();
let ok_runc = ok_client();
ok_runc
.delete("fake-id", Some(&opts))
.expect("true failed.");
eprintln!("ok_runc succeeded.");
let fail_runc = fail_client();
match fail_runc.delete("fake-id", Some(&opts)) {
Ok(_) => panic!("fail_runc returned exit status 0."),
Err(Error::CommandFailed {
status,
stdout,
stderr,
}) => {
if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {
eprintln!("fail_runc succeeded.");
} else {
panic!("unexpected outputs from fail_runc.")
}
}
Err(e) => panic!("unexpected error from fail_runc: {:?}", e),
}
}
#[test]
fn test_output() {
// test create cmd with inherit Io, expect empty cmd output
let mut opts = CreateOpts::new();
opts.io = Some(Arc::new(InheritedStdIo::new().unwrap()));
let echo_runc = echo_client();
let response = echo_runc
.create("fake-id", "fake-bundle", Some(&opts))
.expect("echo failed.");
assert_ne!(response.pid, 0);
assert!(response.status.success());
assert!(response.output.is_empty());
// test create cmd with pipe Io, expect nonempty cmd output
let mut opts = CreateOpts::new();
opts.io = Some(Arc::new(PipedStdIo::new().unwrap()));
let echo_runc = echo_client();
let response = echo_runc
.create("fake-id", "fake-bundle", Some(&opts))
.expect("echo failed.");
assert_ne!(response.pid, 0);
assert!(response.status.success());
assert!(!response.output.is_empty());
}
}
/// Tokio tests
#[cfg(test)]
#[cfg(all(target_os = "linux", feature = "async"))]
mod tests {
use std::sync::Arc;
use super::{
io::{InheritedStdIo, PipedStdIo},
*,
};
fn ok_client() -> Runc {
GlobalOpts::new()
.command("/bin/true")
.build()
.expect("unable to create runc instance")
}
fn fail_client() -> Runc {
GlobalOpts::new()
.command("/bin/false")
.build()
.expect("unable to create runc instance")
}
fn echo_client() -> Runc {
GlobalOpts::new()
.command("/bin/echo")
.build()
.expect("unable to create runc instance")
}
#[tokio::test]
async fn test_async_create() {
let opts = CreateOpts::new();
let ok_runc = ok_client();
let ok_task = tokio::spawn(async move {
let response = ok_runc
.create("fake-id", "fake-bundle", Some(&opts))
.await
.expect("true failed.");
assert_ne!(response.pid, 0);
assert!(response.status.success());
assert!(response.output.is_empty());
});
let opts = CreateOpts::new();
let fail_runc = fail_client();
let fail_task = tokio::spawn(async move {
match fail_runc
.create("fake-id", "fake-bundle", Some(&opts))
.await
{
Ok(_) => panic!("fail_runc returned exit status 0."),
Err(Error::CommandFailed {
status,
stdout,
stderr,
}) => {
if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {
eprintln!("fail_runc succeeded.");
} else {
panic!("unexpected outputs from fail_runc.")
}
}
Err(e) => panic!("unexpected error from fail_runc: {:?}", e),
}
});
ok_task.await.expect("ok_task failed.");
fail_task.await.expect("fail_task unexpectedly succeeded.");
}
#[tokio::test]
async fn test_async_start() {
let ok_runc = ok_client();
let ok_task = tokio::spawn(async move {
ok_runc.start("fake-id").await.expect("true failed.");
eprintln!("ok_runc succeeded.");
});
let fail_runc = fail_client();
let fail_task = tokio::spawn(async move {
match fail_runc.start("fake-id").await {
Ok(_) => panic!("fail_runc returned exit status 0."),
Err(Error::CommandFailed {
status,
stdout,
stderr,
}) => {
if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {
eprintln!("fail_runc succeeded.");
} else {
panic!("unexpected outputs from fail_runc.")
}
}
Err(e) => panic!("unexpected error from fail_runc: {:?}", e),
}
});
ok_task.await.expect("ok_task failed.");
fail_task.await.expect("fail_task unexpectedly succeeded.");
}
#[tokio::test]
async fn test_async_run() {
let opts = CreateOpts::new();
let ok_runc = ok_client();
tokio::spawn(async move {
ok_runc
.create("fake-id", "fake-bundle", Some(&opts))
.await
.expect("true failed.");
eprintln!("ok_runc succeeded.");
});
let opts = CreateOpts::new();
let fail_runc = fail_client();
tokio::spawn(async move {
match fail_runc
.create("fake-id", "fake-bundle", Some(&opts))
.await
{
Ok(_) => panic!("fail_runc returned exit status 0."),
Err(Error::CommandFailed {
status,
stdout,
stderr,
}) => {
if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {
eprintln!("fail_runc succeeded.");
} else {
panic!("unexpected outputs from fail_runc.")
}
}
Err(e) => panic!("unexpected error from fail_runc: {:?}", e),
}
})
.await
.expect("tokio spawn falied.");
}
#[tokio::test]
async fn test_async_delete() {
let opts = DeleteOpts::new();
let ok_runc = ok_client();
tokio::spawn(async move {
ok_runc
.delete("fake-id", Some(&opts))
.await
.expect("true failed.");
eprintln!("ok_runc succeeded.");
});
let opts = DeleteOpts::new();
let fail_runc = fail_client();
tokio::spawn(async move {
match fail_runc.delete("fake-id", Some(&opts)).await {
Ok(_) => panic!("fail_runc returned exit status 0."),
Err(Error::CommandFailed {
status,
stdout,
stderr,
}) => {
if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {
eprintln!("fail_runc succeeded.");
} else {
panic!("unexpected outputs from fail_runc.")
}
}
Err(e) => panic!("unexpected error from fail_runc: {:?}", e),
}
})
.await
.expect("tokio spawn falied.");
}
#[tokio::test]
async fn test_async_output() {
// test create cmd with inherit Io, expect empty cmd output
let mut opts = CreateOpts::new();
opts.io = Some(Arc::new(InheritedStdIo::new().unwrap()));
let echo_runc = echo_client();
let response = echo_runc
.create("fake-id", "fake-bundle", Some(&opts))
.await
.expect("echo failed:");
assert_ne!(response.pid, 0);
assert!(response.status.success());
assert!(response.output.is_empty());
// test create cmd with pipe Io, expect nonempty cmd output
let mut opts = CreateOpts::new();
opts.io = Some(Arc::new(PipedStdIo::new().unwrap()));
let response = echo_runc
.create("fake-id", "fake-bundle", Some(&opts))
.await
.expect("echo failed:");
assert_ne!(response.pid, 0);
assert!(response.status.success());
assert!(!response.output.is_empty());
}
}

View File

@ -15,6 +15,7 @@
*/
pub mod io;
mod pipe;
mod runc;
use std::{
fmt::Debug,
io::{Read, Result, Write},
@ -23,6 +24,7 @@ use std::{
use log::debug;
pub use pipe::Pipe;
pub use runc::{DefaultExecutor, Spawner};
use crate::Command;

View File

@ -0,0 +1,445 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
use std::{fmt::Debug, path::Path, process::ExitStatus};
use oci_spec::runtime::{LinuxResources, Process};
use crate::{
container::Container,
error::Error,
events,
options::*,
utils::{self, write_value_to_temp_file},
Command, Response, Result, Runc,
};
// Synchronous (blocking) client implementation, backed by the `runc` binary.
impl Runc {
    /// Execute the prepared command through the configured spawner and
    /// translate the result into a [`Response`].
    ///
    /// With `combined_output` set, stderr is concatenated after stdout in the
    /// returned output. A non-zero exit status becomes
    /// [`Error::CommandFailed`] carrying the captured stdout/stderr.
    pub(crate) fn launch(&self, cmd: Command, combined_output: bool) -> Result<Response> {
        let (status, pid, stdout, stderr) = self.spawner.execute(cmd)?;
        if status.success() {
            let output = if combined_output {
                stdout + stderr.as_str()
            } else {
                stdout
            };
            Ok(Response {
                pid,
                status,
                output,
            })
        } else {
            Err(Error::CommandFailed {
                status,
                stdout,
                stderr,
            })
        }
    }
    /// Create a new container
    pub fn create<P>(&self, id: &str, bundle: P, opts: Option<&CreateOpts>) -> Result<Response>
    where
        P: AsRef<Path>,
    {
        let mut args = vec![
            "create".to_string(),
            "--bundle".to_string(),
            utils::abs_string(bundle)?,
        ];
        if let Some(opts) = opts {
            args.append(&mut opts.args()?);
        }
        args.push(id.to_string());
        let mut cmd = self.command(&args)?;
        match opts {
            // When custom IO is supplied, wire it into the command and close
            // our copies of the descriptors once the child has started.
            Some(CreateOpts { io: Some(io), .. }) => {
                io.set(&mut cmd).map_err(|e| Error::IoSet(e.to_string()))?;
                let res = self.launch(cmd, true)?;
                io.close_after_start();
                Ok(res)
            }
            _ => self.launch(cmd, true),
        }
    }
    /// Delete a container
    pub fn delete(&self, id: &str, opts: Option<&DeleteOpts>) -> Result<()> {
        let mut args = vec!["delete".to_string()];
        if let Some(opts) = opts {
            args.append(&mut opts.args());
        }
        args.push(id.to_string());
        self.launch(self.command(&args)?, true)?;
        Ok(())
    }
    /// Execute an additional process inside the container
    pub fn exec(&self, id: &str, spec: &Process, opts: Option<&ExecOpts>) -> Result<()> {
        // `_temp_file` is an RAII guard: the serialized process spec stays on
        // disk until this function returns.
        let (_temp_file, filename) = write_value_to_temp_file(spec)?;
        let mut args = vec!["exec".to_string(), "--process".to_string(), filename];
        if let Some(opts) = opts {
            args.append(&mut opts.args()?);
        }
        args.push(id.to_string());
        let mut cmd = self.command(&args)?;
        match opts {
            Some(ExecOpts { io: Some(io), .. }) => {
                io.set(&mut cmd).map_err(|e| Error::IoSet(e.to_string()))?;
                self.launch(cmd, true)?;
                io.close_after_start();
            }
            _ => {
                self.launch(cmd, true)?;
            }
        }
        Ok(())
    }
    /// Send the specified signal to processes inside the container
    pub fn kill(&self, id: &str, sig: u32, opts: Option<&KillOpts>) -> Result<()> {
        let mut args = vec!["kill".to_string()];
        if let Some(opts) = opts {
            args.append(&mut opts.args());
        }
        args.push(id.to_string());
        args.push(sig.to_string());
        let _ = self.launch(self.command(&args)?, true)?;
        Ok(())
    }
    /// List all containers associated with this runc instance
    pub fn list(&self) -> Result<Vec<Container>> {
        let args = ["list".to_string(), "--format=json".to_string()];
        let res = self.launch(self.command(&args)?, true)?;
        let output = res.output.trim();
        // Ugly hack to work around golang
        Ok(if output == "null" {
            Vec::new()
        } else {
            serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)?
        })
    }
    /// Pause a container
    pub fn pause(&self, id: &str) -> Result<()> {
        let args = ["pause".to_string(), id.to_string()];
        let _ = self.launch(self.command(&args)?, true)?;
        Ok(())
    }
    /// Resume a container
    pub fn resume(&self, id: &str) -> Result<()> {
        let args = ["resume".to_string(), id.to_string()];
        let _ = self.launch(self.command(&args)?, true)?;
        Ok(())
    }
    /// Not implemented yet.
    pub fn checkpoint(&self) -> Result<()> {
        Err(Error::Unimplemented("checkpoint".to_string()))
    }
    /// Not implemented yet.
    pub fn restore(&self) -> Result<()> {
        Err(Error::Unimplemented("restore".to_string()))
    }
    /// List all the processes inside the container, returning their pids
    pub fn ps(&self, id: &str) -> Result<Vec<usize>> {
        let args = [
            "ps".to_string(),
            "--format=json".to_string(),
            id.to_string(),
        ];
        // stdout only (`combined_output = false`) so stray stderr text cannot
        // corrupt the JSON document parsed below.
        let res = self.launch(self.command(&args)?, false)?;
        let output = res.output.trim();
        // Ugly hack to work around golang
        Ok(if output == "null" {
            Vec::new()
        } else {
            serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)?
        })
    }
    /// Run the create, start, delete lifecycle of the container and return its exit status
    pub fn run<P>(&self, id: &str, bundle: P, opts: Option<&CreateOpts>) -> Result<Response>
    where
        P: AsRef<Path>,
    {
        let mut args = vec![
            "run".to_string(),
            "--bundle".to_string(),
            utils::abs_string(bundle)?,
        ];
        if let Some(opts) = opts {
            args.append(&mut opts.args()?);
        }
        args.push(id.to_string());
        let mut cmd = self.command(&args)?;
        if let Some(CreateOpts { io: Some(io), .. }) = opts {
            io.set(&mut cmd).map_err(|e| Error::IoSet(e.to_string()))?;
        };
        self.launch(cmd, true)
    }
    /// Start an already created container
    pub fn start(&self, id: &str) -> Result<Response> {
        let args = ["start".to_string(), id.to_string()];
        self.launch(self.command(&args)?, true)
    }
    /// Return the state of a container
    pub fn state(&self, id: &str) -> Result<Container> {
        let args = ["state".to_string(), id.to_string()];
        let res = self.launch(self.command(&args)?, true)?;
        serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)
    }
    /// Return the latest statistics for a container
    pub fn stats(&self, id: &str) -> Result<events::Stats> {
        let args = vec!["events".to_string(), "--stats".to_string(), id.to_string()];
        let res = self.launch(self.command(&args)?, true)?;
        let event: events::Event =
            serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)?;
        if let Some(stats) = event.stats {
            Ok(stats)
        } else {
            Err(Error::MissingContainerStats)
        }
    }
    /// Update a container with the provided resource spec
    pub fn update(&self, id: &str, resources: &LinuxResources) -> Result<()> {
        // Temp-file guard keeps the serialized resources on disk for the
        // duration of the call.
        let (_temp_file, filename) = write_value_to_temp_file(resources)?;
        let args = [
            "update".to_string(),
            "--resources".to_string(),
            filename,
            id.to_string(),
        ];
        self.launch(self.command(&args)?, true)?;
        Ok(())
    }
}
/// Abstraction over how a prepared runc command is executed.
pub trait Spawner: Debug {
    /// Run `cmd` to completion, returning
    /// `(exit status, pid of the spawned process, stdout, stderr)`.
    fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)>;
}
/// Default [`Spawner`] that spawns the command as an ordinary child process
/// and blocks until it exits, capturing its output.
#[derive(Debug)]
pub struct DefaultExecutor {}
impl Spawner for DefaultExecutor {
fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)> {
let mut cmd = cmd;
let child = cmd.spawn().map_err(Error::ProcessSpawnFailed)?;
let pid = child.id();
let result = child.wait_with_output().map_err(Error::InvalidCommand)?;
let status = result.status;
let stdout = String::from_utf8_lossy(&result.stdout).to_string();
let stderr = String::from_utf8_lossy(&result.stderr).to_string();
Ok((status, pid, stdout, stderr))
}
}
#[cfg(test)]
#[cfg(target_os = "linux")]
mod tests {
    use std::sync::Arc;

    use oci_spec::runtime::Process;

    use crate::{
        error::Error,
        io::{InheritedStdIo, PipedStdIo},
        options::{CreateOpts, DeleteOpts, ExecOpts, GlobalOpts},
        Runc,
    };

    /// Build a `Runc` client that shells out to the given binary.
    fn client_with(command: &str) -> Runc {
        GlobalOpts::new()
            .command(command)
            .build()
            .expect("unable to create runc instance")
    }

    /// Client whose command always succeeds with no output.
    fn ok_client() -> Runc {
        client_with("/bin/true")
    }

    /// Client whose command always exits with status 1.
    fn fail_client() -> Runc {
        client_with("/bin/false")
    }

    /// Client whose command echoes its arguments to stdout.
    fn echo_client() -> Runc {
        client_with("/bin/echo")
    }

    /// Minimal OCI process spec used by the exec tests.
    fn dummy_process() -> Process {
        serde_json::from_str(
            "
        {
            \"user\": {
                \"uid\": 1000,
                \"gid\": 1000
            },
            \"cwd\": \"/path/to/dir\"
        }",
        )
        .unwrap()
    }

    /// Assert that `result` is `Error::CommandFailed` with exit code 1 and
    /// empty stdout/stderr — the expected behavior of `/bin/false`.
    ///
    /// Extracted because the same match arm was duplicated in every
    /// failure-path test below.
    fn check_fail<T>(result: Result<T, Error>) {
        match result {
            Ok(_) => panic!("fail_runc returned exit status 0."),
            Err(Error::CommandFailed {
                status,
                stdout,
                stderr,
            }) => {
                if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {
                    eprintln!("fail_runc succeeded.");
                } else {
                    panic!("unexpected outputs from fail_runc.")
                }
            }
            Err(e) => panic!("unexpected error from fail_runc: {:?}", e),
        }
    }

    #[test]
    fn test_create() {
        let opts = CreateOpts::new();
        let ok_runc = ok_client();
        let response = ok_runc
            .create("fake-id", "fake-bundle", Some(&opts))
            .expect("true failed.");
        assert_ne!(response.pid, 0);
        assert!(response.status.success());
        assert!(response.output.is_empty());

        let fail_runc = fail_client();
        check_fail(fail_runc.create("fake-id", "fake-bundle", Some(&opts)));
    }

    #[test]
    fn test_run() {
        let opts = CreateOpts::new();
        let ok_runc = ok_client();
        let response = ok_runc
            .run("fake-id", "fake-bundle", Some(&opts))
            .expect("true failed.");
        assert_ne!(response.pid, 0);
        assert!(response.status.success());
        assert!(response.output.is_empty());

        let fail_runc = fail_client();
        check_fail(fail_runc.run("fake-id", "fake-bundle", Some(&opts)));
    }

    #[test]
    fn test_exec() {
        let opts = ExecOpts::new();
        let ok_runc = ok_client();
        let proc = dummy_process();
        ok_runc
            .exec("fake-id", &proc, Some(&opts))
            .expect("true failed.");
        eprintln!("ok_runc succeeded.");

        let fail_runc = fail_client();
        check_fail(fail_runc.exec("fake-id", &proc, Some(&opts)));
    }

    #[test]
    fn test_delete() {
        let opts = DeleteOpts::new();
        let ok_runc = ok_client();
        ok_runc
            .delete("fake-id", Some(&opts))
            .expect("true failed.");
        eprintln!("ok_runc succeeded.");

        let fail_runc = fail_client();
        check_fail(fail_runc.delete("fake-id", Some(&opts)));
    }

    #[test]
    fn test_output() {
        // Create with inherited stdio: output is not captured, so the
        // response output must be empty.
        let mut opts = CreateOpts::new();
        opts.io = Some(Arc::new(InheritedStdIo::new().unwrap()));
        let echo_runc = echo_client();
        let response = echo_runc
            .create("fake-id", "fake-bundle", Some(&opts))
            .expect("echo failed.");
        assert_ne!(response.pid, 0);
        assert!(response.status.success());
        assert!(response.output.is_empty());

        // Create with piped stdio: echo's arguments are captured, so the
        // response output must be nonempty.
        let mut opts = CreateOpts::new();
        opts.io = Some(Arc::new(PipedStdIo::new().unwrap()));
        let echo_runc = echo_client();
        let response = echo_runc
            .create("fake-id", "fake-bundle", Some(&opts))
            .expect("echo failed.");
        assert_ne!(response.pid, 0);
        assert!(response.status.success());
        assert!(!response.output.is_empty());
    }
}

View File

@ -1,6 +1,6 @@
[package]
name = "containerd-shim-protos"
version = "0.7.2"
version = "0.8.0"
authors = [
"Maksym Pavlenko <pavlenko.maksym@gmail.com>",
"The containerd Authors",
@ -49,13 +49,11 @@ required-features = ["async"]
[dependencies]
async-trait = { workspace = true, optional = true }
# protobuf 3.5 introduces a breaking change: https://github.com/containerd/rust-extensions/issues/295
# pinning to <3.5.0 until we can update the generated code
protobuf = ">= 3.0, <3.5.0"
ttrpc = "0.8.2"
protobuf = "3.7.2"
ttrpc = "0.8.3"
[build-dependencies]
ttrpc-codegen = "0.4.2"
ttrpc-codegen = "0.6.0"
[dev-dependencies]
ctrlc = { version = "3.0", features = ["termination"] }

View File

@ -32,6 +32,7 @@ fn main() {
"vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto",
"vendor/github.com/containerd/containerd/api/types/mount.proto",
"vendor/github.com/containerd/containerd/api/types/task/task.proto",
"vendor/github.com/containerd/containerd/api/types/introspection.proto",
#[cfg(feature = "sandbox")]
"vendor/github.com/containerd/containerd/api/types/platform.proto",
],

View File

@ -60,7 +60,7 @@ impl Task for FakeServer {
async fn main() {
simple_logger::SimpleLogger::new().init().unwrap();
let tservice = create_task(Arc::new(Box::new(FakeServer::new())));
let tservice = create_task(Arc::new(FakeServer::new()));
let mut server = Server::new()
.bind("unix:///tmp/shim-proto-ttrpc-001")

View File

@ -57,7 +57,7 @@ impl Task for FakeServer {
fn main() {
simple_logger::SimpleLogger::new().init().unwrap();
let tservice = create_task(Arc::new(Box::new(FakeServer::new())));
let tservice = create_task(Arc::new(FakeServer::new()));
let mut server = Server::new()
.bind("unix:///tmp/shim-proto-ttrpc-001")

View File

@ -34,6 +34,9 @@ pub mod fieldpath {
include!(concat!(env!("OUT_DIR"), "/types/fieldpath.rs"));
}
pub mod introspection {
include!(concat!(env!("OUT_DIR"), "/types/introspection.rs"));
}
#[cfg(feature = "sandbox")]
pub mod platform {
include!(concat!(env!("OUT_DIR"), "/types/platform.rs"));

View File

@ -72,7 +72,7 @@ fn create_ttrpc_context() -> (
#[test]
fn test_task_method_num() {
let task = create_task(Arc::new(Box::new(FakeServer::new())));
let task = create_task(Arc::new(FakeServer::new()));
assert_eq!(task.len(), 17);
}
@ -96,7 +96,7 @@ fn test_create_task() {
request.set_timeout_nano(10000);
request.set_metadata(ttrpc::context::to_pb(ctx.metadata.clone()));
let task = create_task(Arc::new(Box::new(FakeServer::new())));
let task = create_task(Arc::new(FakeServer::new()));
let create = task.get("/containerd.task.v2.Task/Create").unwrap();
create.handler(ctx, request).unwrap();
@ -137,7 +137,7 @@ fn test_delete_task() {
request.set_timeout_nano(10000);
request.set_metadata(ttrpc::context::to_pb(ctx.metadata.clone()));
let task = create_task(Arc::new(Box::new(FakeServer::new())));
let task = create_task(Arc::new(FakeServer::new()));
let delete = task.get("/containerd.task.v2.Task/Delete").unwrap();
delete.handler(ctx, request).unwrap();

View File

@ -0,0 +1,46 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package containerd.types;
import "google/protobuf/any.proto";
option go_package = "github.com/containerd/containerd/api/types;types";
message RuntimeRequest {
string runtime_path = 1;
// Options correspond to CreateTaskRequest.options.
// This is needed to pass the runc binary path, etc.
google.protobuf.Any options = 2;
}
message RuntimeVersion {
string version = 1;
string revision = 2;
}
message RuntimeInfo {
string name = 1;
RuntimeVersion version = 2;
// Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.)
google.protobuf.Any options = 3;
// OCI-compatible runtimes should use https://github.com/opencontainers/runtime-spec/blob/main/features.md
google.protobuf.Any features = 4;
// Annotations of the shim. Irrelevant to features.Annotations.
map<string, string> annotations = 5;
}

View File

@ -1,6 +1,6 @@
[package]
name = "containerd-shim"
version = "0.7.4"
version = "0.8.0"
authors = [
"Maksym Pavlenko <pavlenko.maksym@gmail.com>",
"The containerd Authors",
@ -19,7 +19,6 @@ async = [
"async-trait",
"containerd-shim-protos/async",
"futures",
"signal-hook-tokio",
"tokio",
]
tracing = ["dep:tracing"]
@ -34,7 +33,8 @@ name = "windows-log-reader"
path = "examples/windows_log_reader.rs"
[dependencies]
containerd-shim-protos = { path = "../shim-protos", version = "0.7.2" }
which = "7.0.1"
containerd-shim-protos = { path = "../shim-protos", version = "0.8.0" }
go-flag = "0.1.0"
lazy_static = "1.4.0"
sha2 = "0.10.2"
@ -54,6 +54,7 @@ prctl.workspace = true
signal-hook = "0.3.13"
serde.workspace = true
serde_json.workspace = true
tempfile.workspace = true
thiserror.workspace = true
time.workspace = true
@ -63,20 +64,13 @@ tracing = { version = "0.1", optional = true }
# Async dependencies
async-trait = { workspace = true, optional = true }
futures = { workspace = true, optional = true }
signal-hook-tokio = { version = "0.3.1", optional = true, features = [
"futures-v0_3",
] }
tokio = { workspace = true, features = ["full"], optional = true }
[target.'cfg(target_os = "linux")'.dependencies]
cgroups-rs.workspace = true
[target.'cfg(unix)'.dependencies]
command-fds = "0.3.0"
[target.'cfg(windows)'.dependencies]
mio = { version = "1.0", features = ["os-ext", "os-poll"] }
os_pipe.workspace = true
windows-sys = { version = "0.52.0", features = [
"Win32_Foundation",
"Win32_System_WindowsProgramming",

View File

@ -41,6 +41,8 @@ pub struct Flags {
pub action: String,
/// Version of the shim.
pub version: bool,
/// get the option protobuf from stdin, print the shim info protobuf to stdout, and exit
pub info: bool,
}
/// Parses command line arguments passed to the shim.
@ -57,6 +59,7 @@ pub fn parse<S: AsRef<OsStr>>(args: &[S]) -> Result<Flags> {
f.add_flag("bundle", &mut flags.bundle);
f.add_flag("address", &mut flags.address);
f.add_flag("publish-binary", &mut flags.publish_binary);
f.add_flag("info", &mut flags.info);
})
.map_err(|e| Error::InvalidArgument(e.to_string()))?;

View File

@ -15,27 +15,28 @@
*/
use std::{
convert::TryFrom,
env,
io::Read,
os::unix::{fs::FileTypeExt, net::UnixListener},
path::Path,
process,
process::{Command, Stdio},
process::{self, Command as StdCommand, Stdio},
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
task::{ready, Poll},
};
use async_trait::async_trait;
use command_fds::{CommandFdExt, FdMapping};
use containerd_shim_protos::{
api::DeleteResponse,
protobuf::Message,
protobuf::{well_known_types::any::Any, Message, MessageField},
shim::oci::Options,
shim_async::{create_task, Client, Task},
ttrpc::r#async::Server,
types::introspection::{self, RuntimeInfo},
};
use futures::StreamExt;
use futures::stream::{poll_fn, BoxStream, SelectAll, StreamExt};
use libc::{SIGCHLD, SIGINT, SIGPIPE, SIGTERM};
use log::{debug, error, info, warn};
use nix::{
@ -46,8 +47,11 @@ use nix::{
},
unistd::Pid,
};
use signal_hook_tokio::Signals;
use tokio::{io::AsyncWriteExt, sync::Notify};
use oci_spec::runtime::Features;
use tokio::{io::AsyncWriteExt, process::Command, sync::Notify};
use which::which;
const DEFAULT_BINARY_NAME: &str = "runc";
use crate::{
args,
@ -55,7 +59,7 @@ use crate::{
error::{Error, Result},
logger, parse_sockaddr, reap, socket_address,
util::{asyncify, read_file_to_str, write_str_to_file},
Config, Flags, StartOpts, SOCKET_FD, TTRPC_ADDRESS,
Config, Flags, StartOpts, TTRPC_ADDRESS,
};
pub mod monitor;
@ -109,6 +113,51 @@ where
process::exit(1);
}
}
/// get runtime info
pub fn run_info() -> Result<RuntimeInfo> {
let mut info = introspection::RuntimeInfo {
name: "containerd-shim-runc-v2-rs".to_string(),
version: MessageField::some(introspection::RuntimeVersion {
version: env!("CARGO_PKG_VERSION").to_string(),
revision: String::default(),
..Default::default()
}),
..Default::default()
};
let mut binary_name = DEFAULT_BINARY_NAME.to_string();
let mut data: Vec<u8> = Vec::new();
std::io::stdin()
.read_to_end(&mut data)
.map_err(io_error!(e, "read stdin"))?;
// get BinaryName from stdin
if !data.is_empty() {
let opts =
Any::parse_from_bytes(&data).and_then(|any| Options::parse_from_bytes(&any.value))?;
if !opts.binary_name().is_empty() {
binary_name = opts.binary_name().to_string();
}
}
let binary_path = which(binary_name).unwrap();
// get features
let output = StdCommand::new(binary_path)
.arg("features")
.output()
.unwrap();
let features: Features = serde_json::from_str(&String::from_utf8_lossy(&output.stdout))?;
// set features
let features_any = Any {
type_url: "types.containerd.io/opencontainers/runtime-spec/1/features/Features".to_string(),
// features to json
value: serde_json::to_vec(&features)?,
..Default::default()
};
info.features = MessageField::some(features_any);
Ok(info)
}
#[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))]
async fn bootstrap<T>(runtime_id: &str, opts: Option<Config>) -> Result<()>
@ -167,6 +216,12 @@ where
Ok(())
}
_ => {
if flags.socket.is_empty() {
return Err(Error::InvalidArgument(String::from(
"Shim socket cannot be empty",
)));
}
if !config.no_setup_logger {
logger::init(
flags.debug,
@ -177,13 +232,18 @@ where
}
let publisher = RemotePublisher::new(&ttrpc_address).await?;
let task = shim.create_task_service(publisher).await;
let task_service = create_task(Arc::new(Box::new(task)));
let mut server = Server::new().register_service(task_service);
server = server.add_listener(SOCKET_FD)?;
server = server.set_domain_unix();
let task = Box::new(shim.create_task_service(publisher).await)
as Box<dyn containerd_shim_protos::shim_async::Task + Send + Sync>;
let task_service = create_task(Arc::from(task));
let Some(mut server) = create_server_with_retry(&flags).await? else {
signal_server_started();
return Ok(());
};
server = server.register_service(task_service);
server.start().await?;
signal_server_started();
info!("Shim successfully started, waiting for exit signal...");
tokio::spawn(async move {
handle_signals(signals).await;
@ -247,38 +307,18 @@ pub async fn spawn(opts: StartOpts, grouping: &str, vars: Vec<(&str, &str)>) ->
let cwd = env::current_dir().map_err(io_error!(e, ""))?;
let address = socket_address(&opts.address, &opts.namespace, grouping);
// Create socket and prepare listener.
// We'll use `add_listener` when creating TTRPC server.
let listener = match start_listener(&address).await {
Ok(l) => l,
Err(e) => {
if let Error::IoError {
err: ref io_err, ..
} = e
{
if io_err.kind() != std::io::ErrorKind::AddrInUse {
return Err(e);
};
}
if let Ok(()) = wait_socket_working(&address, 5, 200).await {
write_str_to_file("address", &address).await?;
return Ok(address);
}
remove_socket(&address).await?;
start_listener(&address).await?
}
};
// Activation pattern comes from the hcsshim: https://github.com/microsoft/hcsshim/blob/v0.10.0-rc.7/cmd/containerd-shim-runhcs-v1/serve.go#L57-L70
// another way to do it would be to create a named pipe and pass it to the child process through handle inheritance but that would require duplicating
// the logic in Rust's 'command' for process creation. There is an issue in Rust to make it simpler to specify handle inheritance and this could
// be revisited once https://github.com/rust-lang/rust/issues/54760 is implemented.
// tokio::process::Command do not have method `fd_mappings`,
// and the `spawn()` is also not an async method,
// so we use the std::process::Command here
let mut command = Command::new(cmd);
command
.current_dir(cwd)
.stdout(Stdio::null())
.stdout(Stdio::piped())
.stdin(Stdio::null())
.stderr(Stdio::null())
.envs(vars)
.args([
"-namespace",
&opts.namespace,
@ -286,31 +326,139 @@ pub async fn spawn(opts: StartOpts, grouping: &str, vars: Vec<(&str, &str)>) ->
&opts.id,
"-address",
&opts.address,
])
.fd_mappings(vec![FdMapping {
parent_fd: listener.into(),
child_fd: SOCKET_FD,
}])?;
"-socket",
&address,
]);
if opts.debug {
command.arg("-debug");
}
command.envs(vars);
let _child = command.spawn().map_err(io_error!(e, "spawn shim"))?;
let mut child = command.spawn().map_err(io_error!(e, "spawn shim"))?;
#[cfg(target_os = "linux")]
crate::cgroup::set_cgroup_and_oom_score(_child.id())?;
crate::cgroup::set_cgroup_and_oom_score(child.id().unwrap())?;
let mut reader = child.stdout.take().unwrap();
tokio::io::copy(&mut reader, &mut tokio::io::stderr())
.await
.unwrap();
Ok(address)
}
#[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))]
fn setup_signals_tokio(config: &Config) -> Signals {
if config.no_reaper {
Signals::new([SIGTERM, SIGINT, SIGPIPE]).expect("new signal failed")
} else {
Signals::new([SIGTERM, SIGINT, SIGPIPE, SIGCHLD]).expect("new signal failed")
async fn create_server(flags: &args::Flags) -> Result<Server> {
use std::os::fd::IntoRawFd;
let listener = start_listener(&flags.socket).await?;
let mut server = Server::new();
server = server.add_listener(listener.into_raw_fd())?;
server = server.set_domain_unix();
Ok(server)
}
async fn create_server_with_retry(flags: &args::Flags) -> Result<Option<Server>> {
// Really try to create a server.
let server = match create_server(flags).await {
Ok(server) => server,
Err(Error::IoError { err, .. }) if err.kind() == std::io::ErrorKind::AddrInUse => {
// If the address is already in use then make sure it is up and running and return the address
// This allows for running a single shim per container scenarios
if let Ok(()) = wait_socket_working(&flags.socket, 5, 200).await {
write_str_to_file("address", &flags.socket).await?;
return Ok(None);
}
remove_socket(&flags.socket).await?;
create_server(flags).await?
}
Err(e) => return Err(e),
};
Ok(Some(server))
}
fn signal_server_started() {
use libc::{dup2, STDERR_FILENO, STDOUT_FILENO};
unsafe {
if dup2(STDERR_FILENO, STDOUT_FILENO) < 0 {
panic!("Error closing pipe: {}", std::io::Error::last_os_error())
}
}
}
#[cfg(unix)]
fn signal_stream(kind: i32) -> std::io::Result<BoxStream<'static, i32>> {
use tokio::signal::unix::{signal, SignalKind};
let kind = SignalKind::from_raw(kind);
signal(kind).map(|mut sig| {
// The object returned by `signal` is not a `Stream`.
// The `poll_fn` function constructs a `Stream` based on a polling function.
// We need to create a `Stream` so that we can use the `SelectAll` stream "merge"
// all the signal streams.
poll_fn(move |cx| {
ready!(sig.poll_recv(cx));
Poll::Ready(Some(kind.as_raw_value()))
})
.boxed()
})
}
#[cfg(windows)]
fn signal_stream(kind: i32) -> std::io::Result<BoxStream<'static, i32>> {
use tokio::signal::windows::ctrl_c;
// Windows doesn't have similar signal like SIGCHLD
// We could implement something if required but for now
// just implement support for SIGINT
if kind != SIGINT {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!("Invalid signal {kind}"),
));
}
ctrl_c().map(|mut sig| {
// The object returned by `signal` is not a `Stream`.
// The `poll_fn` function constructs a `Stream` based on a polling function.
// We need to create a `Stream` so that we can use the `SelectAll` stream "merge"
// all the signal streams.
poll_fn(move |cx| {
ready!(sig.poll_recv(cx));
Poll::Ready(Some(kind))
})
.boxed()
})
}
type Signals = SelectAll<BoxStream<'static, i32>>;
#[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))]
fn setup_signals_tokio(config: &Config) -> Signals {
#[cfg(unix)]
let signals: &[i32] = if config.no_reaper {
&[SIGTERM, SIGINT, SIGPIPE]
} else {
&[SIGTERM, SIGINT, SIGPIPE, SIGCHLD]
};
// Windows doesn't have similar signal like SIGCHLD
// We could implement something if required but for now
// just listen for SIGINT
// Note: see comment at the counterpart in synchronous/mod.rs for details.
#[cfg(windows)]
let signals: &[i32] = &[SIGINT];
let signals: Vec<_> = signals
.iter()
.copied()
.map(signal_stream)
.collect::<std::io::Result<_>>()
.expect("signal setup failed");
SelectAll::from_iter(signals)
}
#[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))]
async fn handle_signals(signals: Signals) {
let mut signals = signals.fuse();
@ -322,14 +470,7 @@ async fn handle_signals(signals: Signals) {
}
SIGCHLD => loop {
// Note: see comment at the counterpart in synchronous/mod.rs for details.
match asyncify(move || {
Ok(wait::waitpid(
Some(Pid::from_raw(-1)),
Some(WaitPidFlag::WNOHANG),
)?)
})
.await
{
match wait::waitpid(Some(Pid::from_raw(-1)), Some(WaitPidFlag::WNOHANG)) {
Ok(WaitStatus::Exited(pid, status)) => {
monitor_notify_by_pid(pid.as_raw(), status)
.await
@ -345,7 +486,7 @@ async fn handle_signals(signals: Signals) {
Ok(WaitStatus::StillAlive) => {
break;
}
Err(Error::Nix(Errno::ECHILD)) => {
Err(Errno::ECHILD) => {
break;
}
Err(e) => {

View File

@ -24,7 +24,7 @@ use containerd_shim_protos::{
ttrpc,
ttrpc::context::Context,
};
use log::debug;
use log::{debug, error, warn};
use tokio::sync::mpsc;
use crate::{
@ -93,15 +93,20 @@ impl RemotePublisher {
count: item.count + 1,
};
if let Err(e) = client.forward(new_item.ctx.clone(), &req).await {
debug!("publish error {:?}", e);
// This is a bug from ttrpc, ttrpc should return RemoteClosed|ClientClosed error. change it in future
// if e == (ttrpc::error::Error::RemoteClosed || ttrpc::error::Error::ClientClosed)
// reconnect client
let new_client = Self::connect(address.as_str()).await.map_err(|e| {
debug!("reconnect the ttrpc client {:?} fail", e);
});
if let Ok(c) = new_client {
client = EventsClient::new(c);
match e {
ttrpc::error::Error::RemoteClosed | ttrpc::error::Error::LocalClosed => {
warn!("publish fail because the server or client close {:?}", e);
// reconnect client
if let Ok(c) = Self::connect(address.as_str()).await.map_err(|e| {
debug!("reconnect the ttrpc client {:?} fail", e);
}) {
client = EventsClient::new(c);
}
}
_ => {
// TODO: if it is another error, we may need to clean up the socket file
error!("the client forward err is {:?}", e);
}
}
let sender_ref = sender.clone();
// Take a another task requeue , for no blocking the recv task
@ -218,7 +223,7 @@ mod tests {
let barrier2 = barrier.clone();
let server_thread = tokio::spawn(async move {
let listener = UnixListener::bind(&path1).unwrap();
let service = create_events(Arc::new(Box::new(server)));
let service = create_events(Arc::new(server));
let mut server = Server::new()
.set_domain_unix()
.add_listener(listener.as_raw_fd())

View File

@ -49,11 +49,6 @@ pub enum Error {
#[error("Failed to setup logger: {0}")]
Setup(#[from] log::SetLoggerError),
/// Unable to pass fd to child process (we rely on `command_fds` crate for this).
#[cfg(unix)]
#[error("Failed to pass socket fd to child: {0}")]
FdMap(#[from] command_fds::FdMappingCollision),
#[cfg(unix)]
#[error("Nix error: {0}")]
Nix(#[from] nix::Error),

View File

@ -18,10 +18,7 @@
use std::{fs::File, path::PathBuf};
#[cfg(unix)]
use std::{
os::unix::{io::RawFd, net::UnixListener},
path::Path,
};
use std::{os::unix::net::UnixListener, path::Path};
pub use containerd_shim_protos as protos;
#[cfg(unix)]
@ -56,13 +53,19 @@ pub use args::{parse, Flags};
pub mod asynchronous;
pub mod cgroup;
pub mod event;
mod logger;
pub mod logger;
pub mod monitor;
pub mod mount;
#[cfg(target_os = "linux")]
pub mod mount_linux;
#[cfg(not(target_os = "linux"))]
pub mod mount_other;
#[cfg(target_os = "linux")]
pub use mount_linux as mount;
#[cfg(not(target_os = "linux"))]
pub use mount_other as mount;
mod reap;
#[cfg(not(feature = "async"))]
pub mod synchronous;
mod sys;
pub mod util;
/// Generated request/response structures.
@ -151,12 +154,6 @@ pub struct StartOpts {
pub debug: bool,
}
/// The shim process communicates with the containerd server through a communication channel
/// created by containerd. One endpoint of the communication channel is passed to shim process
/// through a file descriptor during forking, which is the fourth(3) file descriptor.
#[cfg(unix)]
const SOCKET_FD: RawFd = 3;
#[cfg(target_os = "linux")]
pub const SOCKET_ROOT: &str = "/run/containerd";

View File

@ -31,8 +31,6 @@ use log::{
use time::{format_description::well_known::Rfc3339, OffsetDateTime};
use crate::error::Error;
#[cfg(windows)]
use crate::sys::windows::NamedPipeLogger;
pub const LOG_ENV: &str = "RUST_LOG";
@ -41,22 +39,92 @@ pub struct FifoLogger {
}
impl FifoLogger {
#[allow(dead_code)]
pub fn new() -> Result<FifoLogger, io::Error> {
Self::with_path("log")
pub fn new(_namespace: &str, _id: &str) -> io::Result<FifoLogger> {
#[cfg(unix)]
let logger = Self::with_path("log")?;
#[cfg(windows)]
let logger = {
let pipe_name = format!(r"\\.\pipe\containerd-shim-{_namespace}-{_id}-log");
Self::with_named_pipe(&pipe_name)?
};
Ok(logger)
}
#[allow(dead_code)]
pub fn with_path<P: AsRef<Path>>(path: P) -> Result<FifoLogger, io::Error> {
pub fn with_path(path: impl AsRef<Path>) -> io::Result<FifoLogger> {
let f = OpenOptions::new()
.write(true)
.read(false)
.create(false)
.open(path)?;
Ok(FifoLogger {
file: Mutex::new(f),
})
Ok(FifoLogger::with_file(f))
}
pub fn with_file(file: File) -> FifoLogger {
let file = Mutex::new(file);
FifoLogger { file }
}
#[cfg(windows)]
pub fn with_named_pipe(name: &str) -> io::Result<FifoLogger> {
// Containerd on windows expects the log to be a named pipe in the format of \\.\pipe\containerd-<namespace>-<id>-log
// There is an assumption that there is always only one client connected which is containerd.
// If there is a restart of containerd then logs during that time period will be lost.
//
// https://github.com/containerd/containerd/blob/v1.7.0/runtime/v2/shim_windows.go#L77
// https://github.com/microsoft/hcsshim/blob/5871d0c4436f131c377655a3eb09fc9b5065f11d/cmd/containerd-shim-runhcs-v1/serve.go#L132-L137
use std::os::windows::io::{AsRawHandle, BorrowedHandle};
use mio::{windows::NamedPipe, Events, Interest, Poll, Token};
let mut pipe_server = NamedPipe::new(name)?;
let file = unsafe { BorrowedHandle::borrow_raw(pipe_server.as_raw_handle()) }
.try_clone_to_owned()?;
let file = File::from(file);
let poll = Poll::new()?;
poll.registry().register(
&mut pipe_server,
Token(0),
Interest::READABLE | Interest::WRITABLE,
)?;
std::thread::spawn(move || {
let pipe_server = pipe_server;
let mut poll = poll;
let mut events = Events::with_capacity(128);
let _ = pipe_server.connect();
loop {
poll.poll(&mut events, None).unwrap();
for event in events.iter() {
if event.is_writable() {
match pipe_server.connect() {
Ok(()) => {}
Err(e) if e.kind() == io::ErrorKind::Interrupted => {
// this would block just keep processing
}
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
// this would block just keep processing
}
Err(e) => {
panic!("Error connecting to client: {}", e);
}
};
}
if event.is_readable() {
pipe_server.disconnect().unwrap();
}
}
}
});
Ok(FifoLogger::with_file(file))
}
}
@ -116,29 +184,12 @@ impl log::Log for FifoLogger {
fn flush(&self) {
// The logger server may have temporarily shutdown, ignore the error instead of panic.
let _ = self.file.lock().unwrap().sync_all();
let _ = self.file.lock().unwrap().flush();
}
}
pub fn init(
debug: bool,
default_log_level: &str,
_namespace: &str,
_id: &str,
) -> Result<(), Error> {
#[cfg(unix)]
let logger = FifoLogger::new().map_err(io_error!(e, "failed to init logger"))?;
// Containerd on windows expects the log to be a named pipe in the format of \\.\pipe\containerd-<namespace>-<id>-log
// There is an assumption that there is always only one client connected which is containerd.
// If there is a restart of containerd then logs during that time period will be lost.
//
// https://github.com/containerd/containerd/blob/v1.7.0/runtime/v2/shim_windows.go#L77
// https://github.com/microsoft/hcsshim/blob/5871d0c4436f131c377655a3eb09fc9b5065f11d/cmd/containerd-shim-runhcs-v1/serve.go#L132-L137
#[cfg(windows)]
let logger =
NamedPipeLogger::new(_namespace, _id).map_err(io_error!(e, "failed to init logger"))?;
pub fn init(debug: bool, default_log_level: &str, namespace: &str, id: &str) -> Result<(), Error> {
let logger = FifoLogger::new(namespace, id).map_err(io_error!(e, "failed to init logger"))?;
configure_logging_level(debug, default_log_level);
log::set_boxed_logger(Box::new(logger))?;
Ok(())
@ -291,3 +342,130 @@ mod tests {
assert!(contents.contains("level=error key=\"1\" b=\"2\" msg=\"structured!\""));
}
}
#[cfg(all(windows, test))]
mod windows_tests {
use std::{
fs::OpenOptions,
io::Read,
os::windows::{
fs::OpenOptionsExt,
io::{FromRawHandle, IntoRawHandle},
prelude::AsRawHandle,
},
time::Duration,
};
use log::{Log, Record};
use mio::{windows::NamedPipe, Events, Interest, Poll, Token};
use windows_sys::Win32::{
Foundation::ERROR_PIPE_NOT_CONNECTED, Storage::FileSystem::FILE_FLAG_OVERLAPPED,
};
use super::*;
#[test]
fn test_namedpipe_log_can_write_before_client_connected() {
let ns = "test".to_string();
let id = "notconnected".to_string();
let logger = FifoLogger::new(&ns, &id).unwrap();
// test can write before a reader is connected (should succeed but the messages will be dropped)
log::set_max_level(log::LevelFilter::Info);
let record = Record::builder()
.level(log::Level::Info)
.line(Some(1))
.file(Some("sample file"))
.args(format_args!("hello"))
.build();
logger.log(&record);
logger.flush();
}
#[test]
fn test_namedpipe_log() {
use std::fs::File;
let ns = "test".to_string();
let id = "clients".to_string();
let pipe_name = format!("\\\\.\\pipe\\containerd-shim-{}-{}-log", ns, id);
let logger = FifoLogger::new(&ns, &id).unwrap();
let mut client = create_client(pipe_name.as_str());
log::set_max_level(log::LevelFilter::Info);
let kvs: &[(&str, i32)] = &[("key", 1), ("b", 2)];
let record = Record::builder()
.level(log::Level::Info)
.line(Some(1))
.key_values(&kvs)
.args(format_args!("hello"))
.build();
logger.log(&record);
logger.flush();
let buf = read_message(&mut client, 73);
let message = std::str::from_utf8(&buf).unwrap();
assert!(message.starts_with("time=\""), "message was: {:?}", message);
assert!(
message.contains("level=info key=\"1\" b=\"2\" msg=\"hello\"\n"),
"message was: {:?}",
message
);
// test that we can reconnect after a reader disconnects
// we need to get the raw handle and drop that as well to force full disconnect
// and give a few milliseconds for the disconnect to happen
println!("dropping client");
let handle = client.as_raw_handle();
drop(client);
let f = unsafe { File::from_raw_handle(handle) };
drop(f);
std::thread::sleep(Duration::from_millis(100));
let mut client2 = create_client(pipe_name.as_str());
logger.log(&record);
logger.flush();
read_message(&mut client2, 51);
}
fn read_message(client: &mut NamedPipe, length: usize) -> Vec<u8> {
let mut poll = Poll::new().unwrap();
poll.registry()
.register(client, Token(1), Interest::READABLE)
.unwrap();
let mut events = Events::with_capacity(128);
let mut buf = vec![0; length];
loop {
poll.poll(&mut events, Some(Duration::from_millis(10)))
.unwrap();
match client.read(&mut buf) {
Ok(0) => {
panic!("Read no bytes from pipe")
}
Ok(_) => {
break;
}
Err(e) if e.raw_os_error() == Some(ERROR_PIPE_NOT_CONNECTED as i32) => {
panic!("not connected to the pipe");
}
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
continue;
}
Err(e) => panic!("Error reading from pipe: {}", e),
}
}
buf.to_vec()
}
fn create_client(pipe_name: &str) -> mio::windows::NamedPipe {
let mut opts = OpenOptions::new();
opts.read(true)
.write(true)
.custom_flags(FILE_FLAG_OVERLAPPED);
let file = opts.open(pipe_name).unwrap();
unsafe { NamedPipe::from_raw_handle(file.into_raw_handle()) }
}
}

View File

@ -1,4 +1,3 @@
#![cfg(not(windows))]
/*
Copyright The containerd Authors.
@ -19,36 +18,118 @@
use std::{
collections::HashMap,
env,
fs::File,
io::BufRead,
ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not},
os::fd::AsRawFd,
path::Path,
};
use lazy_static::lazy_static;
use log::error;
#[cfg(target_os = "linux")]
use nix::mount::{mount, MsFlags};
#[cfg(target_os = "linux")]
use nix::sched::{unshare, CloneFlags};
#[cfg(target_os = "linux")]
use nix::unistd::{fork, ForkResult};
use nix::{
mount::{mount, MntFlags, MsFlags},
sched::{unshare, CloneFlags},
unistd::{fork, ForkResult},
};
use crate::error::{Error, Result};
#[cfg(not(feature = "async"))]
use crate::monitor::{monitor_subscribe, wait_pid, Topic};
#[cfg(target_os = "linux")]
/// Maps a textual mount option onto `MsFlags` bits, with `clear` telling
/// whether those bits should be removed from (rather than added to) the
/// accumulated mount flags.
struct Flag {
    // When true, the bits in `flags` are cleared from the running flag set.
    clear: bool,
    // The mount flag bits this option corresponds to.
    flags: MsFlags,
}
#[cfg(target_os = "linux")]
/// Options controlling how a loop device is configured.
#[derive(Debug, Default)]
pub struct LoopParams {
    // Open the backing file (and the device) read-only.
    readonly: bool,
    // Detach the device automatically on last close (LO_FLAGS_AUTOCLEAR).
    auto_clear: bool,
    // Request direct I/O on the backing file.
    direct: bool,
}
/// In-memory mirror of the kernel's `loop_info64` structure as passed to
/// the LOOP_SET_STATUS64 ioctl; `#[repr(C)]` keeps the field layout
/// compatible with the kernel's expectation.
#[repr(C)]
#[derive(Debug)]
pub struct LoopInfo {
    device: u64,
    inode: u64,
    rdevice: u64,
    offset: u64,
    size_limit: u64,
    number: u32,
    encrypt_type: u32,
    encrypt_key_size: u32,
    flags: u32,
    // Backing file path. Fixed 64-byte buffer: longer paths must be
    // truncated before being copied in, or the copy goes out of range.
    file_name: [u8; 64],
    crypt_name: [u8; 64],
    encrypt_key: [u8; 32],
    init: [u64; 2],
}
impl Default for LoopInfo {
fn default() -> Self {
LoopInfo {
device: 0,
inode: 0,
rdevice: 0,
offset: 0,
size_limit: 0,
number: 0,
encrypt_type: 0,
encrypt_key_size: 0,
flags: 0,
file_name: [0; 64],
crypt_name: [0; 64],
encrypt_key: [0; 32],
init: [0; 2],
}
}
}
// Control device used to ask the kernel for a free loop device number.
const LOOP_CONTROL_PATH: &str = "/dev/loop-control";
// Prefix of loop device nodes; the device number is appended (/dev/loopN).
const LOOP_DEV_FORMAT: &str = "/dev/loop";
// Substring matched against error text to detect a busy loop device.
const EBUSY_STRING: &str = "device or resource busy";
// Prefix of the overlayfs lowerdir mount option.
const OVERLAY_LOWERDIR_PREFIX: &str = "lowerdir=";
#[cfg(target_os = "linux")]
/// One parsed record from `/proc/self/mountinfo`; see proc(5) for the
/// field layout this mirrors.
#[derive(Debug, Default, Clone)]
struct MountInfo {
    /// id is a unique identifier of the mount (may be reused after umount).
    pub id: u32,
    /// parent is the ID of the parent mount (or of self for the root
    /// of this mount namespace's mount tree).
    pub parent: u32,
    /// major and minor are the major and the minor components of the Dev
    /// field of unix.Stat_t structure returned by unix.*Stat calls for
    /// files on this filesystem.
    pub major: u32,
    pub minor: u32,
    /// root is the pathname of the directory in the filesystem which forms
    /// the root of this mount.
    pub root: String,
    /// mountpoint is the pathname of the mount point relative to the
    /// process's root directory.
    pub mountpoint: String,
    /// options is a comma-separated list of mount options.
    pub options: String,
    /// optional are zero or more fields of the form "tag[:value]",
    /// separated by a space. Currently, the possible optional fields are
    /// "shared", "master", "propagate_from", and "unbindable". For more
    /// information, see mount_namespaces(7) Linux man page.
    pub optional: String,
    /// fs_type is the filesystem type in the form "type[.subtype]".
    pub fs_type: String,
    /// source is filesystem-specific information, or "none".
    pub source: String,
    /// vfs_options is a comma-separated list of superblock options.
    pub vfs_options: String,
}
lazy_static! {
static ref MOUNT_FLAGS: HashMap<&'static str, Flag> = {
let mut mf = HashMap::new();
let zero: MsFlags = MsFlags::from_bits(0).unwrap();
let zero: MsFlags = MsFlags::empty();
mf.insert(
"async",
Flag {
@ -267,12 +348,10 @@ fn longest_common_prefix(dirs: &[String]) -> &str {
// NOTE: the snapshot id is based on digits.
// in order to avoid to get snapshots/x, should be back to parent dir.
// however, there is assumption that the common dir is ${root}/io.containerd.v1.overlayfs/snapshots.
#[cfg(target_os = "linux")]
/// Trim `s` back to its parent directory: keep everything up to and
/// including the final '/'.
fn trim_flawed_dir(s: &str) -> String {
    // When there is no '/' at all, this intentionally falls back to
    // index 0 + 1 and returns just the first byte, matching the
    // original `unwrap_or(0) + 1` behavior.
    let end = match s.rfind('/') {
        Some(idx) => idx + 1,
        None => 1,
    };
    s[..end].to_owned()
}
#[cfg(target_os = "linux")]
#[derive(Default)]
struct LowerdirCompactor {
options: Vec<String>,
@ -280,7 +359,6 @@ struct LowerdirCompactor {
lowerdir_prefix: Option<String>,
}
#[cfg(target_os = "linux")]
impl LowerdirCompactor {
fn new(options: &[String]) -> Self {
Self {
@ -409,7 +487,6 @@ impl From<MountExitCode> for Result<()> {
}
#[cfg(not(feature = "async"))]
#[cfg(target_os = "linux")]
pub fn mount_rootfs(
fs_type: Option<&str>,
source: Option<&str>,
@ -428,7 +505,7 @@ pub fn mount_rootfs(
(None, options.to_vec())
};
let mut flags: MsFlags = MsFlags::from_bits(0).unwrap();
let mut flags: MsFlags = MsFlags::empty();
let mut data = Vec::new();
options.iter().for_each(|x| {
if let Some(f) = MOUNT_FLAGS.get(x.as_str()) {
@ -467,7 +544,7 @@ pub fn mount_rootfs(
}
// mount with non-propagation first, or remount with changed data
let oflags = flags.bitand(PROPAGATION_TYPES.not());
let zero: MsFlags = MsFlags::from_bits(0).unwrap();
let zero: MsFlags = MsFlags::empty();
if flags.bitand(MsFlags::MS_REMOUNT).eq(&zero) || data.is_some() {
mount(source, target.as_ref(), fs_type, oflags, data).unwrap_or_else(|err| {
error!(
@ -524,7 +601,6 @@ pub fn mount_rootfs(
}
#[cfg(feature = "async")]
#[cfg(target_os = "linux")]
pub fn mount_rootfs(
fs_type: Option<&str>,
source: Option<&str>,
@ -541,8 +617,10 @@ pub fn mount_rootfs(
(None, options.to_vec())
};
let mut flags: MsFlags = MsFlags::from_bits(0).unwrap();
let mut flags: MsFlags = MsFlags::empty();
let mut data = Vec::new();
let mut lo_setup = false;
let mut loop_params = LoopParams::default();
options.iter().for_each(|x| {
if let Some(f) = MOUNT_FLAGS.get(x.as_str()) {
if f.clear {
@ -550,6 +628,8 @@ pub fn mount_rootfs(
} else {
flags.bitor_assign(f.flags)
}
} else if x.as_str() == "loop" {
lo_setup = true;
} else {
data.push(x.as_str())
}
@ -562,17 +642,31 @@ pub fn mount_rootfs(
None
};
unshare(CloneFlags::CLONE_FS).unwrap();
if let Some(workdir) = chdir {
unshare(CloneFlags::CLONE_FS)?;
env::set_current_dir(Path::new(&workdir)).unwrap_or_else(|_| {
unsafe { libc::_exit(i32::from(MountExitCode::ChdirErr)) };
});
}
// mount with non-propagation first, or remount with changed data
let oflags = flags.bitand(PROPAGATION_TYPES.not());
let zero: MsFlags = MsFlags::from_bits(0).unwrap();
if lo_setup {
loop_params = LoopParams {
readonly: oflags.bitand(MsFlags::MS_RDONLY) == MsFlags::MS_RDONLY,
auto_clear: true,
direct: false,
};
}
let zero: MsFlags = MsFlags::empty();
if flags.bitand(MsFlags::MS_REMOUNT).eq(&zero) || data.is_some() {
mount(source, target.as_ref(), fs_type, oflags, data).map_err(mount_error!(
let mut lo_file = String::new();
let s = if lo_setup {
lo_file = setup_loop(source, loop_params)?;
Some(lo_file.as_str())
} else {
source
};
mount(s, target.as_ref(), fs_type, oflags, data).map_err(mount_error!(
e,
"Mount {:?} to {}",
source,
@ -605,18 +699,297 @@ pub fn mount_rootfs(
Ok(())
}
#[cfg(not(target_os = "linux"))]
pub fn mount_rootfs(
fs_type: Option<&str>,
source: Option<&str>,
options: &[String],
target: impl AsRef<Path>,
) -> Result<()> {
Err(Error::Unimplemented("start".to_string()))
/// Attach `source` to a free loop device and return the device path
/// (e.g. "/dev/loop3").
///
/// Retries up to 100 times: another process can grab the device number
/// between `get_free_loop_dev` and the attach attempt, which surfaces as
/// an EBUSY error.
fn setup_loop(source: Option<&str>, params: LoopParams) -> Result<String> {
    let src = source.ok_or(other!("loop source is None"))?;
    for _ in 0..100 {
        let num = get_free_loop_dev()?;
        let loop_dev = format!("{}{}", LOOP_DEV_FORMAT, num);
        match setup_loop_dev(src, loop_dev.as_str(), &params) {
            Ok(_) => return Ok(loop_dev),
            Err(e) => {
                // The device was taken in the meantime; try the next one.
                if e.to_string().contains(EBUSY_STRING) {
                    continue;
                } else {
                    return Err(e);
                }
            }
        }
    }
    Err(Error::Other(
        "creating new loopback device after 100 times".to_string(),
    ))
}
/// Ask the kernel for a free loop device via the LOOP_CTL_GET_FREE ioctl
/// on /dev/loop-control.
///
/// Returns the free device number (the N in /dev/loopN) on success.
pub fn get_free_loop_dev() -> Result<i32> {
    const LOOP_CTL_GET_FREE: i32 = 0x4c82;
    let loop_control = File::options()
        .read(true)
        .write(true)
        .open(LOOP_CONTROL_PATH)
        .map_err(|e| Error::IoError {
            context: format!("open {} error: ", LOOP_CONTROL_PATH),
            err: e,
        })?;
    unsafe {
        // glibc declares ioctl's request argument as c_ulong while musl
        // declares it as c_int, hence the per-target casts.
        #[cfg(target_env = "gnu")]
        let ret = libc::ioctl(
            loop_control.as_raw_fd() as libc::c_int,
            LOOP_CTL_GET_FREE as libc::c_ulong,
        ) as i32;
        #[cfg(target_env = "musl")]
        let ret = libc::ioctl(
            loop_control.as_raw_fd() as libc::c_int,
            LOOP_CTL_GET_FREE as libc::c_int,
        ) as i32;
        // A negative return becomes an errno-based error via nix.
        match nix::errno::Errno::result(ret) {
            Ok(ret) => Ok(ret),
            Err(e) => Err(Error::Nix(e)),
        }
    }
}
/// Attach `backing_file` to the loop device node `loop_dev` and configure
/// it according to `params`.
///
/// Steps: open both files, LOOP_SET_FD to attach, LOOP_SET_STATUS64 to set
/// the backing-file name and flags, and optionally LOOP_SET_DIRECT_IO.
/// If a step after the attach fails, the device is detached again
/// (LOOP_CLR_FD) before the error is returned. Returns the opened loop
/// device file on success.
pub fn setup_loop_dev(backing_file: &str, loop_dev: &str, params: &LoopParams) -> Result<File> {
    const LOOP_SET_FD: u32 = 0x4c00;
    const LOOP_CLR_FD: u32 = 0x4c01;
    const LOOP_SET_STATUS64: u32 = 0x4c04;
    const LOOP_SET_DIRECT_IO: u32 = 0x4c08;
    const LO_FLAGS_READ_ONLY: u32 = 0x1;
    const LO_FLAGS_AUTOCLEAR: u32 = 0x4;

    // glibc declares ioctl's request parameter as c_ulong while musl uses
    // c_int; alias the type once instead of duplicating every ioctl call
    // behind cfg attributes (which previously left the SET_STATUS64 error
    // check gated on musl only, and `ret` undefined there).
    #[cfg(target_env = "gnu")]
    type IoctlRequest = libc::c_ulong;
    #[cfg(target_env = "musl")]
    type IoctlRequest = libc::c_int;

    let mut open_options = File::options();
    open_options.read(true);
    if !params.readonly {
        open_options.write(true);
    }
    // 1. open the backing file and the loop device node
    let back = open_options
        .open(backing_file)
        .map_err(|e| Error::IoError {
            context: format!("open {} error: ", backing_file),
            err: e,
        })?;
    let loop_dev = open_options.open(loop_dev).map_err(|e| Error::IoError {
        context: format!("open {} error: ", loop_dev),
        err: e,
    })?;

    // Detach the backing file again so a half-configured device is not
    // leaked when one of the later configuration steps fails.
    let detach = |dev: &File| unsafe {
        libc::ioctl(
            dev.as_raw_fd() as libc::c_int,
            LOOP_CLR_FD as IoctlRequest,
            0,
        );
    };

    // 2. set FD: attach the backing file to the device
    unsafe {
        let ret = libc::ioctl(
            loop_dev.as_raw_fd() as libc::c_int,
            LOOP_SET_FD as IoctlRequest,
            back.as_raw_fd() as libc::c_int,
        );
        if let Err(e) = nix::errno::Errno::result(ret) {
            return Err(Error::Nix(e));
        }
    }

    // 3. set status: backing file name plus read-only/auto-clear flags
    let mut info = LoopInfo::default();
    // lo_file_name is a fixed 64-byte kernel buffer; copy at most that
    // many bytes. Slicing the byte view (rather than the &str) cannot
    // panic on a UTF-8 character boundary.
    let name_bytes = backing_file.as_bytes();
    let name_len = name_bytes.len().min(info.file_name.len());
    info.file_name[..name_len].copy_from_slice(&name_bytes[..name_len]);
    if params.readonly {
        info.flags |= LO_FLAGS_READ_ONLY;
    }
    if params.auto_clear {
        info.flags |= LO_FLAGS_AUTOCLEAR;
    }
    unsafe {
        // LOOP_SET_STATUS64 expects a pointer to the loop_info64 struct.
        let ret = libc::ioctl(
            loop_dev.as_raw_fd() as libc::c_int,
            LOOP_SET_STATUS64 as IoctlRequest,
            &info,
        );
        if let Err(e) = nix::errno::Errno::result(ret) {
            detach(&loop_dev);
            return Err(Error::Nix(e));
        }
    }

    // 4. optionally enable direct I/O on the backing file
    if params.direct {
        unsafe {
            let ret = libc::ioctl(
                loop_dev.as_raw_fd() as libc::c_int,
                LOOP_SET_DIRECT_IO as IoctlRequest,
                1,
            );
            if let Err(e) = nix::errno::Errno::result(ret) {
                detach(&loop_dev);
                return Err(Error::Nix(e));
            }
        }
    }
    Ok(loop_dev)
}
/// Recursively unmount every mount point at or underneath `target`,
/// deepest paths first, passing `flags` through to umount2.
///
/// A `None` target is a no-op.
pub fn umount_recursive(target: Option<&str>, flags: i32) -> Result<()> {
    if let Some(target) = target {
        let mut mounts = get_mounts(Some(prefix_filter(target.to_string())))?;
        // Unmount the deepest mount points first so parents are not busy.
        mounts.sort_by(|a, b| b.mountpoint.len().cmp(&a.mountpoint.len()));
        // Iterate directly (the old `enumerate` index was unused) and clone
        // only the mountpoint string instead of the whole MountInfo.
        for mount in &mounts {
            umount_all(Some(mount.mountpoint.clone()), flags)?;
        }
    }
    Ok(())
}
/// Repeatedly unmount `target` until nothing is mounted there any more.
///
/// The kernel reports EINVAL once the path is no longer a mount point,
/// which serves as the loop's successful exit condition; any other errno
/// is propagated. A path that does not exist is treated as already
/// unmounted. A `None` target is a no-op.
fn umount_all(target: Option<String>, flags: i32) -> Result<()> {
    if let Some(target) = target {
        if let Err(e) = std::fs::metadata(target.clone()) {
            if e.kind() == std::io::ErrorKind::NotFound {
                return Ok(());
            }
        }
        // Several mounts can be stacked on one path; each umount2 call
        // removes one layer, so keep going until EINVAL says "not mounted".
        loop {
            if let Err(e) = nix::mount::umount2(
                &std::path::PathBuf::from(&target),
                MntFlags::from_bits(flags).unwrap_or(MntFlags::empty()),
            ) {
                if e == nix::errno::Errno::EINVAL {
                    return Ok(());
                }
                return Err(Error::from(e));
            }
        }
    };
    Ok(())
}
/// Build a filter for `get_mounts` that returns `true` (skip) for mount
/// points NOT under `prefix`, and `false` for those to keep.
fn prefix_filter(prefix: String) -> impl Fn(MountInfo) -> bool {
    move |m: MountInfo| {
        // Append '/' to both sides so "/foo" does not match "/foobar".
        // `starts_with` replaces the old `strip_prefix` whose binding was
        // never used.
        !(m.mountpoint.clone() + "/").starts_with(&(prefix.clone() + "/"))
    }
}
/// Parse `/proc/self/mountinfo` into a list of `MountInfo` entries.
///
/// When a filter `f` is supplied, entries for which `f` returns `true`
/// are skipped (the filter selects mount points to EXCLUDE). Lines that
/// cannot be parsed are silently dropped rather than failing the call.
fn get_mounts<F>(f: Option<F>) -> Result<Vec<MountInfo>>
where
    F: Fn(MountInfo) -> bool,
{
    let mountinfo_path = "/proc/self/mountinfo";
    let file = std::fs::File::open(mountinfo_path).map_err(io_error!(e, "io_error"))?;
    let reader = std::io::BufReader::new(file);
    let lines: Vec<String> = reader.lines().map_while(|line| line.ok()).collect();
    let mount_points = lines
        .into_iter()
        .filter_map(|line| {
            /*
            See http://man7.org/linux/man-pages/man5/proc.5.html
            36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
            (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
            (1) mount ID: unique identifier of the mount (may be reused after umount)
            (2) parent ID: ID of parent (or of self for the top of the mount tree)
            (3) major:minor: value of st_dev for files on filesystem
            (4) root: root of the mount within the filesystem
            (5) mount point: mount point relative to the process's root
            (6) mount options: per mount options
            (7) optional fields: zero or more fields of the form "tag[:value]"
            (8) separator: marks the end of the optional fields
            (9) filesystem type: name of filesystem of the form "type[.subtype]"
            (10) mount source: filesystem specific information or "none"
            (11) super options: per super block options
            In other words, we have:
            * 6 mandatory fields (1)..(6)
            * 0 or more optional fields (7)
            * a separator field (8)
            * 3 mandatory fields (9)..(11)
            */
            let parts: Vec<&str> = line.split_whitespace().collect();
            if parts.len() < 10 {
                // mountpoint parse error.
                return None;
            }
            // separator field: assume no optional fields first (4 trailing
            // mandatory tokens), then search backwards for the real "-".
            let mut sep_idx = parts.len() - 4;
            // In Linux <= 3.9 mounting a cifs with spaces in a share
            // name (like "//srv/My Docs") _may_ end up having a space
            // in the last field of mountinfo (like "unc=//serv/My Docs").
            // Since kernel 3.10-rc1, cifs option "unc=" is ignored,
            // so spaces should not appear.
            //
            // Check for a separator, and work around the spaces bug
            for i in (0..sep_idx).rev() {
                if parts[i] == "-" {
                    sep_idx = i;
                    break;
                }
                if sep_idx == 5 {
                    // mountpoint parse error
                    return None;
                }
            }
            let mut mount_info = MountInfo {
                id: str::parse::<u32>(parts[0]).ok()?,
                parent: str::parse::<u32>(parts[1]).ok()?,
                major: 0,
                minor: 0,
                root: parts[3].to_string(),
                mountpoint: parts[4].to_string(),
                options: parts[5].to_string(),
                optional: parts[6..sep_idx].join(" "),
                fs_type: parts[sep_idx + 1].to_string(),
                source: parts[sep_idx + 2].to_string(),
                vfs_options: parts[sep_idx + 3].to_string(),
            };
            // Field (3) is "major:minor"; exactly two components expected.
            let major_minor = parts[2].splitn(3, ':').collect::<Vec<&str>>();
            if major_minor.len() != 2 {
                // mountpoint parse error.
                return None;
            }
            mount_info.major = str::parse::<u32>(major_minor[0]).ok()?;
            mount_info.minor = str::parse::<u32>(major_minor[1]).ok()?;
            if let Some(f) = &f {
                if f(mount_info.clone()) {
                    // skip this mountpoint. This mountpoint is not the container's mountpoint
                    return None;
                }
            }
            Some(mount_info)
        })
        .collect::<Vec<MountInfo>>();
    Ok(mount_points)
}
#[cfg(test)]
#[cfg(target_os = "linux")]
mod tests {
use super::*;
@ -723,4 +1096,66 @@ mod tests {
assert_eq!(options, expected_options);
}
}
// Exercises mount_rootfs + umount_recursive end to end with an overlayfs
// mount built from temp directories.
// NOTE(review): this needs root privileges and overlayfs support to pass;
// it is gated on the "async" feature — confirm that gating is intentional.
#[cfg(feature = "async")]
#[test]
fn test_mount_rootfs_umount_recursive() {
    let target = tempfile::tempdir().expect("create target dir error");
    let lower1 = tempfile::tempdir().expect("create lower1 dir error");
    let lower2 = tempfile::tempdir().expect("create lower2 dir error");
    let upperdir = tempfile::tempdir().expect("create upperdir dir error");
    let workdir = tempfile::tempdir().expect("create workdir dir error");
    let options = vec![
        "lowerdir=".to_string()
            + lower1.path().to_str().expect("lower1 path to str error")
            + ":"
            + lower2.path().to_str().expect("lower2 path to str error"),
        "upperdir=".to_string()
            + upperdir
                .path()
                .to_str()
                .expect("upperdir path to str error"),
        "workdir=".to_string() + workdir.path().to_str().expect("workdir path to str error"),
    ];
    // mount target.
    let result = mount_rootfs(Some("overlay"), Some("overlay"), &options, &target);
    assert!(result.is_ok());
    let mut mountinfo = get_mounts(Some(prefix_filter(
        target
            .path()
            .to_str()
            .expect("target path to str error")
            .to_string(),
    )))
    .expect("get_mounts error");
    // make sure the target has been mounted.
    assert_ne!(0, mountinfo.len());
    // umount target.
    let result = umount_recursive(target.path().to_str(), 0);
    assert!(result.is_ok());
    mountinfo = get_mounts(Some(prefix_filter(
        target
            .path()
            .to_str()
            .expect("target path to str error")
            .to_string(),
    )))
    .expect("get_mounts error");
    // make sure the target has been unmounted.
    assert_eq!(0, mountinfo.len());
}
// Attaches a temp file to a free loop device.
// NOTE(review): despite the name, this calls setup_loop (which picks a
// free device number) rather than setup_loop_dev directly, and it needs
// root access to /dev/loop-control to pass.
#[cfg(feature = "async")]
#[test]
fn test_setup_loop_dev() {
    let path = tempfile::NamedTempFile::new().expect("cannot create tempfile");
    let backing_file = path.path().to_str();
    // auto_clear lets the kernel detach the device once it is closed, so
    // the test does not leak a configured loop device.
    let params = LoopParams {
        readonly: false,
        auto_clear: true,
        direct: false,
    };
    let result = setup_loop(backing_file, params);
    assert!(result.is_ok());
}
}

View File

@ -13,9 +13,23 @@
See the License for the specific language governing permissions and
limitations under the License.
*/
#![allow(unused)]
#[cfg(windows)]
pub(crate) mod windows;
#[cfg(windows)]
#[allow(unused_imports)]
pub use crate::sys::windows::NamedPipeLogger;
use std::path::Path;
use crate::error::{Error, Result};
/// No-op fallback used on platforms without mount support (e.g. Windows).
///
/// On non-Linux systems we should return Ok instead of exiting with an
/// error, so callers behave uniformly across platforms.
pub fn mount_rootfs(
    fs_type: Option<&str>,
    source: Option<&str>,
    options: &[String],
    target: impl AsRef<Path>,
) -> Result<()> {
    // Parameters are accepted only for signature parity with the Linux
    // implementation and are intentionally unused here.
    Ok(())
}
/// No-op fallback for platforms without mount support; always succeeds.
pub fn umount_recursive(target: Option<&str>, flags: i32) -> Result<()> {
    Ok(())
}

View File

@ -75,8 +75,7 @@ use crate::{
};
cfg_unix! {
use crate::{SOCKET_FD, parse_sockaddr};
use command_fds::{CommandFdExt, FdMapping};
use crate::parse_sockaddr;
use libc::{SIGCHLD, SIGINT, SIGPIPE, SIGTERM};
use nix::{
errno::Errno,
@ -252,6 +251,12 @@ where
Ok(())
}
_ => {
if flags.socket.is_empty() {
return Err(Error::InvalidArgument(String::from(
"Shim socket cannot be empty",
)));
}
#[cfg(windows)]
util::setup_debugger_event();
@ -265,13 +270,16 @@ where
}
let publisher = publisher::RemotePublisher::new(&ttrpc_address)?;
let task = shim.create_task_service(publisher);
let task_service = create_task(Arc::new(Box::new(task)));
let mut server = create_server(flags)?;
let task = Box::new(shim.create_task_service(publisher))
as Box<dyn containerd_shim_protos::Task + Send + Sync + 'static>;
let task_service = create_task(Arc::from(task));
let Some(mut server) = create_server_with_retry(&flags)? else {
signal_server_started();
return Ok(());
};
server = server.register_service(task_service);
server.start()?;
#[cfg(windows)]
signal_server_started();
info!("Shim successfully started, waiting for exit signal...");
@ -291,24 +299,46 @@ where
}
}
#[cfg(windows)]
#[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))]
fn create_server(_flags: args::Flags) -> Result<Server> {
fn create_server(flags: &args::Flags) -> Result<Server> {
start_listener(&flags.socket).map_err(io_error!(e, "starting listener"))?;
let mut server = Server::new();
#[cfg(unix)]
{
server = server.add_listener(SOCKET_FD)?;
}
#[cfg(windows)]
{
let address = socket_address(&_flags.address, &_flags.namespace, &_flags.id);
server = server.bind(address.as_str())?;
}
server = server.bind(&flags.socket)?;
Ok(server)
}
#[cfg(unix)]
#[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))]
fn create_server(flags: &args::Flags) -> Result<Server> {
use std::os::fd::IntoRawFd;
let listener = start_listener(&flags.socket).map_err(io_error!(e, "starting listener"))?;
let mut server = Server::new();
server = server.add_listener(listener.into_raw_fd())?;
Ok(server)
}
#[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))]
/// Create the ttrpc server, handling an already-in-use shim socket.
///
/// Returns `Ok(None)` when another healthy shim is already serving the
/// socket (single-shim-per-container setups): its address is written out
/// and this process should exit without starting a server. Otherwise the
/// stale socket is removed and creation is retried once.
fn create_server_with_retry(flags: &args::Flags) -> Result<Option<Server>> {
    // First attempt; only AddrInUse triggers the recovery path below.
    let server = match create_server(flags) {
        Ok(server) => server,
        Err(Error::IoError { err, .. }) if err.kind() == std::io::ErrorKind::AddrInUse => {
            // If the address is already in use then make sure it is up and running and return the address
            // This allows for running a single shim per container scenarios
            if let Ok(()) = wait_socket_working(&flags.socket, 5, 200) {
                write_address(&flags.socket)?;
                return Ok(None);
            }
            remove_socket(&flags.socket)?;
            create_server(flags)?
        }
        Err(e) => return Err(e),
    };
    Ok(Some(server))
}
#[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))]
fn setup_signals(_config: &Config) -> Option<AppSignals> {
#[cfg(unix)]
@ -462,96 +492,47 @@ pub fn spawn(opts: StartOpts, grouping: &str, vars: Vec<(&str, &str)>) -> Result
let cwd = env::current_dir().map_err(io_error!(e, ""))?;
let address = socket_address(&opts.address, &opts.namespace, grouping);
// Create socket and prepare listener.
// On Linux, We'll use `add_listener` when creating TTRPC server, on Windows the value isn't used hence the clippy allow
// (see note below about activation process for windows)
#[allow(clippy::let_unit_value)]
let _listener = match start_listener(&address) {
Ok(l) => l,
Err(e) => {
if e.kind() != std::io::ErrorKind::AddrInUse {
return Err(Error::IoError {
context: "".to_string(),
err: e,
});
};
// If the address is already in use then make sure it is up and running and return the address
// This allows for running a single shim per container scenarios
if let Ok(()) = wait_socket_working(&address, 5, 200) {
write_address(&address)?;
return Ok((0, address));
}
remove_socket(&address)?;
start_listener(&address).map_err(io_error!(e, ""))?
}
};
// Activation pattern comes from the hcsshim: https://github.com/microsoft/hcsshim/blob/v0.10.0-rc.7/cmd/containerd-shim-runhcs-v1/serve.go#L57-L70
// another way to do it would to create named pipe and pass it to the child process through handle inheritence but that would require duplicating
// the logic in Rust's 'command' for process creation. There is an issue in Rust to make it simplier to specify handle inheritence and this could
// be revisited once https://github.com/rust-lang/rust/issues/54760 is implemented.
let mut command = Command::new(cmd);
command.current_dir(cwd).envs(vars).args([
"-namespace",
&opts.namespace,
"-id",
&opts.id,
"-address",
&opts.address,
]);
command
.current_dir(cwd)
.stdout(Stdio::piped())
.stdin(Stdio::null())
.stderr(Stdio::null())
.envs(vars)
.args([
"-namespace",
&opts.namespace,
"-id",
&opts.id,
"-address",
&opts.address,
"-socket",
&address,
]);
if opts.debug {
command.arg("-debug");
}
#[cfg(unix)]
{
command
.stdout(Stdio::null())
.stdin(Stdio::null())
.stderr(Stdio::null())
.fd_mappings(vec![FdMapping {
parent_fd: _listener.into(),
child_fd: SOCKET_FD,
}])?;
command
.spawn()
.map_err(io_error!(e, "spawn shim"))
.map(|child| {
// Ownership of `listener` has been passed to child.
(child.id(), address)
})
}
// On Windows Rust currently sets the `HANDLE_FLAG_INHERIT` flag to true when using Command::spawn.
// When a child process is spawned by another process (containerd) the child process inherits the parent's stdin, stdout, and stderr handles.
// Due to the HANDLE_FLAG_INHERIT flag being set to true this will cause containerd to hand until the child process closes the handles.
// As a workaround we can Disables inheritance on the io pipe handles.
// This workaround comes from https://github.com/rust-lang/rust/issues/54760#issuecomment-1045940560
#[cfg(windows)]
{
// Activation pattern for Windows comes from the hcsshim: https://github.com/microsoft/hcsshim/blob/v0.10.0-rc.7/cmd/containerd-shim-runhcs-v1/serve.go#L57-L70
// Another way to do it would be to create a named pipe and pass it to the child process through handle inheritance, but that would require duplicating
// the logic in Rust's 'Command' for process creation. There is an issue in Rust to make it simpler to specify handle inheritance and this could
// be revisited once https://github.com/rust-lang/rust/issues/54760 is implemented.
let (mut reader, writer) = os_pipe::pipe().map_err(io_error!(e, "create pipe"))?;
let stdio_writer = writer.try_clone().unwrap();
disable_handle_inheritance();
command
.stdout(stdio_writer)
.stdin(Stdio::null())
.stderr(Stdio::null());
let mut child = command.spawn().map_err(io_error!(e, "spawn shim"))?;
// On Windows Rust currently sets the `HANDLE_FLAG_INHERIT` flag to true when using Command::spawn.
// When a child process is spawned by another process (containerd) the child process inherits the parent's stdin, stdout, and stderr handles.
// Due to the HANDLE_FLAG_INHERIT flag being set to true this will cause containerd to hang until the child process closes the handles.
// As a workaround we can disable inheritance on the io pipe handles.
// This workaround comes from https://github.com/rust-lang/rust/issues/54760#issuecomment-1045940560
disable_handle_inheritance();
command
.spawn()
.map_err(io_error!(e, "spawn shim"))
.map(|child| {
// IMPORTANT: we must drop the writer and command to close up handles before we copy the reader to stderr
// AND the shim Start method must NOT write to stdout/stderr
drop(writer);
drop(command);
io::copy(&mut reader, &mut io::stderr()).unwrap();
(child.id(), address)
})
}
let mut reader = child.stdout.take().unwrap();
std::io::copy(&mut reader, &mut std::io::stderr()).unwrap();
Ok((child.id(), address))
}
#[cfg(windows)]
@ -590,6 +571,19 @@ fn signal_server_started() {
}
}
// Signals that the server is up by duplicating stderr onto stdout, which
// closes the original stdout — the pipe end the spawning shim process is
// reading the address from — and thereby releases it.
// NOTE(review): comment adapted from the Windows named-pipe variant;
// confirm the pipe-release semantics hold for the Unix spawn path.
#[cfg(unix)]
fn signal_server_started() {
    use libc::{dup2, STDERR_FILENO, STDOUT_FILENO};
    unsafe {
        // dup2 atomically replaces STDOUT with a copy of STDERR, closing
        // the previous stdout descriptor in the process.
        if dup2(STDERR_FILENO, STDOUT_FILENO) < 0 {
            panic!("Error closing pipe: {}", std::io::Error::last_os_error())
        }
    }
}
#[cfg(test)]
mod tests {
use std::thread;

View File

@ -188,7 +188,8 @@ mod tests {
use std::os::unix::{io::AsRawFd, net::UnixListener};
let listener = UnixListener::bind(server_address).unwrap();
listener.set_nonblocking(true).unwrap();
let service = client::create_events(Arc::new(Box::new(FakeServer {})));
let task = Box::new(FakeServer {}) as Box<dyn Events + Send + Sync>;
let service = client::create_events(task.into());
let server = Server::new()
.add_listener(listener.as_raw_fd())
.unwrap()
@ -199,7 +200,7 @@ mod tests {
#[cfg(windows)]
{
let service = client::create_events(Arc::new(Box::new(FakeServer {})));
let service = client::create_events(Arc::new(FakeServer {}));
Server::new()
.bind(server_address)

View File

@ -1,17 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
pub(crate) mod named_pipe_logger;
pub use named_pipe_logger::NamedPipeLogger;

View File

@ -1,255 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
use std::{
io::{self, Write},
sync::{Arc, Mutex},
thread,
};
use log::{Metadata, Record};
use mio::{windows::NamedPipe, Events, Interest, Poll, Token};
use crate::logger;
pub struct NamedPipeLogger {
current_connection: Arc<Mutex<NamedPipe>>,
}
impl NamedPipeLogger {
pub fn new(namespace: &str, id: &str) -> Result<NamedPipeLogger, io::Error> {
let pipe_name = format!("\\\\.\\pipe\\containerd-shim-{}-{}-log", namespace, id);
let mut pipe_server = NamedPipe::new(pipe_name).unwrap();
let mut poll = Poll::new().unwrap();
poll.registry()
.register(
&mut pipe_server,
Token(0),
Interest::READABLE | Interest::WRITABLE,
)
.unwrap();
let current_connection = Arc::new(Mutex::new(pipe_server));
let server_connection = current_connection.clone();
let logger = NamedPipeLogger { current_connection };
thread::spawn(move || {
let mut events = Events::with_capacity(128);
loop {
poll.poll(&mut events, None).unwrap();
for event in events.iter() {
if event.is_writable() {
match server_connection.lock().unwrap().connect() {
Ok(()) => {}
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {
// this would block just keep processing
}
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
// this would block just keep processing
}
Err(e) => {
panic!("Error connecting to client: {}", e);
}
};
}
if event.is_readable() {
server_connection.lock().unwrap().disconnect().unwrap();
}
}
}
});
Ok(logger)
}
}
impl log::Log for NamedPipeLogger {
fn enabled(&self, metadata: &Metadata) -> bool {
metadata.level() <= log::max_level()
}
fn log(&self, record: &Record) {
if self.enabled(record.metadata()) {
// collect key_values but don't fail if error parsing
let mut writer = logger::SimpleWriteVistor::new();
let _ = record.key_values().visit(&mut writer);
let message = format!(
"time=\"{}\" level={}{} msg=\"{}\"\n",
logger::rfc3339_formated(),
record.level().as_str().to_lowercase(),
writer.as_str(),
record.args()
);
match self
.current_connection
.lock()
.unwrap()
.write_all(message.as_bytes())
{
Ok(_) => {}
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {
// this would block just keep processing
}
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
// this would block just keep processing
}
Err(e) if e.raw_os_error() == Some(536) => {
// no client connected
}
Err(e) if e.raw_os_error() == Some(232) => {
// client was connected but is in process of shutting down
}
Err(e) => {
panic!("Error writing to client: {}", e)
}
}
}
}
fn flush(&self) {
_ = self.current_connection.lock().unwrap().flush();
}
}
#[cfg(test)]
mod tests {
use std::{
fs::OpenOptions,
io::Read,
os::windows::{
fs::OpenOptionsExt,
io::{FromRawHandle, IntoRawHandle},
prelude::AsRawHandle,
},
time::Duration,
};
use log::{Log, Record};
use mio::{windows::NamedPipe, Events, Interest, Poll, Token};
use windows_sys::Win32::{
Foundation::ERROR_PIPE_NOT_CONNECTED, Storage::FileSystem::FILE_FLAG_OVERLAPPED,
};
use super::*;
#[test]
fn test_namedpipe_log_can_write_before_client_connected() {
let ns = "test".to_string();
let id = "notconnected".to_string();
let logger = NamedPipeLogger::new(&ns, &id).unwrap();
// test can write before a reader is connected (should succeed but the messages will be dropped)
log::set_max_level(log::LevelFilter::Info);
let record = Record::builder()
.level(log::Level::Info)
.line(Some(1))
.file(Some("sample file"))
.args(format_args!("hello"))
.build();
logger.log(&record);
logger.flush();
}
#[test]
fn test_namedpipe_log() {
use std::fs::File;
let ns = "test".to_string();
let id = "clients".to_string();
let pipe_name = format!("\\\\.\\pipe\\containerd-shim-{}-{}-log", ns, id);
let logger = NamedPipeLogger::new(&ns, &id).unwrap();
let mut client = create_client(pipe_name.as_str());
log::set_max_level(log::LevelFilter::Info);
let kvs: &[(&str, i32)] = &[("key", 1), ("b", 2)];
let record = Record::builder()
.level(log::Level::Info)
.line(Some(1))
.key_values(&kvs)
.args(format_args!("hello"))
.build();
logger.log(&record);
logger.flush();
let buf = read_message(&mut client, 73);
let message = std::str::from_utf8(&buf).unwrap();
assert!(message.starts_with("time=\""), "message was: {:?}", message);
assert!(
message.contains("level=info key=\"1\" b=\"2\" msg=\"hello\"\n"),
"message was: {:?}",
message
);
// test that we can reconnect after a reader disconnects
// we need to get the raw handle and drop that as well to force full disconnect
// and give a few milliseconds for the disconnect to happen
println!("dropping client");
let handle = client.as_raw_handle();
drop(client);
let f = unsafe { File::from_raw_handle(handle) };
drop(f);
std::thread::sleep(Duration::from_millis(100));
let mut client2 = create_client(pipe_name.as_str());
logger.log(&record);
logger.flush();
read_message(&mut client2, 51);
}
/// Polls `client` until it becomes readable and performs a single read of up
/// to `length` bytes, returning the buffer. Panics on EOF (`Ok(0)`), on
/// `ERROR_PIPE_NOT_CONNECTED`, or on any other read error.
fn read_message(client: &mut NamedPipe, length: usize) -> Vec<u8> {
    let mut poll = Poll::new().unwrap();
    poll.registry()
        .register(client, Token(1), Interest::READABLE)
        .unwrap();
    let mut events = Events::with_capacity(128);
    let mut buf = vec![0; length];
    loop {
        poll.poll(&mut events, Some(Duration::from_millis(10)))
            .unwrap();
        match client.read(&mut buf) {
            // EOF: the writer went away before sending anything.
            Ok(0) => {
                panic!("Read no bytes from pipe")
            }
            Ok(_) => {
                break;
            }
            Err(e) if e.raw_os_error() == Some(ERROR_PIPE_NOT_CONNECTED as i32) => {
                panic!("not connected to the pipe");
            }
            // The pipe is non-blocking; not readable yet, poll again.
            Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
                continue;
            }
            Err(e) => panic!("Error reading from pipe: {}", e),
        }
    }
    // `buf` is already an owned Vec<u8>; the previous `buf.to_vec()`
    // performed a needless extra allocation and copy.
    buf
}
/// Connects to the named pipe at `pipe_name` and wraps the handle in a
/// mio `NamedPipe`. The pipe is opened read/write with FILE_FLAG_OVERLAPPED,
/// which mio's non-blocking pipe wrapper requires.
fn create_client(pipe_name: &str) -> mio::windows::NamedPipe {
    let file = OpenOptions::new()
        .read(true)
        .write(true)
        .custom_flags(FILE_FLAG_OVERLAPPED)
        .open(pipe_name)
        .unwrap();
    // Safety: we own the freshly opened handle and hand exclusive
    // ownership to the NamedPipe wrapper.
    unsafe { NamedPipe::from_raw_handle(file.into_raw_handle()) }
}
}

View File

@ -26,13 +26,13 @@ prost-types.workspace = true
serde.workspace = true
thiserror.workspace = true
tonic.workspace = true
tokio = { workspace = true, features = ["sync"] }
tokio-stream = "0.1.8"
[dev-dependencies]
futures.workspace = true
log.workspace = true
simple_logger.workspace = true
tokio = { workspace = true, features = ["sync"] }
[build-dependencies]
tonic-build.workspace = true

169
deny.toml
View File

@ -9,6 +9,11 @@
# The values provided in this template are the default values that will be used
# when any section or field is not specified in your own configuration
# Root options
# The graph table configures how the dependency graph is constructed and thus
# which crates the checks are performed against
[graph]
# If 1 or more target triples (and optionally, target_features) are specified,
# only the specified targets will be checked when running `cargo deny check`.
# This means, if a particular package is only ever used as a target specific
@ -20,82 +25,78 @@
targets = [
# The triple can be any string, but only the target triples built in to
# rustc (as of 1.40) can be checked against actual config expressions
#{ triple = "x86_64-unknown-linux-musl" },
#"x86_64-unknown-linux-musl",
# You can also specify which target_features you promise are enabled for a
# particular target. target_features are currently not validated against
# the actual valid features supported by the target architecture.
#{ triple = "wasm32-unknown-unknown", features = ["atomics"] },
]
# When creating the dependency graph used as the source of truth when checks are
# executed, this field can be used to prune crates from the graph, removing them
# from the view of cargo-deny. This is an extremely heavy hammer, as if a crate
# is pruned from the graph, all of its dependencies will also be pruned unless
# they are connected to another crate in the graph that hasn't been pruned,
# so it should be used with care. The identifiers are [Package ID Specifications]
# (https://doc.rust-lang.org/cargo/reference/pkgid-spec.html)
#exclude = []
# If true, metadata will be collected with `--all-features`. Note that this can't
# be toggled off if true, if you want to conditionally enable `--all-features` it
# is recommended to pass `--all-features` on the cmd line instead
all-features = false
# If true, metadata will be collected with `--no-default-features`. The same
# caveat with `all-features` applies
no-default-features = false
# If set, these features will be enabled when collecting metadata. If `--features`
# is specified on the cmd line they will take precedence over this option.
#features = []
# The output table provides options for how/if diagnostics are outputted
[output]
# When outputting inclusion graphs in diagnostics that include features, this
# option can be used to specify the depth at which feature edges will be added.
# This option is included since the graphs can be quite large and the addition
# of features from the crate(s) to all of the graph roots can be far too verbose.
# This option can be overridden via `--feature-depth` on the cmd line
feature-depth = 1
# This section is considered when running `cargo deny check advisories`
# More documentation for the advisories section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html
[advisories]
# The path where the advisory database is cloned/fetched into
db-path = "~/.cargo/advisory-db"
# The path where the advisory databases are cloned/fetched into
#db-path = "$CARGO_HOME/advisory-dbs"
# The url(s) of the advisory databases to use
db-urls = ["https://github.com/rustsec/advisory-db"]
# The lint level for security vulnerabilities
vulnerability = "deny"
# The lint level for unmaintained crates
unmaintained = "warn"
# The lint level for crates that have been yanked from their source registry
yanked = "warn"
# The lint level for crates with security notices. Note that as of
# 2019-12-17 there are no security notice advisories in
# https://github.com/rustsec/advisory-db
notice = "warn"
#db-urls = ["https://github.com/rustsec/advisory-db"]
# A list of advisory IDs to ignore. Note that ignored advisories will still
# output a note when they are encountered.
ignore = [
"RUSTSEC-2024-0370", # proc-macro-error is unmaintained
"RUSTSEC-2024-0437", # Crash due to uncontrolled recursion in protobuf crate
#"RUSTSEC-0000-0000",
#{ id = "RUSTSEC-0000-0000", reason = "you can specify a reason the advisory is ignored" },
#"a-crate-that-is-yanked@0.1.1", # you can also ignore yanked crate versions if you wish
#{ crate = "a-crate-that-is-yanked@0.1.1", reason = "you can specify why you are ignoring the yanked crate" },
]
# Threshold for security vulnerabilities, any vulnerability with a CVSS score
# lower than the range specified will be ignored. Note that ignored advisories
# will still output a note when they are encountered.
# * None - CVSS Score 0.0
# * Low - CVSS Score 0.1 - 3.9
# * Medium - CVSS Score 4.0 - 6.9
# * High - CVSS Score 7.0 - 8.9
# * Critical - CVSS Score 9.0 - 10.0
#severity-threshold =
# If this is true, then cargo deny will use the git executable to fetch advisory database.
# If this is false, then it uses a built-in git library.
# Setting this to true can be helpful if you have special authentication requirements that cargo-deny does not support.
# See Git Authentication for more information about setting up git authentication.
#git-fetch-with-cli = true
# This section is considered when running `cargo deny check licenses`
# More documentation for the licenses section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html
[licenses]
# The lint level for crates which do not have a detectable license
unlicensed = "deny"
# List of explictly allowed licenses
# List of explicitly allowed licenses
# See https://spdx.org/licenses/ for list of possible licenses
# [possible values: any SPDX 3.11 short identifier (+ optional exception)].
allow = [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"Unicode-3.0",
#"Apache-2.0 WITH LLVM-exception",
]
# List of explicitly disallowed licenses
# See https://spdx.org/licenses/ for list of possible licenses
# [possible values: any SPDX 3.11 short identifier (+ optional exception)].
deny = [
#"Nokia",
]
# Lint level for licenses considered copyleft
copyleft = "warn"
# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses
# * both - The license will be approved if it is both OSI-approved *AND* FSF
# * either - The license will be approved if it is either OSI-approved *OR* FSF
# * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF
# * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved
# * neither - This predicate is ignored and the default lint level is used
allow-osi-fsf-free = "neither"
# Lint level used when no other predicates are matched
# 1. License isn't in the allow or deny lists
# 2. License isn't copyleft
# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither"
default = "deny"
# The confidence threshold for detecting a license from license text.
# The higher the value, the more closely the license text must be to the
# canonical license text of a valid SPDX license file.
@ -106,6 +107,7 @@ confidence-threshold = 0.8
exceptions = [
# Each entry is the crate and version constraint, and its specific allow
# list
#{ allow = ["Zlib"], crate = "adler32" },
{ allow = ["Unicode-DFS-2016"], name = "unicode-ident", version = "*" },
]
@ -113,10 +115,8 @@ exceptions = [
# adding a clarification entry for it allows you to manually specify the
# licensing information
#[[licenses.clarify]]
# The name of the crate the clarification applies to
#name = "ring"
# The optional version constraint for the crate
#version = "*"
# The package spec the clarification applies to
#crate = "ring"
# The SPDX expression for the license requirements of the crate
#expression = "MIT AND ISC AND OpenSSL"
# One or more files in the crate's source used as the "source of truth" for
@ -125,13 +125,15 @@ exceptions = [
# and the crate will be checked normally, which may produce warnings or errors
# depending on the rest of your configuration
#license-files = [
# Each entry is a crate relative path, and the (opaque) hash of its contents
#{ path = "LICENSE", hash = 0xbd0eed23 }
# Each entry is a crate relative path, and the (opaque) hash of its contents
#{ path = "LICENSE", hash = 0xbd0eed23 }
#]
[licenses.private]
# If true, ignores workspace crates that aren't published, or are only
# published to private registries
# published to private registries.
# To see how to mark a crate as unpublished (to the official registry),
# visit https://doc.rust-lang.org/cargo/reference/manifest.html#the-publish-field.
ignore = false
# One or more private registries that you might publish crates to, if a crate
# is only published to private registries, and ignore is true, the crate will
@ -154,30 +156,63 @@ wildcards = "allow"
# * simplest-path - The path to the version with the fewest edges is highlighted
# * all - Both lowest-version and simplest-path are used
highlight = "all"
# The default lint level for `default` features for crates that are members of
# the workspace that is being checked. This can be overridden by allowing/denying
# `default` on a crate-by-crate basis if desired.
workspace-default-features = "allow"
# The default lint level for `default` features for external crates that are not
# members of the workspace. This can be overridden by allowing/denying `default`
# on a crate-by-crate basis if desired.
external-default-features = "allow"
# List of crates that are allowed. Use with care!
allow = [
#{ name = "ansi_term", version = "=0.11.0" },
#"ansi_term@0.11.0",
#{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is allowed" },
]
# List of crates to deny
deny = [
# Each entry is the name of a crate and a version range. If version is
# not specified, all versions will be matched.
#{ name = "ansi_term", version = "=0.11.0" },
#
#"ansi_term@0.11.0",
#{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is banned" },
# Wrapper crates can optionally be specified to allow the crate when it
# is a direct dependency of the otherwise banned crate
#{ name = "ansi_term", version = "=0.11.0", wrappers = [] },
#{ crate = "ansi_term@0.11.0", wrappers = ["this-crate-directly-depends-on-ansi_term"] },
]
# List of features to allow/deny
# Each entry is the name of a crate and a version range. If version is
# not specified, all versions will be matched.
#[[bans.features]]
#crate = "reqwest"
# Features to not allow
#deny = ["json"]
# Features to allow
#allow = [
# "rustls",
# "__rustls",
# "__tls",
# "hyper-rustls",
# "rustls",
# "rustls-pemfile",
# "rustls-tls-webpki-roots",
# "tokio-rustls",
# "webpki-roots",
#]
# If true, the allowed features must exactly match the enabled feature set. If
# this is set there is no point setting `deny`
#exact = true
# Certain crates/versions that will be skipped when doing duplicate detection.
skip = [
#{ name = "ansi_term", version = "=0.11.0" },
#"ansi_term@0.11.0",
#{ crate = "ansi_term@0.11.0", reason = "you can specify a reason why it can't be updated/removed" },
]
# Similarly to `skip` allows you to skip certain crates during duplicate
# detection. Unlike skip, it also includes the entire tree of transitive
# dependencies starting at the specified crate, up to a certain depth, which is
# by default infinite
# by default infinite.
skip-tree = [
#{ name = "ansi_term", version = "=0.11.0", depth = 20 },
#"ansi_term@0.11.0", # will be skipped along with _all_ of its direct and transitive dependencies
#{ crate = "ansi_term@0.11.0", depth = 20 },
]
# This section is considered when running `cargo deny check sources`.
@ -195,3 +230,11 @@ unknown-git = "warn"
allow-registry = ["https://github.com/rust-lang/crates.io-index"]
# List of URLs for allowed Git repositories
allow-git = []
[sources.allow-org]
# github.com organizations to allow git sources for
github = []
# gitlab.com organizations to allow git sources for
gitlab = []
# bitbucket.org organizations to allow git sources for
bitbucket = []

View File

@ -8,7 +8,7 @@
# For each crate, the script expects a text file named `rsync.txt` in the crate's directory.
# The file should contain a list of proto files that should be synchronized from containerd.
VERSION="v2.0.1"
VERSION="v2.1.1"
set -x