diff --git a/testcontainers/src/core/containers/async_container.rs b/testcontainers/src/core/containers/async_container.rs
index f618d74a..d819de32 100644
--- a/testcontainers/src/core/containers/async_container.rs
+++ b/testcontainers/src/core/containers/async_container.rs
@@ -421,12 +421,7 @@ where
 mod tests {
     use tokio::io::AsyncBufReadExt;

-    use crate::{
-        core::{ContainerPort, ContainerState, ExecCommand, WaitFor},
-        images::generic::GenericImage,
-        runners::AsyncRunner,
-        Image,
-    };
+    use crate::{images::generic::GenericImage, runners::AsyncRunner};

     #[tokio::test]
     async fn async_logs_are_accessible() -> anyhow::Result<()> {
diff --git a/testcontainers/src/core/containers/request.rs b/testcontainers/src/core/containers/request.rs
index c993dd81..6e74d920 100644
--- a/testcontainers/src/core/containers/request.rs
+++ b/testcontainers/src/core/containers/request.rs
@@ -41,6 +41,64 @@ pub struct ContainerRequest<I: Image> {
     pub(crate) startup_timeout: Option<Duration>,
     pub(crate) working_dir: Option<String>,
     pub(crate) log_consumers: Vec<Box<dyn LogConsumer>>,
+
+    /// The length of a CPU period in microseconds. The default is 100000; this configures
+    /// how CFS schedules the threads for this container. Normally you don't adjust this and
+    /// instead set the CPU quota or nano CPUs. You might want to set this to increase or
+    /// reduce the context switching the container is subjected to.
+    pub(crate) cpu_period: Option<i64>,
+
+    /// Microseconds of CPU time that the container can get in a CPU period.
+    /// Most users will want to set the CPU quota to their desired CPU count * 100000.
+    /// For example, to limit a container to 2 CPUs, set the CPU quota to 200000.
+    /// This is based on the default CPU period of 100000.
+    /// If the CPU quota is set to 0, the container is not limited.
+    pub(crate) cpu_quota: Option<i64>,
+
+    /// The length of a CPU real-time period in microseconds. Set to 0 to allocate no time to real-time tasks.
+    pub(crate) cpu_realtime_period: Option<i64>,
+
+    /// The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time to real-time tasks.
+    pub(crate) cpu_realtime_runtime: Option<i64>,
+
+    /// CPUs in which to allow execution (e.g., `0-3`, `0,1`).
+    /// Core pinning should help with performance consistency and context switching in some cases.
+    pub(crate) cpuset_cpus: Option<String>,
+
+    /// CPU quota in units of 10⁻⁹ CPUs. This is basically what the `--cpus` flag turns into,
+    /// but the raw value is denominated in billionths of a CPU. `cpu_period` and `cpu_quota`
+    /// give you more control over the scheduler.
+    pub(crate) nano_cpus: Option<i64>,
+
+    /// Memory limit for the container; the _minimum_ is 6 MiB.
+    /// This is the same as `HostConfig::memory`.
+    pub(crate) memory: Option<i64>,
+
+    /// Memory reservation (soft limit). Analogous to the JVM's `-Xms` option.
+    /// The _minimum_ is 6 MiB.
+    /// This is the same as `HostConfig::memory_reservation`.
+    pub(crate) memory_reservation: Option<i64>,
+
+    /// Total memory limit (memory + swap). Set to `-1` to enable unlimited swap.
+    /// Same 6 MiB minimum as `memory`.
+    pub(crate) memory_swap: Option<i64>,
+
+    /// Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+    pub(crate) memory_swappiness: Option<i64>,
+
+    /// Disable the OOM killer for the container. This has no effect unless `-m`
+    /// (the memory limit, cf. `memory` on this struct) is set.
+    /// You can disable the OOM killer by writing "1" to the memory.oom_control file, as in:
+    /// ```ignore
+    /// echo 1 > memory.oom_control
+    /// ```
+    /// This operation is only allowed for the top cgroup of a sub-hierarchy.
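+    /// Note: `memory.oom_control` is the cgroup v1 interface; on cgroup v2 hosts,
+    /// Docker does not support disabling the OOM killer this way.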
+    /// If the OOM killer is disabled, tasks under the cgroup will hang/sleep
+    /// in the memory cgroup's OOM-waitqueue when they request accountable memory.
+    /// <https://lwn.net/Articles/432224/>
+    pub(crate) oom_kill_disable: Option<bool>,
+
+    /// Tune a container's PIDs limit. Set to `0` or `-1` for unlimited, or leave unset (`None`) to not change it.
+    pub(crate) pids_limit: Option<i64>,
+
     #[cfg(feature = "reusable-containers")]
     pub(crate) reuse: crate::ReuseDirective,
 }
@@ -187,6 +245,42 @@ impl<I: Image> ContainerRequest<I> {
         self.working_dir.as_deref()
     }

+    pub fn cpu_period(&self) -> Option<i64> {
+        self.cpu_period
+    }
+    pub fn cpu_quota(&self) -> Option<i64> {
+        self.cpu_quota
+    }
+    pub fn cpu_realtime_period(&self) -> Option<i64> {
+        self.cpu_realtime_period
+    }
+    pub fn cpu_realtime_runtime(&self) -> Option<i64> {
+        self.cpu_realtime_runtime
+    }
+    pub fn cpuset_cpus(&self) -> Option<&str> {
+        self.cpuset_cpus.as_deref()
+    }
+    pub fn nano_cpus(&self) -> Option<i64> {
+        self.nano_cpus
+    }
+    pub fn memory(&self) -> Option<i64> {
+        self.memory
+    }
+    pub fn memory_reservation(&self) -> Option<i64> {
+        self.memory_reservation
+    }
+    pub fn memory_swap(&self) -> Option<i64> {
+        self.memory_swap
+    }
+    pub fn memory_swappiness(&self) -> Option<i64> {
+        self.memory_swappiness
+    }
+    pub fn oom_kill_disable(&self) -> Option<bool> {
+        self.oom_kill_disable
+    }
+    pub fn pids_limit(&self) -> Option<i64> {
+        self.pids_limit
+    }
+
     /// Indicates that the container will not be stopped when it is dropped
     #[cfg(feature = "reusable-containers")]
     pub fn reuse(&self) -> crate::ReuseDirective {
@@ -219,6 +313,18 @@ impl<I: Image> From<I> for ContainerRequest<I> {
             startup_timeout: None,
             working_dir: None,
             log_consumers: vec![],
+            cpu_period: None,
+            cpu_quota: None,
+            cpu_realtime_period: None,
+            cpu_realtime_runtime: None,
+            cpuset_cpus: None,
+            nano_cpus: None,
+            memory: None,
+            memory_reservation: None,
+            memory_swap: None,
+            memory_swappiness: None,
+            oom_kill_disable: None,
+            pids_limit: None,
             #[cfg(feature = "reusable-containers")]
             reuse: crate::ReuseDirective::Never,
         }
@@ -265,8 +371,12 @@ impl<I: Image + Debug> Debug for ContainerRequest<I> {
             .field("cgroupns_mode", &self.cgroupns_mode)
             .field("userns_mode", &self.userns_mode)
             .field("startup_timeout", &self.startup_timeout)
-            .field("working_dir", &self.working_dir);
-
+            .field("working_dir", &self.working_dir)
+            .field("cpu_period", &self.cpu_period)
+            .field("cpu_quota", &self.cpu_quota)
+            .field("cpu_realtime_period", &self.cpu_realtime_period)
+            .field("cpu_realtime_runtime", &self.cpu_realtime_runtime)
+            .field("cpuset_cpus", &self.cpuset_cpus)
+            .field("nano_cpus", &self.nano_cpus)
+            .field("memory", &self.memory)
+            .field("memory_reservation", &self.memory_reservation)
+            .field("memory_swap", &self.memory_swap)
+            .field("memory_swappiness", &self.memory_swappiness)
+            .field("oom_kill_disable", &self.oom_kill_disable)
+            .field("pids_limit", &self.pids_limit);

         #[cfg(feature = "reusable-containers")]
         repr.field("reusable", &self.reuse);
diff --git a/testcontainers/src/core/image/image_ext.rs b/testcontainers/src/core/image/image_ext.rs
index c7497f37..14e6105c 100644
--- a/testcontainers/src/core/image/image_ext.rs
+++ b/testcontainers/src/core/image/image_ext.rs
@@ -157,6 +157,66 @@ pub trait ImageExt<I: Image> {
     /// Allows to follow the container logs for the whole lifecycle of the container, starting from the creation.
     fn with_log_consumer(self, log_consumer: impl LogConsumer + 'static) -> ContainerRequest<I>;

+    /// Sets the CPU period for the container.
+    /// If unset, the daemon default (100000) applies.
+    /// The length of a CPU period in microseconds.
+    /// <https://docs.docker.com/engine/reference/commandline/run/#cpu-period>
+    fn with_cpu_period(self, cpu_period: impl Into<i64>) -> ContainerRequest<I>;
+
+    /// Sets the CPU quota for the container.
+    /// If unset, the daemon default applies.
+    /// Microseconds of CPU time that the container can get in a CPU period.
+    /// <https://docs.docker.com/engine/reference/commandline/run/#cpu-quota>
+    /// Most users will want to set the CPU quota to their desired CPU count * 100000.
+    /// For example, to limit a container to 2 CPUs, set the CPU quota to 200000.
+    /// This is based on the default CPU period of 100000.
+    /// If the CPU quota is set to 0, the container is not limited.
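+    ///
+    /// A minimal sketch (values assume the default 100000 µs period; `GenericImage`
+    /// stands in for any image):
+    /// ```ignore
+    /// // 2 CPUs * 100000 µs period = 200000 µs of CPU time per period.
+    /// let request = GenericImage::new("alpine", "latest").with_cpu_quota(200_000);
+    /// ```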
+    fn with_cpu_quota(self, cpu_quota: impl Into<i64>) -> ContainerRequest<I>;
+
+    /// Sets the CPU real-time period for the container.
+    /// If unset, the daemon default applies.
+    /// The length of a CPU real-time period in microseconds.
+    fn with_cpu_realtime_period(self, cpu_realtime_period: impl Into<i64>) -> ContainerRequest<I>;
+
+    /// Sets the CPU real-time runtime for the container.
+    /// The length of a CPU real-time runtime in microseconds.
+    fn with_cpu_realtime_runtime(
+        self,
+        cpu_realtime_runtime: impl Into<i64>,
+    ) -> ContainerRequest<I>;
+
+    /// Sets the CPUs in which to allow execution (e.g., `0-3`, `0,1`).
+    /// Core pinning should help with performance consistency and context switching in some cases.
+    /// If unset, the daemon default applies.
+    fn with_cpuset_cpus(self, cpuset_cpus: impl Into<String>) -> ContainerRequest<I>;
+
+    /// Sets the CPU quota in units of 10⁻⁹ CPUs, i.e. what the `--cpus` flag turns into
+    /// (cf. `nano_cpus` on `ContainerRequest`).
+    fn with_nano_cpus(self, nano_cpus: impl Into<i64>) -> ContainerRequest<I>;
+
+    /// Memory limit for the container; the _minimum_ is 6 MiB.
+    /// This is the same as `HostConfig::memory`.
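+    ///
+    /// A minimal sketch (hypothetical limit; `GenericImage` stands in for any image):
+    /// ```ignore
+    /// // 512 MiB hard limit, expressed in bytes.
+    /// let request = GenericImage::new("alpine", "latest").with_memory(512 * 1024 * 1024);
+    /// ```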
+    fn with_memory(self, bytes: i64) -> ContainerRequest<I>;
+
+    /// Memory reservation (soft limit). Analogous to the JVM's `-Xms` option.
+    /// The _minimum_ is 6 MiB.
+    /// This is the same as `HostConfig::memory_reservation`.
+    fn with_memory_reservation(self, bytes: i64) -> ContainerRequest<I>;
+
+    /// Total memory limit (memory + swap). Set to `-1` to enable unlimited swap.
+    /// Same 6 MiB minimum as `memory`. (The `i64` type matches the corresponding
+    /// bollard `HostConfig` fields.)
+    fn with_memory_swap(self, bytes: i64) -> ContainerRequest<I>;
+
+    /// Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+    fn with_memory_swappiness(self, swappiness: i64) -> ContainerRequest<I>;
+
+    /// Disable the OOM killer for the container. This has no effect unless `-m`
+    /// (the memory limit, cf. `memory` on `ContainerRequest`) is set.
+    /// You can disable the OOM killer by writing "1" to the memory.oom_control file, as in:
+    /// ```ignore
+    /// echo 1 > memory.oom_control
+    /// ```
+    /// This operation is only allowed for the top cgroup of a sub-hierarchy.
+    /// If the OOM killer is disabled, tasks under the cgroup will hang/sleep
+    /// in the memory cgroup's OOM-waitqueue when they request accountable memory.
+    /// <https://lwn.net/Articles/432224/>
+    fn with_oom_kill_disable(self, disable: bool) -> ContainerRequest<I>;
+
+    /// Tune a container's PIDs limit. Set to `0` or `-1` for unlimited, or leave unset to not change it.
+    fn with_pids_limit(self, limit: i64) -> ContainerRequest<I>;
+
     /// Flag the container as being exempt from the default `testcontainers` remove-on-drop lifecycle,
     /// indicating that the container should be kept running, and that executions with the same configuration
     /// reuse it instead of starting a "fresh" container instance.
     #[cfg(feature = "reusable-containers")]
     fn with_reuse(self, reuse: ReuseDirective) -> ContainerRequest<I>;
@@ -373,6 +433,97 @@ impl<RI: Into<ContainerRequest<I>>, I: Image> ImageExt<I> for RI {
         container_req
     }

+    fn with_cpu_period(self, cpu_period: impl Into<i64>) -> ContainerRequest<I> {
+        let container_req = self.into();
+        ContainerRequest {
+            cpu_period: Some(cpu_period.into()),
+            ..container_req
+        }
+    }
+
+    fn with_cpu_quota(self, cpu_quota: impl Into<i64>) -> ContainerRequest<I> {
+        let container_req = self.into();
+        ContainerRequest {
+            cpu_quota: Some(cpu_quota.into()),
+            ..container_req
+        }
+    }
+
+    fn with_cpu_realtime_period(self, cpu_realtime_period: impl Into<i64>) -> ContainerRequest<I> {
+        let container_req = self.into();
+        ContainerRequest {
+            cpu_realtime_period: Some(cpu_realtime_period.into()),
+            ..container_req
+        }
+    }
+
+    fn with_cpu_realtime_runtime(
+        self,
+        cpu_realtime_runtime: impl Into<i64>,
+    ) -> ContainerRequest<I> {
+        let container_req = self.into();
+        ContainerRequest {
+            cpu_realtime_runtime: Some(cpu_realtime_runtime.into()),
+            ..container_req
+        }
+    }
+
+    fn with_cpuset_cpus(self, cpuset_cpus: impl Into<String>) -> ContainerRequest<I> {
+        let container_req = self.into();
+        ContainerRequest {
+            cpuset_cpus: Some(cpuset_cpus.into()),
+            ..container_req
+        }
+    }
+
+    fn with_nano_cpus(self, nano_cpus: impl Into<i64>) -> ContainerRequest<I> {
+        let container_req = self.into();
+        ContainerRequest {
+            nano_cpus: Some(nano_cpus.into()),
+            ..container_req
+        }
+    }
+
+    fn with_memory(self, bytes: i64) -> ContainerRequest<I> {
+        let container_req = self.into();
+        ContainerRequest {
+            memory: Some(bytes),
+            ..container_req
+        }
+    }
+
+    fn with_memory_reservation(self, bytes: i64) -> ContainerRequest<I> {
+        let container_req = self.into();
+        ContainerRequest {
+            memory_reservation: Some(bytes),
+            ..container_req
+        }
+    }
+
+    fn with_memory_swap(self, bytes: i64) -> ContainerRequest<I> {
+        let container_req = self.into();
+        ContainerRequest {
+            memory_swap: Some(bytes),
+            ..container_req
+        }
+    }
+
+    fn with_memory_swappiness(self, swappiness: i64) -> ContainerRequest<I> {
+        let container_req = self.into();
+        ContainerRequest {
+            memory_swappiness: Some(swappiness),
+            ..container_req
+        }
+    }
+
+    fn with_oom_kill_disable(self, disable: bool) -> ContainerRequest<I> {
+        let container_req = self.into();
+        ContainerRequest {
+            oom_kill_disable: Some(disable),
+            ..container_req
+        }
+    }
+
+    fn with_pids_limit(self, limit: i64) -> ContainerRequest<I> {
+        let container_req = self.into();
+        ContainerRequest {
+            pids_limit: Some(limit),
+            ..container_req
+        }
+    }
+
     #[cfg(feature = "reusable-containers")]
     fn with_reuse(self, reuse: ReuseDirective) -> ContainerRequest<I> {
         ContainerRequest {
diff --git a/testcontainers/src/runners/async_runner.rs b/testcontainers/src/runners/async_runner.rs
index 0c81030e..59f99191 100644
--- a/testcontainers/src/runners/async_runner.rs
+++ b/testcontainers/src/runners/async_runner.rs
@@ -135,7 +135,6 @@ where
                 }
             }
         }
-
         let mut config: Config<String> = Config {
             image: Some(container_req.descriptor()),
             labels: Some(labels),
@@ -152,6 +151,101 @@ where
             ..Default::default()
         };

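+        // NOTE: each block below follows the same shape: if the request specifies a
+        // value, thread it into the bollard `HostConfig` via `Option::map`, leaving
+        // the rest of the host config untouched.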
+        // CPU period
+        if let Some(cpu_period) = container_req.cpu_period() {
+            config.host_config = config.host_config.map(|mut host_config| {
+                host_config.cpu_period = Some(cpu_period);
+                host_config
+            });
+        }
+
+        // CPU quota
+        if let Some(cpu_quota) = container_req.cpu_quota() {
+            config.host_config = config.host_config.map(|mut host_config| {
+                host_config.cpu_quota = Some(cpu_quota);
+                host_config
+            });
+        }
+
+        // cpu_realtime_period
+        if let Some(cpu_realtime_period) = container_req.cpu_realtime_period() {
+            config.host_config = config.host_config.map(|mut host_config| {
+                host_config.cpu_realtime_period = Some(cpu_realtime_period);
+                host_config
+            });
+        }
+
+        // cpu_realtime_runtime
+        if let Some(cpu_realtime_runtime) = container_req.cpu_realtime_runtime() {
+            config.host_config = config.host_config.map(|mut host_config| {
+                host_config.cpu_realtime_runtime = Some(cpu_realtime_runtime);
+                host_config
+            });
+        }
+
+        // cpuset_cpus
+        if let Some(cpuset_cpus) = container_req.cpuset_cpus() {
+            config.host_config = config.host_config.map(|mut host_config| {
+                host_config.cpuset_cpus = Some(cpuset_cpus.to_owned());
+                host_config
+            });
+        }
+
+        // nano_cpus
+        if let Some(nano_cpus) = container_req.nano_cpus() {
+            config.host_config = config.host_config.map(|mut host_config| {
+                host_config.nano_cpus = Some(nano_cpus);
+                host_config
+            });
+        }
+
+        // memory
+        if let Some(bytes) = container_req.memory() {
+            config.host_config = config.host_config.map(|mut host_config| {
+                host_config.memory = Some(bytes);
+                host_config
+            });
+        }
+
+        // memory reservation
+        if let Some(bytes) = container_req.memory_reservation() {
+            config.host_config = config.host_config.map(|mut host_config| {
+                host_config.memory_reservation = Some(bytes);
+                host_config
+            });
+        }
+
+        // memory swap
+        if let Some(bytes) = container_req.memory_swap() {
+            config.host_config = config.host_config.map(|mut host_config| {
+                host_config.memory_swap = Some(bytes);
+                host_config
+            });
+        }
+
+        // memory swappiness
+        if let Some(swappiness) = container_req.memory_swappiness() {
+            config.host_config = config.host_config.map(|mut host_config| {
+                host_config.memory_swappiness = Some(swappiness);
+                host_config
+            });
+        }
+
+        // oom_kill_disable
+        if let Some(oom_kill_disable) = container_req.oom_kill_disable() {
+            config.host_config = config.host_config.map(|mut host_config| {
+                host_config.oom_kill_disable = Some(oom_kill_disable);
+                host_config
+            });
+        }
+
+        // pids_limit
+        if let Some(pids_limit) = container_req.pids_limit() {
+            config.host_config = config.host_config.map(|mut host_config| {
+                host_config.pids_limit = Some(pids_limit);
+                host_config
+            });
+        }
+
         // shared memory
         if let Some(bytes) = container_req.shm_size() {
             config.host_config = config.host_config.map(|mut host_config| {
@@ -289,8 +383,7 @@ where
             res => res,
         }?;

-        let copy_to_sources: Vec<&CopyToContainer> =
-            container_req.copy_to_sources().map(Into::into).collect();
+        let copy_to_sources: Vec<&CopyToContainer> = container_req.copy_to_sources().collect();

         for copy_to_source in copy_to_sources {
             client
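A minimal end-to-end sketch of the new limits API (hypothetical values; `GenericImage`, `ImageExt`, and `AsyncRunner` as imported in the tests above; quota math assumes the default 100000 µs CPU period, and a real test would also pick a long-running image or a wait condition):

```rust
use testcontainers::{runners::AsyncRunner, GenericImage, ImageExt};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let _container = GenericImage::new("alpine", "latest")
        // 2 CPUs: 200000 µs of CPU time per 100000 µs period.
        .with_cpu_quota(200_000)
        // Pin execution to the first two cores.
        .with_cpuset_cpus("0,1")
        // 512 MiB hard limit, 256 MiB soft reservation, both in bytes.
        .with_memory(512 * 1024 * 1024)
        .with_memory_reservation(256 * 1024 * 1024)
        // At most 100 processes inside the container.
        .with_pids_limit(100)
        .start()
        .await?;
    Ok(())
}
```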