diff --git a/Cargo.lock b/Cargo.lock index 5fe88a4a..d583e72c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1363,7 +1363,7 @@ name = "cordyceps" version = "0.3.2" source = "git+https://github.com/hawkw/mycelium.git?rev=101a4abaa19afdd131b334a16d92c9fb4909c064#101a4abaa19afdd131b334a16d92c9fb4909c064" dependencies = [ - "loom", + "loom 0.5.6", "tracing 0.1.37", ] @@ -1372,7 +1372,7 @@ name = "cordyceps" version = "0.3.2" source = "git+https://github.com/hawkw/mycelium#1f125194902cd4970b72eab0aa1d85d1b6ec1489" dependencies = [ - "loom", + "loom 0.5.6", "tracing 0.1.37", ] @@ -4677,6 +4677,22 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "loom" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86a17963e5073acf8d3e2637402657c6b467218f36fe10d696b3e1095ae019bf" +dependencies = [ + "cfg-if 1.0.0", + "generator", + "pin-utils", + "scoped-tls", + "serde", + "serde_json", + "tracing 0.1.37", + "tracing-subscriber", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -4732,6 +4748,19 @@ dependencies = [ "tracing 0.1.37", ] +[[package]] +name = "maitake-sync" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68d76dcfa3b14b75b60ff187f5df11c10fa76f227741a42070f3d36215756f24" +dependencies = [ + "cordyceps 0.3.2 (git+https://github.com/hawkw/mycelium.git?rev=101a4abaa19afdd131b334a16d92c9fb4909c064)", + "loom 0.7.0", + "mycelium-bitfield 0.1.3 (git+https://github.com/hawkw/mycelium.git?rev=101a4abaa19afdd131b334a16d92c9fb4909c064)", + "pin-project", + "portable-atomic", +] + [[package]] name = "manganese" version = "0.1.0" @@ -5089,8 +5118,13 @@ dependencies = [ name = "mnemos-bitslab" version = "0.1.0" dependencies = [ + "futures", + "loom 0.7.0", + "maitake-sync", "portable-atomic", "proptest", + "tracing 0.1.37", + "tracing-subscriber", ] [[package]] @@ -5271,7 +5305,7 @@ version = "0.1.0" source = "git+https://github.com/hawkw/mycelium.git?rev=101a4abaa19afdd131b334a16d92c9fb4909c064#101a4abaa19afdd131b334a16d92c9fb4909c064" dependencies = [ "cordyceps 0.3.2 (git+https://github.com/hawkw/mycelium.git?rev=101a4abaa19afdd131b334a16d92c9fb4909c064)", - "loom", + "loom 0.5.6", "mycelium-bitfield 0.1.3 (git+https://github.com/hawkw/mycelium.git?rev=101a4abaa19afdd131b334a16d92c9fb4909c064)", "tracing 0.2.0", ] @@ -5282,7 +5316,7 @@ version = "0.1.0" source = "git+https://github.com/hawkw/mycelium#1f125194902cd4970b72eab0aa1d85d1b6ec1489" dependencies = [ "cordyceps 0.3.2 (git+https://github.com/hawkw/mycelium)", - "loom", + "loom 0.5.6", "mycelium-bitfield 0.1.3 (git+https://github.com/hawkw/mycelium)", "tracing 0.2.0", ] diff --git a/justfile b/justfile index 388d7338..071f1829 100644 --- a/justfile +++ b/justfile @@ -28,6 +28,8 @@ profile := 'release' _cargo := "cargo" + if toolchain != "" { " +" + toolchain } else { "" } +_testcmd := if no-nextest != "" { "test" } else { "nextest run" } + _rustflags := env_var_or_default("RUSTFLAGS", "") # If we're running in Github Actions and cargo-action-fmt is installed, then add @@ -119,7 +121,7 @@ test *ARGS="--all-features": (nextest "run " + ARGS) # run a Nextest command nextest *ARGS: (_get-cargo-command "nextest" "cargo-nextest" no-nextest) - {{ _cargo }} nextest {{ ARGS }} + {{ _cargo }} {{ _testcmd }} {{ ARGS }} # run rustfmt for all crates, across workspaces fmt: @@ -211,6 +213,50 @@ oranda CMD="dev": (_get-cargo-bin "oranda") ./scripts/rfc2book.py oranda {{ CMD }} + +loom crate='' *args='': (_get-cargo-command "nextest" "cargo-nextest" 
no-nextest) + #!/usr/bin/env bash + set -euo pipefail + source "./scripts/_util.sh" + + export RUSTFLAGS="--cfg loom ${RUSTFLAGS:-}" + export LOOM_MAX_PREEMPTIONS="${LOOM_MAX_PREEMPTIONS:-2}" + export LOOM_LOG="${LOOM_LOG:-mnemos=trace,debug}" + + # if logging is enabled, also enable location tracking. + if [[ "${LOOM_LOG}" != "off" ]]; then + export LOOM_LOCATION=true + status "Enabled" "logging, LOOM_LOG=${LOOM_LOG}" + else + status "Disabled" "logging and location tracking" + fi + + status "Configured" "loom, LOOM_MAX_PREEMPTIONS=${LOOM_MAX_PREEMPTIONS}" + + if [[ "${LOOM_CHECKPOINT_FILE:-}" ]]; then + export LOOM_CHECKPOINT_FILE="${LOOM_CHECKPOINT_FILE:-}" + export LOOM_CHECKPOINT_INTERVAL="${LOOM_CHECKPOINT_INTERVAL:-100}" + status "Saving" "checkpoints to ${LOOM_CHECKPOINT_FILE} every ${LOOM_CHECKPOINT_INTERVAL} iterations" + fi + + # if the loom tests fail, we still want to be able to print the checkpoint + # location before exiting. + set +e + + # run loom tests + {{ _cargo }} {{ _testcmd }} \ + --release \ + --lib \ + {{ if crate == '' { '--workspace'} else { '--package' } }} {{ crate }} \ + {{ args }} + status="$?" + + if [[ "${LOOM_CHECKPOINT_FILE:-}" ]]; then + status "Checkpoints" "saved to ${LOOM_CHECKPOINT_FILE}" + fi + + exit "$status" + _get-cargo-command name pkg skip='': #!/usr/bin/env bash set -euo pipefail diff --git a/source/bitslab/Cargo.toml b/source/bitslab/Cargo.toml index ab0ee934..69269fc5 100644 --- a/source/bitslab/Cargo.toml +++ b/source/bitslab/Cargo.toml @@ -6,10 +6,21 @@ repository.workspace = true homepage.workspace = true license.workspace = true -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +# See more keys and their definitions at +# https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] portable-atomic = { version = "1", default-features = false } +maitake-sync = { version = "0.1", default-features = false } [dev-dependencies] -proptest = "1" \ No newline at end of file +proptest = "1" +tracing = "0.1.37" + +[target.'cfg(not(loom))'.dev-dependencies] +futures = { version = "0.3.16" } +tracing-subscriber = { version = "0.3.17", default-features = false, features = ["fmt", "ansi"] } + +[target.'cfg(loom)'.dev-dependencies.loom] +version = "0.7" +features = ["futures", "checkpoint"] \ No newline at end of file diff --git a/source/bitslab/src/index.rs b/source/bitslab/src/index.rs index b2dcc98a..f4044533 100644 --- a/source/bitslab/src/index.rs +++ b/source/bitslab/src/index.rs @@ -1,250 +1,304 @@ -use portable_atomic::{AtomicU16, Ordering::*}; +macro_rules! make_index_allocs { + ( + $( + mod $modname:ident { + pub struct $Name:ident($Atomic:ty, $Int:ty, $capacity:literal); + } + )+ + ) => { + $( + pub use self::$modname::$Name; + mod $modname { + use crate::loom::sync::atomic::{$Atomic, Ordering::*}; -/// An allocator for up to 16 unique indices. -pub struct IndexAlloc16(AtomicU16); + #[doc = concat!("An allocator for up to ", stringify!($cap), " unique indices.")] + pub struct $Name($Atomic); -impl IndexAlloc16 { - /// Returns a new allocator for up to 16 unique indices. - #[must_use] - pub const fn new() -> Self { - Self(AtomicU16::new(0)) - } + impl $Name { + #[doc = concat!("An allocator for up to ", stringify!($cap), " unique indices.")] + #[must_use] + #[cfg(not(loom))] + pub const fn new() -> Self { + Self(<$Atomic>::new(0)) + } - /// Allocate an index from the pool. 
- /// - /// If this method returns [`Some`], the returned [`u8`] index will not be - /// returned again until after it has been [`free`](Self::free)d. - #[must_use] - pub fn allocate(&self) -> Option { - let mut bitmap = self.0.load(Acquire); - loop { - let idx = find_zero(bitmap)?; - let new_bitmap = bitmap | (1 << idx); - match self - .0 - .compare_exchange_weak(bitmap, new_bitmap, AcqRel, Acquire) - { - Ok(_) => return Some(idx), - Err(actual) => bitmap = actual, - } - } - } + #[doc = concat!("An allocator for up to ", stringify!($cap), " unique indices.")] + #[must_use] + #[cfg(loom)] + pub fn new() -> Self { + Self(<$Atomic>::new(0)) + } - /// The *total* number of indices in this allocator. - pub const CAPACITY: u8 = 16; + /// Allocate an index from the pool. + /// + /// If this method returns [`Some`], the returned [`u8`] index will not be + /// returned again until after it has been [`free`](Self::free)d. + #[must_use] + pub fn allocate(&self) -> Option { + let mut bitmap = self.0.load(Acquire); + loop { + let idx = Self::find_zero(bitmap)?; + let new_bitmap = bitmap | (1 << idx); + match self + .0 + .compare_exchange_weak(bitmap, new_bitmap, AcqRel, Acquire) + { + Ok(_) => return Some(idx), + Err(actual) => bitmap = actual, + } + } + } - /// Release an index back to the pool. - /// - /// The freed index may now be returned by a subsequent call to - /// [`allocate`](Self::allocate). - #[inline] - pub fn free(&self, index: u8) { - self.0.fetch_and(!(1 << index), Release); - } + /// The *total* number of indices in this allocator. + const CAPACITY: u8 = $capacity; - /// Returns `true` if *all* indices in the allocator have been allocated. - /// - /// This is the inverse of [`any_free`](Self::any_free). - /// - /// # Examples - /// - /// ``` - /// use mnemos_bitslab::index::IndexAlloc16; - /// - /// let alloc = IndexAlloc16::new(); - /// assert!(!alloc.all_allocated()); - /// - /// // allocate all but one index - /// for _ in 0..15 { - /// alloc.allocate().expect("should have free indices"); - /// assert!(!alloc.all_allocated()); - /// } - /// - /// // allocate the last index. - /// let last = alloc.allocate().expect("should have one more index remaining"); - /// assert!(alloc.all_allocated()); - /// - /// // freeing the index should make it available again - /// alloc.free(last); - /// assert!(!alloc.all_allocated()); - /// ``` - #[must_use] - #[inline] - pub fn all_allocated(&self) -> bool { - self.0.load(Acquire) == u16::MAX - } + /// Release an index back to the pool. + /// + /// The freed index may now be returned by a subsequent call to + /// [`allocate`](Self::allocate). + #[inline] + pub fn free(&self, index: u8) { + self.0.fetch_and(!(1 << index), Release); + } - /// Returns `true` if *none* of this allocator's indices have been - /// allocated. - /// - /// This is the inverse of [`any_allocated`](Self::any_allocated). - /// - /// # Examples - /// - /// ``` - /// use mnemos_bitslab::index::IndexAlloc16; - /// - /// let alloc = IndexAlloc16::new(); - /// assert!(alloc.all_free()); - /// - /// let idx = alloc.allocate().expect("a fresh allocator should have indices!"); - /// assert!(!alloc.all_free()); - /// - /// // free the last index. now, `all_free` will return `true` again. - /// alloc.free(idx); - /// assert!(alloc.all_free()); - /// ``` - #[must_use] - #[inline] - pub fn all_free(&self) -> bool { - self.0.load(Acquire) == 0 - } + /// Returns `true` if *all* indices in the allocator have been allocated. + /// + /// This is the inverse of [`any_free`](Self::any_free). 
+ /// + /// # Examples + /// + /// ``` + #[doc = concat!(" use mnemos_bitslab::index::", stringify!($Name), ";")] + /// + #[doc = concat!(" let alloc = ", stringify!($Name), "::new();")] + /// assert!(!alloc.all_allocated()); + /// + /// // allocate all but one index + #[doc = concat!(" for _ in 1..", stringify!($capacity), " {")] + /// alloc.allocate().expect("should have free indices"); + /// assert!(!alloc.all_allocated()); + /// } + /// + /// // allocate the last index. + /// let last = alloc.allocate().expect("should have one more index remaining"); + /// assert!(alloc.all_allocated()); + /// + /// // freeing the index should make it available again + /// alloc.free(last); + /// assert!(!alloc.all_allocated()); + /// ``` + #[must_use] + #[inline] + pub fn all_allocated(&self) -> bool { + self.0.load(Acquire) == <$Int>::MAX + } - /// Returns `true` if *any* index in the allocator has been allocated. - /// - /// This is the inverse of [`all_free`](Self::all_free). - /// - /// # Examples - /// - /// ``` - /// use mnemos_bitslab::index::IndexAlloc16; - /// - /// let alloc = IndexAlloc16::new(); - /// assert!(!alloc.any_allocated()); - /// - /// // allocate all indices - /// for _ in 0..16 { - /// alloc.allocate().expect("should have free indices"); - /// assert!(alloc.any_allocated()); - /// } - /// - /// // free all but one index. - /// for i in 0..15 { - /// alloc.free(i); - /// assert!(alloc.any_allocated()); - /// } - /// - /// // free the last index. now, `any_allocated` will return `false`. - /// alloc.free(15); - /// assert!(!alloc.any_allocated()); - /// ``` - #[must_use] - #[inline] - pub fn any_allocated(&self) -> bool { - self.0.load(Acquire) != 0 - } + /// Returns `true` if *none* of this allocator's indices have been + /// allocated. + /// + /// This is the inverse of [`any_allocated`](Self::any_allocated). + /// + /// # Examples + /// + /// ``` + #[doc = concat!(" use mnemos_bitslab::index::", stringify!($Name), ";")] + /// + #[doc = concat!(" let alloc = ", stringify!($Name), "::new();")] + /// assert!(alloc.all_free()); + /// + /// let idx = alloc.allocate().expect("a fresh allocator should have indices!"); + /// assert!(!alloc.all_free()); + /// + /// // free the last index. now, `all_free` will return `true` again. + /// alloc.free(idx); + /// assert!(alloc.all_free()); + /// ``` + #[must_use] + #[inline] + pub fn all_free(&self) -> bool { + self.0.load(Acquire) == 0 + } - /// Returns `true` if *any* index in the allocator is available. - /// - /// This is the inverse of [`all_allocated`](Self::all_allocated). - /// - /// # Examples - /// - /// ``` - /// use mnemos_bitslab::index::IndexAlloc16; - /// - /// let alloc = IndexAlloc16::new(); - /// assert!(alloc.any_free()); - /// - /// // allocate all but one index - /// for _ in 0..15 { - /// alloc.allocate().expect("should have free indices"); - /// assert!(alloc.any_free()); - /// } - /// - /// // allocate the last index. - /// let last = alloc.allocate().expect("should have one more index remaining"); - /// assert!(!alloc.any_free()); - /// - /// // freeing the index should make it available again - /// alloc.free(last); - /// assert!(alloc.any_free()); - /// ``` - #[must_use] - #[inline] - pub fn any_free(&self) -> bool { - self.0.load(Acquire) != u16::MAX - } + /// Returns `true` if *any* index in the allocator has been allocated. + /// + /// This is the inverse of [`all_free`](Self::all_free). 
+ /// + /// # Examples + /// + /// ``` + #[doc = concat!(" use mnemos_bitslab::index::", stringify!($Name), ";")] + /// + #[doc = concat!(" let alloc = ", stringify!($Name), "::new();")] + /// assert!(!alloc.any_allocated()); + /// + /// // allocate all indices + #[doc = concat!(" for _ in 0..", stringify!($capacity), " {")] + /// alloc.allocate().expect("should have free indices"); + /// assert!(alloc.any_allocated()); + /// } + /// + /// // free all but one index. + #[doc = concat!(" for i in 1..", stringify!($capacity), " {")] + /// alloc.free(i); + /// assert!(alloc.any_allocated()); + /// } + /// + /// // free the last index. now, `any_allocated` will return `false`. + /// alloc.free(0); + /// assert!(!alloc.any_allocated()); + /// ``` + #[must_use] + #[inline] + pub fn any_allocated(&self) -> bool { + self.0.load(Acquire) != 0 + } - /// Returns the current number of free indices in the allocator. - /// - /// This will always be [`Self::CAPACITY`] or less. - /// - /// ``` - /// use mnemos_bitslab::index::IndexAlloc16; - /// - /// let alloc = IndexAlloc16::new(); - /// assert_eq!(alloc.free_count(), 16); - /// - /// let idx1 = alloc.allocate().expect("all indices should be free"); - /// assert_eq!(alloc.free_count(), 15); - /// - /// let idx2 = alloc.allocate().expect("15 indices should be free"); - /// assert_eq!(alloc.free_count(), 14); - /// - /// alloc.free(idx1); - /// assert_eq!(alloc.free_count(), 15); - /// ``` - #[must_use] - #[inline] - pub fn free_count(&self) -> u8 { - self.0.load(Acquire).count_zeros() as u8 - } + /// Returns `true` if *any* index in the allocator is available. + /// + /// This is the inverse of [`all_allocated`](Self::all_allocated). + /// + /// # Examples + /// + /// ``` + #[doc = concat!(" use mnemos_bitslab::index::", stringify!($Name), ";")] + /// + #[doc = concat!(" let alloc = ", stringify!($Name), "::new();")] + /// assert!(alloc.any_free()); + /// + /// // allocate all but one index + #[doc = concat!(" for _ in 1..", stringify!($capacity), " {")] + /// alloc.allocate().expect("should have free indices"); + /// assert!(alloc.any_free()); + /// } + /// + /// // allocate the last index. + /// let last = alloc.allocate().expect("should have one more index remaining"); + /// assert!(!alloc.any_free()); + /// + /// // freeing the index should make it available again + /// alloc.free(last); + /// assert!(alloc.any_free()); + /// ``` + #[must_use] + #[inline] + pub fn any_free(&self) -> bool { + self.0.load(Acquire) != <$Int>::MAX + } - /// Returns the current number of allocated indices in the allocator. - /// - /// This will always be [`Self::CAPACITY`] or less. - /// - /// # Examples - /// - /// ``` - /// use mnemos_bitslab::index::IndexAlloc16; - /// - /// let alloc = IndexAlloc16::new(); - /// assert_eq!(alloc.allocated_count(), 0); - /// - /// let idx1 = alloc.allocate().expect("all indices should be free"); - /// assert_eq!(alloc.allocated_count(), 1); - /// - /// let idx2 = alloc.allocate().expect("15 indices should be free"); - /// assert_eq!(alloc.allocated_count(), 2); - /// - /// alloc.free(idx1); - /// assert_eq!(alloc.allocated_count(), 1); - /// ``` - #[must_use] - #[inline] - pub fn allocated_count(&self) -> u8 { - self.0.load(Acquire).count_ones() as u8 - } -} + /// Returns the current number of free indices in the allocator. + /// + /// This will always be [`self.capacity()`] or less. 
+ /// + /// ``` + #[doc = concat!(" use mnemos_bitslab::index::", stringify!($Name), ";")] + /// + #[doc = concat!(" let alloc = ", stringify!($Name), "::new();")] + /// assert_eq!(alloc.free_count(), alloc.capacity()); + /// + /// let idx1 = alloc.allocate().expect("all indices should be free"); + /// assert_eq!(alloc.free_count(), alloc.capacity() - 1); + /// + /// let idx2 = alloc.allocate().expect("most indices should be free"); + /// assert_eq!(alloc.free_count(), alloc.capacity() - 2); + /// + /// alloc.free(idx1); + /// assert_eq!(alloc.free_count(), alloc.capacity() - 1); + /// ``` + #[must_use] + #[inline] + pub fn free_count(&self) -> u8 { + self.0.load(Acquire).count_zeros() as u8 + } -fn find_zero(u: u16) -> Option { - let trailing_ones = u.trailing_ones(); - if trailing_ones == 16 { - None - } else { - Some(trailing_ones as u8) - } -} + /// Returns the current number of allocated indices in the allocator. + /// + /// This will always be [`self.capacity()`] or less. + /// + /// # Examples + /// + /// ``` + #[doc = concat!(" use mnemos_bitslab::index::", stringify!($Name), ";")] + /// + #[doc = concat!(" let alloc = ", stringify!($Name), "::new();")] + /// assert_eq!(alloc.allocated_count(), 0); + /// + /// let idx1 = alloc.allocate().expect("all indices should be free"); + /// assert_eq!(alloc.allocated_count(), 1); + /// + /// let idx2 = alloc.allocate().expect("most indices should be free"); + /// assert_eq!(alloc.allocated_count(), 2); + /// + /// alloc.free(idx1); + /// assert_eq!(alloc.allocated_count(), 1); + /// ``` + #[must_use] + #[inline] + pub fn allocated_count(&self) -> u8 { + self.0.load(Acquire).count_ones() as u8 + } -#[cfg(test)] -mod tests { - use super::*; - use proptest::{prop_assert_eq, proptest}; - - proptest! { - #[test] - fn find_zero_works(u: u16) { - let mut found_zero = None; - for i in 0..u16::BITS { - if u & (1 << i) == 0 { - found_zero = Some(i as u8); - break; + /// Returns the total capacity of this allocator, including any + /// allocated indices. + #[must_use] + #[inline] + pub const fn capacity(&self) -> u8 { + Self::CAPACITY + } + + pub(crate) fn is_allocated(&self, index: u8) -> bool { + let bit = 1 << index; + self.0.load(Acquire) & bit == bit + } + + fn find_zero(u: $Int) -> Option { + let trailing_ones = u.trailing_ones(); + if trailing_ones == $capacity { + None + } else { + Some(trailing_ones as u8) + } + } + } + + #[cfg(test)] + mod tests { + use super::*; + use proptest::{prop_assert_eq, proptest}; + + proptest! { + #[test] + fn find_zero_works(u: $Int) { + let mut found_zero = None; + for i in 0..<$Int>::BITS as $Int { + if u & (1 << i) == 0 { + found_zero = Some(i as u8); + break; + } + } + + prop_assert_eq!($Name::find_zero(u), found_zero) + } + } } } + )+ + }; +} + +make_index_allocs! 
{ + mod alloc8 { + pub struct IndexAlloc8(AtomicU8, u8, 8); + } + + mod alloc16 { + pub struct IndexAlloc16(AtomicU16, u16, 16); + } + + mod alloc32 { + pub struct IndexAlloc32(AtomicU32, u32, 32); + } - prop_assert_eq!(find_zero(u), found_zero) - } + mod alloc64 { + pub struct IndexAlloc64(AtomicU64, u64, 64); } } diff --git a/source/bitslab/src/lib.rs b/source/bitslab/src/lib.rs index 2f766d2e..ebc08a2d 100644 --- a/source/bitslab/src/lib.rs +++ b/source/bitslab/src/lib.rs @@ -1,2 +1,8 @@ #![cfg_attr(not(test), no_std)] pub mod index; +pub(crate) mod loom; +pub mod slab; + +mod sealed { + pub trait Sealed {} +} diff --git a/source/bitslab/src/loom.rs b/source/bitslab/src/loom.rs new file mode 100644 index 00000000..320ef358 --- /dev/null +++ b/source/bitslab/src/loom.rs @@ -0,0 +1,494 @@ +#[allow(unused_imports)] +pub(crate) use self::inner::*; + +#[cfg(loom)] +mod inner { + #![allow(dead_code)] + #![allow(unused_imports)] + + // #[cfg(feature = "alloc")] + // pub(crate) mod alloc { + // use super::sync::Arc; + // use core::{ + // future::Future, + // pin::Pin, + // task::{Context, Poll}, + // }; + // pub(crate) use loom::alloc::*; + + // #[derive(Debug)] + // #[pin_project::pin_project] + // pub(crate) struct TrackFuture { + // #[pin] + // inner: F, + // track: Arc<()>, + // } + + // impl Future for TrackFuture { + // type Output = TrackFuture; + // fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + // let this = self.project(); + // this.inner.poll(cx).map(|inner| TrackFuture { + // inner, + // track: this.track.clone(), + // }) + // } + // } + + // impl TrackFuture { + // /// Wrap a `Future` in a `TrackFuture` that participates in Loom's + // /// leak checking. + // #[track_caller] + // pub(crate) fn new(inner: F) -> Self { + // Self { + // inner, + // track: Arc::new(()), + // } + // } + + // /// Stop tracking this future, and return the inner value. + // pub(crate) fn into_inner(self) -> F { + // self.inner + // } + // } + + // #[track_caller] + // pub(crate) fn track_future(inner: F) -> TrackFuture { + // TrackFuture::new(inner) + // } + + // // PartialEq impl so that `assert_eq!(..., Ok(...))` works + // impl PartialEq for TrackFuture { + // fn eq(&self, other: &Self) -> bool { + // self.inner == other.inner + // } + // } + // } + + #[cfg(test)] + pub(crate) use loom::{alloc, cell, future, hint, model, thread}; + + pub(crate) mod sync { + pub(crate) use loom::sync::*; + + pub(crate) mod spin { + pub(crate) use loom::sync::MutexGuard; + + /// Mock version of mycelium's spinlock, but using + /// `loom::sync::Mutex`. The API is slightly different, since the + /// mycelium mutex does not support poisoning. 
+ #[derive(Debug)] + pub(crate) struct Mutex(loom::sync::Mutex); + + impl Mutex { + #[track_caller] + pub(crate) fn new(t: T) -> Self { + Self(loom::sync::Mutex::new(t)) + } + + #[track_caller] + pub fn try_lock(&self) -> Option> { + self.0.try_lock().ok() + } + + #[track_caller] + pub fn lock(&self) -> MutexGuard<'_, T> { + self.0.lock().expect("loom mutex will never poison") + } + } + } + } +} + +#[cfg(not(loom))] +mod inner { + #![allow(dead_code, unused_imports)] + pub(crate) mod sync { + #[cfg(feature = "alloc")] + pub use alloc::sync::*; + pub use core::sync::*; + #[cfg(all(not(feature = "alloc"), test))] + pub use std::sync::Arc; + } + + pub(crate) mod atomic { + pub use portable_atomic::*; + } + + pub(crate) use portable_atomic::hint; + + #[cfg(test)] + pub(crate) mod thread { + pub(crate) use std::thread::{yield_now, JoinHandle}; + pub(crate) fn spawn(f: F) -> JoinHandle + where + F: FnOnce() -> T + Send + 'static, + T: Send + 'static, + { + // let track = super::alloc::track::Registry::current(); + std::thread::spawn(move || { + // let _tracking = track.map(|track| track.set_default()); + f() + }) + } + } + + #[cfg(test)] + pub(crate) mod future { + pub use futures::executor::block_on; + } + + #[cfg(test)] + pub(crate) mod model { + #[non_exhaustive] + #[derive(Default)] + pub(crate) struct Builder { + pub(crate) max_threads: usize, + pub(crate) max_branches: usize, + pub(crate) max_permutations: Option, + // pub(crate) max_duration: Option, + pub(crate) preemption_bound: Option, + // pub(crate) checkpoint_file: Option, + pub(crate) checkpoint_interval: usize, + pub(crate) location: bool, + pub(crate) log: bool, + } + + impl Builder { + pub(crate) fn new() -> Self { + Self::default() + } + + pub(crate) fn check(&self, f: impl FnOnce()) { + let registry = super::alloc::track::Registry::default(); + let _tracking = registry.set_default(); + f(); + registry.check(); + } + } + } + + #[cfg(test)] + pub(crate) fn model(f: impl FnOnce()) { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + model::Builder::new().check(f) + } + + pub(crate) mod cell { + #[derive(Debug)] + pub(crate) struct UnsafeCell(core::cell::UnsafeCell); + + impl UnsafeCell { + pub const fn new(data: T) -> UnsafeCell { + UnsafeCell(core::cell::UnsafeCell::new(data)) + } + } + + impl UnsafeCell { + #[inline(always)] + pub fn with(&self, f: F) -> R + where + F: FnOnce(*const T) -> R, + { + f(self.0.get()) + } + + #[inline(always)] + pub fn with_mut(&self, f: F) -> R + where + F: FnOnce(*mut T) -> R, + { + f(self.0.get()) + } + + #[inline(always)] + pub(crate) fn get(&self) -> ConstPtr { + ConstPtr(self.0.get()) + } + + #[inline(always)] + pub(crate) fn get_mut(&self) -> MutPtr { + MutPtr(self.0.get()) + } + } + + #[derive(Debug)] + pub(crate) struct ConstPtr(*const T); + + impl ConstPtr { + #[inline(always)] + pub(crate) unsafe fn deref(&self) -> &T { + &*self.0 + } + + #[inline(always)] + pub fn with(&self, f: F) -> R + where + F: FnOnce(*const T) -> R, + { + f(self.0) + } + } + + #[derive(Debug)] + pub(crate) struct MutPtr(*mut T); + + impl MutPtr { + // Clippy knows that it's Bad and Wrong to construct a mutable reference + // from an immutable one...but this function is intended to simulate a raw + // pointer, so we have to do that here. 
+ #[allow(clippy::mut_from_ref)] + #[inline(always)] + pub(crate) unsafe fn deref(&self) -> &mut T { + &mut *self.0 + } + + #[inline(always)] + pub fn with(&self, f: F) -> R + where + F: FnOnce(*mut T) -> R, + { + f(self.0) + } + } + } + + #[cfg(test)] + pub(crate) mod alloc { + // use core::{ + // future::Future, + // pin::Pin, + // task::{Context, Poll}, + // }; + + use std::sync::Arc; + + pub(in crate::loom) mod track { + use std::{ + cell::RefCell, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Mutex, Weak, + }, + }; + + #[derive(Clone, Debug, Default)] + pub(crate) struct Registry(Arc>); + + #[derive(Debug, Default)] + struct RegistryInner { + tracks: Vec>, + next_id: usize, + } + + #[derive(Debug)] + pub(super) struct TrackData { + was_leaked: AtomicBool, + type_name: &'static str, + location: &'static core::panic::Location<'static>, + id: usize, + } + + thread_local! { + static REGISTRY: RefCell> = RefCell::new(None); + } + + impl Registry { + pub(in crate::loom) fn current() -> Option { + REGISTRY.with(|current| current.borrow().clone()) + } + + pub(in crate::loom) fn set_default(&self) -> impl Drop { + struct Unset(Option); + impl Drop for Unset { + fn drop(&mut self) { + let _ = + REGISTRY.try_with(|current| *current.borrow_mut() = self.0.take()); + } + } + + REGISTRY.with(|current| { + let mut current = current.borrow_mut(); + let unset = Unset(current.clone()); + *current = Some(self.clone()); + unset + }) + } + + #[track_caller] + pub(super) fn start_tracking() -> Option> { + // we don't use `Option::map` here because it creates a + // closure, which breaks `#[track_caller]`, since the caller + // of `insert` becomes the closure, which cannot have a + // `#[track_caller]` attribute on it. + #[allow(clippy::manual_map)] + match Self::current() { + Some(registry) => Some(registry.insert::()), + _ => None, + } + } + + #[track_caller] + pub(super) fn insert(&self) -> Arc { + let mut inner = self.0.lock().unwrap(); + let id = inner.next_id; + inner.next_id += 1; + let location = core::panic::Location::caller(); + let type_name = std::any::type_name::(); + let data = Arc::new(TrackData { + type_name, + location, + id, + was_leaked: AtomicBool::new(false), + }); + let weak = Arc::downgrade(&data); + tracing::trace!( + target: "maitake_sync::alloc", + id, + "type" = %type_name, + %location, + "started tracking allocation", + ); + inner.tracks.push(weak); + data + } + + pub(in crate::loom) fn check(&self) { + let leaked = self + .0 + .lock() + .unwrap() + .tracks + .iter() + .filter_map(|weak| { + let data = weak.upgrade()?; + data.was_leaked.store(true, Ordering::SeqCst); + Some(format!( + " - id {}, {} allocated at {}", + data.id, data.type_name, data.location + )) + }) + .collect::>(); + if !leaked.is_empty() { + let leaked = leaked.join("\n "); + panic!("the following allocations were leaked:\n {leaked}"); + } + } + } + + impl Drop for TrackData { + fn drop(&mut self) { + if !self.was_leaked.load(Ordering::SeqCst) { + tracing::trace!( + target: "maitake_sync::alloc", + id = self.id, + "type" = %self.type_name, + location = %self.location, + "dropped all references to a tracked allocation", + ); + } + } + } + } + + // #[cfg(test)] + // #[derive(Debug)] + // #[pin_project::pin_project] + // pub(crate) struct TrackFuture { + // #[pin] + // inner: F, + // track: Option>, + // } + + // #[cfg(test)] + // impl Future for TrackFuture { + // type Output = TrackFuture; + // fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + // let this = self.project(); + // 
this.inner.poll(cx).map(|inner| TrackFuture { + // inner, + // track: this.track.clone(), + // }) + // } + // } + + // #[cfg(test)] + // impl TrackFuture { + // /// Wrap a `Future` in a `TrackFuture` that participates in Loom's + // /// leak checking. + // #[track_caller] + // pub(crate) fn new(inner: F) -> Self { + // let track = track::Registry::start_tracking::(); + // Self { inner, track } + // } + + // /// Stop tracking this future, and return the inner value. + // pub(crate) fn into_inner(self) -> F { + // self.inner + // } + // } + + // #[cfg(test)] + // #[track_caller] + // pub(crate) fn track_future(inner: F) -> TrackFuture { + // TrackFuture::new(inner) + // } + + // // PartialEq impl so that `assert_eq!(..., Ok(...))` works + // #[cfg(test)] + // impl PartialEq for TrackFuture { + // fn eq(&self, other: &Self) -> bool { + // self.inner == other.inner + // } + // } + + /// Track allocations, detecting leaks + #[derive(Debug)] + pub struct Track { + value: T, + + #[cfg(test)] + track: Option>, + } + + impl Track { + /// Track a value for leaks + #[inline(always)] + #[track_caller] + pub fn new(value: T) -> Track { + Track { + value, + + #[cfg(test)] + track: track::Registry::start_tracking::(), + } + } + + /// Get a reference to the value + #[inline(always)] + pub fn get_ref(&self) -> &T { + &self.value + } + + /// Get a mutable reference to the value + #[inline(always)] + pub fn get_mut(&mut self) -> &mut T { + &mut self.value + } + + /// Stop tracking the value for leaks + #[inline(always)] + pub fn into_inner(self) -> T { + self.value + } + } + } + + #[cfg(test)] + pub(crate) fn traceln(args: std::fmt::Arguments) { + eprintln!("{args}"); + } + + #[cfg(not(test))] + pub(crate) fn traceln(_: core::fmt::Arguments) {} +} diff --git a/source/bitslab/src/slab.rs b/source/bitslab/src/slab.rs new file mode 100644 index 00000000..1a789649 --- /dev/null +++ b/source/bitslab/src/slab.rs @@ -0,0 +1,342 @@ +use crate::{ + index::IndexAlloc64, + loom::{ + cell::{MutPtr, UnsafeCell}, + sync::atomic::{AtomicU64, Ordering}, + }, +}; +use core::{ + mem::MaybeUninit, + ops::{Deref, DerefMut}, +}; +use maitake_sync::WaitQueue; + +pub struct Bitslab64 { + alloc: IndexAlloc64, + slab: [UnsafeCell>; 64], + initialized: AtomicU64, + initializer: fn() -> T, + free_wait: WaitQueue, +} + +impl Bitslab64 { + #[cfg(not(all(test, loom)))] + #[must_use] + pub const fn new() -> Self { + Self::with_initializer(T::default) + } + + #[cfg(all(test, loom))] + #[must_use] + pub fn new() -> Self { + Self::with_initializer(T::default) + } +} + +#[must_use = "a `RefMut` does nothing if not dereferenced"] +pub struct RefMut<'slab, T> { + value: MutPtr>, + _free: FreeOnDrop<'slab, T>, +} + +struct FreeOnDrop<'slab, T> { + slab: &'slab Bitslab64, + idx: u8, +} + +// Macro for initializing arrays with non-`Copy` initializers. +// Based on https://stackoverflow.com/a/36259524 +// +// TODO(eliza): Maybe this should go in a "utils" crate eventually? +macro_rules! 
array { + (@accum (0, $($_es:expr),*) -> ($($body:tt)*)) + => {array!(@as_expr [$($body)*])}; + (@accum (1, $($es:expr),*) -> ($($body:tt)*)) + => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)*))}; + (@accum (2, $($es:expr),*) -> ($($body:tt)*)) + => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)* $($es,)*))}; + (@accum (3, $($es:expr),*) -> ($($body:tt)*)) + => {array!(@accum (2, $($es),*) -> ($($body)* $($es,)*))}; + (@accum (4, $($es:expr),*) -> ($($body:tt)*)) + => {array!(@accum (2, $($es,)* $($es),*) -> ($($body)*))}; + (@accum (5, $($es:expr),*) -> ($($body:tt)*)) + => {array!(@accum (4, $($es),*) -> ($($body)* $($es,)*))}; + (@accum (6, $($es:expr),*) -> ($($body:tt)*)) + => {array!(@accum (4, $($es),*) -> ($($body)* $($es,)* $($es,)*))}; + (@accum (7, $($es:expr),*) -> ($($body:tt)*)) + => {array!(@accum (4, $($es),*) -> ($($body)* $($es,)* $($es,)* $($es,)*))}; + (@accum (8, $($es:expr),*) -> ($($body:tt)*)) + => {array!(@accum (4, $($es,)* $($es),*) -> ($($body)*))}; + (@accum (16, $($es:expr),*) -> ($($body:tt)*)) + => {array!(@accum (8, $($es,)* $($es),*) -> ($($body)*))}; + (@accum (32, $($es:expr),*) -> ($($body:tt)*)) + => {array!(@accum (16, $($es,)* $($es),*) -> ($($body)*))}; + (@accum (64, $($es:expr),*) -> ($($body:tt)*)) + => {array!(@accum (32, $($es,)* $($es),*) -> ($($body)*))}; + + (@as_expr $e:expr) => {$e}; + + [$e:expr; $n:tt] => { array!(@accum ($n, $e) -> ()) }; +} + +impl Bitslab64 { + pub const CAPACITY: usize = 64; + + #[cfg(not(all(test, loom)))] + #[must_use] + pub const fn with_initializer(initializer: fn() -> T) -> Self { + Self { + alloc: IndexAlloc64::new(), + slab: array![UnsafeCell::new(MaybeUninit::uninit()); 64], + initialized: AtomicU64::new(0), + initializer, + free_wait: WaitQueue::new(), + } + } + + #[cfg(all(test, loom))] + #[must_use] + pub fn with_initializer(initializer: fn() -> T) -> Self { + Self { + alloc: IndexAlloc64::new(), + slab: array![UnsafeCell::new(MaybeUninit::uninit()); 64], + initialized: AtomicU64::new(0), + initializer, + free_wait: WaitQueue::new(), + } + } + + pub async fn alloc(&self) -> RefMut<'_, T> { + loop { + #[cfg(test)] + tracing::debug!("try allocate..."); + if let Some(a) = self.try_alloc() { + #[cfg(test)] + tracing::debug!("try allocate -> success"); + return a; + } + + #[cfg(test)] + tracing::debug!("try allocate -> fail"); + + self.free_wait + .wait() + .await + .expect("Bitslab64 WaitQueues are never closed!"); + + #[cfg(test)] + tracing::debug!("try allocate -> fail -> woken"); + } + } + + pub fn try_alloc(&self) -> Option> { + let idx = self.alloc.allocate()?; + let should_init = { + let mask = 1 << idx as u64; + let bitfield = self.initialized.fetch_or(mask, Ordering::AcqRel); + bitfield & mask == 0 + }; + let value = self.slab[idx as usize].get_mut(); + if should_init { + unsafe { + // Safety: we claimed exclusive ownership over this index by + // allocating it from the index allocator. + value.deref().write((self.initializer)()); + } + } + + Some(RefMut { + value, + _free: FreeOnDrop { slab: self, idx }, + }) + } + + unsafe fn free(&self, idx: u8) { + #[cfg(test)] + tracing::debug!(idx, "free"); + + self.alloc.free(idx); + self.free_wait.wake(); + } +} + +// === impl RefMut === + +impl Deref for RefMut<'_, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &Self::Target { + unsafe { + // Safety: we're about to do two unsafe things: dereferencing an + // `UnsafeCell` `MutPtr`, and assuming a `MaybeUninit` is + // initialized. 
+ // + // It's safe to call `value.deref()` here, because we only construct + // a `RefMut` after having claimed exclusive access to the index + // from the index allocator. + // + // Similarly, the call to `assume_init_ref()` is okay, because we + // only construct a `RefMut` after ensuring that the value has been + // initialized. + self.value.deref().assume_init_ref() + } + } +} + +impl DerefMut for RefMut<'_, T> { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { + // Safety: we're about to do two unsafe things: dereferencing an + // `UnsafeCell` `MutPtr`, and assuming a `MaybeUninit` is + // initialized. + // + // It's safe to call `value.deref()` here, because we only construct + // a `RefMut` after having claimed exclusive access to the index + // from the index allocator. + // + // Similarly, the call to `assume_init_ref()` is okay, because we + // only construct a `RefMut` after ensuring that the value has been + // initialized. + self.value.deref().assume_init_mut() + } + } +} + +impl Drop for FreeOnDrop<'_, T> { + #[inline] + fn drop(&mut self) { + unsafe { + self.slab.free(self.idx); + } + } +} + +unsafe impl Sync for Bitslab64 {} +unsafe impl Send for Bitslab64 {} + +#[cfg(test)] +mod tests { + use super::*; + use crate::loom::{self, alloc::Track, sync::Arc}; + use tracing::Instrument; + + #[test] + fn items_dropped() { + fn run(slab: Arc>>) -> impl FnOnce() { + move || { + let item1 = slab.try_alloc().unwrap(); + let item2 = slab.try_alloc().unwrap(); + drop((item1, item2)); + } + } + + loom::model(|| { + let slab = Arc::new(Bitslab64::>::with_initializer(|| Track::new(()))); + loom::thread::spawn(run(slab.clone())); + run(slab)(); + }) + } + + #[test] + fn try_alloc_nodrop() { + fn run(thread: usize, slab: &Arc>) -> impl FnOnce() { + let slab = slab.clone(); + move || { + let mut guards = Vec::new(); + for i in 0..32 { + match slab.try_alloc() { + Some(mut item) => { + println!("[thread {thread}] allocated item {i}"); + *item = i; + guards.push(item); + } + None => { + panic!("[thread {thread}] failed to allocate item {i}!"); + } + } + } + } + } + + loom::model(|| { + let slab = Arc::new(Bitslab64::new()); + let t1 = loom::thread::spawn(run(0, &slab)); + run(1, &slab)(); + t1.join().unwrap(); + }) + } + + #[test] + fn alloc_async_nodrop() { + fn run(thread: usize, slab: &Arc>) -> impl FnOnce() { + let slab = slab.clone(); + + move || { + loom::future::block_on(async move { + let mut guards = Vec::new(); + for i in 0..32 { + let span = tracing::info_span!("alloc", item = i, thread); + let mut item = slab.alloc().instrument(span.clone()).await; + let _enter = span.enter(); + tracing::info!("allocated item"); + *item = i; + guards.push(item); + } + }) + } + } + + loom::model(|| { + let slab = Arc::new(Bitslab64::new()); + let t1 = loom::thread::spawn(run(0, &slab)); + run(1, &slab)(); + t1.join().unwrap(); + }) + } + + #[test] + fn alloc_async_drop() { + loom::model(|| { + let slab = Arc::new(Bitslab64::new()); + + let mut guards = Vec::with_capacity(32); + loom::future::block_on(async { + for i in 0..64 { + let span = tracing::info_span!("alloc", item = i, thread = 0); + let mut item = slab.alloc().instrument(span.clone()).await; + let _enter = span.enter(); + tracing::info!("allocated item"); + *item = i; + guards.push(item); + } + }); + + let t1 = loom::thread::spawn({ + let slab = slab.clone(); + move || { + loom::future::block_on(async move { + let mut guards = Vec::with_capacity(32); + for i in 0..64 { + let span = tracing::info_span!("alloc", item = 
i, thread = 1);
+                            let mut item = slab.alloc().instrument(span.clone()).await;
+                            let _enter = span.enter();
+                            tracing::info!("allocated item");
+                            *item = i;
+                            guards.push(item);
+                        }
+                    })
+                }
+            });
+
+            for (i, guard) in guards.drain(..).enumerate() {
+                let _span = tracing::info_span!("drop", i, thread = 0).entered();
+                drop(guard);
+                tracing::info!("dropped item");
+            }
+
+            t1.join().unwrap();
+        })
+    }
+}
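
Usage sketch (not part of the diff, for orientation): the two APIs this change introduces are `IndexAlloc8` (and its wider siblings) from `index.rs`, and `Bitslab64` from the new `slab.rs`. The sketch below assumes a normal (non-loom) build of the `mnemos_bitslab` crate; the `main` function and the literal values are illustrative only.

use mnemos_bitslab::{index::IndexAlloc8, slab::Bitslab64};

fn main() {
    // `IndexAlloc8` hands out up to 8 unique indices from a single atomic byte.
    let indices = IndexAlloc8::new();
    let idx = indices.allocate().expect("a fresh allocator has free indices");
    assert_eq!(indices.allocated_count(), 1);
    // An index is only handed out again after it has been freed.
    indices.free(idx);
    assert!(indices.all_free());

    // `Bitslab64` pairs an `IndexAlloc64` with 64 lazily-initialized slots.
    let slab: Bitslab64<u32> = Bitslab64::new();
    let mut slot = slab.try_alloc().expect("a fresh slab has free slots");
    *slot = 42; // `RefMut` dereferences to the slot's value...
    assert_eq!(*slot, 42);
    drop(slot); // ...and releases the slot back to the slab when dropped.
    // Slots are initialized at most once, so a re-allocated slot still holds
    // the value it had when it was last freed.
    assert_eq!(slab.try_alloc().map(|s| *s), Some(42));
}

When every slot is taken, the async `Bitslab64::alloc` waits on the slab's internal `maitake_sync::WaitQueue` and is woken when a `RefMut` is dropped. The loom models for these types run under the new justfile recipe, e.g. `just loom mnemos-bitslab`, which builds with `RUSTFLAGS="--cfg loom"` and runs the crate's `--lib` tests in release mode.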