diff --git a/Cargo.toml b/Cargo.toml index f70f184f..db6d2e30 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,11 +21,28 @@ maintenance = { status = "experimental" } [features] sanitize = ['crossbeam-epoch/sanitize'] +std = ["crossbeam-epoch/std", "num_cpus", "parking_lot"] +default = ["std"] [dependencies] -crossbeam-epoch = "0.9" -parking_lot = "0.10" -num_cpus = "1.12.0" +lock_api = "0.3.3" + +[dependencies.parking_lot] +version = "0.10" +optional = true + +[dependencies.num_cpus] +version = "1.12.0" +optional = true + +[dependencies.crossbeam-epoch] +version = "0.9" +default-features = false +features = ["alloc"] + +[dependencies.ahash] +version = "0.3.2" +default-features = false [dev-dependencies] rand = "0.7" diff --git a/azure-pipelines.yml b/azure-pipelines.yml index a0e2d621..eb8a564b 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -29,6 +29,17 @@ jobs: condition: ne(variables.CACHE_RESTORED, 'true') - script: cargo deny check displayName: cargo deny + - job: no_std + displayName: "Compile-check on no_std target" + pool: + vmImage: ubuntu-16.04 + steps: + - template: install-rust.yml@templates + parameters: + targets: + - thumbv7m-none-eabi + - bash: cargo check --target thumbv7m-none-eabi --no-default-features + displayName: cargo check - job: miri displayName: "Run miri on test suite" dependsOn: deny diff --git a/src/iter/iter.rs b/src/iter/iter.rs index 77e4305a..b83fba7f 100644 --- a/src/iter/iter.rs +++ b/src/iter/iter.rs @@ -1,17 +1,23 @@ use super::NodeIter; +use core::sync::atomic::Ordering; use crossbeam_epoch::Guard; -use std::sync::atomic::Ordering; /// An iterator over a map's entries. /// /// See [`HashMap::iter`](crate::HashMap::iter) for details. #[derive(Debug)] -pub struct Iter<'g, K, V> { - pub(crate) node_iter: NodeIter<'g, K, V>, +pub struct Iter<'g, K, V, L> +where + L: lock_api::RawMutex, +{ + pub(crate) node_iter: NodeIter<'g, K, V, L>, pub(crate) guard: &'g Guard, } -impl<'g, K, V> Iterator for Iter<'g, K, V> { +impl<'g, K, V, L> Iterator for Iter<'g, K, V, L> +where + L: lock_api::RawMutex, +{ type Item = (&'g K, &'g V); fn next(&mut self) -> Option { let node = self.node_iter.next()?; @@ -26,11 +32,17 @@ impl<'g, K, V> Iterator for Iter<'g, K, V> { /// /// See [`HashMap::keys`](crate::HashMap::keys) for details. #[derive(Debug)] -pub struct Keys<'g, K, V> { - pub(crate) node_iter: NodeIter<'g, K, V>, +pub struct Keys<'g, K, V, L> +where + L: lock_api::RawMutex, +{ + pub(crate) node_iter: NodeIter<'g, K, V, L>, } -impl<'g, K, V> Iterator for Keys<'g, K, V> { +impl<'g, K, V, L> Iterator for Keys<'g, K, V, L> +where + L: lock_api::RawMutex, +{ type Item = &'g K; fn next(&mut self) -> Option { let node = self.node_iter.next()?; @@ -42,12 +54,18 @@ impl<'g, K, V> Iterator for Keys<'g, K, V> { /// /// See [`HashMap::values`](crate::HashMap::values) for details. 
#[derive(Debug)] -pub struct Values<'g, K, V> { - pub(crate) node_iter: NodeIter<'g, K, V>, +pub struct Values<'g, K, V, L> +where + L: lock_api::RawMutex, +{ + pub(crate) node_iter: NodeIter<'g, K, V, L>, pub(crate) guard: &'g Guard, } -impl<'g, K, V> Iterator for Values<'g, K, V> { +impl<'g, K, V, L> Iterator for Values<'g, K, V, L> +where + L: lock_api::RawMutex, +{ type Item = &'g V; fn next(&mut self) -> Option { let node = self.node_iter.next()?; @@ -61,9 +79,9 @@ impl<'g, K, V> Iterator for Values<'g, K, V> { #[cfg(test)] mod tests { use crate::HashMap; + use core::iter::FromIterator; use crossbeam_epoch as epoch; use std::collections::HashSet; - use std::iter::FromIterator; #[test] fn iter() { diff --git a/src/iter/traverser.rs b/src/iter/traverser.rs index 16076932..2cbc0a83 100644 --- a/src/iter/traverser.rs +++ b/src/iter/traverser.rs @@ -1,18 +1,26 @@ +#[cfg(not(feature = "std"))] +extern crate alloc; + use crate::node::{BinEntry, Node}; use crate::raw::Table; +#[cfg(not(feature = "std"))] +use alloc::boxed::Box; +use core::sync::atomic::Ordering; use crossbeam_epoch::{Guard, Shared}; -use std::sync::atomic::Ordering; #[derive(Debug)] -pub(crate) struct NodeIter<'g, K, V> { +pub(crate) struct NodeIter<'g, K, V, L> +where + L: lock_api::RawMutex, +{ /// Current table; update if resized - table: Option<&'g Table>, + table: Option<&'g Table>, - stack: Option>>, - spare: Option>>, + stack: Option>>, + spare: Option>>, /// The last bin entry iterated over - prev: Option<&'g Node>, + prev: Option<&'g Node>, /// Index of bin to use next index: usize, @@ -29,8 +37,11 @@ pub(crate) struct NodeIter<'g, K, V> { guard: &'g Guard, } -impl<'g, K, V> NodeIter<'g, K, V> { - pub(crate) fn new(table: Shared<'g, Table>, guard: &'g Guard) -> Self { +impl<'g, K, V, L> NodeIter<'g, K, V, L> +where + L: lock_api::RawMutex, +{ + pub(crate) fn new(table: Shared<'g, Table>, guard: &'g Guard) -> Self { let (table, len) = if table.is_null() { (None, 0) } else { @@ -53,7 +64,7 @@ impl<'g, K, V> NodeIter<'g, K, V> { } } - fn push_state(&mut self, t: &'g Table, i: usize, n: usize) { + fn push_state(&mut self, t: &'g Table, i: usize, n: usize) { let mut s = self.spare.take(); if let Some(ref mut s) = s { self.spare = s.next.take(); @@ -109,8 +120,11 @@ impl<'g, K, V> NodeIter<'g, K, V> { } } -impl<'g, K, V> Iterator for NodeIter<'g, K, V> { - type Item = &'g Node; +impl<'g, K, V, L> Iterator for NodeIter<'g, K, V, L> +where + L: lock_api::RawMutex, +{ + type Item = &'g Node; fn next(&mut self) -> Option { let mut e = None; if let Some(prev) = self.prev { @@ -177,11 +191,14 @@ impl<'g, K, V> Iterator for NodeIter<'g, K, V> { } #[derive(Debug)] -struct TableStack<'g, K, V> { +struct TableStack<'g, K, V, L> +where + L: lock_api::RawMutex, +{ length: usize, index: usize, - table: &'g Table, - next: Option>>, + table: &'g Table, + next: Option>>, } #[cfg(test)] @@ -189,18 +206,20 @@ mod tests { use super::*; use crate::raw::Table; use crossbeam_epoch::{self as epoch, Atomic, Owned}; - use parking_lot::Mutex; + use lock_api::Mutex; + + type L = parking_lot::RawMutex; #[test] fn iter_new() { let guard = epoch::pin(); - let iter = NodeIter::::new(Shared::null(), &guard); + let iter = NodeIter::::new(Shared::null(), &guard); assert_eq!(iter.count(), 0); } #[test] fn iter_empty() { - let table = Owned::new(Table::::new(16)); + let table = Owned::new(Table::::new(16)); let guard = epoch::pin(); let table = table.into_shared(&guard); let iter = NodeIter::new(table, &guard); @@ -219,7 +238,7 @@ mod tests { key: 0usize, 
value: Atomic::new(0usize), next: Atomic::null(), - lock: Mutex::new(()), + lock: Mutex::::new(()), })); let table = Owned::new(Table::from(bins)); @@ -254,7 +273,7 @@ mod tests { for bin in &mut bins[8..] { *bin = Atomic::new(BinEntry::Moved(&*deep_table as *const _)); } - let table = Owned::new(Table::::from(bins)); + let table = Owned::new(Table::::from(bins)); let guard = epoch::pin(); let table = table.into_shared(&guard); { diff --git a/src/lib.rs b/src/lib.rs index 8aa7365f..f1683fd8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -193,12 +193,14 @@ //! more efficient operation than if everything had to be atomically reference-counted. //! //! [`crossbeam::epoch`]: https://docs.rs/crossbeam/0.7/crossbeam/epoch/index.html -#![deny( - missing_docs, - missing_debug_implementations, - intra_doc_link_resolution_failure -)] +#![deny(missing_docs, intra_doc_link_resolution_failure)] #![warn(rust_2018_idioms)] +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(feature = "std", deny(missing_debug_implementations))] + +#[cfg(not(feature = "std"))] +#[macro_use] +extern crate alloc; mod map; mod node; @@ -211,5 +213,7 @@ pub use map::HashMap; /// Types needed to safely access shared data concurrently. pub mod epoch { - pub use crossbeam_epoch::{pin, Guard}; + #[cfg(feature = "std")] + pub use crossbeam_epoch::pin; + pub use crossbeam_epoch::Guard; } diff --git a/src/map.rs b/src/map.rs index 6dd0283b..55ea8889 100644 --- a/src/map.rs +++ b/src/map.rs @@ -1,29 +1,25 @@ use crate::iter::*; use crate::node::*; use crate::raw::*; +pub use ahash::RandomState; +use core::borrow::Borrow; +use core::hash::{BuildHasher, Hash, Hasher}; +use core::iter::FromIterator; +use core::sync::atomic::{AtomicIsize, AtomicUsize, Ordering}; use crossbeam_epoch::{self as epoch, Atomic, Guard, Owned, Shared}; -use std::borrow::Borrow; -use std::collections::hash_map::RandomState; +#[cfg(feature = "std")] use std::fmt::{self, Debug, Formatter}; -use std::hash::{BuildHasher, Hash, Hasher}; -use std::iter::FromIterator; -use std::sync::{ - atomic::{AtomicIsize, AtomicUsize, Ordering}, - Once, -}; - -macro_rules! isize_bits { - () => { - std::mem::size_of::() * 8 - }; -} +#[cfg(feature = "std")] +use std::sync::Once; + +const ISIZE_BITS: usize = core::mem::size_of::() * 8; /// The largest possible table capacity. This value must be /// exactly 1<<30 to stay within Java array allocation and indexing /// bounds for power of two table sizes, and is further required /// because the top two bits of 32bit hash fields are used for /// control purposes. -const MAXIMUM_CAPACITY: usize = 1 << 30; // TODO: use isize_bits!() +const MAXIMUM_CAPACITY: usize = 1 << 30; // TODO: use ISIZE_BITS /// The default initial table capacity. Must be a power of 2 /// (i.e., at least 1) and at most `MAXIMUM_CAPACITY`. @@ -38,18 +34,20 @@ const MIN_TRANSFER_STRIDE: isize = 16; /// The number of bits used for generation stamp in `size_ctl`. /// Must be at least 6 for 32bit arrays. -const RESIZE_STAMP_BITS: usize = isize_bits!() / 2; +const RESIZE_STAMP_BITS: usize = ISIZE_BITS / 2; /// The maximum number of threads that can help resize. /// Must fit in `32 - RESIZE_STAMP_BITS` bits for 32 bit architectures /// and `64 - RESIZE_STAMP_BITS` bits for 64 bit architectures -const MAX_RESIZERS: isize = (1 << (isize_bits!() - RESIZE_STAMP_BITS)) - 1; +const MAX_RESIZERS: isize = (1 << (ISIZE_BITS - RESIZE_STAMP_BITS)) - 1; /// The bit shift for recording size stamp in `size_ctl`. 
-const RESIZE_STAMP_SHIFT: usize = isize_bits!() - RESIZE_STAMP_BITS; +const RESIZE_STAMP_SHIFT: usize = ISIZE_BITS - RESIZE_STAMP_BITS; +#[cfg(feature = "std")] static NCPU_INITIALIZER: Once = Once::new(); -static NCPU: AtomicUsize = AtomicUsize::new(0); +#[cfg(feature = "std")] +static NCPU: AtomicUsize = AtomicUsize::new(1); macro_rules! load_factor { ($n: expr) => { @@ -60,14 +58,25 @@ macro_rules! load_factor { /// A concurrent hash table. /// +/// Note that `ahash::RandomState`, the default value of `S`, is not +/// cryptographically secure. Therefore it is strongly recommended that you do +/// not use this hash for cryptographic purproses. +/// See [`ahash`](https://github.com/tkaitchuck/ahash) for more information. +/// /// See the [crate-level documentation](index.html) for details. -pub struct HashMap { +#[cfg(feature = "std")] +pub struct HashMap +where + L: lock_api::RawMutex, + S: BuildHasher, +{ + // NOTE: if you change any field here, you must _also_ change it in the copy below /// The array of bins. Lazily initialized upon first insertion. /// Size is always a power of two. Accessed directly by iterators. - table: Atomic>, + table: Atomic>, /// The next table to use; non-null only while resizing. - next_table: Atomic>, + next_table: Atomic>, /// The next table index (plus one) to split while resizing. transfer_index: AtomicIsize, @@ -85,37 +94,67 @@ pub struct HashMap { build_hasher: S, } -impl Default for HashMap +/// A concurrent hash table. +/// +/// Note that `ahash::RandomState`, the default value of `S`, is not +/// cryptographically secure. Therefore it is strongly recommended that you do +/// not use this hash for cryptographic purproses. +/// See [`ahash`](https://github.com/tkaitchuck/ahash) for more information. +/// +/// See the [crate-level documentation](index.html) for details. +#[cfg(not(feature = "std"))] +pub struct HashMap +where + L: lock_api::RawMutex, + S: BuildHasher, +{ + // NOTE: this is, and must be, an exact copy of the `HashMap` definition above, with just the + // default type for `L` unset. This is because in no_std environments, there is no sensible + // default lock type for us to use. + table: Atomic>, + next_table: Atomic>, + transfer_index: AtomicIsize, + count: AtomicUsize, + size_ctl: AtomicIsize, + build_hasher: S, +} + +impl Default for HashMap where K: Sync + Send + Clone + Hash + Eq, V: Sync + Send, + L: lock_api::RawMutex, + S: BuildHasher + Default, { fn default() -> Self { Self::new() } } -impl HashMap +impl HashMap where K: Sync + Send + Clone + Hash + Eq, V: Sync + Send, + L: lock_api::RawMutex, + S: BuildHasher + Default, { /// Creates a new, empty map with the default initial table size (16). pub fn new() -> Self { - Self::with_hasher(RandomState::new()) + Self::with_hasher(S::default()) } /// Creates a new, empty map with an initial table size accommodating the specified number of /// elements without the need to dynamically resize. pub fn with_capacity(n: usize) -> Self { - Self::with_capacity_and_hasher(RandomState::new(), n) + Self::with_capacity_and_hasher(S::default(), n) } } -impl HashMap +impl HashMap where K: Sync + Send + Clone + Hash + Eq, V: Sync + Send, + L: lock_api::RawMutex, S: BuildHasher, { /// Creates an empty map which will use `hash_builder` to hash keys. 
@@ -160,10 +199,11 @@ where } } -impl HashMap +impl HashMap where K: Sync + Send + Clone + Hash + Eq, V: Sync + Send, + L: lock_api::RawMutex, S: BuildHasher, { fn hash(&self, key: &Q) -> u64 { @@ -172,20 +212,20 @@ where h.finish() } + #[inline] /// Tests if `key` is a key in this table. /// /// The key may be any borrowed form of the map's key type, but `Hash` and `Eq` on the borrowed /// form must match those for the key type. - pub fn contains_key(&self, key: &Q) -> bool + pub fn contains_key(&self, key: &Q, guard: &Guard) -> bool where K: Borrow, Q: ?Sized + Hash + Eq, { - let guard = crossbeam_epoch::pin(); self.get(key, &guard).is_some() } - fn get_node<'g, Q>(&'g self, key: &Q, guard: &'g Guard) -> Option<&'g Node> + fn get_node<'g, Q>(&'g self, key: &Q, guard: &'g Guard) -> Option<&'g Node> where K: Borrow, Q: ?Sized + Hash + Eq, @@ -261,19 +301,19 @@ where unsafe { v.as_ref() } } + #[inline] /// Obtains the value to which `key` is mapped and passes it through the closure `then`. /// /// Returns `None` if this map contains no mapping for `key`. /// /// The key may be any borrowed form of the map's key type, but `Hash` and `Eq` on the borrowed /// form must match those for the key type. - pub fn get_and(&self, key: &Q, then: F) -> Option + pub fn get_and(&self, key: &Q, then: F, guard: &Guard) -> Option where K: Borrow, Q: ?Sized + Hash + Eq, F: FnOnce(&V) -> R, { - let guard = &crossbeam_epoch::pin(); self.get(key, guard).map(then) } @@ -299,7 +339,7 @@ where unsafe { v.as_ref() }.map(|v| (&node.key, v)) } - fn init_table<'g>(&'g self, guard: &'g Guard) -> Shared<'g, Table> { + fn init_table<'g>(&'g self, guard: &'g Guard) -> Shared<'g, Table> { loop { let table = self.table.load(Ordering::SeqCst, guard); // safety: we loaded the table while epoch was pinned. table won't be deallocated until @@ -310,6 +350,15 @@ where // try to allocate the table let mut sc = self.size_ctl.load(Ordering::SeqCst); if sc < 0 { + #[cfg(not(feature = "std"))] + // for there to be a race, there must be another thread running + // concurrently with us. That thread cannot be blocked on us, + // since we are not in any mutually-exclusive section. So our + // goal is just to not waste cycles and give it some time to + // complete. It is not a requirement that we fully yield. + core::sync::atomic::spin_loop_hint(); + + #[cfg(feature = "std")] // we lost the initialization race; just spin std::thread::yield_now(); continue; @@ -362,7 +411,7 @@ where value: Atomic::new(value), hash: h, next: Atomic::null(), - lock: parking_lot::Mutex::new(()), + lock: lock_api::Mutex::new(()), })); loop { @@ -544,10 +593,10 @@ where fn help_transfer<'g>( &'g self, - table: Shared<'g, Table>, - next_table: *const Table, + table: Shared<'g, Table>, + next_table: *const Table, guard: &'g Guard, - ) -> Shared<'g, Table> { + ) -> Shared<'g, Table> { if table.is_null() || next_table.is_null() { return table; } @@ -583,7 +632,7 @@ where fn add_count(&self, n: isize, resize_hint: Option, guard: &Guard) { // TODO: implement the Java CounterCell business here - use std::cmp; + use core::cmp; let mut count = match n.cmp(&0) { cmp::Ordering::Greater => { let n = n as usize; @@ -660,8 +709,8 @@ where fn transfer<'g>( &'g self, - table: Shared<'g, Table>, - mut next_table: Shared<'g, Table>, + table: Shared<'g, Table>, + mut next_table: Shared<'g, Table>, guard: &'g Guard, ) { // safety: table was read while `guard` was held. 
the code that drops table only drops it @@ -672,7 +721,7 @@ where let ncpu = num_cpus(); let stride = if ncpu > 1 { (n >> 3) / ncpu } else { n }; - let stride = std::cmp::max(stride as isize, MIN_TRANSFER_STRIDE); + let stride = core::cmp::max(stride as isize, MIN_TRANSFER_STRIDE); if next_table.is_null() { // we are initiating a resize @@ -897,7 +946,7 @@ where *link = Owned::new(BinEntry::Node(Node { hash: node.hash, key: node.key.clone(), - lock: parking_lot::Mutex::new(()), + lock: lock_api::Mutex::new(()), value: node.value.clone(), next: Atomic::from(*link), })) @@ -957,7 +1006,7 @@ where // TODO: find out if this is neccessary let size = size + (size >> 1) + 1; - std::cmp::min(MAXIMUM_CAPACITY, size.next_power_of_two()) + core::cmp::min(MAXIMUM_CAPACITY, size.next_power_of_two()) } as isize; loop { @@ -1060,11 +1109,9 @@ where #[inline] /// Tries to reserve capacity for at least additional more elements. /// The collection may reserve more space to avoid frequent reallocations. - pub fn reserve(&self, additional: usize) { + pub fn reserve(&self, additional: usize, guard: &Guard) { let absolute = self.len() + additional; - - let guard = epoch::pin(); - self.try_presize(absolute, &guard); + self.try_presize(absolute, guard); } /// Removes the key (and its corresponding value) from this map. @@ -1159,7 +1206,7 @@ where // TODO: tree nodes let mut e = bin; - let mut pred: Shared<'_, BinEntry> = Shared::null(); + let mut pred: Shared<'_, BinEntry> = Shared::null(); loop { // safety: either e is bin, in which case it is valid due to the above, // or e was obtained from a next pointer. Any next pointer obtained from @@ -1257,16 +1304,15 @@ where /// If `f` returns `false` for a given key/value pair, but the value for that pair is concurrently /// modified before the removal takes place, the entry will not be removed. /// If you want the removal to happen even in the case of concurrent modification, use [`HashMap::retain_force`]. - pub fn retain(&self, mut f: F) + pub fn retain(&self, mut f: F, guard: &Guard) where F: FnMut(&K, &V) -> bool, { - let guard = epoch::pin(); // removed selected keys for (k, v) in self.iter(&guard) { if !f(k, v) { let old_value: Shared<'_, V> = Shared::from(v as *const V); - self.replace_node(k, None, Some(old_value), &guard); + self.replace_node(k, None, Some(old_value), guard); } } } @@ -1277,15 +1323,14 @@ where /// /// This method always deletes any key/value pair that `f` returns `false` for, /// even if if the value is updated concurrently. If you do not want that behavior, use [`HashMap::retain`]. - pub fn retain_force(&self, mut f: F) + pub fn retain_force(&self, mut f: F, guard: &Guard) where F: FnMut(&K, &V) -> bool, { - let guard = epoch::pin(); // removed selected keys for (k, v) in self.iter(&guard) { if !f(k, v) { - self.replace_node(k, None, None, &guard); + self.replace_node(k, None, None, guard); } } } @@ -1294,7 +1339,7 @@ where /// The iterator element type is `(&'g K, &'g V)`. /// /// To obtain a `Guard`, use [`epoch::pin`]. - pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, K, V> { + pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, K, V, L> { let table = self.table.load(Ordering::SeqCst, guard); let node_iter = NodeIter::new(table, guard); Iter { node_iter, guard } @@ -1304,7 +1349,7 @@ where /// The iterator element type is `&'g K`. /// /// To obtain a `Guard`, use [`epoch::pin`]. 
- pub fn keys<'g>(&'g self, guard: &'g Guard) -> Keys<'g, K, V> { + pub fn keys<'g>(&'g self, guard: &'g Guard) -> Keys<'g, K, V, L> { let table = self.table.load(Ordering::SeqCst, guard); let node_iter = NodeIter::new(table, guard); Keys { node_iter } @@ -1314,7 +1359,7 @@ where /// The iterator element type is `&'g V`. /// /// To obtain a `Guard`, use [`epoch::pin`]. - pub fn values<'g>(&'g self, guard: &'g Guard) -> Values<'g, K, V> { + pub fn values<'g>(&'g self, guard: &'g Guard) -> Values<'g, K, V, L> { let table = self.table.load(Ordering::SeqCst, guard); let node_iter = NodeIter::new(table, guard); Values { node_iter, guard } @@ -1348,11 +1393,13 @@ where } } -impl PartialEq for HashMap +#[cfg(feature = "std")] +impl PartialEq for HashMap where K: Sync + Send + Clone + Eq + Hash, V: Sync + Send + PartialEq, S: BuildHasher, + L: lock_api::RawMutex, { fn eq(&self, other: &Self) -> bool { if self.len() != other.len() { @@ -1365,19 +1412,23 @@ where } } -impl Eq for HashMap +#[cfg(feature = "std")] +impl Eq for HashMap where K: Sync + Send + Clone + Eq + Hash, V: Sync + Send + Eq, S: BuildHasher, + L: lock_api::RawMutex, { } -impl fmt::Debug for HashMap +#[cfg(feature = "std")] +impl fmt::Debug for HashMap where K: Sync + Send + Clone + Debug + Eq + Hash, V: Sync + Send + Debug, S: BuildHasher, + L: lock_api::RawMutex, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let guard = epoch::pin(); @@ -1385,7 +1436,11 @@ where } } -impl Drop for HashMap { +impl Drop for HashMap +where + L: lock_api::RawMutex, + S: BuildHasher, +{ fn drop(&mut self) { // safety: we have &mut self _and_ all references we have returned are bound to the // lifetime of their borrow of self, so there cannot be any outstanding references to @@ -1409,11 +1464,13 @@ impl Drop for HashMap { } } -impl Extend<(K, V)> for &HashMap +#[cfg(feature = "std")] +impl Extend<(K, V)> for &HashMap where K: Sync + Send + Clone + Hash + Eq, V: Sync + Send, S: BuildHasher, + L: lock_api::RawMutex, { #[inline] fn extend>(&mut self, iter: T) { @@ -1429,18 +1486,20 @@ where (iter.size_hint().0 + 1) / 2 }; - self.reserve(reserve); + let guard = epoch::pin(); - let guard = crossbeam_epoch::pin(); + self.reserve(reserve, &guard); (*self).put_all(iter.into_iter(), &guard); } } -impl<'a, K, V, S> Extend<(&'a K, &'a V)> for &HashMap +#[cfg(feature = "std")] +impl<'a, K, V, L, S> Extend<(&'a K, &'a V)> for &HashMap where K: Sync + Send + Copy + Hash + Eq, V: Sync + Send + Copy, S: BuildHasher, + L: lock_api::RawMutex, { #[inline] fn extend>(&mut self, iter: T) { @@ -1448,10 +1507,12 @@ where } } -impl FromIterator<(K, V)> for HashMap +impl FromIterator<(K, V)> for HashMap where K: Sync + Send + Clone + Hash + Eq, V: Sync + Send, + S: BuildHasher + Default, + L: lock_api::RawMutex, { fn from_iter>(iter: T) -> Self { let mut iter = iter.into_iter(); @@ -1473,10 +1534,12 @@ where } } -impl<'a, K, V> FromIterator<(&'a K, &'a V)> for HashMap +impl<'a, K, V, L, S> FromIterator<(&'a K, &'a V)> for HashMap where K: Sync + Send + Copy + Hash + Eq, V: Sync + Send + Copy, + S: BuildHasher + Default, + L: lock_api::RawMutex, { #[inline] fn from_iter>(iter: T) -> Self { @@ -1484,10 +1547,12 @@ where } } -impl<'a, K, V> FromIterator<&'a (K, V)> for HashMap +impl<'a, K, V, L, S> FromIterator<&'a (K, V)> for HashMap where K: Sync + Send + Copy + Hash + Eq, V: Sync + Send + Copy, + S: BuildHasher + Default, + L: lock_api::RawMutex, { #[inline] fn from_iter>(iter: T) -> Self { @@ -1495,13 +1560,15 @@ where } } -impl Clone for HashMap +#[cfg(feature = 
"std")] +impl Clone for HashMap where K: Sync + Send + Clone + Hash + Eq, V: Sync + Send + Clone, S: BuildHasher + Clone, + L: lock_api::RawMutex, { - fn clone(&self) -> HashMap { + fn clone(&self) -> HashMap { let cloned_map = Self::with_capacity_and_hasher(self.build_hasher.clone(), self.len()); { let guard = epoch::pin(); @@ -1513,17 +1580,19 @@ where } } -#[cfg(not(miri))] #[inline] -/// Returns the number of physical CPUs in the machine (_O(1)_). +#[cfg(all(not(miri), feature = "std"))] +/// Returns the number of physical CPUs in the machine. +/// Returns `1` in `no_std` environment. fn num_cpus() -> usize { NCPU_INITIALIZER.call_once(|| NCPU.store(num_cpus::get_physical(), Ordering::Relaxed)); - NCPU.load(Ordering::Relaxed) } -#[cfg(miri)] #[inline] +#[cfg(any(miri, not(feature = "std")))] +/// Returns the number of physical CPUs in the machine. +/// Returns `1` in `no_std` environment. const fn num_cpus() -> usize { 1 } @@ -1549,6 +1618,7 @@ fn capacity() { // The table has been resized once (and it's capacity doubled), // since we inserted more elements than it can hold } + #[cfg(test)] mod tests { use super::*; @@ -1559,7 +1629,7 @@ mod tests { map.insert(42, 0, &guard); - map.reserve(32); + map.reserve(32, &guard); let capacity = map.capacity(&guard); assert!(capacity >= 16 + 32); @@ -1570,7 +1640,7 @@ mod tests { let map = HashMap::::new(); let guard = epoch::pin(); - map.reserve(32); + map.reserve(32, &guard); let capacity = map.capacity(&guard); assert!(capacity >= 32); diff --git a/src/node.rs b/src/node.rs index 5fbb8082..84cacfdb 100644 --- a/src/node.rs +++ b/src/node.rs @@ -1,15 +1,18 @@ use crate::raw::Table; +use core::borrow::Borrow; +use core::sync::atomic::Ordering; use crossbeam_epoch::{Atomic, Guard, Shared}; -use parking_lot::Mutex; -use std::borrow::Borrow; -use std::sync::atomic::Ordering; +use lock_api::Mutex; /// Entry in a bin. /// /// Will _generally_ be `Node`. Any entry that is not first in the bin, will be a `Node`. #[derive(Debug)] -pub(crate) enum BinEntry { - Node(Node), +pub(crate) enum BinEntry +where + L: lock_api::RawMutex, +{ + Node(Node), // safety: the pointer t to the next table inside Moved(t) is a valid pointer if the Moved(t) // entry was read after loading `map::HashMap.table` while the guard used to load that table is // still alive: @@ -40,29 +43,36 @@ pub(crate) enum BinEntry { // // Since finishing a resize is the only time a table is `defer_destroy`ed, the above covers // all cases. - Moved(*const Table), + Moved(*const Table), } -unsafe impl Send for BinEntry +unsafe impl Send for BinEntry where K: Send, V: Send, - Node: Send, - Table: Send, + L: Send, + Node: Send, + Table: Send, + L: lock_api::RawMutex, { } -unsafe impl Sync for BinEntry +unsafe impl Sync for BinEntry where K: Sync, V: Sync, - Node: Sync, - Table: Sync, + L: Sync, + Node: Sync, + Table: Sync, + L: lock_api::RawMutex, { } -impl BinEntry { - pub(crate) fn as_node(&self) -> Option<&Node> { +impl BinEntry +where + L: lock_api::RawMutex, +{ + pub(crate) fn as_node(&self) -> Option<&Node> { if let BinEntry::Node(ref n) = *self { Some(n) } else { @@ -71,13 +81,16 @@ impl BinEntry { } } -impl BinEntry { +impl BinEntry +where + L: lock_api::RawMutex, +{ pub(crate) fn find<'g, Q>( &'g self, hash: u64, key: &Q, guard: &'g Guard, - ) -> Shared<'g, BinEntry> + ) -> Shared<'g, BinEntry> where K: Borrow, Q: ?Sized + Eq, @@ -139,10 +152,13 @@ impl BinEntry { /// Key-value entry. 
#[derive(Debug)] -pub(crate) struct Node { +pub(crate) struct Node +where + L: lock_api::RawMutex, +{ pub(crate) hash: u64, pub(crate) key: K, pub(crate) value: Atomic, - pub(crate) next: Atomic>, - pub(crate) lock: Mutex<()>, + pub(crate) next: Atomic>, + pub(crate) lock: Mutex, } diff --git a/src/raw/mod.rs b/src/raw/mod.rs index ceb1c51a..8e04df49 100644 --- a/src/raw/mod.rs +++ b/src/raw/mod.rs @@ -1,22 +1,35 @@ use crate::node::*; +#[cfg(not(feature = "std"))] +use alloc::boxed::Box; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; +use core::fmt::Debug; +use core::sync::atomic::Ordering; use crossbeam_epoch::{Atomic, Guard, Owned, Shared}; -use std::fmt::Debug; -use std::sync::atomic::Ordering; #[derive(Debug)] -pub(crate) struct Table { - bins: Box<[Atomic>]>, +pub(crate) struct Table +where + L: lock_api::RawMutex, +{ + bins: Box<[Atomic>]>, } -impl From>>> for Table { - fn from(bins: Vec>>) -> Self { +impl From>>> for Table +where + L: lock_api::RawMutex, +{ + fn from(bins: Vec>>) -> Self { Self { bins: bins.into_boxed_slice(), } } } -impl Table { +impl Table +where + L: lock_api::RawMutex, +{ pub(crate) fn new(bins: usize) -> Self { Self::from(vec![Atomic::null(); bins]) } @@ -35,7 +48,10 @@ impl Table { // anything in the map. let guard = unsafe { crossbeam_epoch::unprotected() }; - for bin in Vec::from(std::mem::replace(&mut self.bins, vec![].into_boxed_slice())) { + for bin in Vec::from(core::mem::replace( + &mut self.bins, + vec![].into_boxed_slice(), + )) { if bin.load(Ordering::SeqCst, guard).is_null() { // bin was never used continue; @@ -74,7 +90,10 @@ impl Table { } } -impl Drop for Table { +impl Drop for Table +where + L: lock_api::RawMutex, +{ fn drop(&mut self) { // we need to drop any forwarding nodes (since they are heap allocated). 
@@ -99,7 +118,10 @@ impl Drop for Table { } } -impl Table { +impl Table +where + L: lock_api::RawMutex, +{ #[inline] pub(crate) fn bini(&self, hash: u64) -> usize { let mask = self.bins.len() as u64 - 1; @@ -107,7 +129,7 @@ impl Table { } #[inline] - pub(crate) fn bin<'g>(&'g self, i: usize, guard: &'g Guard) -> Shared<'g, BinEntry> { + pub(crate) fn bin<'g>(&'g self, i: usize, guard: &'g Guard) -> Shared<'g, BinEntry> { self.bins[i].load(Ordering::Acquire, guard) } @@ -116,18 +138,22 @@ impl Table { pub(crate) fn cas_bin<'g>( &'g self, i: usize, - current: Shared<'_, BinEntry>, - new: Owned>, + current: Shared<'_, BinEntry>, + new: Owned>, guard: &'g Guard, ) -> Result< - Shared<'g, BinEntry>, - crossbeam_epoch::CompareAndSetError<'g, BinEntry, Owned>>, + Shared<'g, BinEntry>, + crossbeam_epoch::CompareAndSetError<'g, BinEntry, Owned>>, > { self.bins[i].compare_and_set(current, new, Ordering::AcqRel, guard) } #[inline] - pub(crate) fn store_bin>>(&self, i: usize, new: P) { + pub(crate) fn store_bin>>( + &self, + i: usize, + new: P, + ) { self.bins[i].store(new, Ordering::Release) } } diff --git a/tests/basic.rs b/tests/basic.rs index b61d6df6..edf7beba 100644 --- a/tests/basic.rs +++ b/tests/basic.rs @@ -106,7 +106,7 @@ impl Hasher for OneBucketHasher { #[test] fn one_bucket() { let guard = epoch::pin(); - let map = HashMap::<&'static str, usize, _>::with_hasher(OneBucketState); + let map = HashMap::<&'static str, usize, parking_lot::RawMutex, _>::with_hasher(OneBucketState); // we want to check that all operations work regardless on whether // we are operating on the head of a bucket, the tail of the bucket, @@ -368,7 +368,7 @@ fn get_and() { let guard = epoch::pin(); map.insert(42, 32, &guard); - assert_eq!(map.get_and(&42, |value| *value + 10), Some(42)); + assert_eq!(map.get_and(&42, |value| *value + 10, &guard), Some(42)); } #[test] @@ -448,47 +448,53 @@ fn from_iter_empty() { #[test] fn retain_empty() { + let guard = epoch::pin(); let map = HashMap::<&'static str, u32>::new(); - map.retain(|_, _| false); + map.retain(|_, _| false, &guard); assert_eq!(map.len(), 0); } #[test] fn retain_all_false() { + let guard = epoch::pin(); let map: HashMap = (0..10 as u32).map(|x| (x, x)).collect(); - map.retain(|_, _| false); + map.retain(|_, _| false, &guard); assert_eq!(map.len(), 0); } #[test] fn retain_all_true() { let size = 10usize; + let guard = epoch::pin(); let map: HashMap = (0..size).map(|x| (x, x)).collect(); - map.retain(|_, _| true); + map.retain(|_, _| true, &guard); assert_eq!(map.len(), size); } #[test] fn retain_some() { + let guard = epoch::pin(); let map: HashMap = (0..10).map(|x| (x, x)).collect(); let expected_map: HashMap = (5..10).map(|x| (x, x)).collect(); - map.retain(|_, v| *v >= 5); + map.retain(|_, v| *v >= 5, &guard); assert_eq!(map.len(), 5); assert_eq!(map, expected_map); } #[test] fn retain_force_empty() { + let guard = epoch::pin(); let map = HashMap::<&'static str, u32>::new(); - map.retain_force(|_, _| false); + map.retain_force(|_, _| false, &guard); assert_eq!(map.len(), 0); } #[test] fn retain_force_some() { + let guard = epoch::pin(); let map: HashMap = (0..10).map(|x| (x, x)).collect(); let expected_map: HashMap = (5..10).map(|x| (x, x)).collect(); - map.retain_force(|_, v| *v >= 5); + map.retain_force(|_, v| *v >= 5, &guard); assert_eq!(map.len(), 5); assert_eq!(map, expected_map); } diff --git a/tests/jdk/map_check.rs b/tests/jdk/map_check.rs index 28548baf..42efaaaf 100644 --- a/tests/jdk/map_check.rs +++ b/tests/jdk/map_check.rs @@ -57,8 +57,9 @@ 
where
     K: Sync + Send + Copy + Hash + Eq,
 {
     let mut sum = 0;
+    let guard = epoch::pin();
     for i in 0..keys.len() {
-        if map.contains_key(&keys[i]) {
+        if map.contains_key(&keys[i], &guard) {
             sum += 1;
         }
     }
@@ -86,11 +87,12 @@ where
     K: Sync + Send + Copy + Hash + Eq,
 {
     let mut sum = 0;
+    let guard = epoch::pin();
     for i in 0..k1.len() {
-        if map.contains_key(&k1[i]) {
+        if map.contains_key(&k1[i], &guard) {
             sum += 1;
         }
-        if map.contains_key(&k2[i]) {
+        if map.contains_key(&k2[i], &guard) {
             sum += 1;
         }
     }
diff --git a/tests/jsr166.rs b/tests/jsr166.rs
index a915558d..a108639f 100644
--- a/tests/jsr166.rs
+++ b/tests/jsr166.rs
@@ -7,7 +7,7 @@ const ITER: [(usize, &'static str); 5] = [(1, "A"), (2, "B"), (3, "C"), (4, "D")
 fn test_from_iter() {
     let guard = unsafe { crossbeam_epoch::unprotected() };
     let map1 = from_iter_contron();
-    let map2 = HashMap::from_iter(ITER.iter());
+    let map2: HashMap<_, _> = HashMap::from_iter(ITER.iter());
 
     // TODO: improve when `Map: Eq`
     let mut fst: Vec<_> = map1.iter(&guard).collect();
@@ -56,5 +56,5 @@ fn test_remove() {
     map.remove(&5, &guard);
     // TODO: add len check once method exists
     // assert_eq!(map.len(), 4);
-    assert!(!map.contains_key(&5));
+    assert!(!map.contains_key(&5, &guard));
 }
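
For readers of this change, the net effect on the public API is that the map gains a lock type parameter (`L: lock_api::RawMutex`, defaulting to `parking_lot::RawMutex` under `std`), switches the default hasher to `ahash::RandomState`, and makes the operations that previously pinned the epoch internally (`contains_key`, `get_and`, `reserve`, `retain`, `retain_force`) take an explicit `&Guard`. The sketch below is illustrative only and not part of the diff; it assumes the crate is `flurry` built with the default `std` feature (so the `L` and `S` defaults apply) and that `parking_lot` is a direct dependency for the last line.

```rust
use flurry::{epoch, HashMap};

fn main() {
    // Under `std`, the default type parameters keep the old two-parameter
    // spelling of the type working.
    let map = HashMap::<usize, usize>::new();

    // Operations no longer pin the epoch internally; the caller passes a
    // guard explicitly and can reuse it across calls.
    let guard = epoch::pin();
    map.insert(1, 42, &guard);
    assert!(map.contains_key(&1, &guard));
    assert_eq!(map.get_and(&1, |v| *v + 1, &guard), Some(43));

    // Capacity and retention operations take the guard as well.
    map.reserve(32, &guard);
    map.retain(|_key, value| *value > 0, &guard);

    // The lock type can also be named explicitly, mirroring tests/basic.rs.
    let _explicit = HashMap::<usize, usize, parking_lot::RawMutex>::new();
}
```

With `--no-default-features`, `epoch::pin` is not re-exported and there is no default lock type, so `L` must be spelled out with a `lock_api::RawMutex` implementation of your choosing, and the `Guard` has to come from a caller-managed source such as a `crossbeam_epoch::Collector`/`LocalHandle`.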