
Commit be50ac0

Prepare library for no_std
This is essentially 7b32a16 rebased on top of #47.
1 parent: 5f999e5

8 files changed (+86 −31 lines)


Cargo.toml

Lines changed: 11 additions & 2 deletions
```diff
@@ -21,11 +21,20 @@ maintenance = { status = "experimental" }
 
 [features]
 sanitize = ['crossbeam-epoch/sanitize']
+std = ["crossbeam-epoch/std", "num_cpus"]
+default = ["std"]
 
 [dependencies]
-crossbeam-epoch = "0.9"
 parking_lot = "0.10"
-num_cpus = "1.12.0"
+
+[dependencies.num_cpus]
+version = "1.12.0"
+optional = true
+
+[dependencies.crossbeam-epoch]
+version = "0.9"
+default-features = false
+features = ["alloc"]
 
 [dependencies.ahash]
 version = "0.3.2"
```
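With `num_cpus` now optional and tied to the new `std` feature, any call into it has to be compiled out when the feature is disabled. A minimal sketch of the shape this takes (the function name below is illustrative, not part of this commit):

```rust
// Sketch only: consuming an optional, `std`-gated dependency.
// `parallelism_hint` is an illustrative name, not from the diff.
#[cfg(feature = "std")]
fn parallelism_hint() -> usize {
    // `num_cpus` is only linked into the build when the `std` feature pulls it in.
    num_cpus::get_physical()
}

#[cfg(not(feature = "std"))]
const fn parallelism_hint() -> usize {
    // No portable CPU count without `std`; fall back to a single unit of work.
    1
}
```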

azure-pipelines.yml

Lines changed: 11 additions & 0 deletions
```diff
@@ -29,6 +29,17 @@ jobs:
     condition: ne(variables.CACHE_RESTORED, 'true')
   - script: cargo deny check
     displayName: cargo deny
+- job: no_std
+  displayName: "Compile-check on no_std target"
+  pool:
+    vmImage: ubuntu-16.04
+  steps:
+    - template: install-rust.yml@templates
+      parameters:
+        targets:
+          - thumbv7m-none-eabi
+    - bash: cargo check --target thumbv7m-none-eabi --no-default-features
+      displayName: cargo check
 - job: miri
   displayName: "Run miri on test suite"
   dependsOn: deny
```
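The same check can be reproduced locally, assuming the bare-metal target is installed via rustup (`rustup target add thumbv7m-none-eabi`), by running the command from the pipeline step: `cargo check --target thumbv7m-none-eabi --no-default-features`.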

src/iter/iter.rs

Lines changed: 2 additions & 2 deletions
```diff
@@ -1,6 +1,6 @@
 use super::NodeIter;
+use core::sync::atomic::Ordering;
 use crossbeam_epoch::Guard;
-use std::sync::atomic::Ordering;
 
 /// An iterator over a map's entries.
 ///
@@ -61,9 +61,9 @@ impl<'g, K, V> Iterator for Values<'g, K, V> {
 #[cfg(test)]
 mod tests {
     use crate::HashMap;
+    use core::iter::FromIterator;
     use crossbeam_epoch as epoch;
     use std::collections::HashSet;
-    use std::iter::FromIterator;
 
     #[test]
     fn iter() {
```

src/iter/traverser.rs

Lines changed: 6 additions & 1 deletion
```diff
@@ -1,7 +1,12 @@
+#[cfg(not(feature = "std"))]
+extern crate alloc;
+
 use crate::node::{BinEntry, Node};
 use crate::raw::Table;
+#[cfg(not(feature = "std"))]
+use alloc::boxed::Box;
+use core::sync::atomic::Ordering;
 use crossbeam_epoch::{Guard, Shared};
-use std::sync::atomic::Ordering;
 
 #[derive(Debug)]
 pub(crate) struct NodeIter<'g, K, V> {
```
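The import swaps in this and the following files all follow the same rule: items that `std` merely re-exports are taken from `core`, and heap types are taken from `alloc`. A standalone, self-contained sketch of that pattern (not code from the crate):

```rust
// Standalone sketch of the core/alloc substitution used throughout this commit.
#![no_std]

extern crate alloc;

use alloc::{boxed::Box, vec::Vec};
use core::sync::atomic::{AtomicUsize, Ordering};

// A heap allocation and an atomic, with no `std` in sight.
pub fn example() -> Box<Vec<usize>> {
    static COUNT: AtomicUsize = AtomicUsize::new(0);
    COUNT.fetch_add(1, Ordering::Relaxed);
    Box::new(Vec::new())
}
```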

src/lib.rs

Lines changed: 10 additions & 7 deletions
```diff
@@ -193,13 +193,14 @@
 //! more efficient operation than if everything had to be atomically reference-counted.
 //!
 //! [`crossbeam::epoch`]: https://docs.rs/crossbeam/0.7/crossbeam/epoch/index.html
-#![deny(
-    missing_docs,
-    missing_debug_implementations,
-    unreachable_pub,
-    intra_doc_link_resolution_failure
-)]
+#![deny(missing_docs, unreachable_pub, intra_doc_link_resolution_failure)]
 #![warn(rust_2018_idioms)]
+#![cfg_attr(not(feature = "std"), no_std)]
+#![cfg_attr(feature = "std", deny(missing_debug_implementations))]
+
+#[cfg(not(feature = "std"))]
+#[macro_use]
+extern crate alloc;
 
 mod map;
 mod node;
@@ -215,5 +216,7 @@ pub type DefaultHashBuilder = ahash::RandomState;
 
 /// Types needed to safely access shared data concurrently.
 pub mod epoch {
-    pub use crossbeam_epoch::{pin, Guard};
+    #[cfg(feature = "std")]
+    pub use crossbeam_epoch::pin;
+    pub use crossbeam_epoch::Guard;
 }
```
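Since `epoch::pin` is only re-exported with `std` (crossbeam-epoch's default, thread-local collector needs `std`), a `no_std` caller has to obtain its `Guard` another way. One option, sketched here on the assumption that crossbeam-epoch's `Collector`/`LocalHandle` API is used (it is available with only the `alloc` feature); how the guard is then handed to the map is up to the caller and not prescribed by this commit:

```rust
// Hedged sketch: pinning without `epoch::pin` in a no_std build.
use crossbeam_epoch::Collector;

fn pin_without_std() {
    // Explicitly create a collector and register a participant, instead of
    // relying on the std-only global collector behind `epoch::pin`.
    let collector = Collector::new();
    let handle = collector.register();
    let guard = handle.pin();
    // ... pass `&guard` to the map's guard-taking methods ...
    drop(guard);
}
```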

src/map.rs

Lines changed: 34 additions & 14 deletions
```diff
@@ -1,15 +1,15 @@
 use crate::iter::*;
 use crate::node::*;
 use crate::raw::*;
+use core::borrow::Borrow;
+use core::hash::{BuildHasher, Hash, Hasher};
+use core::iter::FromIterator;
+use core::sync::atomic::{AtomicIsize, AtomicUsize, Ordering};
 use crossbeam_epoch::{self as epoch, Atomic, Guard, Owned, Shared};
-use std::borrow::Borrow;
+#[cfg(feature = "std")]
 use std::fmt::{self, Debug, Formatter};
-use std::hash::{BuildHasher, Hash, Hasher};
-use std::iter::FromIterator;
-use std::sync::{
-    atomic::{AtomicIsize, AtomicUsize, Ordering},
-    Once,
-};
+#[cfg(feature = "std")]
+use std::sync::Once;
 
 const ISIZE_BITS: usize = core::mem::size_of::<isize>() * 8;
 
@@ -43,8 +43,10 @@ const MAX_RESIZERS: isize = (1 << (ISIZE_BITS - RESIZE_STAMP_BITS)) - 1;
 /// The bit shift for recording size stamp in `size_ctl`.
 const RESIZE_STAMP_SHIFT: usize = ISIZE_BITS - RESIZE_STAMP_BITS;
 
+#[cfg(feature = "std")]
 static NCPU_INITIALIZER: Once = Once::new();
-static NCPU: AtomicUsize = AtomicUsize::new(0);
+#[cfg(feature = "std")]
+static NCPU: AtomicUsize = AtomicUsize::new(1);
 
 macro_rules! load_factor {
     ($n: expr) => {
@@ -306,6 +308,15 @@ where
             // try to allocate the table
             let mut sc = self.size_ctl.load(Ordering::SeqCst);
             if sc < 0 {
+                #[cfg(not(feature = "std"))]
+                // for there to be a race, there must be another thread running
+                // concurrently with us. That thread cannot be blocked on us,
+                // since we are not in any mutually-exclusive section. So our
+                // goal is just to not waste cycles and give it some time to
+                // complete. It is not a requirement that we fully yield.
+                core::sync::atomic::spin_loop_hint();
+
+                #[cfg(feature = "std")]
                 // we lost the initialization race; just spin
                 std::thread::yield_now();
                 continue;
@@ -794,7 +805,7 @@ where
     fn add_count(&self, n: isize, resize_hint: Option<usize>, guard: &Guard) {
         // TODO: implement the Java CounterCell business here
 
-        use std::cmp;
+        use core::cmp;
        let mut count = match n.cmp(&0) {
            cmp::Ordering::Greater => {
                let n = n as usize;
@@ -883,7 +894,7 @@ where
        let ncpu = num_cpus();
 
        let stride = if ncpu > 1 { (n >> 3) / ncpu } else { n };
-        let stride = std::cmp::max(stride as isize, MIN_TRANSFER_STRIDE);
+        let stride = core::cmp::max(stride as isize, MIN_TRANSFER_STRIDE);
 
        if next_table.is_null() {
            // we are initiating a resize
@@ -1168,7 +1179,7 @@ where
            // TODO: find out if this is neccessary
            let size = size + (size >> 1) + 1;
 
-            std::cmp::min(MAXIMUM_CAPACITY, size.next_power_of_two())
+            core::cmp::min(MAXIMUM_CAPACITY, size.next_power_of_two())
        } as isize;
 
        loop {
@@ -1555,6 +1566,7 @@ where
    }
 }
 
+#[cfg(feature = "std")]
 impl<K, V, S> PartialEq for HashMap<K, V, S>
 where
     K: Sync + Send + Clone + Eq + Hash,
@@ -1572,6 +1584,7 @@ where
    }
 }
 
+#[cfg(feature = "std")]
 impl<K, V, S> Eq for HashMap<K, V, S>
 where
     K: Sync + Send + Clone + Eq + Hash,
@@ -1580,6 +1593,7 @@ where
 {
 }
 
+#[cfg(feature = "std")]
 impl<K, V, S> fmt::Debug for HashMap<K, V, S>
 where
     K: Sync + Send + Clone + Debug + Eq + Hash,
@@ -1616,6 +1630,7 @@ impl<K, V, S> Drop for HashMap<K, V, S> {
    }
 }
 
+#[cfg(feature = "std")]
 impl<K, V, S> Extend<(K, V)> for &HashMap<K, V, S>
 where
     K: Sync + Send + Clone + Hash + Eq,
@@ -1643,6 +1658,7 @@ where
    }
 }
 
+#[cfg(feature = "std")]
 impl<'a, K, V, S> Extend<(&'a K, &'a V)> for &HashMap<K, V, S>
 where
     K: Sync + Send + Copy + Hash + Eq,
@@ -1705,6 +1721,7 @@ where
    }
 }
 
+#[cfg(feature = "std")]
 impl<K, V, S> Clone for HashMap<K, V, S>
 where
     K: Sync + Send + Clone + Hash + Eq,
@@ -1723,16 +1740,19 @@ where
    }
 }
 
-#[cfg(not(miri))]
 #[inline]
-/// Returns the number of physical CPUs in the machine (_O(1)_).
+#[cfg(all(not(miri), feature = "std"))]
+/// Returns the number of physical CPUs in the machine.
+/// Returns `1` in `no_std` environment.
 fn num_cpus() -> usize {
     NCPU_INITIALIZER.call_once(|| NCPU.store(num_cpus::get_physical(), Ordering::Relaxed));
     NCPU.load(Ordering::Relaxed)
 }
 
-#[cfg(miri)]
 #[inline]
+#[cfg(any(miri, not(feature = "std")))]
+/// Returns the number of physical CPUs in the machine.
+/// Returns `1` in `no_std` environment.
 const fn num_cpus() -> usize {
     1
 }
```
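The only behavioral split in this file is the backoff taken after losing the table-initialization race: with `std` the thread yields, without it the code can only emit a spin hint. Distilled into a standalone helper (a sketch assuming a crate with the same `std` feature; note that `spin_loop_hint` has since been deprecated in favor of `core::hint::spin_loop`, but the diff matches the API available at the time):

```rust
// Sketch of the cfg-dispatched backoff the diff adds to init_table.
fn lost_race_backoff() {
    // Without an OS scheduler there is nothing to yield to; just hint that we spin.
    #[cfg(not(feature = "std"))]
    core::sync::atomic::spin_loop_hint();

    // With std, give the winning thread a chance to finish initialization.
    #[cfg(feature = "std")]
    std::thread::yield_now();
}
```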

src/node.rs

Lines changed: 2 additions & 2 deletions
```diff
@@ -1,8 +1,8 @@
 use crate::raw::Table;
+use core::borrow::Borrow;
+use core::sync::atomic::Ordering;
 use crossbeam_epoch::{Atomic, Guard, Shared};
 use parking_lot::Mutex;
-use std::borrow::Borrow;
-use std::sync::atomic::Ordering;
 
 /// Entry in a bin.
 ///
```

src/raw/mod.rs

Lines changed: 10 additions & 3 deletions
```diff
@@ -1,7 +1,11 @@
 use crate::node::*;
+#[cfg(not(feature = "std"))]
+use alloc::boxed::Box;
+#[cfg(not(feature = "std"))]
+use alloc::vec::Vec;
+use core::fmt::Debug;
+use core::sync::atomic::Ordering;
 use crossbeam_epoch::{Atomic, Guard, Owned, Shared};
-use std::fmt::Debug;
-use std::sync::atomic::Ordering;
 
 #[derive(Debug)]
 pub(crate) struct Table<K, V> {
@@ -35,7 +39,10 @@ impl<K, V> Table<K, V> {
        // anything in the map.
        let guard = unsafe { crossbeam_epoch::unprotected() };
 
-        for bin in Vec::from(std::mem::replace(&mut self.bins, vec![].into_boxed_slice())) {
+        for bin in Vec::from(core::mem::replace(
+            &mut self.bins,
+            vec![].into_boxed_slice(),
+        )) {
            if bin.load(Ordering::SeqCst, guard).is_null() {
                // bin was never used
                continue;
```
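Note that the `vec![]` inside the reworked `core::mem::replace` call keeps working without `std` because lib.rs now pulls in `alloc` with `#[macro_use]`, which makes `vec!` (an `alloc` macro) available crate-wide.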
