
Commit f746ea2

Unrolled build for rust-lang#137922
Rollup merge of rust-lang#137922 - Zalathar:sharded, r=SparrowLii

A few cleanups after the removal of `cfg(not(parallel))`

I noticed a few small things that are no longer needed after the removal of `cfg(not(parallel))` in rust-lang#132282.

One of the later changes adjusts several imports, so viewing the changes individually is recommended.

r? SparrowLii (or reroll)

2 parents 2010bba + cfa27fb, commit f746ea2

File tree

9 files changed, +61 −74 lines

compiler/rustc_data_structures/src/sharded.rs

+8 −8

```diff
@@ -43,10 +43,10 @@ impl<T> Sharded<T> {

     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
-    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> &Lock<T> {
+    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
         match self {
             Self::Single(single) => single,
-            Self::Shards(..) => self.get_shard_by_hash(make_hash(_val)),
+            Self::Shards(..) => self.get_shard_by_hash(make_hash(val)),
         }
     }

@@ -56,20 +56,20 @@ impl<T> Sharded<T> {
     }

     #[inline]
-    pub fn get_shard_by_index(&self, _i: usize) -> &Lock<T> {
+    pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
         match self {
             Self::Single(single) => single,
             Self::Shards(shards) => {
                 // SAFETY: The index gets ANDed with the shard mask, ensuring it is always inbounds.
-                unsafe { &shards.get_unchecked(_i & (SHARDS - 1)).0 }
+                unsafe { &shards.get_unchecked(i & (SHARDS - 1)).0 }
             }
         }
     }

     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
     #[track_caller]
-    pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> LockGuard<'_, T> {
+    pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> LockGuard<'_, T> {
         match self {
             Self::Single(single) => {
                 // Synchronization is disabled so use the `lock_assume_no_sync` method optimized
@@ -79,7 +79,7 @@ impl<T> Sharded<T> {
                 // `might_be_dyn_thread_safe` was also false.
                 unsafe { single.lock_assume(Mode::NoSync) }
             }
-            Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)),
+            Self::Shards(..) => self.lock_shard_by_hash(make_hash(val)),
         }
     }

@@ -91,7 +91,7 @@ impl<T> Sharded<T> {

     #[inline]
     #[track_caller]
-    pub fn lock_shard_by_index(&self, _i: usize) -> LockGuard<'_, T> {
+    pub fn lock_shard_by_index(&self, i: usize) -> LockGuard<'_, T> {
         match self {
             Self::Single(single) => {
                 // Synchronization is disabled so use the `lock_assume_no_sync` method optimized
@@ -109,7 +109,7 @@ impl<T> Sharded<T> {
                 // always inbounds.
                 // SAFETY (lock_assume_sync): We know `is_dyn_thread_safe` was true when creating
                 // the lock thus `might_be_dyn_thread_safe` was also true.
-                unsafe { shards.get_unchecked(_i & (SHARDS - 1)).0.lock_assume(Mode::Sync) }
+                unsafe { shards.get_unchecked(i & (SHARDS - 1)).0.lock_assume(Mode::Sync) }
             }
         }
     }
```
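
The pattern these hunks touch is worth spelling out: `Sharded<T>` picks a shard by hashing the key with `FxHasher` and then masking the result with `SHARDS - 1`, which stays in bounds because the shard count is a power of two. Below is a standalone sketch of that selection scheme, assuming a hypothetical `SHARDS` constant and using `DefaultHasher` as a stand-in for `FxHasher`; the exact hash-to-index mapping in `sharded.rs` may differ in detail.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Assumed shard count; must be a power of two for the `& (SHARDS - 1)` mask to work.
const SHARDS: usize = 32;

// Stand-in for `make_hash`, which in rustc uses `FxHasher`.
fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
    let mut hasher = DefaultHasher::new();
    val.hash(&mut hasher);
    hasher.finish()
}

// Reduce a hash to a shard index; the mask keeps it in bounds without a bounds
// check, which is why the `get_unchecked` access in the diff is sound.
fn shard_index(hash: u64) -> usize {
    (hash as usize) & (SHARDS - 1)
}

fn main() {
    let idx = shard_index(make_hash("some_query_key"));
    assert!(idx < SHARDS);
    println!("key maps to shard {idx}");
}
```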

compiler/rustc_data_structures/src/sync.rs

+30 −37

```diff
@@ -18,42 +18,54 @@
 //!
 //! | Type                    | Serial version      | Parallel version                |
 //! | ----------------------- | ------------------- | ------------------------------- |
-//! | `LRef<'a, T>` [^2]      | `&'a mut T`         | `&'a T`                         |
-//! |                         |                     |                                 |
 //! | `Lock<T>`               | `RefCell<T>`        | `RefCell<T>` or                 |
 //! |                         |                     | `parking_lot::Mutex<T>`         |
 //! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
 //! | `MTLock<T>` [^1]        | `T`                 | `Lock<T>`                       |
-//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`                 |
 //! |                         |                     |                                 |
 //! | `ParallelIterator`      | `Iterator`          | `rayon::iter::ParallelIterator` |
 //!
 //! [^1]: `MTLock` is similar to `Lock`, but the serial version avoids the cost
 //! of a `RefCell`. This is appropriate when interior mutability is not
 //! required.
-//!
-//! [^2]: `MTRef`, `MTLockRef` are type aliases.

 use std::collections::HashMap;
 use std::hash::{BuildHasher, Hash};

-pub use crate::marker::*;
+pub use parking_lot::{
+    MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard,
+    RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard,
+};

-mod lock;
+pub use self::atomic::AtomicU64;
+pub use self::freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
 #[doc(no_inline)]
-pub use lock::{Lock, LockGuard, Mode};
-
-mod worker_local;
-pub use worker_local::{Registry, WorkerLocal};
+pub use self::lock::{Lock, LockGuard, Mode};
+pub use self::mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
+pub use self::parallel::{
+    join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in,
+};
+pub use self::vec::{AppendOnlyIndexVec, AppendOnlyVec};
+pub use self::worker_local::{Registry, WorkerLocal};
+pub use crate::marker::*;

+mod freeze;
+mod lock;
 mod parallel;
-pub use parallel::{join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in};
-pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};
-
 mod vec;
+mod worker_local;

-mod freeze;
-pub use freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
+/// Keep the conditional imports together in a submodule, so that import-sorting
+/// doesn't split them up.
+mod atomic {
+    // Most hosts can just use a regular AtomicU64.
+    #[cfg(target_has_atomic = "64")]
+    pub use std::sync::atomic::AtomicU64;
+
+    // Some 32-bit hosts don't have AtomicU64, so use a fallback.
+    #[cfg(not(target_has_atomic = "64"))]
+    pub use portable_atomic::AtomicU64;
+}

 mod mode {
     use std::sync::atomic::{AtomicU8, Ordering};
@@ -97,21 +109,6 @@ mod mode {

 // FIXME(parallel_compiler): Get rid of these aliases across the compiler.

-pub use std::sync::OnceLock;
-// Use portable AtomicU64 for targets without native 64-bit atomics
-#[cfg(target_has_atomic = "64")]
-pub use std::sync::atomic::AtomicU64;
-
-pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
-pub use parking_lot::{
-    MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard,
-    RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard,
-};
-#[cfg(not(target_has_atomic = "64"))]
-pub use portable_atomic::AtomicU64;
-
-pub type LRef<'a, T> = &'a T;
-
 #[derive(Debug, Default)]
 pub struct MTLock<T>(Lock<T>);

@@ -142,14 +139,10 @@ impl<T> MTLock<T> {
     }
 }

-use parking_lot::RwLock as InnerRwLock;
-
 /// This makes locks panic if they are already held.
 /// It is only useful when you are running in a single thread
 const ERROR_CHECKING: bool = false;

-pub type MTLockRef<'a, T> = LRef<'a, MTLock<T>>;
-
 #[derive(Default)]
 #[repr(align(64))]
 pub struct CacheAligned<T>(pub T);
@@ -167,12 +160,12 @@ impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S>
 }

 #[derive(Debug, Default)]
-pub struct RwLock<T>(InnerRwLock<T>);
+pub struct RwLock<T>(parking_lot::RwLock<T>);

 impl<T> RwLock<T> {
     #[inline(always)]
     pub fn new(inner: T) -> Self {
-        RwLock(InnerRwLock::new(inner))
+        RwLock(parking_lot::RwLock::new(inner))
     }

     #[inline(always)]
```
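
The new `atomic` submodule exists so that the two `#[cfg]`-gated sources of `AtomicU64` stay next to each other and are covered by a single `pub use self::atomic::AtomicU64`, where import sorting would otherwise split them apart. A minimal sketch of the same pattern in an ordinary crate, assuming a `portable-atomic` dependency for the fallback arm:

```rust
use std::sync::atomic::Ordering;

// Keep the cfg-dependent imports together in one private module, so one public
// re-export covers targets both with and without native 64-bit atomics.
mod atomic {
    // Most hosts have native 64-bit atomics.
    #[cfg(target_has_atomic = "64")]
    pub use std::sync::atomic::AtomicU64;

    // Fallback for 32-bit hosts without them (assumes `portable-atomic` in Cargo.toml).
    #[cfg(not(target_has_atomic = "64"))]
    pub use portable_atomic::AtomicU64;
}

pub use atomic::AtomicU64;

fn main() {
    // Callers use one name regardless of which arm was compiled in.
    let counter = AtomicU64::new(0);
    counter.fetch_add(1, Ordering::Relaxed);
    assert_eq!(counter.load(Ordering::Relaxed), 1);
}
```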

compiler/rustc_interface/src/passes.rs

+2 −2

```diff
@@ -2,14 +2,14 @@ use std::any::Any;
 use std::ffi::OsString;
 use std::io::{self, BufWriter, Write};
 use std::path::{Path, PathBuf};
-use std::sync::{Arc, LazyLock};
+use std::sync::{Arc, LazyLock, OnceLock};
 use std::{env, fs, iter};

 use rustc_ast as ast;
 use rustc_codegen_ssa::traits::CodegenBackend;
 use rustc_data_structures::parallel;
 use rustc_data_structures::steal::Steal;
-use rustc_data_structures::sync::{AppendOnlyIndexVec, FreezeLock, OnceLock, WorkerLocal};
+use rustc_data_structures::sync::{AppendOnlyIndexVec, FreezeLock, WorkerLocal};
 use rustc_expand::base::{ExtCtxt, LintStoreExpand};
 use rustc_feature::Features;
 use rustc_fs_util::try_canonicalize;
```

compiler/rustc_metadata/src/rmeta/decoder.rs

+2 −2

```diff
@@ -2,7 +2,7 @@

 use std::iter::TrustedLen;
 use std::path::Path;
-use std::sync::Arc;
+use std::sync::{Arc, OnceLock};
 use std::{io, iter, mem};

 pub(super) use cstore_impl::provide;
@@ -11,7 +11,7 @@ use rustc_ast as ast;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::owned_slice::OwnedSlice;
-use rustc_data_structures::sync::{Lock, OnceLock};
+use rustc_data_structures::sync::Lock;
 use rustc_data_structures::unhash::UnhashMap;
 use rustc_expand::base::{SyntaxExtension, SyntaxExtensionKind};
 use rustc_expand::proc_macro::{AttrProcMacro, BangProcMacro, DeriveProcMacro};
```

compiler/rustc_middle/src/mir/basic_blocks.rs

+2 −1

```diff
@@ -1,8 +1,9 @@
+use std::sync::OnceLock;
+
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::graph;
 use rustc_data_structures::graph::dominators::{Dominators, dominators};
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_data_structures::sync::OnceLock;
 use rustc_index::{IndexSlice, IndexVec};
 use rustc_macros::{HashStable, TyDecodable, TyEncodable, TypeFoldable, TypeVisitable};
 use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
```
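
This and the remaining import changes are the same cleanup: `rustc_data_structures::sync::OnceLock` was merely a re-export of `std::sync::OnceLock`, so call sites can name the standard-library type directly. Below is a small sketch of the lazily-initialized-cache pattern such fields rely on; the `Cache` struct here is a hypothetical stand-in, not the actual `basic_blocks` internals.

```rust
use std::sync::OnceLock;

#[derive(Default)]
struct Cache {
    // Computed at most once, then shared by reference.
    predecessors: OnceLock<Vec<Vec<usize>>>,
}

impl Cache {
    fn predecessors(&self, num_blocks: usize) -> &Vec<Vec<usize>> {
        // `get_or_init` runs the closure only on first access, even if several
        // threads race to initialize; later calls return the cached value.
        self.predecessors.get_or_init(|| vec![Vec::new(); num_blocks])
    }
}

fn main() {
    let cache = Cache::default();
    assert_eq!(cache.predecessors(3).len(), 3);
}
```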

compiler/rustc_monomorphize/src/collector.rs

+15 −19

```diff
@@ -209,7 +209,7 @@ use std::path::PathBuf;

 use rustc_attr_parsing::InlineAttr;
 use rustc_data_structures::fx::FxIndexMap;
-use rustc_data_structures::sync::{LRef, MTLock, par_for_each_in};
+use rustc_data_structures::sync::{MTLock, par_for_each_in};
 use rustc_data_structures::unord::{UnordMap, UnordSet};
 use rustc_hir as hir;
 use rustc_hir::def::DefKind;
@@ -357,7 +357,7 @@ impl<'tcx> Extend<Spanned<MonoItem<'tcx>>> for MonoItems<'tcx> {
 fn collect_items_rec<'tcx>(
     tcx: TyCtxt<'tcx>,
     starting_item: Spanned<MonoItem<'tcx>>,
-    state: LRef<'_, SharedState<'tcx>>,
+    state: &SharedState<'tcx>,
     recursion_depths: &mut DefIdMap<usize>,
     recursion_limit: Limit,
     mode: CollectionMode,
@@ -1671,30 +1671,26 @@ pub(crate) fn collect_crate_mono_items<'tcx>(

     debug!("building mono item graph, beginning at roots");

-    let mut state = SharedState {
+    let state = SharedState {
         visited: MTLock::new(UnordSet::default()),
         mentioned: MTLock::new(UnordSet::default()),
         usage_map: MTLock::new(UsageMap::new()),
     };
     let recursion_limit = tcx.recursion_limit();

-    {
-        let state: LRef<'_, _> = &mut state;
-
-        tcx.sess.time("monomorphization_collector_graph_walk", || {
-            par_for_each_in(roots, |root| {
-                let mut recursion_depths = DefIdMap::default();
-                collect_items_rec(
-                    tcx,
-                    dummy_spanned(root),
-                    state,
-                    &mut recursion_depths,
-                    recursion_limit,
-                    CollectionMode::UsedItems,
-                );
-            });
+    tcx.sess.time("monomorphization_collector_graph_walk", || {
+        par_for_each_in(roots, |root| {
+            let mut recursion_depths = DefIdMap::default();
+            collect_items_rec(
+                tcx,
+                dummy_spanned(root),
+                &state,
+                &mut recursion_depths,
+                recursion_limit,
+                CollectionMode::UsedItems,
+            );
         });
-    }
+    });

     // The set of MonoItems was created in an inherently indeterministic order because
     // of parallelism. We sort it here to ensure that the output is deterministic.
```
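
Dropping `LRef` here works because the alias had become a plain `&'a T`, and `SharedState`'s fields are already behind `MTLock`, so each parallel worker can simply capture `&state`. A reduced sketch of that shape, using `std::thread::scope` and `Mutex` in place of `par_for_each_in` and `MTLock`; the names are illustrative, not the real collector API.

```rust
use std::collections::HashSet;
use std::sync::Mutex;

// Hypothetical reduced version of the collector's shared state.
struct SharedState {
    visited: Mutex<HashSet<u32>>,
}

fn collect_items_rec(item: u32, state: &SharedState) {
    // Lock only around the shared-set update, as the real collector does via `MTLock`.
    state.visited.lock().unwrap().insert(item);
}

fn main() {
    let state = SharedState { visited: Mutex::new(HashSet::new()) };
    let roots = [1u32, 2, 3, 4];

    std::thread::scope(|s| {
        let state = &state; // shared reference, copied into each worker
        for root in roots {
            // No `let state: LRef<'_, _> = &mut state;` dance needed:
            // `SharedState` is `Sync`, so every worker can borrow it directly.
            s.spawn(move || collect_items_rec(root, state));
        }
    });

    assert_eq!(state.visited.lock().unwrap().len(), 4);
}
```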

compiler/rustc_query_system/src/query/caches.rs

+1 −1

```diff
@@ -1,9 +1,9 @@
 use std::fmt::Debug;
 use std::hash::Hash;
+use std::sync::OnceLock;

 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::sharded::{self, Sharded};
-use rustc_data_structures::sync::OnceLock;
 pub use rustc_data_structures::vec_cache::VecCache;
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_index::Idx;
```

src/doc/rustc-dev-guide/src/parallel-rustc.md

+0 −2

```diff
@@ -46,10 +46,8 @@ are implemented differently depending on whether `parallel-compiler` is true.

 | data structure   | parallel                            | non-parallel         |
 | ---------------- | ----------------------------------- | -------------------- |
-| OnceCell         | std::sync::OnceLock                 | std::cell::OnceCell  |
 | Lock\<T>         | (parking_lot::Mutex\<T>)            | (std::cell::RefCell) |
 | RwLock\<T>       | (parking_lot::RwLock\<T>)           | (std::cell::RefCell) |
-| MTRef<'a, T>     | &'a T                               | &'a mut T            |
 | MTLock\<T>       | (Lock\<T>)                          | (T)                  |
 | ReadGuard        | parking_lot::RwLockReadGuard        | std::cell::Ref       |
 | MappedReadGuard  | parking_lot::MappedRwLockReadGuard  | std::cell::Ref       |
```

src/tools/clippy/clippy_utils/src/macros.rs

+1 −2

```diff
@@ -1,14 +1,13 @@
 #![allow(clippy::similar_names)] // `expr` and `expn`

-use std::sync::Arc;
+use std::sync::{Arc, OnceLock};

 use crate::get_unique_attr;
 use crate::visitors::{Descend, for_each_expr_without_closures};

 use arrayvec::ArrayVec;
 use rustc_ast::{FormatArgs, FormatArgument, FormatPlaceholder};
 use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sync::OnceLock;
 use rustc_hir::{self as hir, Expr, ExprKind, HirId, Node, QPath};
 use rustc_lint::{LateContext, LintContext};
 use rustc_span::def_id::DefId;
```
