
Commit e453ec4

inline some single-use functions, add documentation
1 parent ac19d3a commit e453ec4

1 file changed: +23 −23 lines changed


library/std/src/sys/unix/locks/queue_rwlock.rs

@@ -199,20 +199,6 @@ impl Node {
         }
     }
 
-    /// Set the `next` field depending on the lock state. If there are threads
-    /// queued, the `next` field will be set to a pointer to the next node in
-    /// the queue. Otherwise the `next` field will be set to the lock count if
-    /// the state is read-locked or to zero if it is write-locked.
-    fn set_state(&mut self, state: State) {
-        self.next.0 = AtomicPtr::new(state.mask(MASK).cast());
-    }
-
-    /// Assuming the node contains a reader lock count, decrement that count.
-    /// Returns `true` if this thread was the last lock owner.
-    fn decrement_count(&self) -> bool {
-        self.next.0.fetch_byte_sub(SINGLE, AcqRel).addr() - SINGLE == 0
-    }
-
     /// Prepare this node for waiting.
     fn prepare(&mut self) {
         // Fall back to creating an unnamed `Thread` handle to allow locking in
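The two helpers removed above relied on the `next` field doing double duty: it holds a pointer to the next queued node while threads are queued, and the reader count (or zero for a write lock) otherwise. A minimal sketch of that encoding, using a plain `AtomicUsize` and made-up flag values in place of the file's tagged `AtomicPtr` and `State` types:

```rust
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

// Illustrative layout only; the real `MASK` and `State` definitions live in
// queue_rwlock.rs and use tagged pointers rather than plain integers.
const MASK: usize = !0b1111; // strips the flag bits from the lock word

struct Node {
    // Dual-use field: pointer to the next queued node while threads are
    // queued, otherwise the reader count (zero when write-locked).
    next: AtomicUsize,
}

impl Node {
    // Rough equivalent of the removed `set_state`: keep only the
    // pointer/count part of the lock word. Relaxed suffices here because
    // the node is only published later by a release operation on the state.
    fn set_state(&self, state: usize) {
        self.next.store(state & MASK, Relaxed);
    }
}
```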
@@ -312,10 +298,11 @@ impl RwLock {
 
     #[inline]
     pub fn try_write(&self) -> bool {
-        // This is lowered to a single atomic instruction on most modern processors
-        // (e.g. "lock bts" on x86 and "ldseta" on modern AArch64), and therefore
-        // is more efficient than `fetch_update(lock(true))`, which can spuriously
-        // fail if a new node is appended to the queue.
+        // Atomically set the `LOCKED` bit. This is lowered to a single atomic
+        // instruction on most modern processors (e.g. "lock bts" on x86 and
+        // "ldseta" on modern AArch64), and therefore is more efficient than
+        // `fetch_update(lock(true))`, which can spuriously fail if a new node
+        // is appended to the queue.
         self.state.fetch_or(LOCKED, Acquire).addr() & LOCKED == 0
     }
 
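To see why the single read-modify-write matters, here is the same try-lock shape on a standalone `AtomicUsize` (the `LOCKED` value is assumed for illustration). A `fetch_or` of one bit cannot fail spuriously when unrelated state bits, such as queue pointers, change concurrently, whereas a `compare_exchange` over the whole word can:

```rust
use std::sync::atomic::{AtomicUsize, Ordering::Acquire};

const LOCKED: usize = 0b01;

struct Lock {
    state: AtomicUsize,
}

impl Lock {
    // Returns `true` if this call set the `LOCKED` bit, i.e. acquired the
    // lock; the bit already being set means another thread holds it.
    fn try_write(&self) -> bool {
        self.state.fetch_or(LOCKED, Acquire) & LOCKED == 0
    }
}
```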
@@ -351,7 +338,12 @@ impl RwLock {
             } else {
                 // Fall back to parking. First, prepare the node.
                 node.prepare();
-                node.set_state(state);
+
+                // If there are threads queued, set the `next` field to a
+                // pointer to the next node in the queue. Otherwise set it to
+                // the lock count if the state is read-locked or to zero if it
+                // is write-locked.
+                node.next.0 = AtomicPtr::new(state.mask(MASK).cast());
                 node.prev = AtomicLink::new(None);
                 let mut next = ptr::from_ref(&node)
                     .map_addr(|addr| addr | QUEUED | (state.addr() & LOCKED))
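The tagging in the last two context lines packs the node address and the flag bits into a single word. A hedged sketch of that scheme with assumed flag values (the real code does this with the strict-provenance `map_addr` so the pointer keeps its provenance):

```rust
const LOCKED: usize = 0b01; // a lock owner exists
const QUEUED: usize = 0b10; // the state word holds a queue pointer
const MASK: usize = !0b1111; // address bits of a sufficiently aligned node

// Hypothetical helper: build the state word for a node about to be queued,
// carrying the LOCKED bit over from the previously observed state.
fn tag_node(node_addr: usize, old_state: usize) -> usize {
    debug_assert_eq!(node_addr & !MASK, 0, "alignment must leave flag bits free");
    node_addr | QUEUED | (old_state & LOCKED)
}

// Recovering the node address later is a single mask.
fn untag(state: usize) -> usize {
    state & MASK
}
```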
@@ -370,8 +362,8 @@ impl RwLock {
                     next = next.map_addr(|addr| addr | QUEUE_LOCKED);
                 }
 
-                // Use release ordering to propagate our changes to the waking
-                // thread.
+                // Register the node, using release ordering to propagate our
+                // changes to the waking thread.
                 if let Err(new) = self.state.compare_exchange_weak(state, next, AcqRel, Relaxed) {
                     // The state has changed, just try again.
                     state = new;
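The registration step is a classic weak-CAS publish loop. A self-contained sketch of the pattern (the names and the closure parameter are illustrative, not the file's):

```rust
use std::sync::atomic::{AtomicUsize, Ordering::{AcqRel, Relaxed}};

// Publish a new state word computed from the current one. Release ordering
// on success makes the node initialization visible to the thread that later
// walks the queue; on failure (including spurious failures of the weak CAS)
// we recompute from the freshly observed state and retry.
fn register(state: &AtomicUsize, make_next: impl Fn(usize) -> usize) {
    let mut current = state.load(Relaxed);
    loop {
        let next = make_next(current);
        match state.compare_exchange_weak(current, next, AcqRel, Relaxed) {
            Ok(_) => return,
            Err(observed) => current = observed,
        }
    }
}
```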
@@ -430,8 +422,16 @@ impl RwLock {
         // The state was observed with acquire ordering above, so the current
         // thread will observe all node initializations.
 
-        let tail = unsafe { find_tail(to_node(state)) };
-        let was_last = unsafe { tail.as_ref().decrement_count() };
+        // SAFETY:
+        // Because new read-locks cannot be acquired while threads are queued,
+        // all queue-lock owners will observe the set `LOCKED` bit. Because they
+        // do not modify the queue while there is a lock owner, the queue will
+        // not be removed from here.
+        let tail = unsafe { find_tail(to_node(state)).as_ref() };
+        // The lock count is stored in the `next` field of `tail`.
+        // Decrement it, making sure to observe all changes made to the queue
+        // by the other lock owners by using acquire-release ordering.
+        let was_last = tail.next.0.fetch_byte_sub(SINGLE, AcqRel).addr() - SINGLE == 0;
         if was_last {
             // SAFETY:
             // Other threads cannot read-lock while threads are queued. Also,
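The inlined decrement is the usual last-owner check. A `usize`-based sketch with an assumed `SINGLE` increment (the real code uses the strict-provenance `fetch_byte_sub` on an `AtomicPtr` instead of a plain integer subtraction):

```rust
use std::sync::atomic::{AtomicUsize, Ordering::AcqRel};

const SINGLE: usize = 0b10000; // one reader, stored above the flag bits

// Remove one reader from the count stored in the tail node and report
// whether this thread was the last lock owner. AcqRel both publishes this
// reader's queue edits and lets the last reader observe everyone else's
// before it goes on to wake the writer.
fn decrement_count(count: &AtomicUsize) -> bool {
    count.fetch_sub(SINGLE, AcqRel) - SINGLE == 0
}
```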
