@@ -199,20 +199,6 @@ impl Node {
}
}

- /// Set the `next` field depending on the lock state. If there are threads
- /// queued, the `next` field will be set to a pointer to the next node in
- /// the queue. Otherwise the `next` field will be set to the lock count if
- /// the state is read-locked or to zero if it is write-locked.
- fn set_state(&mut self, state: State) {
- self.next.0 = AtomicPtr::new(state.mask(MASK).cast());
- }
-
- /// Assuming the node contains a reader lock count, decrement that count.
- /// Returns `true` if this thread was the last lock owner.
- fn decrement_count(&self) -> bool {
- self.next.0.fetch_byte_sub(SINGLE, AcqRel).addr() - SINGLE == 0
- }
-
/// Prepare this node for waiting.
fn prepare(&mut self) {
// Fall back to creating an unnamed `Thread` handle to allow locking in
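The two helpers removed above both operate on the node's `next` word, which does double duty: it holds either a pointer to the next queued node or the reader lock count (zero when write-locked). A standalone sketch of that scheme, separate from the patch, using AtomicUsize in place of the provenance-aware AtomicPtr used here; `SINGLE` and `MASK` are assumed stand-ins, not the real encoding:

use std::sync::atomic::{AtomicUsize, Ordering::AcqRel};

// Assumed encoding: low bits carry flags, the count lives above them.
const SINGLE: usize = 0b1000;
const MASK: usize = !0b111;

struct NextWord(AtomicUsize);

impl NextWord {
    // Plain overwrite, mirroring `node.next.0 = AtomicPtr::new(...)`:
    // store either the queue pointer or the reader count, flags stripped.
    fn set(&mut self, state: usize) {
        self.0 = AtomicUsize::new(state & MASK);
    }

    // `fetch_sub` returns the previous value, so subtracting SINGLE once
    // more yields the new count; zero means this was the last reader.
    fn decrement(&self) -> bool {
        self.0.fetch_sub(SINGLE, AcqRel) - SINGLE == 0
    }
}

fn main() {
    let mut word = NextWord(AtomicUsize::new(0));
    word.set(2 * SINGLE | 0b001); // two readers, a set flag bit
    assert!(!word.decrement());   // one reader remains
    assert!(word.decrement());    // last reader released the lock
}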
@@ -312,10 +298,11 @@ impl RwLock {
#[inline]
pub fn try_write(&self) -> bool {
- // This is lowered to a single atomic instruction on most modern processors
- // (e.g. "lock bts" on x86 and "ldseta" on modern AArch64), and therefore
- // is more efficient than `fetch_update(lock(true))`, which can spuriously
- // fail if a new node is appended to the queue.
+ // Atomically set the `LOCKED` bit. This is lowered to a single atomic
+ // instruction on most modern processors (e.g. "lock bts" on x86 and
+ // "ldseta" on modern AArch64), and therefore is more efficient than
+ // `fetch_update(lock(true))`, which can spuriously fail if a new node
+ // is appended to the queue.
self.state.fetch_or(LOCKED, Acquire).addr() & LOCKED == 0
}
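The replaced comment argues that a single `fetch_or` beats a CAS-based update here. A standalone sketch of the contrast on a plain AtomicUsize, separate from the patch; the real state word is an AtomicPtr, and `LOCKED` is an assumed stand-in for its encoding:

use std::sync::atomic::{AtomicUsize, Ordering::{Acquire, Relaxed}};

const LOCKED: usize = 0b001;

// One atomic read-modify-write: the result depends only on whether the
// LOCKED bit was already set, no matter what happens to the other bits.
fn try_write_with_fetch_or(state: &AtomicUsize) -> bool {
    state.fetch_or(LOCKED, Acquire) & LOCKED == 0
}

// CAS-based alternative: the compare-exchange inside `fetch_update` fails
// and retries whenever any bit of the word changes concurrently, e.g.
// when a new node is appended to the queue.
fn try_write_with_fetch_update(state: &AtomicUsize) -> bool {
    state
        .fetch_update(Acquire, Relaxed, |s| (s & LOCKED == 0).then_some(s | LOCKED))
        .is_ok()
}

fn main() {
    let state = AtomicUsize::new(0);
    assert!(try_write_with_fetch_or(&state));      // lock acquired
    assert!(!try_write_with_fetch_update(&state)); // already write-locked
}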
@@ -351,7 +338,12 @@ impl RwLock {
} else {
// Fall back to parking. First, prepare the node.
node.prepare();
- node.set_state(state);
+
+ // If there are threads queued, set the `next` field to a
+ // pointer to the next node in the queue. Otherwise set it to
+ // the lock count if the state is read-locked or to zero if it
+ // is write-locked.
+ node.next.0 = AtomicPtr::new(state.mask(MASK).cast());
node.prev = AtomicLink::new(None);
let mut next = ptr::from_ref(&node)
.map_addr(|addr| addr | QUEUED | (state.addr() & LOCKED))
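The enqueue path above tags the node pointer with the `QUEUED` and `LOCKED` flags in its low address bits. A standalone sketch of that tagging using the strict-provenance pointer APIs, separate from the patch; the `Node` type and the flag values are hypothetical, and the alignment is what frees the low bits:

use std::ptr;

// Assumed flag values; the real ones live in the lock's state encoding.
const QUEUED: usize = 0b010;
const LOCKED: usize = 0b001;

#[repr(align(8))] // alignment of 8 guarantees the low three address bits are zero
struct Node {
    value: u32,
}

fn main() {
    let node = Node { value: 7 };

    // Tag the node's address with the QUEUED and LOCKED flags, as the
    // enqueue path does with `map_addr`.
    let tagged = ptr::from_ref(&node).map_addr(|addr| addr | QUEUED | LOCKED);

    // Both the flags and the original pointer can be recovered later.
    assert_eq!(tagged.addr() & (QUEUED | LOCKED), QUEUED | LOCKED);
    let untagged = tagged.map_addr(|addr| addr & !(QUEUED | LOCKED));
    assert_eq!(unsafe { (*untagged).value }, 7);
}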
@@ -370,8 +362,8 @@ impl RwLock {
next = next.map_addr(|addr| addr | QUEUE_LOCKED);
}

- // Use release ordering to propagate our changes to the waking
- // thread.
+ // Register the node, using release ordering to propagate our
+ // changes to the waking thread.
if let Err(new) = self.state.compare_exchange_weak(state, next, AcqRel, Relaxed) {
// The state has changed, just try again.
state = new;
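The node is registered with a `compare_exchange_weak` whose release half publishes the node's initialization to the thread that will eventually wake it, retrying with the freshly returned state on failure. A sketch of the same publish-and-retry pattern on a simple intrusive stack, separate from the patch; the `Node` type and `push` function here are hypothetical, not the queue used above:

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::{AcqRel, Relaxed}};

struct Node {
    value: u32,
    next: *mut Node,
}

// Push a heap-allocated node onto an intrusive stack. The release half of
// AcqRel publishes `value` and `next` to whoever observes the new head.
fn push(head: &AtomicPtr<Node>, value: u32) {
    let node = Box::into_raw(Box::new(Node { value, next: ptr::null_mut() }));
    let mut current = head.load(Relaxed);
    loop {
        // Link to the currently observed head before attempting to publish.
        unsafe { (*node).next = current };
        match head.compare_exchange_weak(current, node, AcqRel, Relaxed) {
            Ok(_) => break,
            // The head changed (or the weak CAS failed spuriously):
            // retry with the freshly observed value.
            Err(new) => current = new,
        }
    }
}

fn main() {
    let head = AtomicPtr::new(ptr::null_mut());
    push(&head, 1);
    push(&head, 2);
    // Tear down the nodes pushed above.
    let mut cur = head.load(Relaxed);
    while !cur.is_null() {
        let boxed = unsafe { Box::from_raw(cur) };
        println!("popped {}", boxed.value);
        cur = boxed.next;
    }
}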
@@ -430,8 +422,16 @@ impl RwLock {
// The state was observed with acquire ordering above, so the current
// thread will observe all node initializations.

- let tail = unsafe { find_tail(to_node(state)) };
- let was_last = unsafe { tail.as_ref().decrement_count() };
+ // SAFETY:
+ // Because new read-locks cannot be acquired while threads are queued,
+ // all queue-lock owners will observe the set `LOCKED` bit. Because they
+ // do not modify the queue while there is a lock owner, the queue will
+ // not be removed from here.
+ let tail = unsafe { find_tail(to_node(state)).as_ref() };
+ // The lock count is stored in the `next` field of `tail`.
+ // Decrement it, making sure to observe all changes made to the queue
+ // by the other lock owners by using acquire-release ordering.
+ let was_last = tail.next.0.fetch_byte_sub(SINGLE, AcqRel).addr() - SINGLE == 0;
if was_last {
// SAFETY:
// Other threads cannot read-lock while threads are queued. Also,
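The decrement above relies on the atomic subtraction returning the previous value, so exactly one unlocking reader observes the count reaching zero, and acquire-release ordering makes the other owners' queue edits visible to that last reader. A standalone sketch of that last-owner handoff with a plain AtomicUsize, separate from the patch; `SINGLE` is an assumed reader weight:

use std::sync::atomic::{AtomicUsize, Ordering::AcqRel};
use std::sync::Arc;
use std::thread;

const SINGLE: usize = 1; // weight of one reader (assumed)

fn main() {
    // Three "readers" hold the lock; the count is pre-charged to 3 * SINGLE.
    let count = Arc::new(AtomicUsize::new(3 * SINGLE));

    let handles: Vec<_> = (0..3)
        .map(|_| {
            let count = Arc::clone(&count);
            thread::spawn(move || {
                // fetch_sub returns the previous value; acquire-release makes
                // every earlier reader's writes visible to whoever goes last.
                let was_last = count.fetch_sub(SINGLE, AcqRel) - SINGLE == 0;
                if was_last {
                    // Exactly one thread reaches this point and could, for
                    // example, wake a queued writer.
                    println!("last reader unlocked");
                }
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
}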