@@ -68,6 +68,7 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
+static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -147,7 +148,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
-	if (t->rcu_read_lock_nesting &&
+	if (t->rcu_read_lock_nesting > 0 &&
 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
@@ -190,6 +191,14 @@ static void rcu_preempt_note_context_switch(int cpu)
 			rnp->gp_tasks = &t->rcu_node_entry;
 		}
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	} else if (t->rcu_read_lock_nesting < 0 &&
+		   t->rcu_read_unlock_special) {
+
+		/*
+		 * Complete exit from RCU read-side critical section on
+		 * behalf of preempted instance of __rcu_read_unlock().
+		 */
+		rcu_read_unlock_special(t);
 	}
 
 	/*
@@ -284,7 +293,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -309,7 +318,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 	}
 
 	/* Hardware IRQ handlers cannot block. */
-	if (in_irq()) {
+	if (in_irq() || in_serving_softirq()) {
 		local_irq_restore(flags);
 		return;
 	}
@@ -342,6 +351,11 @@ static void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
 		if (&t->rcu_node_entry == rnp->boost_tasks)
 			rnp->boost_tasks = np;
+		/* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
+		if (t->rcu_boosted) {
+			special |= RCU_READ_UNLOCK_BOOSTED;
+			t->rcu_boosted = 0;
+		}
 #endif /* #ifdef CONFIG_RCU_BOOST */
 		t->rcu_blocked_node = NULL;
 
@@ -358,7 +372,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
 		/* Unboost if we were boosted. */
 		if (special & RCU_READ_UNLOCK_BOOSTED) {
-			t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
 			rt_mutex_unlock(t->rcu_boost_mutex);
 			t->rcu_boost_mutex = NULL;
 		}
@@ -387,13 +400,22 @@ void __rcu_read_unlock(void)
 	struct task_struct *t = current;
 
 	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-	--t->rcu_read_lock_nesting;
-	barrier();  /* decrement before load of ->rcu_read_unlock_special */
-	if (t->rcu_read_lock_nesting == 0 &&
-	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-		rcu_read_unlock_special(t);
+	if (t->rcu_read_lock_nesting != 1)
+		--t->rcu_read_lock_nesting;
+	else {
+		t->rcu_read_lock_nesting = INT_MIN;
+		barrier();  /* assign before ->rcu_read_unlock_special load */
+		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+			rcu_read_unlock_special(t);
+		barrier();  /* ->rcu_read_unlock_special load before assign */
+		t->rcu_read_lock_nesting = 0;
+	}
 #ifdef CONFIG_PROVE_LOCKING
-	WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
+	{
+		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+	}
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
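
The hunk above is the heart of the change: the outermost __rcu_read_unlock() parks ->rcu_read_lock_nesting at INT_MIN while the deferred cleanup runs, so an interrupt or context switch landing in that window sees a negative count and either stays out of the way or finishes the work on behalf of the preempted unlock (see the else-if added to rcu_preempt_note_context_switch() in the hunk at line 190). Below is a rough user-space model of just that counter protocol; the mock_* names are invented for illustration, and the real code additionally relies on barrier(), ACCESS_ONCE(), and interrupt disabling for ordering.

/* User-space model of the __rcu_read_unlock() nesting protocol (illustration only). */
#include <limits.h>
#include <stdio.h>

static int mock_nesting;	/* models t->rcu_read_lock_nesting */
static int mock_special;	/* models t->rcu_read_unlock_special */

static void mock_unlock_special(void)
{
	/* Stand-in for rcu_read_unlock_special(): report and clear. */
	printf("deferred cleanup ran, special=%#x, nesting=%d\n",
	       mock_special, mock_nesting);
	mock_special = 0;
}

static void mock_read_lock(void)
{
	++mock_nesting;
}

static void mock_read_unlock(void)
{
	if (mock_nesting != 1) {
		--mock_nesting;			/* nested: just decrement */
	} else {
		/*
		 * Outermost unlock: park the counter at INT_MIN so anything
		 * that interrupts us here sees a negative value and knows
		 * cleanup is already in progress (or completes it for us).
		 */
		mock_nesting = INT_MIN;
		if (mock_special)
			mock_unlock_special();
		mock_nesting = 0;
	}
}

int main(void)
{
	mock_read_lock();
	mock_read_lock();		/* nested read-side critical section */
	mock_special = 0x1;		/* pretend we blocked while inside */
	mock_read_unlock();		/* inner unlock: decrement only */
	mock_read_unlock();		/* outer unlock: deferred cleanup runs */
	printf("final nesting=%d\n", mock_nesting);
	return 0;
}

The INT_MIN sentinel also explains the reworked CONFIG_PROVE_LOCKING check: a nesting count that is negative but far above INT_MIN / 2 is the only state that indicates genuine imbalance.
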
@@ -589,7 +611,8 @@ static void rcu_preempt_check_callbacks(int cpu)
 		rcu_preempt_qs(cpu);
 		return;
 	}
-	if (per_cpu(rcu_preempt_data, cpu).qs_pending)
+	if (t->rcu_read_lock_nesting > 0 &&
+	    per_cpu(rcu_preempt_data, cpu).qs_pending)
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -695,9 +718,12 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	for (;;) {
-		if (!sync_rcu_preempt_exp_done(rnp))
+		if (!sync_rcu_preempt_exp_done(rnp)) {
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			break;
+		}
 		if (rnp->parent == NULL) {
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			wake_up(&sync_rcu_preempt_exp_wq);
 			break;
 		}
@@ -707,7 +733,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 		raw_spin_lock(&rnp->lock); /* irqs already disabled */
 		rnp->expmask &= ~mask;
 	}
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 /*
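
The two rcu_report_exp_rnp() hunks above move the rcu_node lock release onto each loop-exit path, so that wake_up() is never invoked with rnp->lock held. A minimal pthreads sketch of that unlock-before-wakeup shape follows; the my_* names are invented and this is only a user-space analogy (build with cc -pthread), not the kernel's raw spinlock and waitqueue machinery.

/* User-space sketch of "unlock on every exit path, then wake" (illustration only). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t my_wq = PTHREAD_COND_INITIALIZER;
static int my_done;

static void *my_waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&my_lock);
	while (!my_done)
		pthread_cond_wait(&my_wq, &my_lock);
	pthread_mutex_unlock(&my_lock);
	printf("waiter woken\n");
	return NULL;
}

static void my_report_done(void)
{
	pthread_mutex_lock(&my_lock);
	my_done = 1;
	/*
	 * Drop the lock before issuing the wakeup, mirroring how the patch
	 * releases rnp->lock on each exit path instead of calling wake_up()
	 * (which itself takes scheduler-side locks) with rnp->lock held.
	 */
	pthread_mutex_unlock(&my_lock);
	pthread_cond_broadcast(&my_wq);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, my_waiter, NULL);
	my_report_done();
	pthread_join(t, NULL);
	return 0;
}
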
@@ -1174,7 +1199,7 @@ static int rcu_boost(struct rcu_node *rnp)
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
 	t->rcu_boost_mutex = &mtx;
-	t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
+	t->rcu_boosted = 1;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
 	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */