author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2016-01-13 16:57:54 -0500
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2016-03-31 16:34:04 -0400
commit     a1e1224849d9610b50fd1dd7d6f44308a59e46af
tree       2afb8ee96c6471f865484383a2cb1ab8d3b9b2c3 /kernel/rcu
parent     251c617c75f48e03523c43c4ce1dff44bc3ae2bd
rcu: Make cond_resched_rcu_qs() supply RCU-sched expedited QS
Although cond_resched_rcu_qs() supplies quiescent states to all flavors
of normal RCU grace periods, it does nothing for expedited RCU-sched
grace periods. This commit therefore adds a check for whether an
expedited RCU-sched grace period needs a quiescent state from the
current CPU, and invokes rcu_sched_qs() to supply that quiescent
state if so.

Note that the check is racy in that we might be migrated to some other
CPU just after checking the per-CPU variable. This is OK because the
act of migration will do a context switch, which will supply the needed
quiescent state. The only downside is that we might do an unnecessary
call to rcu_sched_qs(), but the probability is low and the overhead
is small.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu')
 kernel/rcu/tree.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 687d8a5f35c7..178575c01d09 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -370,6 +370,21 @@ void rcu_all_qs(void)
 		rcu_momentary_dyntick_idle();
 		local_irq_restore(flags);
 	}
+	if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))) {
+		/*
+		 * Yes, we just checked a per-CPU variable with preemption
+		 * enabled, so we might be migrated to some other CPU at
+		 * this point. That is OK because in that case, the
+		 * migration will supply the needed quiescent state.
+		 * We might end up needlessly disabling preemption and
+		 * invoking rcu_sched_qs() on the destination CPU, but
+		 * the probability and cost are both quite low, so this
+		 * should not be a problem in practice.
+		 */
+		preempt_disable();
+		rcu_sched_qs();
+		preempt_enable();
+	}
 	this_cpu_inc(rcu_qs_ctr);
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
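
For context, here is a minimal, hypothetical sketch of the kind of caller this change benefits: a CPU-bound kthread loop that never blocks on its own and therefore relies on cond_resched_rcu_qs() to report quiescent states. The names example_scan_thread() and do_unit_of_work() are made up for illustration and are not part of this patch; cond_resched_rcu_qs() and kthread_should_stop() are the kernel interfaces of this era. With the commit above, each such call now also supplies the expedited RCU-sched quiescent state via rcu_all_qs().

#include <linux/kthread.h>
#include <linux/rcupdate.h>

/* Stand-in for whatever per-iteration work the loop really does. */
static void do_unit_of_work(void)
{
}

static int example_scan_thread(void *unused)
{
	/*
	 * A tight loop like this never sleeps, so without explicit help
	 * it would stall RCU grace periods. cond_resched_rcu_qs() offers
	 * to reschedule and reports quiescent states each time around;
	 * after the commit above, that report covers expedited RCU-sched
	 * grace periods as well.
	 */
	while (!kthread_should_stop()) {
		do_unit_of_work();
		cond_resched_rcu_qs();
	}
	return 0;
}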