author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2008-05-12 15:21:05 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-05-19 04:01:36 -0400
commit	8db559b83009bed92e1b5dd13a651ff273d9ff62 (patch)
tree	b25e521434747cc11e958a71ad24c47117db4e08 /kernel
parent	4446a36ff8c74ac3b32feb009b651048e129c6af (diff)
rcu: add memory barriers and comments to rcu_check_callbacks()
Add comments to the logic that infers quiescent states when interrupting from either user mode or the idle loop. Also add a memory barrier: it appears that James Huang was in fact onto something, as the scheduler is much less synchronization-happy than it once was, so we can no longer rely on its memory barriers in all cases.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reported-by: James Huang <jamesclhuang@yahoo.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
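The reordering hazard described above can be sketched outside the kernel. The following is a minimal user-space model, not kernel code: C11 seq_cst fences stand in for smp_mb(), one thread plays the CPU reporting a quiescent state, and the other is a rough stand-in for the grace-period machinery sampling that CPU's counter. All names here (shared_data, qsctr, cpu_reporting_qs, grace_period_observer) are illustrative and do not exist in the kernel.

/*
 * Minimal user-space sketch of the ordering the patch enforces; not kernel
 * code, all names illustrative.  Build with:  cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int shared_data;	/* stands in for a write done in a preempt-disable section */
static atomic_int qsctr;	/* stands in for this CPU's quiescent-state counter */

/* The CPU that is reporting a quiescent state. */
static void *cpu_reporting_qs(void *arg)
{
	(void)arg;
	atomic_store_explicit(&shared_data, 42, memory_order_relaxed);
	/*
	 * Analogue of the smp_mb() added before rcu_qsctr_inc(): the counter
	 * increment must not become visible before the data store above.
	 */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_fetch_add_explicit(&qsctr, 1, memory_order_relaxed);
	return NULL;
}

/* Rough stand-in for grace-period detection running on another CPU. */
static void *grace_period_observer(void *arg)
{
	(void)arg;
	/* Spin until the quiescent-state counter is seen to advance... */
	while (atomic_load_explicit(&qsctr, memory_order_relaxed) == 0)
		;
	atomic_thread_fence(memory_order_seq_cst);
	/* ...at which point the earlier write must be visible as well. */
	printf("observed shared_data = %d (expect 42)\n",
	       atomic_load_explicit(&shared_data, memory_order_relaxed));
	return NULL;
}

int main(void)
{
	pthread_t writer, observer;

	pthread_create(&observer, NULL, grace_period_observer, NULL);
	pthread_create(&writer, NULL, cpu_reporting_qs, NULL);
	pthread_join(writer, NULL);
	pthread_join(observer, NULL);
	return 0;
}

Without the fence in cpu_reporting_qs(), the counter increment could, on weakly ordered hardware, become visible while the store to shared_data is still sitting in the writer's store buffer; that is the window the patch closes now that the memory barriers inside schedule() can no longer be relied on to provide the ordering.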
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcuclassic.c | 30
1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index f4ffbd0f306f..d8348792f9f5 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -502,10 +502,38 @@ void rcu_check_callbacks(int cpu, int user)
 	if (user ||
 	    (idle_cpu(cpu) && !in_softirq() &&
 	     hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+
+		/*
+		 * Get here if this CPU took its interrupt from user
+		 * mode or from the idle loop, and if this is not a
+		 * nested interrupt.  In this case, the CPU is in
+		 * a quiescent state, so count it.
+		 *
+		 * Also do a memory barrier.  This is needed to handle
+		 * the case where writes from a preempt-disable section
+		 * of code get reordered into schedule() by this CPU's
+		 * write buffer.  The memory barrier makes sure that
+		 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
+		 * by other CPUs to happen after any such write.
+		 */
+
+		smp_mb();  /* See above block comment. */
 		rcu_qsctr_inc(cpu);
 		rcu_bh_qsctr_inc(cpu);
-	} else if (!in_softirq())
+
+	} else if (!in_softirq()) {
+
+		/*
+		 * Get here if this CPU did not take its interrupt from
+		 * softirq, in other words, if it is not interrupting
+		 * a rcu_bh read-side critical section.  This is an _bh
+		 * critical section, so count it.  The memory barrier
+		 * is needed for the same reason as is the above one.
+		 */
+
+		smp_mb();  /* See above block comment. */
 		rcu_bh_qsctr_inc(cpu);
+	}
 	raise_rcu_softirq();
 }
 