Diffstat (limited to 'kernel/rcuclassic.c')
-rw-r--r--  kernel/rcuclassic.c | 36 ++++++++++++++++++++++++++++++++++--
1 file changed, 34 insertions(+), 2 deletions(-)
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index adde10388d0c..6f8696c502f4 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -387,6 +387,10 @@ static void __rcu_offline_cpu(struct rcu_data *this_rdp,
 	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
 	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
 	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
+
+	local_irq_disable();
+	this_rdp->qlen += rdp->qlen;
+	local_irq_enable();
 }
 
 static void rcu_offline_cpu(int cpu)
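Disabling interrupts around the qlen update is what makes the merge safe: on this CPU, qlen is also bumped from call_rcu(), which runs with interrupts disabled, so the read-modify-write above must not be interrupted midway. A minimal sketch of the same pattern, using a hypothetical per-CPU counter and helper (neither name is part of this patch):

#include <linux/irqflags.h>

static unsigned long my_qlen;		/* hypothetical per-CPU counter */

static void my_qlen_add(unsigned long n)
{
	unsigned long flags;

	/*
	 * Interrupt handlers on this CPU may also modify the counter,
	 * so keep the read-modify-write uninterruptible.  Saving and
	 * restoring flags keeps this safe even if the caller already
	 * has interrupts disabled.
	 */
	local_irq_save(flags);
	my_qlen += n;
	local_irq_restore(flags);
}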
@@ -516,10 +520,38 @@ void rcu_check_callbacks(int cpu, int user)
 	if (user ||
 	    (idle_cpu(cpu) && !in_softirq() &&
 	     hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+
+		/*
+		 * Get here if this CPU took its interrupt from user
+		 * mode or from the idle loop, and if this is not a
+		 * nested interrupt.  In this case, the CPU is in
+		 * a quiescent state, so count it.
+		 *
+		 * Also do a memory barrier.  This is needed to handle
+		 * the case where writes from a preempt-disable section
+		 * of code get reordered into schedule() by this CPU's
+		 * write buffer.  The memory barrier makes sure that
+		 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
+		 * by other CPUs to happen after any such write.
+		 */
+
+		smp_mb();  /* See above block comment. */
 		rcu_qsctr_inc(cpu);
 		rcu_bh_qsctr_inc(cpu);
-	} else if (!in_softirq())
+
+	} else if (!in_softirq()) {
+
+		/*
+		 * Get here if this CPU did not take its interrupt from
+		 * softirq, in other words, if it is not interrupting
+		 * a rcu_bh read-side critical section.  This is an _bh
+		 * critical section, so count it.  The memory barrier
+		 * is needed for the same reason as is the above one.
+		 */
+
+		smp_mb();  /* See above block comment. */
 		rcu_bh_qsctr_inc(cpu);
+	}
 	raise_rcu_softirq();
 }
 
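The barriers added above matter because a grace period can end as soon as every CPU has passed through a quiescent state. Without smp_mb(), a store buffered from a just-completed preempt-disable region could become visible to other CPUs after the quiescent-state count, that is, after this CPU has already claimed to be quiescent. A hypothetical illustration of the hazard (shared_data and qs_counter are made-up names standing in for the real per-CPU machinery):

int shared_data;		/* written inside a preempt-disable region */
unsigned long qs_counter;	/* stands in for the rcu_qsctr_inc() count */

void report_quiescent_state(void)
{
	shared_data = 1;	/* last write of the preempt-disable region */
	/*
	 * Without this barrier, the store to shared_data could linger
	 * in this CPU's write buffer while other CPUs already observe
	 * the incremented qs_counter and end the grace period.
	 */
	smp_mb();
	qs_counter++;		/* analogous to rcu_qsctr_inc(cpu) */
}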
@@ -543,7 +575,7 @@ static void __cpuinit rcu_online_cpu(int cpu)
 
 	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
 	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
-	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks, NULL);
+	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
 static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
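The last hunk switches open_softirq() to its two-argument form, in which the handler receives only the struct softirq_action pointer. A sketch of what the registered handler then looks like; the handler body here is a placeholder, not the patch's actual rcu_process_callbacks():

#include <linux/interrupt.h>

/* Signature expected by the two-argument open_softirq(). */
static void rcu_process_callbacks(struct softirq_action *unused)
{
	/* drain this CPU's done-list and advance pending batches */
}

static void example_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}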