Diffstat (limited to 'kernel/rcuclassic.c')
-rw-r--r--	kernel/rcuclassic.c	34
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 65c0906080ef..16eeeaa9d618 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -387,6 +387,10 @@ static void __rcu_offline_cpu(struct rcu_data *this_rdp,
 	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
 	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
 	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
+
+	local_irq_disable();
+	this_rdp->qlen += rdp->qlen;
+	local_irq_enable();
 }
 
 static void rcu_offline_cpu(int cpu)
@@ -516,10 +520,38 @@ void rcu_check_callbacks(int cpu, int user)
 	if (user ||
 	    (idle_cpu(cpu) && !in_softirq() &&
 	     hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+
+		/*
+		 * Get here if this CPU took its interrupt from user
+		 * mode or from the idle loop, and if this is not a
+		 * nested interrupt. In this case, the CPU is in
+		 * a quiescent state, so count it.
+		 *
+		 * Also do a memory barrier. This is needed to handle
+		 * the case where writes from a preempt-disable section
+		 * of code get reordered into schedule() by this CPU's
+		 * write buffer. The memory barrier makes sure that
+		 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
+		 * by other CPUs to happen after any such write.
+		 */
+
+		smp_mb();  /* See above block comment. */
 		rcu_qsctr_inc(cpu);
 		rcu_bh_qsctr_inc(cpu);
+
+	} else if (!in_softirq()) {
+
+		/*
+		 * Get here if this CPU did not take its interrupt from
+		 * softirq, in other words, if it is not interrupting
+		 * a rcu_bh read-side critical section. This is a _bh
+		 * critical section, so count it. The memory barrier
+		 * is needed for the same reason as is the above one.
+		 */
+
+		smp_mb();  /* See above block comment. */
 		rcu_bh_qsctr_inc(cpu);
+	}
 	raise_rcu_softirq();
 }
 
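The first hunk merges the dying CPU's callback count into the surviving CPU's rcu_data. The local_irq_disable()/local_irq_enable() pair is what makes the merge safe: qlen is also bumped by call_rcu() running from interrupt context on this CPU, so the read-modify-write must not be interrupted mid-update. A minimal userspace sketch of the idea follows; the stubbed primitives are illustrative assumptions, not the kernel's implementations.

/*
 * Sketch of the qlen merge in the first hunk. Names mirror the
 * kernel's, but the stubs below are stand-ins, not real primitives.
 */
#include <stdio.h>

struct rcu_data {
	long qlen;	/* number of queued RCU callbacks */
};

/* Stubs standing in for the kernel's irq-masking primitives. */
static void local_irq_disable(void) { /* would mask interrupts */ }
static void local_irq_enable(void)  { /* would unmask interrupts */ }

/*
 * this_rdp->qlen += rdp->qlen is a read-modify-write. In the kernel,
 * call_rcu() running from an interrupt on this CPU also increments
 * this_rdp->qlen; if that interrupt lands between the load and the
 * store of the merge, one of the two updates is lost. Disabling irqs
 * around the merge makes the read-modify-write atomic on this CPU.
 */
static void merge_qlen(struct rcu_data *this_rdp, struct rcu_data *rdp)
{
	local_irq_disable();
	this_rdp->qlen += rdp->qlen;
	local_irq_enable();
}

int main(void)
{
	struct rcu_data survivor = { .qlen = 3 }, dying = { .qlen = 5 };

	merge_qlen(&survivor, &dying);
	printf("qlen after merge: %ld\n", survivor.qlen);	/* 8 */
	return 0;
}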
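The quiescent-state test in the second hunk leans on hardirq_count(): rcu_check_callbacks() runs from the scheduler tick, so exactly one hardirq level is always on the stack, and hardirq_count() <= (1 << HARDIRQ_SHIFT) means the tick did not interrupt another interrupt. Below is a sketch of that test, assuming the classic preempt_count bit layout of this era (preempt bits low, softirq count from bit 8, hardirq count from bit 16); the exact field widths vary by kernel version, so treat them as assumptions.

/*
 * Sketch of the quiescent-state condition in rcu_check_callbacks().
 * The preempt_count layout below is an assumption for illustration.
 */
#include <stdio.h>

#define SOFTIRQ_SHIFT	8
#define HARDIRQ_SHIFT	16

#define SOFTIRQ_MASK	(0xffUL << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(0xfffUL << HARDIRQ_SHIFT)

static unsigned long preempt_count;	/* stand-in for the per-task field */

static unsigned long hardirq_count(void)
{
	return preempt_count & HARDIRQ_MASK;
}

static int in_softirq(void)
{
	return (preempt_count & SOFTIRQ_MASK) != 0;
}

/*
 * One hardirq level (the tick itself) is always accounted for here,
 * so hardirq_count() <= (1 << HARDIRQ_SHIFT) means "no nested hard
 * interrupt": whatever the tick interrupted (user mode or the idle
 * loop) was not itself inside a read-side critical section.
 */
static int counts_as_quiescent(int user, int idle)
{
	return user ||
	       (idle && !in_softirq() &&
		hardirq_count() <= (1UL << HARDIRQ_SHIFT));
}

int main(void)
{
	preempt_count = 1UL << HARDIRQ_SHIFT;	/* inside the tick irq only */
	printf("idle, unnested irq -> qs? %d\n", counts_as_quiescent(0, 1));

	preempt_count = 2UL << HARDIRQ_SHIFT;	/* nested hard interrupt */
	printf("idle, nested irq   -> qs? %d\n", counts_as_quiescent(0, 1));
	return 0;
}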
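The new block comments justify the smp_mb() calls: without a barrier, stores done inside a just-completed preempt-disable (or _bh) critical section could become visible to other CPUs after the quiescent-state counter tick, letting a grace period end while those writes are still in flight. Here is a userspace analogue using C11 atomics, with atomic_thread_fence(memory_order_seq_cst) standing in for smp_mb(); the names qsctr and shared_data are illustrative, and the two functions would run on different CPUs in the real scenario.

/*
 * Userspace analogue of the smp_mb() before rcu_qsctr_inc(), using
 * C11 atomics. Single-threaded here for brevity; the ordering
 * argument in the comments is about the two-CPU case.
 */
#include <stdatomic.h>
#include <stdio.h>

static int shared_data;		/* written in a "critical section" */
static atomic_int qsctr;	/* per-CPU quiescent-state counter */

/* The CPU passing through a quiescent state. */
static void report_quiescent_state(void)
{
	shared_data = 42;	/* write from the preceding critical section */

	/*
	 * Without this fence, the store to shared_data could be seen
	 * by other CPUs *after* the counter increment, so a grace
	 * period could end before the critical section's writes are
	 * visible. This is the role of smp_mb() in the patch.
	 */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_fetch_add_explicit(&qsctr, 1, memory_order_relaxed);
}

/* The grace-period machinery, conceptually on another CPU. */
static void check_grace_period(void)
{
	if (atomic_load_explicit(&qsctr, memory_order_relaxed) > 0) {
		atomic_thread_fence(memory_order_seq_cst);
		/* The fence pairing guarantees shared_data == 42 here. */
		printf("qs seen, shared_data = %d\n", shared_data);
	}
}

int main(void)
{
	report_quiescent_state();
	check_grace_period();
	return 0;
}

The fence-pairing rule at work: if the reader's relaxed load observes the incremented counter, the seq_cst fences on both sides synchronize, so the earlier store to shared_data is guaranteed visible, which is exactly the property the patch's comment demands of smp_mb().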
