author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-11-17 19:55:56 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-02-21 12:06:12 -0500
commit		29e37d814188ac8d60f2120583704d3ef6d634b4 (patch)
tree		1619796097a7aead2c9bec3ba10a8b84f2643013 /kernel/rcutree.c
parent		ce5df97be530e4746bf9a4ac14589a1cfdfd8efc (diff)
rcu: Allow nesting of rcu_idle_enter() and rcu_idle_exit()
Use of RCU in the idle loop is incorrect, and quite a few instances of
just that have made their way into mainline, primarily in event tracing.
The problem with RCU read-side critical sections on CPUs that RCU believes
to be idle is that RCU completely ignores the CPU, along with any
attempted RCU read-side critical sections.

The approaches of eliminating the offending uses and of pushing the
definition of idle down beyond the offending uses have both proved
impractical. The new approach is to encapsulate offending uses of RCU
with rcu_idle_exit() and rcu_idle_enter(), but this requires nesting
for code that is invoked both during idle and during normal execution.
Therefore, this commit modifies rcu_idle_enter() and rcu_idle_exit() to
permit nesting.
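As a rough sketch of the counter scheme this patch introduces, the model
below mimics the new rcu_idle_enter()/rcu_idle_exit() arithmetic in plain
userspace C. The NEST_VALUE/NEST_MASK/EXIT_IDLE constants are illustrative
stand-ins for the kernel's DYNTICK_TASK_* macros (whose real definitions
live in the RCU headers), chosen only so that task-level nesting occupies
high-order bits of the counter while the low-order bits stay free for
interrupt nesting:

/* Userspace model of ->dynticks_nesting -- NOT kernel code. */
#include <assert.h>
#include <stdio.h>

#define NEST_VALUE (1LL << 8)          /* one task-level nesting increment */
#define NEST_MASK  (~(NEST_VALUE - 1)) /* the task-nesting bits */
#define EXIT_IDLE  NEST_VALUE          /* counter value when running normally */

static long long dynticks_nesting = EXIT_IDLE;

static void model_idle_enter(void)
{
	long long oldval = dynticks_nesting;

	assert((oldval & NEST_MASK) != 0);      /* WARN_ON_ONCE() in the patch */
	if ((oldval & NEST_MASK) == NEST_VALUE)
		dynticks_nesting = 0;           /* outermost enter: RCU-idle */
	else
		dynticks_nesting -= NEST_VALUE; /* inner enter: pop one level */
}

static void model_idle_exit(void)
{
	long long oldval = dynticks_nesting;

	assert(oldval >= 0);                    /* WARN_ON_ONCE() in the patch */
	if (oldval & NEST_MASK)
		dynticks_nesting += NEST_VALUE; /* nested exit: push one level */
	else
		dynticks_nesting = EXIT_IDLE;   /* outermost exit: leave idle */
}

int main(void)
{
	model_idle_exit();                  /* nest on top of normal execution */
	model_idle_enter();                 /* unwind: still not RCU-idle */
	assert(dynticks_nesting == EXIT_IDLE);
	model_idle_enter();                 /* outermost enter: now RCU-idle */
	assert(dynticks_nesting == 0);
	model_idle_exit();
	assert(dynticks_nesting == EXIT_IDLE);
	printf("nesting model OK\n");
	return 0;
}

The point of the split representation is that only the outermost
rcu_idle_enter() drops the counter to zero (making the CPU RCU-idle), and
only the matching outermost rcu_idle_exit() restores the non-idle value.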
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Acked-by: Deepthi Dharwar <deepthi@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree.c')
 kernel/rcutree.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index df0e3c1bb68e..92b47760edf3 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -198,7 +198,7 @@ void rcu_note_context_switch(int cpu)
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-	.dynticks_nesting = DYNTICK_TASK_NESTING,
+	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
 	.dynticks = ATOMIC_INIT(1),
 };
 
@@ -394,7 +394,11 @@ void rcu_idle_enter(void)
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
-	rdtp->dynticks_nesting = 0;
+	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
+	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+		rdtp->dynticks_nesting = 0;
+	else
+		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
 	rcu_idle_enter_common(rdtp, oldval);
 	local_irq_restore(flags);
 }
@@ -467,7 +471,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
  * Exit idle mode, in other words, -enter- the mode in which RCU
  * read-side critical sections can occur.
  *
- * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to
+ * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
  * allow for the possibility of usermode upcalls messing up our count
  * of interrupt nesting level during the busy period that is just
  * now starting.
@@ -481,8 +485,11 @@ void rcu_idle_exit(void)
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
-	WARN_ON_ONCE(oldval != 0);
-	rdtp->dynticks_nesting = DYNTICK_TASK_NESTING;
+	WARN_ON_ONCE(oldval < 0);
+	if (oldval & DYNTICK_TASK_NEST_MASK)
+		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
+	else
+		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 	rcu_idle_exit_common(rdtp, oldval);
 	local_irq_restore(flags);
 }
@@ -2253,7 +2260,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->qlen_lazy = 0;
 	rdp->qlen = 0;
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
+	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
@@ -2281,7 +2288,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 	rdp->qlen_last_fqs_check = 0;
 	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
-	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
+	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 	atomic_set(&rdp->dynticks->dynticks,
 		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
 	rcu_prepare_for_idle_init(cpu);
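For illustration, the usage pattern that this nesting enables looks roughly
like the following sketch; trace_something() is a hypothetical caller, not
code from this patch. Because rcu_idle_enter() and rcu_idle_exit() now
nest, the same bracketing is safe whether the function is invoked from the
idle loop or from normal (non-idle) context:

/* Hypothetical example of bracketing RCU use that may run while idle. */
static void trace_something(void)
{
	rcu_idle_exit();        /* tell RCU this CPU is momentarily non-idle */
	rcu_read_lock();
	/* ... RCU read-side critical section, e.g. an event-tracing hook ... */
	rcu_read_unlock();
	rcu_idle_enter();       /* drop back toward the (possibly) idle state */
}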