diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2011-11-28 19:18:56 -0500 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2011-12-11 13:32:07 -0500 |
commit | b6fc6020140db437069d5bec447858fcfd64d31c (patch) | |
tree | 43b4f6aa420bd636a30116443f2417cd2176a679 /kernel | |
parent | 7cb92499000e3c86dae653077b1465458a039ef6 (diff) |
rcu: Don't check irq nesting from rcu idle entry/exit
Because tasks do not nest, rcu_idle_enter() and rcu_idle_exit() do
not need to check for nesting. This commit therefore moves nesting
checks from rcu_idle_enter_common() to rcu_irq_exit() and from
rcu_idle_exit_common() to rcu_irq_enter().
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/rcutree.c | 18 |
1 file changed, 8 insertions, 10 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index bf085d7f6a3f..860c02c7c959 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -350,10 +350,6 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp) | |||
350 | */ | 350 | */ |
351 | static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval) | 351 | static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval) |
352 | { | 352 | { |
353 | if (rdtp->dynticks_nesting) { | ||
354 | trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting); | ||
355 | return; | ||
356 | } | ||
357 | trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting); | 353 | trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting); |
358 | if (!is_idle_task(current)) { | 354 | if (!is_idle_task(current)) { |
359 | struct task_struct *idle = idle_task(smp_processor_id()); | 355 | struct task_struct *idle = idle_task(smp_processor_id()); |
@@ -426,7 +422,10 @@ void rcu_irq_exit(void) | |||
426 | oldval = rdtp->dynticks_nesting; | 422 | oldval = rdtp->dynticks_nesting; |
427 | rdtp->dynticks_nesting--; | 423 | rdtp->dynticks_nesting--; |
428 | WARN_ON_ONCE(rdtp->dynticks_nesting < 0); | 424 | WARN_ON_ONCE(rdtp->dynticks_nesting < 0); |
429 | rcu_idle_enter_common(rdtp, oldval); | 425 | if (rdtp->dynticks_nesting) |
426 | trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting); | ||
427 | else | ||
428 | rcu_idle_enter_common(rdtp, oldval); | ||
430 | local_irq_restore(flags); | 429 | local_irq_restore(flags); |
431 | } | 430 | } |
432 | 431 | ||
@@ -439,10 +438,6 @@ void rcu_irq_exit(void) | |||
439 | */ | 438 | */ |
440 | static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval) | 439 | static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval) |
441 | { | 440 | { |
442 | if (oldval) { | ||
443 | trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting); | ||
444 | return; | ||
445 | } | ||
446 | smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */ | 441 | smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */ |
447 | atomic_inc(&rdtp->dynticks); | 442 | atomic_inc(&rdtp->dynticks); |
448 | /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ | 443 | /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ |
@@ -518,7 +513,10 @@ void rcu_irq_enter(void) | |||
518 | oldval = rdtp->dynticks_nesting; | 513 | oldval = rdtp->dynticks_nesting; |
519 | rdtp->dynticks_nesting++; | 514 | rdtp->dynticks_nesting++; |
520 | WARN_ON_ONCE(rdtp->dynticks_nesting == 0); | 515 | WARN_ON_ONCE(rdtp->dynticks_nesting == 0); |
521 | rcu_idle_exit_common(rdtp, oldval); | 516 | if (oldval) |
517 | trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting); | ||
518 | else | ||
519 | rcu_idle_exit_common(rdtp, oldval); | ||
522 | local_irq_restore(flags); | 520 | local_irq_restore(flags); |
523 | } | 521 | } |
524 | 522 | ||