author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2010-08-27 13:51:17 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2010-08-27 13:51:17 -0400
commit		dd7c4d89730a1be2c1d361a8ae1f0fe9465ccf9c (patch)
tree		fa4c50a0e70255ab222dcdf00b36d36a9a1fb92d /kernel/rcutiny_plugin.h
parent		2c96c7751d2bb822542b03ddfaca70933f5aaf02 (diff)
rcu: performance fixes to TINY_PREEMPT_RCU callback checking
This commit tightens up checks in rcu_preempt_check_callbacks() to avoid
unnecessary special handling at rcu_read_unlock() time.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
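
In effect, after this patch RCU_READ_UNLOCK_NEED_QS is set only when a grace
period is in progress, this CPU has not yet responded to it, and a reader is
currently running; in every other case the quiescent state is reported on the
spot and rcu_read_unlock() stays on its fast path. The toy model below is not
kernel code: the struct, its field names, and the tick_action() helper are
illustrative stand-ins for the rcu_preempt_ctrlblk fields and helpers that
appear in the diff that follows. It merely restates the post-patch per-tick
decision in runnable form.

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-in for the rcu_preempt_ctrlblk state. */
	struct model {
		long gpnum;		/* number of the current grace period */
		long gpcpu;		/* last grace period this CPU responded to */
		bool gp_in_progress;	/* models rcu_preempt_gp_in_progress() */
		bool running_reader;	/* models rcu_preempt_running_reader() */
	};

	/* Mirrors rcu_cpu_blocking_cur_gp() from the diff below. */
	static bool blocking_cur_gp(const struct model *m)
	{
		return m->gpcpu != m->gpnum;
	}

	/* The post-patch decision taken on each scheduling-clock tick. */
	static const char *tick_action(const struct model *m)
	{
		if (m->gp_in_progress &&
		    (!m->running_reader || !blocking_cur_gp(m)))
			return "report quiescent state now";
		if (m->gp_in_progress && blocking_cur_gp(m) &&
		    m->running_reader)
			return "set RCU_READ_UNLOCK_NEED_QS";
		return "nothing to do";
	}

	int main(void)
	{
		/* Reader running, but this CPU already responded to the GP:
		 * before the patch, NEED_QS would have been set anyway;
		 * after it, the quiescent state is simply reported again. */
		struct model m = { .gpnum = 5, .gpcpu = 5,
				   .gp_in_progress = true,
				   .running_reader = true };
		printf("%s\n", tick_action(&m));

		m.gpcpu = 4;	/* this CPU now blocks the current GP */
		printf("%s\n", tick_action(&m));
		return 0;
	}

The extra rcu_cpu_blocking_cur_gp() term in the second check is what keeps the
common case, a CPU that has already quiesced, off rcu_read_unlock()'s slow path.
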
Diffstat (limited to 'kernel/rcutiny_plugin.h')
-rw-r--r--	kernel/rcutiny_plugin.h	12
1 file changed, 8 insertions, 4 deletions
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index c5bea1137dcb..6ceca4f745ff 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -72,7 +72,7 @@ static void rcu_report_exp_done(void);
 /*
  * Return true if the CPU has not yet responded to the current grace period.
  */
-static int rcu_cpu_cur_gp(void)
+static int rcu_cpu_blocking_cur_gp(void)
 {
 	return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
 }
@@ -229,7 +229,7 @@ void rcu_preempt_note_context_switch(void)
 		 * cannot end.
 		 */
 		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
-		if (rcu_cpu_cur_gp())
+		if (rcu_cpu_blocking_cur_gp())
 			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
 	}
 
@@ -368,12 +368,16 @@ static void rcu_preempt_check_callbacks(void)
 {
 	struct task_struct *t = current;
 
-	if (!rcu_preempt_running_reader() && rcu_preempt_gp_in_progress())
+	if (rcu_preempt_gp_in_progress() &&
+	    (!rcu_preempt_running_reader() ||
+	     !rcu_cpu_blocking_cur_gp()))
 		rcu_preempt_cpu_qs();
 	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
 	    rcu_preempt_ctrlblk.rcb.donetail)
 		raise_softirq(RCU_SOFTIRQ);
-	if (rcu_preempt_gp_in_progress() && rcu_preempt_running_reader())
+	if (rcu_preempt_gp_in_progress() &&
+	    rcu_cpu_blocking_cur_gp() &&
+	    rcu_preempt_running_reader())
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 