commit 9bc8b5586f94be6391458074ecbba8827ba8ba9d
tree   323804bb7180337c266f88519cea6b1f42114ead
parent 037067a1b6f9a70f862f3ed9d59fe28b7cd55ac4
Author:     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
AuthorDate: 2011-08-13 16:31:47 -0400
Commit:     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
CommitDate: 2011-09-29 00:38:41 -0400
rcu: Suppress NMI backtraces when stall ends before dump
It is possible for an RCU CPU stall to end just as it is detected, in
which case the current code will uselessly dump all CPUs' stacks.
This commit therefore checks for this condition and refrains from
sending needless NMIs.
And yes, the stall might also end just after we checked all CPUs and
tasks, but in that case we would at least have given some clue as
to which CPU/task was at fault.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
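
For context, the new int return value only pays off at the call site in
kernel/rcutree.c, which is outside this diffstat. Below is a minimal
sketch of that caller side (the function name and exact structure here
are illustrative, not the upstream code): the stall-warning path sums
the counts returned by rcu_print_task_stall() across the leaf rcu_node
structures and sends the NMI backtraces only when something was
actually detected.

/*
 * Illustrative sketch only -- the real caller lives in kernel/rcutree.c
 * and is not shown in this diff.  Sum the blocked-task counts across
 * the leaf rcu_node structures; if nothing is detected, the stall has
 * already ended, so skip the needless NMI backtraces.
 */
static void print_other_cpu_stall_sketch(struct rcu_state *rsp)
{
	struct rcu_node *rnp;
	int ndetected = 0;

	rcu_for_each_leaf_node(rsp, rnp)
		ndetected += rcu_print_task_stall(rnp);

	if (ndetected == 0)
		printk(KERN_ERR "INFO: Stall ended before state dump start\n");
	else
		trigger_all_cpu_backtrace();	/* NMIs only if the stall persists */
}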
Diffstat (limited to 'kernel/rcutree_plugin.h')
 kernel/rcutree_plugin.h | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index eeb38ee8ebba..d3127e8764cb 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -483,16 +483,20 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Scan the current list of tasks blocked within RCU read-side critical
  * sections, printing out the tid of each.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
 	struct task_struct *t;
+	int ndetected = 0;
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp))
-		return;
+		return 0;
 	t = list_entry(rnp->gp_tasks,
 		       struct task_struct, rcu_node_entry);
-	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
+	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 		printk(" P%d", t->pid);
+		ndetected++;
+	}
+	return ndetected;
 }
 
 /*
@@ -976,8 +980,9 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
+	return 0;
 }
 
 /*