author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2016-11-29 08:49:06 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-01-23 14:37:12 -0500
commit		7aa92230c9e86b2150f718185f70e0af592e290b (patch)
tree		a0402e24bc7048f590a7bffce9953845dfdc5fb9
parent		b201fa67371862229f27a1f022196423aa5c7381 (diff)
rcu: Once again use NMI-based stack traces in stall warnings
This commit is for all intents and purposes a revert of bc1dce514e9b
("rcu: Don't use NMIs to dump other CPUs' stacks"). The reason to suppose
that this can now safely be reverted is the presence of 42a0bb3f7138
("printk/nmi: generic solution for safe printk in NMI"), which is said
to have made NMI-based stack dumps safe.
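[Editor's note: the core idea behind 42a0bb3f7138 is that printk() from NMI
context is redirected into a lockless per-CPU buffer and flushed later from a
safe context. The following is a minimal userspace analogue of that pattern,
not the kernel's actual code; the names nmi_log, nmi_log_flush, and BUF_SZ
are illustrative only.]

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define BUF_SZ 4096

static struct {
	_Atomic size_t len;	/* bytes reserved so far */
	char data[BUF_SZ];
} nmi_buf;

/* Lock-free append: usable where taking the normal log lock could deadlock. */
static void nmi_log(const char *msg)
{
	size_t n = strlen(msg);
	size_t pos = atomic_fetch_add(&nmi_buf.len, n);	/* reserve space */

	if (pos + n > BUF_SZ)
		return;		/* buffer full: message is dropped */
	memcpy(nmi_buf.data + pos, msg, n);
}

/* Deferred flush, run later from a context where printing is safe. */
static void nmi_log_flush(void)
{
	size_t n = atomic_exchange(&nmi_buf.len, 0);

	if (n > BUF_SZ)
		n = BUF_SZ;
	fwrite(nmi_buf.data, 1, n, stdout);
}

int main(void)
{
	nmi_log("CPU 3 backtrace line 1\n");	/* imagine NMI context */
	nmi_log("CPU 3 backtrace line 2\n");
	nmi_log_flush();			/* imagine a later irq_work */
	return 0;
}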
However, this reversion keeps one nice property of bc1dce514e9b
("rcu: Don't use NMIs to dump other CPUs' stacks"), namely that
only those CPUs blocking the grace period are dumped. The new
trigger_single_cpu_backtrace() is used to make this happen, as
suggested by Josh Poimboeuf.
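[Editor's note: the fallback in the hunk below works because
trigger_single_cpu_backtrace() returns false on architectures that provide no
NMI backtrace hook. A paraphrased sketch of its shape, roughly as in
include/linux/nmi.h of this era; treat the details as approximate:]

#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	/* Ask the target CPU to dump its own stack via NMI. */
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}
#else
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;	/* no NMI support: caller must fall back */
}
#endif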
Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
kernel/rcu/tree.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5a4aaad75e76..d7b63b88434b 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1278,7 +1278,10 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
 }
 
 /*
- * Dump stacks of all tasks running on stalled CPUs.
+ * Dump stacks of all tasks running on stalled CPUs.  First try using
+ * NMIs, but fall back to manual remote stack tracing on architectures
+ * that don't support NMI-based stack dumps.  The NMI-triggered stack
+ * traces are more accurate because they are printed by the target CPU.
  */
 static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 {
@@ -1288,11 +1291,10 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 
 	rcu_for_each_leaf_node(rsp, rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
-		if (rnp->qsmask != 0) {
-			for_each_leaf_node_possible_cpu(rnp, cpu)
-				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
+		for_each_leaf_node_possible_cpu(rnp, cpu)
+			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
+				if (!trigger_single_cpu_backtrace(cpu))
 					dump_cpu_task(cpu);
-		}
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
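[Editor's note: two details of the second hunk are worth spelling out. The
outer if (rnp->qsmask != 0) guard could be dropped because the per-CPU test
rnp->qsmask & leaf_node_cpu_bit(rnp, cpu) is already false for every CPU when
qsmask is zero, so the restructured loop does no extra work on nodes with no
stalled CPUs. And because trigger_single_cpu_backtrace() returns false where
NMI backtraces are unsupported, dump_cpu_task() remains the fallback path,
preserving stall-warning output on all architectures.]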