author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-08-13 16:31:47 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-09-29 00:38:41 -0400
commit		9bc8b5586f94be6391458074ecbba8827ba8ba9d (patch)
tree		323804bb7180337c266f88519cea6b1f42114ead /kernel/rcutree.c
parent		037067a1b6f9a70f862f3ed9d59fe28b7cd55ac4 (diff)
rcu: Suppress NMI backtraces when stall ends before dump
It is possible for an RCU CPU stall to end just as it is detected, in which
case the current code will uselessly dump all CPUs' stacks. This commit
therefore checks for this condition and refrains from sending needless NMIs.
And yes, the stall might also end just after we checked all CPUs and tasks,
but in that case we would at least have given some clue as to which CPU/task
was at fault.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
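[Editor's note: the detect-count-suppress idea the patch applies is small enough
to show outside the kernel. The following is a minimal standalone userspace
sketch, not the kernel code; cpu_stalled[], all_cpu_backtrace(), and
print_other_cpu_stall_sketch() are hypothetical stand-ins for rnp->qsmask,
trigger_all_cpu_backtrace(), and print_other_cpu_stall().]

	/*
	 * Standalone sketch of the pattern this patch applies: count how
	 * many stalled CPUs/tasks the detector actually finds, and only
	 * fire the expensive all-CPU backtrace when at least one entity
	 * is still stalled. All names here are hypothetical stand-ins.
	 */
	#include <stdio.h>

	#define NR_CPUS 4

	/* Pretend per-CPU stall state; the kernel uses rnp->qsmask bits. */
	static int cpu_stalled[NR_CPUS];

	/* Stand-in for trigger_all_cpu_backtrace(): costly NMI-based dump. */
	static void all_cpu_backtrace(void)
	{
		printf("sending NMI backtrace requests to all CPUs\n");
	}

	static void print_other_cpu_stall_sketch(void)
	{
		int cpu;
		int ndetected = 0;	/* the counter the patch introduces */

		printf("INFO: rcu detected stalls on CPUs/tasks: {");
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu_stalled[cpu]) {
				printf(" %d", cpu);
				ndetected++;
			}
		printf(" }\n");

		if (ndetected == 0)	/* stall ended before we got here */
			printf("INFO: Stall ended before state dump start\n");
		else			/* someone is still stalled: dump */
			all_cpu_backtrace();
	}

	int main(void)
	{
		print_other_cpu_stall_sketch();	/* suppression message path */
		cpu_stalled[2] = 1;		/* now CPU 2 looks stalled */
		print_other_cpu_stall_sketch();	/* backtrace path */
		return 0;
	}

With no CPUs stalled the first call prints the suppression message; after CPU 2
is marked stalled the second call takes the backtrace path, mirroring the
ndetected == 0 check the patch adds.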
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	| 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 9970116163ba..ade788320dd6 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -545,6 +545,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	int cpu;
 	long delta;
 	unsigned long flags;
+	int ndetected;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	/* Only let one CPU complain about others per time interval. */
@@ -561,7 +562,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	 * Now rat on any tasks that got kicked up to the root rcu_node
 	 * due to CPU offlining.
 	 */
-	rcu_print_task_stall(rnp);
+	ndetected = rcu_print_task_stall(rnp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 	/*
@@ -573,17 +574,21 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	       rsp->name);
 	rcu_for_each_leaf_node(rsp, rnp) {
 		raw_spin_lock_irqsave(&rnp->lock, flags);
-		rcu_print_task_stall(rnp);
+		ndetected += rcu_print_task_stall(rnp);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		if (rnp->qsmask == 0)
 			continue;
 		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
-			if (rnp->qsmask & (1UL << cpu))
+			if (rnp->qsmask & (1UL << cpu)) {
 				printk(" %d", rnp->grplo + cpu);
+				ndetected++;
+			}
 	}
 	printk("} (detected by %d, t=%ld jiffies)\n",
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
-	if (!trigger_all_cpu_backtrace())
+	if (ndetected == 0)
+		printk(KERN_ERR "INFO: Stall ended before state dump start\n");
+	else if (!trigger_all_cpu_backtrace())
 		dump_stack();
 
 	/* If so configured, complain about tasks blocking the grace period. */