Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--    kernel/rcutree.c    25
1 file changed, 24 insertions(+), 1 deletion(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 74df86bd9204..e78538712df0 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -873,6 +873,29 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
 	rsp->jiffies_stall = jiffies + jiffies_till_stall_check();
 }
 
+/*
+ * Dump stacks of all tasks running on stalled CPUs. This is a fallback
+ * for architectures that do not implement trigger_all_cpu_backtrace().
+ * The NMI-triggered stack traces are more accurate because they are
+ * printed by the target CPU.
+ */
+static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
+{
+	int cpu;
+	unsigned long flags;
+	struct rcu_node *rnp;
+
+	rcu_for_each_leaf_node(rsp, rnp) {
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		if (rnp->qsmask != 0) {
+			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
+				if (rnp->qsmask & (1UL << cpu))
+					dump_cpu_task(rnp->grplo + cpu);
+		}
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	}
+}
+
 static void print_other_cpu_stall(struct rcu_state *rsp)
 {
 	int cpu;
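A note on the scan in rcu_dump_cpu_stacks() above: each leaf rcu_node covers a contiguous range of CPUs from ->grplo to ->grphi, and bit N of ->qsmask corresponds to CPU ->grplo + N; a set bit means that CPU has not yet reported a quiescent state for the current grace period, so its task is worth dumping. The user-space sketch below mimics just that bit-scan under simplifying assumptions; leaf_node, dump_cpu_task_stub, and the sample mask value are illustrative stand-ins, not kernel code.

/*
 * Illustrative sketch of the ->qsmask scan, using a hypothetical
 * simplified leaf-node structure instead of struct rcu_node.
 * Bit N of qsmask stands for CPU (grplo + N); a set bit marks a CPU
 * that has not yet passed through a quiescent state and is therefore
 * a candidate for a stack dump.
 */
#include <stdio.h>

struct leaf_node {                     /* stand-in for struct rcu_node */
        unsigned long qsmask;          /* one bit per CPU covered by this leaf */
        int grplo;                     /* lowest CPU number covered */
        int grphi;                     /* highest CPU number covered */
};

static void dump_cpu_task_stub(int cpu)
{
        /* the kernel's dump_cpu_task() would print the task running on @cpu */
        printf("dumping stack of CPU %d\n", cpu);
}

int main(void)
{
        /* leaf covering CPUs 4-7; CPUs 5 and 7 (bits 1 and 3) still hold up the grace period */
        struct leaf_node rnp = { .qsmask = 0xa, .grplo = 4, .grphi = 7 };
        int cpu;

        for (cpu = 0; cpu <= rnp.grphi - rnp.grplo; cpu++)
                if (rnp.qsmask & (1UL << cpu))
                        dump_cpu_task_stub(rnp.grplo + cpu);
        return 0;
}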
@@ -929,7 +952,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	if (ndetected == 0)
 		printk(KERN_ERR "INFO: Stall ended before state dump start\n");
 	else if (!trigger_all_cpu_backtrace())
-		dump_stack();
+		rcu_dump_cpu_stacks(rsp);
 
 	/* Complain about tasks blocking the grace period. */
 
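The stall-warning path thus has two tiers: where the architecture implements trigger_all_cpu_backtrace(), the NMI-driven traces are preferred because each target CPU prints its own stack, which the new comment notes is more accurate; where it does not (the call returns zero), the warning CPU itself walks the leaf rcu_node structures and prints the tasks running on the still-stalled CPUs via dump_cpu_task(), rather than the old fallback of dumping only its own stack with dump_stack().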