author     Paul E. McKenney <paul.mckenney@linaro.org>    2011-04-22 21:08:51 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-05-06 02:16:57 -0400
commit     5ece5bab3ed8594ce2c85c6c6e6b82109db36ca7 (patch)
tree       c9ef8faa5dbb9e7c82893657e5aa2c1040987f96 /kernel
parent     15ba0ba860871cf74b48b1bb47c26c91a66126f3 (diff)
rcu: Add forward-progress diagnostic for per-CPU kthreads
Increment a per-CPU counter on each pass through rcu_cpu_kthread()'s service
loop, and add it to the rcudata trace output.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
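The diagnostic follows the usual forward-progress watchdog pattern: the kthread bumps a counter on every pass through its service loop, so anyone who samples the counter twice can tell whether the kthread ran in between. A rough user-space analogue, purely illustrative (the thread, variable names, and timing below are mine, not part of this patch):

/*
 * Illustrative user-space sketch of the forward-progress diagnostic:
 * a worker increments a counter on every pass through its service loop,
 * and a monitor reports "stuck" only if the counter has not moved
 * between two samples.  Build with: cc -pthread progress.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_uint loops;   /* analogue of rcu_cpu_kthread_loops */
static atomic_int stop;

static void *worker(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop)) {
		atomic_fetch_add(&loops, 1);   /* one pass through the loop */
		usleep(1000);                  /* stand-in for the real work */
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;
	unsigned int before, after;

	pthread_create(&tid, NULL, worker, NULL);

	before = atomic_load(&loops);
	sleep(1);
	after = atomic_load(&loops);

	if (after == before)
		printf("no forward progress: counter stuck at %u\n", after);
	else
		printf("forward progress: %u passes in the last second\n",
		       after - before);

	atomic_store(&stop, 1);
	pthread_join(tid, NULL);
	return 0;
}

In the patch itself the counter is per-CPU rather than atomic, since each CPU's kthread only ever increments its own copy.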
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree.c        2
-rw-r--r--  kernel/rcutree_trace.c  4
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 27b6d8de82f6..575d6414763e 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -93,6 +93,7 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;
@@ -1625,6 +1626,7 @@ static int rcu_cpu_kthread(void *arg)
 			break;
 		}
 		*statusp = RCU_KTHREAD_RUNNING;
+		per_cpu(rcu_cpu_kthread_loops, cpu)++;
 		local_irq_save(flags);
 		work = *workp;
 		*workp = 0;
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 564b8fef2a7e..9678cc3650f5 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -48,6 +48,7 @@
 
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu);
+DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DECLARE_PER_CPU(char, rcu_cpu_has_work);
 
 static char convert_kthread_status(unsigned int kthread_status)
@@ -75,7 +76,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 		   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
 	seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
-	seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d b=%ld",
+	seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld",
 		   rdp->qlen,
 		   ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
 			rdp->nxttail[RCU_NEXT_TAIL]],
@@ -88,6 +89,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 		   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
 						  rdp->cpu)),
 		   per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
+		   per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff,
 		   rdp->blimit);
 	seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
 		   rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
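Note that the new ktl field prints only the low 16 bits of the loop counter (& 0xffff) in hex, so the displayed value wraps every 65536 passes. When comparing two readings of the rcudata trace, the difference is therefore only meaningful modulo 2^16; a small sketch of a wrap-safe comparison (the helper name and sample values are hypothetical, not from the kernel):

/*
 * ktl is (counter & 0xffff) printed in hex, so it wraps at 0xffff.
 * Two samples can still be compared modulo 2^16, assuming fewer than
 * 65536 passes happened between them.
 */
#include <stdio.h>

static unsigned int ktl_delta(unsigned int earlier, unsigned int later)
{
	return (later - earlier) & 0xffff;
}

int main(void)
{
	/* hypothetical values parsed from two reads of the rcudata trace */
	unsigned int first = 0xfff0, second = 0x0010;

	printf("passes between samples: %u\n", ktl_delta(first, second));  /* 32 */
	return 0;
}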