path: root/kernel/rcutree.c
author		Paul E. McKenney <paul.mckenney@linaro.org>	2011-04-06 19:01:16 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-05-06 02:16:56 -0400
commit		15ba0ba860871cf74b48b1bb47c26c91a66126f3 (patch)
tree		2043eeca7d6df62fc0ae918b61abada073f81415 /kernel/rcutree.c
parent		a9f4793d8900dc5dc09b3951bdcd4731290e06fe (diff)
rcu: add grace-period age and more kthread state to tracing
This commit adds the age in jiffies of the current grace period, along with the duration in jiffies of the longest grace period since boot, to the rcu/rcugp debugfs file. It also adds an additional "O" state to kthread tracing to distinguish a kthread that is waiting because it has nothing to do from one that is waiting because it is on the wrong CPU.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
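As a rough illustration of the grace-period bookkeeping the hunks below add, here is a minimal standalone C sketch of the same running-maximum pattern: record the tick count when a grace period starts, and at completion compute its duration and fold it into the longest duration seen. The names here (gp_stats, gp_begin, gp_end, the fake jiffies counter) are hypothetical stand-ins; in the patch the fields live in struct rcu_state and the update happens in rcu_report_qs_rsp().

#include <stdio.h>

static unsigned long jiffies;		/* stand-in for the kernel tick counter */

struct gp_stats {
	unsigned long gp_start;		/* jiffies when the current grace period began */
	unsigned long gp_max;		/* longest completed grace period, in jiffies */
};

static void gp_begin(struct gp_stats *s)
{
	s->gp_start = jiffies;
}

static void gp_end(struct gp_stats *s)
{
	unsigned long gp_duration = jiffies - s->gp_start;

	if (gp_duration > s->gp_max)	/* same test the patch adds at grace-period end */
		s->gp_max = gp_duration;
}

int main(void)
{
	struct gp_stats s = { 0, 0 };

	gp_begin(&s);
	jiffies += 7;			/* pretend 7 ticks elapse */
	gp_end(&s);

	gp_begin(&s);
	jiffies += 3;			/* a shorter grace period does not change the max */
	gp_end(&s);

	printf("longest grace period: %lu jiffies\n", s.gp_max);	/* prints 7 */
	return 0;
}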
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	10
1 file changed, 10 insertions, 0 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index bb84deca3319..27b6d8de82f6 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -92,6 +92,7 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
+DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;
@@ -888,6 +889,8 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	__releases(rcu_get_root(rsp)->lock)
 {
+	unsigned long gp_duration;
+
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 
 	/*
@@ -895,6 +898,9 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	 * is seen before the assignment to rsp->completed.
 	 */
 	smp_mb(); /* See above block comment. */
+	gp_duration = jiffies - rsp->gp_start;
+	if (gp_duration > rsp->gp_max)
+		rsp->gp_max = gp_duration;
 	rsp->completed = rsp->gpnum;
 	rsp->signaled = RCU_GP_IDLE;
 	rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
@@ -1583,12 +1589,15 @@ static int rcu_cpu_kthread_should_stop(int cpu)
 	       smp_processor_id() != cpu) {
 		if (kthread_should_stop())
 			return 1;
+		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
 		local_bh_enable();
 		schedule_timeout_uninterruptible(1);
 		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
 			set_cpus_allowed_ptr(current, cpumask_of(cpu));
 		local_bh_disable();
 	}
+	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
 	return 0;
 }
 
@@ -1656,6 +1665,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	kthread_bind(t, cpu);
+	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
 	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
 	per_cpu(rcu_cpu_kthread_task, cpu) = t;
 	wake_up_process(t);
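The rcu_cpu_kthread_should_stop() hunk above is where the new "O" tracing state comes from: while the per-CPU kthread finds itself on the wrong CPU it records RCU_KTHREAD_OFFCPU together with the CPU it is actually running on, and once it is back on its home CPU it records that CPU instead. The standalone C sketch below mimics only that bookkeeping loop with hypothetical names (should_stop_check, the fake current_cpu variable); real scheduling, migration, and the status updates done elsewhere in the kernel are not modeled.

#include <stdio.h>

enum kthread_status { KTHREAD_RUNNING, KTHREAD_OFFCPU };

static int current_cpu;			/* stand-in for raw_smp_processor_id() */
static enum kthread_status status;	/* stand-in for rcu_cpu_kthread_status */
static int kthread_cpu;			/* stand-in for rcu_cpu_kthread_cpu */

static int should_stop_check(int home)
{
	while (current_cpu != home) {
		status = KTHREAD_OFFCPU;	/* tracing would show "O" here */
		kthread_cpu = current_cpu;	/* note which CPU we are stuck on */
		current_cpu = home;		/* pretend the scheduler migrates us back */
	}
	kthread_cpu = home;			/* running where we belong again */
	return 0;
}

int main(void)
{
	current_cpu = 2;			/* the kthread woke up on CPU 2 ... */
	should_stop_check(0);			/* ... but is bound to CPU 0 */
	printf("last recorded state=%s, kthread_cpu=%d\n",
	       status == KTHREAD_OFFCPU ? "O (was off its CPU)" : "running",
	       kthread_cpu);
	return 0;
}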