about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Ingo Molnar <mingo@elte.hu>  2007-10-15 11:00:10 -0400
committer Ingo Molnar <mingo@elte.hu>  2007-10-15 11:00:10 -0400
commit    b8efb56172bc55082b8490778b07ef73eea0b551 (patch)
tree      2d70e91af4cbd470904ebb4e28ad558c29e73092
parent    de8d585a12aef40676f12ddc63e97daaf7752ba1 (diff)
sched debug: BKL usage statistics
add per-task and per-rq BKL usage statistics.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--include/linux/sched.h4
-rw-r--r--kernel/sched.c9
-rw-r--r--kernel/sched_debug.c4
3 files changed, 17 insertions, 0 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d0cc58311b13..920eb7354d0a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -621,6 +621,10 @@ struct sched_info {
621 /* timestamps */ 621 /* timestamps */
622 unsigned long long last_arrival,/* when we last ran on a cpu */ 622 unsigned long long last_arrival,/* when we last ran on a cpu */
623 last_queued; /* when we were last queued to run */ 623 last_queued; /* when we were last queued to run */
624#ifdef CONFIG_SCHEDSTATS
625 /* BKL stats */
626 unsigned long bkl_cnt;
627#endif
624}; 628};
625#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ 629#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
626 630
diff --git a/kernel/sched.c b/kernel/sched.c
index f33608e9e1a2..5004dff91850 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -356,6 +356,9 @@ struct rq {
356 /* try_to_wake_up() stats */ 356 /* try_to_wake_up() stats */
357 unsigned long ttwu_cnt; 357 unsigned long ttwu_cnt;
358 unsigned long ttwu_local; 358 unsigned long ttwu_local;
359
360 /* BKL stats */
361 unsigned long bkl_cnt;
359#endif 362#endif
360 struct lock_class_key rq_lock_key; 363 struct lock_class_key rq_lock_key;
361}; 364};
@@ -3414,6 +3417,12 @@ static inline void schedule_debug(struct task_struct *prev)
3414 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 3417 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3415 3418
3416 schedstat_inc(this_rq(), sched_cnt); 3419 schedstat_inc(this_rq(), sched_cnt);
3420#ifdef CONFIG_SCHEDSTATS
3421 if (unlikely(prev->lock_depth >= 0)) {
3422 schedstat_inc(this_rq(), bkl_cnt);
3423 schedstat_inc(prev, sched_info.bkl_cnt);
3424 }
3425#endif
3417} 3426}
3418 3427
3419/* 3428/*
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 57ee9d5630a8..823b63a3a3e1 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -136,6 +136,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
136 SPLIT_NS(spread0)); 136 SPLIT_NS(spread0));
137 SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running); 137 SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
138 SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight); 138 SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
139 SEQ_printf(m, " .%-30s: %ld\n", "bkl_cnt",
140 rq->bkl_cnt);
139} 141}
140 142
141static void print_cpu(struct seq_file *m, int cpu) 143static void print_cpu(struct seq_file *m, int cpu)
@@ -323,6 +325,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
323 PN(se.exec_max); 325 PN(se.exec_max);
324 PN(se.slice_max); 326 PN(se.slice_max);
325 PN(se.wait_max); 327 PN(se.wait_max);
328 P(sched_info.bkl_cnt);
326#endif 329#endif
327 SEQ_printf(m, "%-25s:%20Ld\n", 330 SEQ_printf(m, "%-25s:%20Ld\n",
328 "nr_switches", (long long)(p->nvcsw + p->nivcsw)); 331 "nr_switches", (long long)(p->nvcsw + p->nivcsw));
@@ -350,6 +353,7 @@ void proc_sched_set_task(struct task_struct *p)
350 p->se.exec_max = 0; 353 p->se.exec_max = 0;
351 p->se.slice_max = 0; 354 p->se.slice_max = 0;
352 p->se.wait_max = 0; 355 p->se.wait_max = 0;
356 p->sched_info.bkl_cnt = 0;
353#endif 357#endif
354 p->se.sum_exec_runtime = 0; 358 p->se.sum_exec_runtime = 0;
355 p->se.prev_sum_exec_runtime = 0; 359 p->se.prev_sum_exec_runtime = 0;