From b8efb56172bc55082b8490778b07ef73eea0b551 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 15 Oct 2007 17:00:10 +0200
Subject: sched debug: BKL usage statistics

add per task and per rq BKL usage statistics.

Signed-off-by: Ingo Molnar
Signed-off-by: Peter Zijlstra
Reviewed-by: Thomas Gleixner
---
 kernel/sched.c | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'kernel/sched.c')

diff --git a/kernel/sched.c b/kernel/sched.c
index f33608e9e1a2..5004dff91850 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -356,6 +356,9 @@ struct rq {
 	/* try_to_wake_up() stats */
 	unsigned long ttwu_cnt;
 	unsigned long ttwu_local;
+
+	/* BKL stats */
+	unsigned long bkl_cnt;
 #endif
 	struct lock_class_key rq_lock_key;
 };
@@ -3414,6 +3417,12 @@ static inline void schedule_debug(struct task_struct *prev)
 		profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
 	schedstat_inc(this_rq(), sched_cnt);
+#ifdef CONFIG_SCHEDSTATS
+	if (unlikely(prev->lock_depth >= 0)) {
+		schedstat_inc(this_rq(), bkl_cnt);
+		schedstat_inc(prev, sched_info.bkl_cnt);
+	}
+#endif
 }
 
 /*
-- 
cgit v1.2.2
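
Note: the check unlikely(prev->lock_depth >= 0) relies on task_struct::lock_depth being -1 while a task does not hold the Big Kernel Lock and >= 0 while it does, so the new counters record how often tasks go through the scheduler while holding the BKL. The schedstat_inc() helper used above is, roughly, a macro that increments the named field only when CONFIG_SCHEDSTATS is enabled and compiles away otherwise; a minimal sketch of that pattern (an illustrative approximation, not part of this patch):

/* Sketch of the schedstat_inc() pattern assumed by the hunk above. */
#ifdef CONFIG_SCHEDSTATS
# define schedstat_inc(obj, field)	do { (obj)->field++; } while (0)
#else
# define schedstat_inc(obj, field)	do { } while (0)
#endif

With that definition, schedstat_inc(this_rq(), bkl_cnt) bumps the per-runqueue counter and schedstat_inc(prev, sched_info.bkl_cnt) bumps the per-task counter, at no cost in !CONFIG_SCHEDSTATS builds.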