From 02a11ec22bf666f0ecc0b277649e59711b9b5500 Mon Sep 17 00:00:00 2001
From: "Bjoern B. Brandenburg"
Date: Sat, 20 Nov 2010 20:58:56 -0500
Subject: sched: show length of runqueue clock deactivation in
 /proc/sched_debug

The runqueue clock update should obviously not be skipped for
prolonged times (otherwise the consumed time is not correctly kept
track of). This patch measures the time between setting and clearing
the rq->skip_clock_update flag. The maximum observed value is
exported in /proc/sched_debug.
---
 kernel/sched.c       | 28 ++++++++++++++++++++++++++--
 kernel/sched_debug.c |  4 ++++
 2 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 651c899a9b74..a6f6869fdefb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -541,6 +541,11 @@ struct rq {
 	unsigned long long rq_cpu_time;
 	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 
+	u64 skip_clock_max;
+	u64 skip_clock_recent_max;
+	u64 skip_clock_set;
+	unsigned int skip_clock_count;
+
 	/* sys_sched_yield() stats */
 	unsigned int yld_count;
 
@@ -639,6 +644,22 @@ static inline struct task_group *task_group(struct task_struct *p)
 static u64 irq_time_cpu(int cpu);
 static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
 
+static void clear_skip_clock_update(struct rq *rq)
+{
+#ifdef CONFIG_SCHEDSTATS
+	if (unlikely(rq->skip_clock_update)) {
+		u64 skipped = sched_clock_cpu(cpu_of(rq)) - rq->skip_clock_set;
+		rq->skip_clock_max = max(rq->skip_clock_max, skipped);
+		/* reset infrequently to expose changes */
+		if (!(++rq->skip_clock_count % 10000))
+			rq->skip_clock_recent_max = 0;
+		rq->skip_clock_recent_max = max(rq->skip_clock_recent_max,
+						skipped);
+	}
+#endif
+	rq->skip_clock_update = 0;
+}
+
 inline void update_rq_clock(struct rq *rq)
 {
 	if (!rq->skip_clock_update) {
@@ -652,7 +673,7 @@ inline void update_rq_clock(struct rq *rq)
 		sched_irq_time_avg_update(rq, irq_time);
 	}
 
-	rq->skip_clock_update = 0;
+	clear_skip_clock_update(rq);
 }
 
 /*
@@ -2130,8 +2151,11 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back to back clock update.
 	 */
-	if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
+	if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) {
+		schedstat_set(rq->skip_clock_set,
+			      sched_clock_cpu(cpu_of(rq)));
 		rq->skip_clock_update = 1;
+	}
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 2e1b0d17dd9b..8924be259506 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -298,6 +298,10 @@ static void print_cpu(struct seq_file *m, int cpu)
 
 	P(bkl_count);
 
+	P(skip_clock_count);
+	P64(skip_clock_recent_max);
+	P64(skip_clock_max);
+
 #undef P
 #endif
	print_cfs_stats(m, cpu);
--
cgit v1.2.2
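
For reference, below is a minimal userspace sketch of how the new counters
could be inspected once the patch is applied. It is not part of the patch;
it assumes the fields appear in /proc/sched_debug under their struct-member
names (".skip_clock_count", ".skip_clock_recent_max", ".skip_clock_max"),
which is how the P()/P64() macros in print_cpu() emit schedstat fields.

/*
 * Illustrative only, not part of the patch above: dump the per-CPU
 * skip_clock_* schedstat lines from /proc/sched_debug, together with
 * the "cpu#N" headers so the values can be attributed to a CPU.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/sched_debug", "r");
	char line[256];

	if (!f) {
		perror("fopen /proc/sched_debug");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		/* per-CPU section headers look like "cpu#0, ..." */
		if (!strncmp(line, "cpu#", 4))
			fputs(line, stdout);
		/* the counters added by this patch */
		else if (strstr(line, "skip_clock"))
			fputs(line, stdout);
	}

	fclose(f);
	return 0;
}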