author		Bjoern B. Brandenburg <bbb@cs.unc.edu>	2010-11-20 20:58:56 -0500
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2010-12-06 09:37:18 -0500
commit		02a11ec22bf666f0ecc0b277649e59711b9b5500
tree		7c968923926e382c22dc2623cd6c3e98d7264887
parent		ee0e01a2e4d3b9266ca44a83271b22aa59b289f3
sched: show length of runqueue clock deactivation in /proc/sched_debug
The runqueue clock update should obviously not be skipped for
prolonged periods; otherwise, consumed time is not tracked
correctly. This patch measures the time between setting and clearing
the rq->skip_clock_update flag. The maximum observed value is
exported in /proc/sched_debug.
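To make the measurement pattern easy to see in isolation, here is a
minimal user-space sketch (not part of the patch) of the bookkeeping
the patch adds: stamp a timestamp when the skip flag is set, and when
it is cleared fold the elapsed time into an all-time maximum plus a
"recent" maximum that is zeroed every RESET_PERIOD clears so that
later improvements remain visible. The names skip_stats, now_ns() and
RESET_PERIOD are illustrative; now_ns() stands in for the kernel's
sched_clock_cpu().

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define RESET_PERIOD 10000		/* mirrors the "% 10000" in the patch */

struct skip_stats {
	uint64_t set_ns;		/* timestamp taken when the flag is set  */
	uint64_t max_ns;		/* longest deactivation ever observed    */
	uint64_t recent_max_ns;		/* longest deactivation since last reset */
	unsigned int count;		/* completed set/clear cycles            */
	int flag;			/* models rq->skip_clock_update          */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void set_skip(struct skip_stats *s)
{
	s->set_ns = now_ns();	/* like schedstat_set(rq->skip_clock_set, ...) */
	s->flag = 1;
}

static void clear_skip(struct skip_stats *s)
{
	if (s->flag) {
		uint64_t skipped = now_ns() - s->set_ns;

		if (skipped > s->max_ns)
			s->max_ns = skipped;
		/* reset infrequently to expose changes */
		if (!(++s->count % RESET_PERIOD))
			s->recent_max_ns = 0;
		if (skipped > s->recent_max_ns)
			s->recent_max_ns = skipped;
	}
	s->flag = 0;
}

int main(void)
{
	struct skip_stats s = { 0 };

	for (int i = 0; i < 100000; i++) {
		set_skip(&s);
		clear_skip(&s);
	}
	printf("count=%u max=%llu ns recent_max=%llu ns\n", s.count,
	       (unsigned long long)s.max_ns,
	       (unsigned long long)s.recent_max_ns);
	return 0;
}

Note the order of operations: the cycle that trips the reset
threshold first zeroes the recent maximum and then records its own
value, exactly as clear_skip_clock_update() below does.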
-rw-r--r--	kernel/sched.c       | 28
-rw-r--r--	kernel/sched_debug.c |  4
2 files changed, 30 insertions, 2 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 651c899a9b74..a6f6869fdefb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -541,6 +541,11 @@ struct rq {
 	unsigned long long rq_cpu_time;
 	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 
+	u64 skip_clock_max;
+	u64 skip_clock_recent_max;
+	u64 skip_clock_set;
+	unsigned int skip_clock_count;
+
 	/* sys_sched_yield() stats */
 	unsigned int yld_count;
 
@@ -639,6 +644,22 @@ static inline struct task_group *task_group(struct task_struct *p)
 static u64 irq_time_cpu(int cpu);
 static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
 
+static void clear_skip_clock_update(struct rq *rq)
+{
+#ifdef CONFIG_SCHEDSTATS
+	if (unlikely(rq->skip_clock_update)) {
+		u64 skipped = sched_clock_cpu(cpu_of(rq)) - rq->skip_clock_set;
+		rq->skip_clock_max = max(rq->skip_clock_max, skipped);
+		/* reset infrequently to expose changes */
+		if (!(++rq->skip_clock_count % 10000))
+			rq->skip_clock_recent_max = 0;
+		rq->skip_clock_recent_max = max(rq->skip_clock_recent_max,
+						skipped);
+	}
+#endif
+	rq->skip_clock_update = 0;
+}
+
 inline void update_rq_clock(struct rq *rq)
 {
 	if (!rq->skip_clock_update) {
@@ -652,7 +673,7 @@ inline void update_rq_clock(struct rq *rq)
 
 		sched_irq_time_avg_update(rq, irq_time);
 	}
-	rq->skip_clock_update = 0;
+	clear_skip_clock_update(rq);
 }
 
 /*
@@ -2130,8 +2151,11 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule. In
 	 * this case, we can save a useless back to back clock update.
 	 */
-	if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
+	if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) {
+		schedstat_set(rq->skip_clock_set,
+			      sched_clock_cpu(cpu_of(rq)));
 		rq->skip_clock_update = 1;
+	}
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 2e1b0d17dd9b..8924be259506 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -298,6 +298,10 @@ static void print_cpu(struct seq_file *m, int cpu)
 
 	P(bkl_count);
 
+	P(skip_clock_count);
+	P64(skip_clock_recent_max);
+	P64(skip_clock_max);
+
 #undef P
 #endif
 	print_cfs_stats(m, cpu);
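With CONFIG_SCHEDSTATS enabled, the new counters appear in each
per-CPU block of /proc/sched_debug next to bkl_count; given the
P()/P64() format used by print_cpu(), the output should look roughly
like the following (values invented for illustration; the two maxima
are nanoseconds, since sched_clock_cpu() reports a nanosecond clock):

  .skip_clock_count              : 284713
  .skip_clock_recent_max         : 103924
  .skip_clock_max                : 411876

Without CONFIG_SCHEDSTATS, both the schedstat_set() timestamping and
the accounting block in clear_skip_clock_update() compile away,
leaving only the plain clearing of the flag.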