author     Arjan van de Ven <arjan@linux.intel.com>   2008-01-25 15:08:35 -0500
committer  Ingo Molnar <mingo@elte.hu>                2008-01-25 15:08:35 -0500
commit     6d082592b62689fb91578d0338d04a9f50991990 (patch)
tree       facef621798752724be64c3ded31a3c3fded1643
parent     286100a6cf1c1f692e5f81d14b364ff12b7662f5 (diff)
sched: keep total / count stats in addition to the max for
Right now, the Linux kernel (with scheduler statistics enabled) keeps track of the maximum time a process waits to be scheduled. While the maximum is a very useful metric, tracking the average and the total is equally useful (at least for latencytop) to figure out the accumulated effect of scheduler delays. The accumulated effect is important to judge the performance impact of scheduler tuning/behavior.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
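Not part of the patch, but for illustration: the new counters are exported through /proc/<pid>/sched (via proc_sched_show_task() in kernel/sched_debug.c), so a tool such as latencytop can derive the average scheduling delay as wait_sum / wait_count. A minimal userspace sketch, assuming the usual "name : value" layout of that file (the parsing details below are an assumption, not taken from this patch):

/*
 * Sketch: read se.wait_sum and se.wait_count from /proc/self/sched and
 * compute the average scheduling delay.  Values are used in whatever
 * units the file prints them in.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/sched", "r");
	char line[256], name[64];
	double wait_sum = 0.0, wait_count = 0.0, val;

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* lines look like "se.wait_sum    :    12.345678" */
		if (sscanf(line, "%63[^ :] : %lf", name, &val) != 2)
			continue;
		if (!strcmp(name, "se.wait_sum"))
			wait_sum = val;
		else if (!strcmp(name, "se.wait_count"))
			wait_count = val;
	}
	fclose(f);

	if (wait_count > 0)
		printf("average wait: %f\n", wait_sum / wait_count);
	return 0;
}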
-rw-r--r--  include/linux/sched.h   2
-rw-r--r--  kernel/sched_debug.c    4
-rw-r--r--  kernel/sched_fair.c     3
3 files changed, 9 insertions, 0 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 734f6d8f6ed5..df5b24ee80b3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -895,6 +895,8 @@ struct sched_entity {
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
+	u64			wait_count;
+	u64			wait_sum;
 
 	u64			sleep_start;
 	u64			sleep_max;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 9e5de098d471..4b5e24cf2f4a 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -300,6 +300,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.exec_max);
 	PN(se.slice_max);
 	PN(se.wait_max);
+	PN(se.wait_sum);
+	P(se.wait_count);
 	P(sched_info.bkl_count);
 	P(se.nr_migrations);
 	P(se.nr_migrations_cold);
@@ -367,6 +369,8 @@ void proc_sched_set_task(struct task_struct *p)
 {
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_max			= 0;
+	p->se.wait_sum			= 0;
+	p->se.wait_count		= 0;
 	p->se.sleep_max			= 0;
 	p->se.sum_sleep_runtime		= 0;
 	p->se.block_max			= 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 45ff4e9411e0..72e25c7a3a18 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -385,6 +385,9 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	schedstat_set(se->wait_max, max(se->wait_max,
 			rq_of(cfs_rq)->clock - se->wait_start));
+	schedstat_set(se->wait_count, se->wait_count + 1);
+	schedstat_set(se->wait_sum, se->wait_sum +
+			rq_of(cfs_rq)->clock - se->wait_start);
 	schedstat_set(se->wait_start, 0);
 }
 
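For reference, the new updates go through schedstat_set(), so the extra bookkeeping compiles away when CONFIG_SCHEDSTATS is disabled. A rough sketch of that helper as it looked around this kernel version (quoted from memory; treat the exact definition as an assumption rather than part of this diff):

/* Assumed shape of the helper in kernel/sched_stats.h: with
 * CONFIG_SCHEDSTATS=n the wait_count/wait_sum updates cost nothing. */
#ifdef CONFIG_SCHEDSTATS
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else
# define schedstat_set(var, val)	do { } while (0)
#endif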