author		Ken Chen <kenchen@google.com>	2008-12-17 02:41:22 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-18 07:54:01 -0500
commit		9c2c48020ec0dd6ecd27e5a1298f73b40d85a595 (patch)
tree		1a44f56e18012120aff0969094eebd572deb2ea3
parent		e9515c3c9feecd74174c2998add0db51e02abb8d (diff)
schedstat: consolidate per-task cpu runtime stats
Impact: simplify code
When CONFIG_SCHEDSTATS is turned on, per-task cpu runtime is accumulated
twice: once in task->se.sum_exec_runtime and once in sched_info.cpu_time.
These two stats are exactly the same.

Given that task->se.sum_exec_runtime is always accumulated by the core
scheduler, sched_info can reuse that data instead of duplicating the
accounting.
Signed-off-by: Ken Chen <kenchen@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
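For context (not part of the patch), a minimal userspace sketch of how the consolidated value is consumed: after this change the first field of /proc/<pid>/schedstat is sourced from task->se.sum_exec_runtime, while the file keeps its three-field format of on-cpu time, runqueue wait time, and timeslice count. The program below is a hypothetical illustration and assumes a kernel built with CONFIG_SCHEDSTATS=y (the file is absent otherwise).

/*
 * Hypothetical example, not part of this patch: read /proc/self/schedstat,
 * whose three fields after this change are se.sum_exec_runtime (ns spent
 * on cpu), sched_info.run_delay (ns spent waiting on a runqueue) and
 * sched_info.pcount (# of timeslices run on this cpu).
 * Assumes CONFIG_SCHEDSTATS=y; the file may be absent otherwise.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long cpu_time, run_delay;
	unsigned long pcount;
	FILE *f = fopen("/proc/self/schedstat", "r");

	if (!f) {
		perror("fopen /proc/self/schedstat");
		return 1;
	}
	if (fscanf(f, "%llu %llu %lu", &cpu_time, &run_delay, &pcount) == 3)
		printf("on-cpu: %llu ns  runqueue wait: %llu ns  slices: %lu\n",
		       cpu_time, run_delay, pcount);
	fclose(f);
	return 0;
}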
-rw-r--r--	fs/proc/base.c		| 2
-rw-r--r--	include/linux/sched.h	| 3
-rw-r--r--	kernel/delayacct.c	| 2
-rw-r--r--	kernel/sched.c		| 2
-rw-r--r--	kernel/sched_stats.h	| 5
5 files changed, 7 insertions, 7 deletions
diff --git a/fs/proc/base.c b/fs/proc/base.c
index d4677603c889..4d745bac768c 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -347,7 +347,7 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer)
 static int proc_pid_schedstat(struct task_struct *task, char *buffer)
 {
 	return sprintf(buffer, "%llu %llu %lu\n",
-			task->sched_info.cpu_time,
+			task->se.sum_exec_runtime,
 			task->sched_info.run_delay,
 			task->sched_info.pcount);
 }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8cccd6dc5d66..2d1e840ddd35 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -670,8 +670,7 @@ struct reclaim_state;
 struct sched_info {
 	/* cumulative counters */
 	unsigned long pcount;	      /* # of times run on this cpu */
-	unsigned long long cpu_time,  /* time spent on the cpu */
-			   run_delay; /* time spent waiting on a runqueue */
+	unsigned long long run_delay; /* time spent waiting on a runqueue */
 
 	/* timestamps */
 	unsigned long long last_arrival,/* when we last ran on a cpu */
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index b3179dad71be..abb6e17505e2 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -127,7 +127,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 	 */
 	t1 = tsk->sched_info.pcount;
 	t2 = tsk->sched_info.run_delay;
-	t3 = tsk->sched_info.cpu_time;
+	t3 = tsk->se.sum_exec_runtime;
 
 	d->cpu_count += t1;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index f53e2b8ef521..fd835fc320b8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -596,6 +596,8 @@ struct rq {
 #ifdef CONFIG_SCHEDSTATS
 	/* latency stats */
 	struct sched_info rq_sched_info;
+	unsigned long long rq_cpu_time;
+	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 
 	/* sys_sched_yield() stats */
 	unsigned int yld_exp_empty;
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 7dbf72a2b02c..3b01098164c8 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -31,7 +31,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
 		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
 		    rq->ttwu_count, rq->ttwu_local,
-		    rq->rq_sched_info.cpu_time,
+		    rq->rq_cpu_time,
 		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
 
 		seq_printf(seq, "\n");
@@ -123,7 +123,7 @@ static inline void
 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 {
 	if (rq)
-		rq->rq_sched_info.cpu_time += delta;
+		rq->rq_cpu_time += delta;
 }
 
 static inline void
@@ -236,7 +236,6 @@ static inline void sched_info_depart(struct task_struct *t)
 	unsigned long long delta = task_rq(t)->clock -
 					t->sched_info.last_arrival;
 
-	t->sched_info.cpu_time += delta;
 	rq_sched_info_depart(task_rq(t), delta);
 
 	if (t->state == TASK_RUNNING)