about summary refs log tree commit diff stats
path: root/kernel/sched_fair.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2007-08-28 06:53:24 -0400
committerIngo Molnar <mingo@elte.hu>2007-08-28 06:53:24 -0400
commit213c8af67f21c1dc0d50940b159d9521c95f3c89 (patch)
treeb3f594889a460bcec385192d8675b428616d31b7 /kernel/sched_fair.c
parentb77d69db9f4ba03b2ed17e383c2d73ca89f5ab14 (diff)
sched: small schedstat fix
small schedstat fix: the cfs_rq->wait_runtime 'sum of all runtimes' statistics counters missed newly forked tasks and thus had a constant negative skew. Fix this.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--kernel/sched_fair.c4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0c718857176f..75f025da6f7c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1121,8 +1121,10 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
	 * The statistical average of wait_runtime is about
	 * -granularity/2, so initialize the task with that:
	 */
-	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
+	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
		p->se.wait_runtime = -(sched_granularity(cfs_rq) / 2);
+		schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+	}

	__enqueue_entity(cfs_rq, se);
 }