author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-11-30 07:00:37 -0500
committer Ingo Molnar <mingo@elte.hu>              2009-12-09 04:03:07 -0500
commit    6cecd084d0fd27bb1e498e2829fd45846d806856 (patch)
tree      90cc079c942ad35669d1a33957a121c1cb3a88a6 /kernel/sched.c
parent    3a7e73a2e26fffdbc46ba95fc0425418984f5140 (diff)
sched: Discard some old bits
WAKEUP_RUNNING was an experiment, not sure why that ever ended up being
merged...

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 33c903573132..0170735bdafc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2493,7 +2493,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.avg_overlap		= 0;
 	p->se.start_runtime		= 0;
 	p->se.avg_wakeup		= sysctl_sched_wakeup_granularity;
-	p->se.avg_running		= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start		= 0;
@@ -5379,13 +5378,14 @@ static inline void schedule_debug(struct task_struct *prev)
 #endif
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *p)
+static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
+	if (prev->state == TASK_RUNNING) {
+		u64 runtime = prev->se.sum_exec_runtime;
 
-	update_avg(&p->se.avg_running, runtime);
+		runtime -= prev->se.prev_sum_exec_runtime;
+		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
 
-	if (p->state == TASK_RUNNING) {
 		/*
 		 * In order to avoid avg_overlap growing stale when we are
 		 * indeed overlapping and hence not getting put to sleep, grow
@@ -5395,12 +5395,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p)
 		 * correlates to the amount of cache footprint a task can
 		 * build up.
 		 */
-		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
-		update_avg(&p->se.avg_overlap, runtime);
-	} else {
-		update_avg(&p->se.avg_running, 0);
-	}
-	p->sched_class->put_prev_task(rq, p);
+		update_avg(&prev->se.avg_overlap, runtime);
+	}
+	prev->sched_class->put_prev_task(rq, prev);
 }
 
 /*
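
For reference, here is a sketch of put_prev_task() as it reads once this
patch is applied, assembled from the hunks above. It is not part of the
commit itself: update_avg() is the scheduler's existing fixed-weight
averaging helper from kernel/sched.c of this era, reproduced for context;
the comment lines the diff elides between the second and third hunk are
restored from the surrounding source; and the two short single-line
comments are added annotations, not kernel text.

	/* Existing kernel/sched.c helper: exponential average, 1/8 weight. */
	static void update_avg(u64 *avg, u64 sample)
	{
		s64 diff = sample - *avg;
		*avg += diff >> 3;
	}

	static void put_prev_task(struct rq *rq, struct task_struct *prev)
	{
		/* Only a preempted task (still TASK_RUNNING) contributes a sample. */
		if (prev->state == TASK_RUNNING) {
			u64 runtime = prev->se.sum_exec_runtime;

			runtime -= prev->se.prev_sum_exec_runtime;
			/* Clamp so one long slice cannot blow up the average. */
			runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);

			/*
			 * In order to avoid avg_overlap growing stale when we are
			 * indeed overlapping and hence not getting put to sleep, grow
			 * the avg_overlap on preemption.
			 *
			 * We use the average preemption runtime because that
			 * correlates to the amount of cache footprint a task can
			 * build up.
			 */
			update_avg(&prev->se.avg_overlap, runtime);
		}
		prev->sched_class->put_prev_task(rq, prev);
	}

Net effect: avg_overlap now only averages in clamped preemption runtimes,
and the avg_running bookkeeping, which the commit message suggests existed
only for the WAKEUP_RUNNING experiment, is gone entirely.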