author		Peter Zijlstra <peterz@infradead.org>	2009-09-16 06:31:31 -0400
committer	Ingo Molnar <mingo@elte.hu>		2009-09-17 04:17:25 -0400
commit		ad4b78bbcbab66998b05d422ac6106b645796e54
tree		45f3561f4bd6b886948a3b0eea64edab9bab9eda	/kernel/sched.c
parent		eb24073bc1fe3e569a855cf38d529fb650c35524
sched: Add new wakeup preemption mode: WAKEUP_RUNNING
Create a new wakeup preemption mode: preempt towards tasks that run
shorter on average. Also set the next buddy, to be sure we actually run
the task we preempted for.
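The preemption check itself lives on the fair-class side (kernel/sched_fair.c) and is outside the diff below, which is limited to kernel/sched.c. A minimal sketch of what the mode amounts to, assuming the usual check_preempt_wakeup() placement and the existing sched_feat()/set_next_buddy()/resched_task() helpers; the exact shape of the real hunk may differ:

	/*
	 * Sketch only. se = sched entity of the currently running task,
	 * pse = sched entity of the task that is waking up, curr = rq->curr.
	 */
	if (sched_feat(WAKEUP_RUNNING) && pse->avg_running < se->avg_running) {
		/*
		 * The wakee runs shorter slices on average than the current
		 * task: preempt, and mark the wakee as next buddy so that
		 * pick_next_entity() really selects the task we preempted for.
		 */
		set_next_buddy(pse);
		resched_task(curr);
	}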
Test results:
root@twins:~# while :; do :; done &
[1] 6537
root@twins:~# while :; do :; done &
[2] 6538
root@twins:~# while :; do :; done &
[3] 6539
root@twins:~# while :; do :; done &
[4] 6540
root@twins:/home/peter# ./latt -c4 sleep 4
Entries: 48 (clients=4)
Averages:
------------------------------
Max 4750 usec
Avg 497 usec
Stdev 737 usec
root@twins:/home/peter# echo WAKEUP_RUNNING > /debug/sched_features
root@twins:/home/peter# ./latt -c4 sleep 4
Entries: 48 (clients=4)
Averages:
------------------------------
Max 14 usec
Avg 5 usec
Stdev 3 usec
Disabled by default - needs more testing.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
LKML-Reference: <new-submission>
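"Disabled by default" in this era means a 0-default entry in kernel/sched_features.h, toggled at runtime via /debug/sched_features exactly as in the transcript above. A sketch of what such an entry would look like (the comment text is assumed, not quoted from the patch):

	/*
	 * Wakeup preemption towards tasks that run short (assumed wording).
	 */
	SCHED_FEAT(WAKEUP_RUNNING, 0)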
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 969dfaef2465..3bb4ea2ee6f0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2458,6 +2458,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.avg_overlap		= 0;
 	p->se.start_runtime		= 0;
 	p->se.avg_wakeup		= sysctl_sched_wakeup_granularity;
+	p->se.avg_running		= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start		= 0;
@@ -5310,14 +5311,13 @@ static inline void schedule_debug(struct task_struct *prev)
 #endif
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *prev)
+static void put_prev_task(struct rq *rq, struct task_struct *p)
 {
-	if (prev->state == TASK_RUNNING) {
-		u64 runtime = prev->se.sum_exec_runtime;
+	u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
 
-		runtime -= prev->se.prev_sum_exec_runtime;
-		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+	update_avg(&p->se.avg_running, runtime);
 
+	if (p->state == TASK_RUNNING) {
 		/*
 		 * In order to avoid avg_overlap growing stale when we are
 		 * indeed overlapping and hence not getting put to sleep, grow
@@ -5327,9 +5327,12 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev)
 		 * correlates to the amount of cache footprint a task can
 		 * build up.
 		 */
-		update_avg(&prev->se.avg_overlap, runtime);
+		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+		update_avg(&p->se.avg_overlap, runtime);
+	} else {
+		update_avg(&p->se.avg_running, 0);
 	}
-	prev->sched_class->put_prev_task(rq, prev);
+	p->sched_class->put_prev_task(rq, p);
 }
 
 /*
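For reference, update_avg() is a pre-existing helper in kernel/sched.c; around this time it was a simple 1/8-weight exponential moving average, roughly:

	static void update_avg(u64 *avg, u64 sample)
	{
		s64 diff = sample - *avg;

		/* Move the average 1/8 of the way towards the new sample. */
		*avg += diff >> 3;
	}

So avg_running tracks recent slice lengths, and the update_avg(&p->se.avg_running, 0) in the blocking path pulls a sleeper's average towards zero, which is what lets frequently sleeping tasks win the "runs shorter on average" comparison against pure CPU hogs.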