author		Mike Galbraith <efault@gmx.de>		2010-03-11 11:15:51 -0500
committer	Ingo Molnar <mingo@elte.hu>		2010-03-11 12:32:50 -0500
commit		e12f31d3e5d36328c7fbd0fce40a95e70b59152c (patch)
tree		3eaee7fede5ba830395d2e527fdfe60f1aba73f4 /kernel/sched.c
parent		b42e0c41a422a212ddea0666d5a3a0e3c35206db (diff)
sched: Remove avg_overlap
Both avg_overlap and avg_wakeup had an inherent problem in that their accuracy was detrimentally affected by cross-cpu wakeups, because we are missing the necessary call to update_curr(). This can't be fixed without increasing overhead in our already too fat fastpath.

Additionally, with recent load balancing changes making us prefer to place tasks in an idle cache domain (which is good for compute bound loads), communicating tasks suffer when a sync wakeup, which would enable affine placement, is turned into a non-sync wakeup by SYNC_LESS. With one task on the runqueue, wake_affine() rejects the affine wakeup request, leaving the unfortunate task where it was placed, taking frequent cache misses.

Remove it, and recover some fastpath cycles.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301121.6785.30.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
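For context: the removed hunks below feed their samples into update_avg(), which in kernel/sched.c of this era maintained a simple exponential moving average, roughly as in the sketch below. This is an illustration reconstructed for this write-up, not part of the patch.

	/* Sketch of the averaging helper the removed hunks call (kernel-style u64/s64 types). */
	static void update_avg(u64 *avg, u64 sample)
	{
		s64 diff = sample - *avg;	/* distance of the new sample from the average */

		*avg += diff >> 3;		/* move 1/8 of the way toward the sample */
	}

Each sample nudges avg_overlap an eighth of the way toward the new value, so samples computed from a sum_exec_runtime that was never refreshed by update_curr() on a cross-cpu wakeup directly skew the average, which is the accuracy problem described above.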
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	33
1 file changed, 0 insertions(+), 33 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 35a8626ace7d..68ed6f4f3c13 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1887,11 +1887,6 @@ enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (sleep && p->se.last_wakeup) {
-		update_avg(&p->se.avg_overlap,
-			   p->se.sum_exec_runtime - p->se.last_wakeup);
-		p->se.last_wakeup = 0;
-	}
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, sleep);
 	p->se.on_rq = 0;
@@ -2452,15 +2447,6 @@ out_activate:
 	activate_task(rq, p, 1);
 	success = 1;
 
-	/*
-	 * Only attribute actual wakeups done by this task.
-	 */
-	if (!in_interrupt()) {
-		struct sched_entity *se = &current->se;
-
-		se->last_wakeup = se->sum_exec_runtime;
-	}
-
 out_running:
 	trace_sched_wakeup(rq, p, success);
 	check_preempt_curr(rq, p, wake_flags);
@@ -2522,8 +2508,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
 	p->se.nr_migrations = 0;
-	p->se.last_wakeup = 0;
-	p->se.avg_overlap = 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -3594,23 +3578,6 @@ static inline void schedule_debug(struct task_struct *prev)
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	if (prev->state == TASK_RUNNING) {
-		u64 runtime = prev->se.sum_exec_runtime;
-
-		runtime -= prev->se.prev_sum_exec_runtime;
-		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
-
-		/*
-		 * In order to avoid avg_overlap growing stale when we are
-		 * indeed overlapping and hence not getting put to sleep, grow
-		 * the avg_overlap on preemption.
-		 *
-		 * We use the average preemption runtime because that
-		 * correlates to the amount of cache footprint a task can
-		 * build up.
-		 */
-		update_avg(&prev->se.avg_overlap, runtime);
-	}
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
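To illustrate the SYNC_LESS interaction the changelog refers to, the wakeup placement path in kernel/sched_fair.c of this period consulted avg_overlap when deciding whether to keep the sync hint, roughly along the lines of the following sketch. This is a simplified reconstruction for illustration only, not part of this diff, and the exact conditions should be checked against the tree of that era.

	/* Sketch: a large apparent overlap strips the sync hint from a wakeup. */
	if (sched_feat(SYNC_LESS) &&
	    (current->se.avg_overlap > sysctl_sched_migration_cost ||
	     p->se.avg_overlap > sysctl_sched_migration_cost))
		sync = 0;

With avg_overlap inflated by the missing update_curr() on cross-cpu wakeups, a check of this shape can spuriously turn a sync wakeup into a non-sync one; wake_affine() then rejects affine placement and the communicating tasks keep taking cache misses, which is the second problem the changelog describes.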