path: root/kernel/sched_debug.c
author    Mike Galbraith <efault@gmx.de>    2010-03-11 11:15:51 -0500
committer Ingo Molnar <mingo@elte.hu>       2010-03-11 12:32:50 -0500
commit    e12f31d3e5d36328c7fbd0fce40a95e70b59152c (patch)
tree      3eaee7fede5ba830395d2e527fdfe60f1aba73f4 /kernel/sched_debug.c
parent    b42e0c41a422a212ddea0666d5a3a0e3c35206db (diff)
sched: Remove avg_overlap
Both avg_overlap and avg_wakeup had an inherent problem in that their accuracy was detrimentally affected by cross-cpu wakeups, because we are missing the necessary call to update_curr(). This can't be fixed without increasing overhead in our already too fat fastpath.

Additionally, with recent load balancing changes making us prefer to place tasks in an idle cache domain (which is good for compute bound loads), communicating tasks suffer when a sync wakeup, which would enable affine placement, is turned into a non-sync wakeup by SYNC_LESS. With one task on the runqueue, wake_affine() rejects the affine wakeup request, leaving the unfortunate task where it was placed, taking frequent cache misses.

Remove it, and recover some fastpath cycles.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301121.6785.30.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
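For readers without the history: avg_overlap was an exponential moving average, updated at context-switch time, of how long a task kept running after waking another task; small values marked a waker/wakee pair as effectively synchronous and favored affine placement. Below is a minimal user-space sketch of that style of bookkeeping, modeled on (not copied from) the old update_avg() helper in kernel/sched.c; the 1/8 weight matches that helper, while the sample values are invented for illustration.

```c
#include <stdio.h>
#include <stdint.h>

/* EMA with weight 1/8, in the style of the pre-removal update_avg()
 * helper in kernel/sched.c. */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);

	*avg += diff >> 3;
}

int main(void)
{
	uint64_t avg_overlap = 0;
	/* Hypothetical per-switch overlaps (ns): how long the waker kept
	 * running after issuing a wakeup. Short overlaps suggest a
	 * synchronous waker/wakee pair. */
	uint64_t samples[] = { 50000, 40000, 60000, 45000 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		update_avg(&avg_overlap, samples[i]);
		printf("sample %8llu ns -> avg_overlap %8llu ns\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)avg_overlap);
	}
	return 0;
}
```

The changelog's accuracy complaint maps onto this directly: the kernel derived each sample from se.sum_exec_runtime, which only update_curr() brings up to date, so a cross-cpu wakeup that skips that call feeds the average a stale runtime.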
Diffstat (limited to 'kernel/sched_debug.c')
-rw-r--r--  kernel/sched_debug.c | 1 -
1 file changed, 0 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 20b95a420fec..8a46a719f367 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -407,7 +407,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.exec_start);
 	PN(se.vruntime);
 	PN(se.sum_exec_runtime);
-	PN(se.avg_overlap);
 
 	nr_switches = p->nvcsw + p->nivcsw;
 
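The single removed line goes through the PN() helper macro local to proc_sched_show_task(), which prints a nanosecond-valued sched_entity field into /proc/<pid>/sched as a fixed-width value with a six-digit fraction. A user-space approximation of that formatting follows; the field widths and the SPLIT_NS() divisor are recalled from this era's kernel/sched_debug.c, so treat them as illustrative rather than exact.

```c
#include <stdio.h>

/* Approximation of nsec_high()/nsec_low(), which SPLIT_NS() expands to
 * in kernel/sched_debug.c: split nanoseconds at a 1,000,000 divisor. */
static long long nsec_high(long long ns) { return ns / 1000000LL; }
static long nsec_low(long long ns) { return (long)(ns % 1000000LL); }

/* Shape of the local PN() macro: left-justified field name, then the
 * split value. Widths are illustrative. */
#define PN(name, ns) \
	printf("%-35s:%14lld.%06ld\n", name, nsec_high(ns), nsec_low(ns))

int main(void)
{
	long long avg_overlap_ns = 1543210; /* hypothetical sample */

	/* Before this patch, /proc/<pid>/sched contained a line like: */
	PN("se.avg_overlap", avg_overlap_ns);
	return 0;
}
```

Dropping PN(se.avg_overlap) is the whole change visible here; the field itself and its fastpath updates are removed in the rest of the commit, which this view filters out (note the "limited to 'kernel/sched_debug.c'" diffstat above).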