about summary refs log tree commit diff stats
path: root/kernel/sched_debug.c
diff options
context:
space:
mode:
author: Ingo Molnar <mingo@elte.hu> 2007-10-15 11:00:03 -0400
committer: Ingo Molnar <mingo@elte.hu> 2007-10-15 11:00:03 -0400
commit: a25707f3aef9cf68c341eba5960d580f364e4e6f (patch)
tree: 77f13a0d32f68217cf6be32b1ab755bf7c1c0665 /kernel/sched_debug.c
parent: 8ebc91d93669af39dbed50914d7daf457eeb43be (diff)
sched: remove precise CPU load
CPU load calculations are statistical anyway, and there's little benefit from having it calculated on every scheduling event. So remove this code, it gets rid of a divide from the scheduler wakeup and context-switch fastpath. Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Mike Galbraith <efault@gmx.de> Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched_debug.c')
-rw-r--r--kernel/sched_debug.c2
1 file changed, 0 insertions, 2 deletions
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index fd080f686f18..6b789dae7fdf 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -145,8 +145,6 @@ static void print_cpu(struct seq_file *m, int cpu)
145 P(nr_running); 145 P(nr_running);
146 SEQ_printf(m, " .%-30s: %lu\n", "load", 146 SEQ_printf(m, " .%-30s: %lu\n", "load",
147 rq->ls.load.weight); 147 rq->ls.load.weight);
148 P(ls.delta_fair);
149 P(ls.delta_exec);
150 P(nr_switches); 148 P(nr_switches);
151 P(nr_load_updates); 149 P(nr_load_updates);
152 P(nr_uninterruptible); 150 P(nr_uninterruptible);