author     Ingo Molnar <mingo@elte.hu>   2007-10-15 11:00:03 -0400
committer  Ingo Molnar <mingo@elte.hu>   2007-10-15 11:00:03 -0400
commit     a25707f3aef9cf68c341eba5960d580f364e4e6f (patch)
tree       77f13a0d32f68217cf6be32b1ab755bf7c1c0665 /kernel
parent     8ebc91d93669af39dbed50914d7daf457eeb43be (diff)
sched: remove precise CPU load
CPU load calculations are statistical anyway, and there is little benefit in
recomputing the load on every scheduling event. So remove this code; it gets
rid of a divide in the scheduler wakeup and context-switch fastpath.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
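For readers skimming the diff below: the per-index cpu_load[] average is kept as an exponentially decaying mean, and the patch rounds the division up while the load is rising so the average can actually reach the new value (it no longer sticks at 9 when the load is 10). The following is a minimal userspace sketch of that averaging, assuming CPU_LOAD_IDX_MAX is 5; the function name update_cpu_load_sketch() and the constant-load driver in main() are hypothetical illustrations, not kernel code.

/*
 * Minimal userspace sketch of the decaying cpu_load[] average kept by
 * update_cpu_load() after this patch. Index i decays with weight 2^i per
 * update; rounding the division up while the load is rising lets the
 * average reach the new value instead of stalling one below it.
 * update_cpu_load_sketch() and main() are illustrative only.
 */
#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5

static unsigned long cpu_load[CPU_LOAD_IDX_MAX];

static void update_cpu_load_sketch(unsigned long this_load)
{
        unsigned long scale;
        int i;

        for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
                unsigned long old_load = cpu_load[i];
                unsigned long new_load = this_load;

                /* round up when rising, e.g. so 9 can still climb to 10 */
                if (new_load > old_load)
                        new_load += scale - 1;
                cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
        }
}

int main(void)
{
        int tick, i;

        /* feed a constant load of 10; every index converges to 10 */
        for (tick = 0; tick < 32; tick++)
                update_cpu_load_sketch(10);

        for (i = 0; i < CPU_LOAD_IDX_MAX; i++)
                printf("cpu_load[%d] = %lu\n", i, cpu_load[i]);
        return 0;
}

Built with any C compiler, the sketch prints 10 for every index after a few dozen updates; dropping the round-up leaves every index above 0 stuck at 9, which is exactly the case the added comment in the hunk describes.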
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c        42
-rw-r--r--   kernel/sched_debug.c   2
-rw-r--r--   kernel/sched_fair.c    6
3 files changed, 9 insertions, 41 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index d4dabfcc776c..25cc9b2a8c15 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1972,42 +1972,11 @@ unsigned long nr_active(void)
  */
 static void update_cpu_load(struct rq *this_rq)
 {
-        u64 fair_delta64, exec_delta64, idle_delta64, sample_interval64, tmp64;
         unsigned long total_load = this_rq->ls.load.weight;
         unsigned long this_load = total_load;
-        struct load_stat *ls = &this_rq->ls;
         int i, scale;
 
         this_rq->nr_load_updates++;
-        if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
-                goto do_avg;
-
-        /* Update delta_fair/delta_exec fields first */
-        update_curr_load(this_rq);
-
-        fair_delta64 = ls->delta_fair + 1;
-        ls->delta_fair = 0;
-
-        exec_delta64 = ls->delta_exec + 1;
-        ls->delta_exec = 0;
-
-        sample_interval64 = this_rq->clock - ls->load_update_last;
-        ls->load_update_last = this_rq->clock;
-
-        if ((s64)sample_interval64 < (s64)TICK_NSEC)
-                sample_interval64 = TICK_NSEC;
-
-        if (exec_delta64 > sample_interval64)
-                exec_delta64 = sample_interval64;
-
-        idle_delta64 = sample_interval64 - exec_delta64;
-
-        tmp64 = div64_64(SCHED_LOAD_SCALE * exec_delta64, fair_delta64);
-        tmp64 = div64_64(tmp64 * exec_delta64, sample_interval64);
-
-        this_load = (unsigned long)tmp64;
-
-do_avg:
 
         /* Update our load: */
         for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
@@ -2017,7 +1986,13 @@ do_avg:
 
                 old_load = this_rq->cpu_load[i];
                 new_load = this_load;
-
+                /*
+                 * Round up the averaging division if load is increasing. This
+                 * prevents us from getting stuck on 9 if the load is 10, for
+                 * example.
+                 */
+                if (new_load > old_load)
+                        new_load += scale-1;
                 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
         }
 }
@@ -6484,7 +6459,6 @@ static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 
 void __init sched_init(void)
 {
-        u64 now = sched_clock();
         int highest_cpu = 0;
         int i, j;
 
@@ -6509,8 +6483,6 @@ void __init sched_init(void)
                 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
                 list_add(&rq->cfs.leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
 #endif
-                rq->ls.load_update_last = now;
-                rq->ls.load_update_start = now;
 
                 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
                         rq->cpu_load[j] = 0;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index fd080f686f18..6b789dae7fdf 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -145,8 +145,6 @@ static void print_cpu(struct seq_file *m, int cpu)
         P(nr_running);
         SEQ_printf(m, "  .%-30s: %lu\n", "load",
                    rq->ls.load.weight);
-        P(ls.delta_fair);
-        P(ls.delta_exec);
         P(nr_switches);
         P(nr_load_updates);
         P(nr_uninterruptible);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2138c40f4836..105d57b41aa2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -94,16 +94,14 @@ enum {
         SCHED_FEAT_FAIR_SLEEPERS        = 1,
         SCHED_FEAT_SLEEPER_AVG          = 2,
         SCHED_FEAT_SLEEPER_LOAD_AVG     = 4,
-        SCHED_FEAT_PRECISE_CPU_LOAD     = 8,
-        SCHED_FEAT_START_DEBIT          = 16,
-        SCHED_FEAT_SKIP_INITIAL         = 32,
+        SCHED_FEAT_START_DEBIT          = 8,
+        SCHED_FEAT_SKIP_INITIAL         = 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
                 SCHED_FEAT_FAIR_SLEEPERS        *1 |
                 SCHED_FEAT_SLEEPER_AVG          *0 |
                 SCHED_FEAT_SLEEPER_LOAD_AVG     *1 |
-                SCHED_FEAT_PRECISE_CPU_LOAD     *1 |
                 SCHED_FEAT_START_DEBIT          *1 |
                 SCHED_FEAT_SKIP_INITIAL         *0;
 