author    Nick Piggin <npiggin@suse.de>  2007-02-12 03:53:51 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-12 12:48:37 -0500
commit    ff91691bccdb741efb2df0489058a4961fa79598 (patch)
tree      eeef6ce3d48df86a7b2c1178a9ba54210b8b8981 /kernel/sched.c
parent    0a9ac38246b11892ad20a1eb9deb67adf8c0db2f (diff)
[PATCH] sched: avoid div in rebalance_tick
Avoid an expensive integer divide 3 times per CPU per tick.

A userspace test of this loop went from 26ns down to 19ns on a G5, and from
123ns down to 28ns on a P3.

(Also avoid a variable bit shift, as suggested by Alan. The effect of this
wasn't noticeable on the CPUs I tested with.)

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
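For illustration, a minimal userspace sketch of the loop before and after the
patch (a hypothetical harness, not from the commit; NR_LOAD_IDX and the bare
cpu_load array stand in for the runqueue fields):

	#include <stdio.h>

	#define NR_LOAD_IDX 3

	static unsigned long cpu_load[NR_LOAD_IDX];

	/* Before: the divide by the runtime variable 'scale' cannot be
	 * strength-reduced to a shift by the compiler, so this costs one
	 * integer divide per index, plus a variable shift per iteration. */
	static void update_load_div(unsigned long this_load)
	{
		int i, scale;

		for (i = 0, scale = 1; i < NR_LOAD_IDX; i++, scale <<= 1) {
			unsigned long old_load = cpu_load[i];
			unsigned long new_load = this_load;

			if (new_load > old_load)
				new_load += scale - 1;	/* round up when rising */
			cpu_load[i] = (old_load * (scale - 1) + new_load) / scale;
		}
	}

	/* After: scale == 1 << i throughout, so '/ scale' becomes '>> i',
	 * and 'scale += scale' replaces the variable shift with an add. */
	static void update_load_shift(unsigned long this_load)
	{
		unsigned int i, scale;

		for (i = 0, scale = 1; i < NR_LOAD_IDX; i++, scale += scale) {
			unsigned long old_load = cpu_load[i];
			unsigned long new_load = this_load;

			if (new_load > old_load)
				new_load += scale - 1;
			cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
		}
	}

	int main(void)
	{
		update_load_shift(10);
		printf("%lu %lu %lu\n", cpu_load[0], cpu_load[1], cpu_load[2]);
		return 0;
	}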
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1fd67e16cd31..08f86178aa34 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2897,14 +2897,16 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 static void update_load(struct rq *this_rq)
 {
 	unsigned long this_load;
-	int i, scale;
+	unsigned int i, scale;
 
 	this_load = this_rq->raw_weighted_load;
 
 	/* Update our load: */
-	for (i = 0, scale = 1; i < 3; i++, scale <<= 1) {
+	for (i = 0, scale = 1; i < 3; i++, scale += scale) {
 		unsigned long old_load, new_load;
 
+		/* scale is effectively 1 << i now, and >> i divides by scale */
+
 		old_load = this_rq->cpu_load[i];
 		new_load = this_load;
 		/*
@@ -2914,7 +2916,7 @@ static void update_load(struct rq *this_rq)
 		 */
 		if (new_load > old_load)
 			new_load += scale-1;
-		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
+		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
 	}
 }
 
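Two properties the new line relies on can be checked in isolation. A small
standalone test (hypothetical, not part of the patch): for unsigned values,
dividing by scale == 1u << i is exactly a right shift by i, and the scale-1
round-up is what lets a rising load reach its target instead of sticking one
below it (the "stuck on 9 if the load is 10" case from the comment in the
source):

	#include <assert.h>

	int main(void)
	{
		unsigned int i, scale;

		for (i = 0, scale = 1; i < 3; i++, scale += scale) {
			unsigned long x = 12345;

			assert(scale == 1u << i);	/* loop invariant */
			assert(x / scale == x >> i);	/* unsigned div == shift */
		}

		/* old_load 9, new load 10, scale 4:
		 * (9*3 + 10) / 4     == 37/4 == 9  -> stuck at 9 forever
		 * (9*3 + 10 + 3) / 4 == 40/4 == 10 -> converges to 10   */
		{
			unsigned long old_load = 9, new_load = 10, s = 4;

			assert((old_load * (s - 1) + new_load) / s == 9);
			assert((old_load * (s - 1) + new_load + (s - 1)) / s == 10);
		}
		return 0;
	}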