diff options
author    Christoph Lameter <clameter@sgi.com>    2006-12-10 05:20:19 -0500
committer Linus Torvalds <torvalds@woody.osdl.org>    2006-12-10 12:55:42 -0500
commit    4211a9a2e94a34df8c02bc39b7ec10678ad5c2ab (patch)
tree      006f3678fbf181cfd172f9dc5cb9077c9efc4165 /kernel/sched.c
parent    571f6d2fb0b1c04798df783db2ba85e96bcce43d (diff)
[PATCH] sched: remove staggering of load balancing
Timer interrupts already are staggered. We do not need an additional layer of
time staggering for short load balancing actions that take a reasonably small
portion of the time slice.
For load balancing on large sched_domains we will add a serialization later
that avoids concurrent load balance operations and thus has the same effect as
load staggering.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Peter Williams <pwil3058@bigpond.net.au>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "Chen, Kenneth W" <kenneth.w.chen@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--    kernel/sched.c    10
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index fdd26fffaa20..b5b350135002 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2841,16 +2841,10 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 	 * Balancing parameters are set up in arch_init_sched_domains.
 	 */
 
-/* Don't have all balancing operations going off at once: */
-static inline unsigned long cpu_offset(int cpu)
-{
-	return jiffies + cpu * HZ / NR_CPUS;
-}
-
 static void
 rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
 {
-	unsigned long this_load, interval, j = cpu_offset(this_cpu);
+	unsigned long this_load, interval;
 	struct sched_domain *sd;
 	int i, scale;
 
@@ -2885,7 +2879,7 @@ rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
 	if (unlikely(!interval))
 		interval = 1;
 
-	if (j - sd->last_balance >= interval) {
+	if (jiffies - sd->last_balance >= interval) {
 		if (load_balance(this_cpu, this_rq, sd, idle)) {
 			/*
 			 * We've pulled tasks over so either we're no