author	Christoph Lameter <clameter@sgi.com>	2007-06-23 20:16:33 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-06-24 11:59:11 -0400
commit	92c4ca5c3a5e180e9762438db235f41d192cb955 (patch)
tree	814af0cfd84986f75e1e581f854eeb4f6ab42c35
parent	849663430268db63a9c3c7467984e4e530ded901 (diff)
sched: fix next_interval determination in idle_balance()
The intervals of domains that do not have SD_BALANCE_NEWIDLE must be considered for the calculation of the time of the next balance. Otherwise we may defer rebalancing forever.

Siddha also spotted that the conversion of the balance interval to jiffies is missing. Fix that too.

From: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>

Also continue the loop if !(sd->flags & SD_LOAD_BALANCE).

Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

It did in fact trigger under all three of mainline, CFS, and -rt including CFS -- see below for a couple of emails from last Friday giving results for these three on the AMD box (where it happened) and on a single-quad NUMA-Q system (where it did not, at least not with such severity).

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	kernel/sched.c	22
1 file changed, 13 insertions(+), 9 deletions(-)
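The unit mismatch the commit message mentions is the easier part to miss: sd->balance_interval is kept in milliseconds, while next_balance and sd->last_balance are jiffies values, so adding the raw interval only happens to work when HZ is 1000. The following user-space sketch (not kernel code; the HZ value and the simplified msecs_to_jiffies() stand-in are assumptions for illustration) shows how far off the deferral gets on an assumed HZ=250 configuration:

	/*
	 * Minimal user-space sketch illustrating why balance_interval
	 * (milliseconds) must be converted with msecs_to_jiffies() before
	 * being added to a jiffies timestamp. HZ and the simplified
	 * msecs_to_jiffies() below are assumptions, not kernel code.
	 */
	#include <stdio.h>

	#define HZ 250				/* assumed CONFIG_HZ */

	/* simplified stand-in for the kernel helper */
	static unsigned long msecs_to_jiffies(unsigned int ms)
	{
		return (ms * HZ + 999) / 1000;	/* round up to the next tick */
	}

	int main(void)
	{
		unsigned long jiffies = 100000;		/* pretend current time */
		unsigned int balance_interval = 64;	/* milliseconds */

		/* buggy: milliseconds added directly to a jiffies value */
		unsigned long wrong = jiffies + balance_interval;

		/* fixed: convert to jiffies first, as the patch does */
		unsigned long right = jiffies + msecs_to_jiffies(balance_interval);

		printf("wrong next_balance: +%lu ticks (~%lu ms)\n",
		       wrong - jiffies, (wrong - jiffies) * 1000 / HZ);
		printf("right next_balance: +%lu ticks (~%lu ms)\n",
		       right - jiffies, (right - jiffies) * 1000 / HZ);
		return 0;
	}

With HZ=1000 the two computations coincide, which is why the missing conversion can go unnoticed on some configurations; at HZ=250 the unconverted value defers the next balance roughly four times longer than intended.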
diff --git a/kernel/sched.c b/kernel/sched.c
index a7475913b009..50e1a3122699 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2938,17 +2938,21 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	unsigned long next_balance = jiffies + 60 * HZ;
 
 	for_each_domain(this_cpu, sd) {
-		if (sd->flags & SD_BALANCE_NEWIDLE) {
+		unsigned long interval;
+
+		if (!(sd->flags & SD_LOAD_BALANCE))
+			continue;
+
+		if (sd->flags & SD_BALANCE_NEWIDLE)
 			/* If we've pulled tasks over stop searching: */
 			pulled_task = load_balance_newidle(this_cpu,
-							this_rq, sd);
-			if (time_after(next_balance,
-				  sd->last_balance + sd->balance_interval))
-				next_balance = sd->last_balance
-					+ sd->balance_interval;
-			if (pulled_task)
-				break;
-		}
+								this_rq, sd);
+
+		interval = msecs_to_jiffies(sd->balance_interval);
+		if (time_after(next_balance, sd->last_balance + interval))
+			next_balance = sd->last_balance + interval;
+		if (pulled_task)
+			break;
 	}
 	if (!pulled_task)
 		/*