author		Siddha, Suresh B <suresh.b.siddha@intel.com>	2005-09-10 03:26:21 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-09-10 13:06:24 -0400
commit		fa3b6ddc3f4a8eadba52234134cdb59c28b5332d (patch)
tree		9aa1b8211adb63bb2983be0d4ab2afdfab88e1a3
parent		5927ad78ec75870b1bdfa65a10ad1300cd664d36 (diff)
[PATCH] sched: don't kick ALB in the presence of pinned task
Jack Steiner brought up this issue at my OLS talk.

Take a scenario where two tasks are pinned to the two HT threads in a
physical package.  Idle packages in the system will keep kicking
migration_thread on the busy package without any success.

We will run into similar scenarios in the presence of CMP/NUMA.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
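For illustration only (not part of the patch), a minimal userspace sketch of how a
task ends up pinned to a single CPU, which is the scenario the changelog describes.
The choice of CPU 0 and the busy loop are assumptions made for the example.

	/* Illustrative only -- not from this patch.  Pins the calling task to
	 * CPU 0 (an arbitrary choice) and keeps it busy, so other idle CPUs
	 * see that CPU as busiest but can never pull the task away from it. */
	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		cpu_set_t mask;

		CPU_ZERO(&mask);
		CPU_SET(0, &mask);		/* run only on CPU 0 */

		if (sched_setaffinity(0, sizeof(mask), &mask) == -1) {
			perror("sched_setaffinity");
			exit(EXIT_FAILURE);
		}

		for (;;)
			;			/* keep CPU 0's runqueue busy */
	}

Running two such tasks, one pinned to each HT sibling of the same package,
reproduces the situation above: the other packages stay idle and, before this
patch, keep kicking migration_thread on the busy package to no effect.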
-rw-r--r--	kernel/sched.c	15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 103f705b245c..1dc29dec38a9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2125,6 +2125,16 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 	if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
 		spin_lock(&busiest->lock);
+
+		/* don't kick the migration_thread, if the curr
+		 * task on busiest cpu can't be moved to this_cpu
+		 */
+		if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
+			spin_unlock(&busiest->lock);
+			all_pinned = 1;
+			goto out_one_pinned;
+		}
+
 		if (!busiest->active_balance) {
 			busiest->active_balance = 1;
 			busiest->push_cpu = this_cpu;
@@ -2165,6 +2175,8 @@ out_balanced:
 	schedstat_inc(sd, lb_balanced[idle]);
 
 	sd->nr_balance_failed = 0;
+
+out_one_pinned:
 	/* tune up the balancing interval */
 	if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
 			(sd->balance_interval < sd->max_interval))
@@ -2357,7 +2369,8 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
 
 		if (j - sd->last_balance >= interval) {
 			if (load_balance(this_cpu, this_rq, sd, idle)) {
-				/* We've pulled tasks over so either we're no
+				/*
+				 * We've pulled tasks over so either we're no
 				 * longer idle, or one of our SMT siblings is
 				 * not idle.
 				 */