path: root/kernel/sched.c
author     Nick Piggin <nickpiggin@yahoo.com.au>    2005-09-10 03:26:18 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-09-10 13:06:23 -0400
commit     e17224bf1d01b461ec02a60f5a9b7657a89bdd23 (patch)
tree       30dbb20798fde88a09680e9d82bd32ad8c343692 /kernel/sched.c
parent     d6d5cfaf4551aa7713ca6ab73bb77e832602204b (diff)
[PATCH] sched: less locking
During periodic load balancing, don't hold this runqueue's lock while scanning remote runqueues, which can take a non-trivial amount of time, especially on very large systems.

Holding the runqueue lock will only help to stabilise ->nr_running; however, that doesn't help much, because tasks being woken will simply get held up on the runqueue lock, so ->nr_running would not provide a really accurate picture of runqueue load in that case anyway. What's more, ->nr_running (and possibly the cpu_load averages) of remote runqueues won't be stable anyway, so load balancing is always an inexact operation.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
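Editorial note: the diff below swaps double_lock_balance(), which (as the removed spin_lock above the scan shows) relied on this_rq->lock already being held while busiest->lock was acquired, for double_rq_lock(), which takes both runqueue locks from scratch in a fixed order, so this_rq->lock no longer has to be held across find_busiest_group()'s scan of remote runqueues. What follows is a simplified userspace sketch of that fixed-order double-locking idea, not the kernel's implementation: the runqueue struct and the pthread mutexes are stand-ins for runqueue_t and its spinlock.

/*
 * Simplified userspace analogue of fixed-order double locking in the
 * style of double_rq_lock()/double_rq_unlock().  Both locks are always
 * taken in address order, so two threads balancing against each other's
 * "runqueues" cannot deadlock.
 */
#include <pthread.h>
#include <stdint.h>

struct runqueue {                       /* stand-in for runqueue_t */
	pthread_mutex_t lock;           /* stand-in for the rq spinlock */
	unsigned long nr_running;
};

static void double_rq_lock(struct runqueue *rq1, struct runqueue *rq2)
{
	if (rq1 == rq2) {
		pthread_mutex_lock(&rq1->lock);         /* same queue: one lock */
	} else if ((uintptr_t)rq1 < (uintptr_t)rq2) {   /* lower address first */
		pthread_mutex_lock(&rq1->lock);
		pthread_mutex_lock(&rq2->lock);
	} else {
		pthread_mutex_lock(&rq2->lock);
		pthread_mutex_lock(&rq1->lock);
	}
}

static void double_rq_unlock(struct runqueue *rq1, struct runqueue *rq2)
{
	pthread_mutex_unlock(&rq1->lock);
	if (rq1 != rq2)
		pthread_mutex_unlock(&rq2->lock);
}

int main(void)
{
	struct runqueue a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct runqueue b = { PTHREAD_MUTEX_INITIALIZER, 1 };

	double_rq_lock(&a, &b);         /* in the kernel this brackets move_tasks() */
	a.nr_running++;                 /* pretend one task was pulled over */
	b.nr_running--;
	double_rq_unlock(&a, &b);
	return 0;
}

Because every path acquires the two locks in the same global order, such a helper is safe to call without any runqueue lock already held, which is what lets the scan of remote runqueues run unlocked.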
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--    kernel/sched.c    9
1 file changed, 2 insertions, 7 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 930189540f3b..8535e5c68f5b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2075,7 +2075,6 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 	int nr_moved, all_pinned = 0;
 	int active_balance = 0;
 
-	spin_lock(&this_rq->lock);
 	schedstat_inc(sd, lb_cnt[idle]);
 
 	group = find_busiest_group(sd, this_cpu, &imbalance, idle);
@@ -2102,18 +2101,16 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 		 * still unbalanced. nr_moved simply stays zero, so it is
 		 * correctly treated as an imbalance.
 		 */
-		double_lock_balance(this_rq, busiest);
+		double_rq_lock(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, idle, &all_pinned);
-		spin_unlock(&busiest->lock);
+		double_rq_unlock(this_rq, busiest);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
 		if (unlikely(all_pinned))
 			goto out_balanced;
 	}
 
-	spin_unlock(&this_rq->lock);
-
 	if (!nr_moved) {
 		schedstat_inc(sd, lb_failed[idle]);
 		sd->nr_balance_failed++;
@@ -2156,8 +2153,6 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 	return nr_moved;
 
 out_balanced:
-	spin_unlock(&this_rq->lock);
-
 	schedstat_inc(sd, lb_balanced[idle]);
 
 	sd->nr_balance_failed = 0;