author	Nick Piggin <nickpiggin@yahoo.com.au>	2005-09-10 03:26:16 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-09-10 13:06:23 -0400
commit	d6d5cfaf4551aa7713ca6ab73bb77e832602204b (patch)
tree	2741ff061bca597f26df340feec0a6ee58820e64 /kernel
parent	67f9a619e7460b7d07284a9d0745727a77d3ade6 (diff)
[PATCH] sched: less newidle locking
As with the earlier change to load_balance, only lock the runqueue in
load_balance_newidle if the busiest queue found has nr_running > 1. This
reduces the frequency of expensive remote runqueue lock acquisitions in the
schedule() path on some workloads.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
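Before the diff itself, a minimal standalone sketch of the pattern this patch applies may help: peek at the remote queue's nr_running without holding its lock, and only pay for the lock acquisition when the peek suggests there is something worth pulling. The names below (runqueue_like, pull_from) are invented for illustration and use pthreads rather than the kernel's runqueue locks; the re-check under the lock is what keeps the lock-free peek safe, just as move_tasks() re-evaluates what can actually be moved once busiest->lock is held.

/*
 * Illustration only -- not kernel code.  A cheap, lock-free peek at
 * nr_running decides whether taking the remote lock is worth it; the
 * value is re-checked under the lock before anything is moved.
 */
#include <pthread.h>
#include <stdio.h>

struct runqueue_like {
	pthread_mutex_t lock;
	int nr_running;
};

/* Try to pull one "task" from busiest; returns the number moved. */
static int pull_from(struct runqueue_like *busiest)
{
	int nr_moved = 0;

	/* Racy peek: skip the expensive lock if there is nothing to steal. */
	if (busiest->nr_running > 1) {
		pthread_mutex_lock(&busiest->lock);
		/* Re-check under the lock; the peek above may have been stale. */
		if (busiest->nr_running > 1) {
			busiest->nr_running--;
			nr_moved = 1;
		}
		pthread_mutex_unlock(&busiest->lock);
	}
	return nr_moved;
}

int main(void)
{
	struct runqueue_like rq = { PTHREAD_MUTEX_INITIALIZER, 3 };
	int moved = pull_from(&rq);

	printf("moved %d task(s), %d now on the queue\n", moved, rq.nr_running);
	return 0;
}

Build with "cc -pthread example.c" (hypothetical file name). The worst a stale peek can do here is skip one pull or take the lock needlessly, which is why the unlocked check is acceptable.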
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c61ee3451a04..930189540f3b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2104,8 +2104,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 		 */
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
-					imbalance, sd, idle,
-					&all_pinned);
+					imbalance, sd, idle, &all_pinned);
 		spin_unlock(&busiest->lock);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
@@ -2200,18 +2199,22 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 
 	BUG_ON(busiest == this_rq);
 
-	/* Attempt to move tasks */
-	double_lock_balance(this_rq, busiest);
-
 	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
-	nr_moved = move_tasks(this_rq, this_cpu, busiest,
+
+	nr_moved = 0;
+	if (busiest->nr_running > 1) {
+		/* Attempt to move tasks */
+		double_lock_balance(this_rq, busiest);
+		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, NEWLY_IDLE, NULL);
+		spin_unlock(&busiest->lock);
+	}
+
 	if (!nr_moved)
 		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
 	else
 		sd->nr_balance_failed = 0;
 
-	spin_unlock(&busiest->lock);
 	return nr_moved;
 
 out_balanced: