author:    Nick Piggin <nickpiggin@yahoo.com.au>    2005-06-25 17:57:08 -0400
committer: Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-25 19:24:40 -0400
commit:    16cfb1c04c3cbe3759f339d3333e7e1e7d59712a
tree:      23e8778f4eef5df4dc256dcabd1b564a8221f651 /kernel/sched.c
parent:    8102679447da7fcbcb5226ee0207c3a034bc6d5f
[PATCH] sched: reduce active load balancing
Fix up active load balancing a bit so it doesn't get called when it shouldn't. Reset the nr_balance_failed counter at more points where we have found conditions to be balanced. This reduces too aggressive active balancing seen on some workloads.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  16
1 file changed, 10 insertions(+), 6 deletions(-)
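For context, here is a minimal userspace sketch of the mechanism this patch tunes. It is not kernel code: the simplified struct sched_domain below carries only the two fields relevant here, the cache_nice_tries + 2 threshold mirrors the heuristic load_balance() of this era uses before resorting to active balancing, and the helper names (needs_active_balance, balance_failed, found_balanced) are illustrative, not kernel API.

/*
 * Sketch of how nr_balance_failed gates active load balancing.
 * Assumption: active balancing (waking the migration thread to push a
 * task off the busiest runqueue) only kicks in after several
 * consecutive failed balance attempts.
 */
#include <stdio.h>

struct sched_domain {
	unsigned int nr_balance_failed;	/* consecutive failed balance attempts */
	unsigned int cache_nice_tries;	/* failures tolerated before going active */
};

/* Would active balancing be triggered on the next failed attempt? */
static int needs_active_balance(const struct sched_domain *sd)
{
	return sd->nr_balance_failed > sd->cache_nice_tries + 2;
}

/* A balance attempt that could not move anything counts as a failure... */
static void balance_failed(struct sched_domain *sd)
{
	sd->nr_balance_failed++;
}

/*
 * ...but the point of the patch: when the domain turns out to be
 * balanced (no busiest group/queue found, or the newly-idle pull
 * succeeded), that is not a failure, so the counter is reset instead
 * of being left to creep toward the active-balance threshold.
 */
static void found_balanced(struct sched_domain *sd)
{
	sd->nr_balance_failed = 0;
}

int main(void)
{
	struct sched_domain sd = { .nr_balance_failed = 0, .cache_nice_tries = 1 };

	/* Two genuine failures: still below the threshold. */
	balance_failed(&sd);
	balance_failed(&sd);
	printf("after 2 failures: active=%d\n", needs_active_balance(&sd));

	/* A balanced pass resets the counter, avoiding a spurious push. */
	found_balanced(&sd);
	balance_failed(&sd);
	printf("after reset + 1 failure: active=%d\n", needs_active_balance(&sd));
	return 0;
}

In this model the patch corresponds to the found_balanced() reset: without it, passes that merely found nothing worth pulling would keep inflating nr_balance_failed and eventually trip the active-balance push even though the domain was, in fact, balanced.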
diff --git a/kernel/sched.c b/kernel/sched.c
index 2794c79b9197..03d737791c1a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2021,6 +2021,7 @@ out_balanced:
 
 	schedstat_inc(sd, lb_balanced[idle]);
 
+	sd->nr_balance_failed = 0;
 	/* tune up the balancing interval */
 	if (sd->balance_interval < sd->max_interval)
 		sd->balance_interval *= 2;
@@ -2046,16 +2047,14 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 	schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
 	group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE);
 	if (!group) {
-		schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
 		schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
-		goto out;
+		goto out_balanced;
 	}
 
 	busiest = find_busiest_queue(group);
 	if (!busiest || busiest == this_rq) {
-		schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
 		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
-		goto out;
+		goto out_balanced;
 	}
 
 	/* Attempt to move tasks */
@@ -2066,11 +2065,16 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 				imbalance, sd, NEWLY_IDLE, NULL);
 	if (!nr_moved)
 		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
+	else
+		sd->nr_balance_failed = 0;
 
 	spin_unlock(&busiest->lock);
-
-out:
 	return nr_moved;
+
+out_balanced:
+	schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
+	sd->nr_balance_failed = 0;
+	return 0;
 }
 
 /*