path: root/kernel/sched.c
author	Nick Piggin <nickpiggin@yahoo.com.au>	2005-06-25 17:57:15 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-25 19:24:41 -0400
commit	a3f21bce1fefdf92a4d1705e888d390b10f3ac6f (patch)
tree	1b77f5f5a8516737e3f1f62290c08fe093cff661 /kernel/sched.c
parent	7897986bad8f6cd50d6149345aca7f6480f49464 (diff)
[PATCH] sched: tweak affine wakeups
Do less affine wakeups. We're trying to reduce dbt2-pgsql idle time regressions here... make sure we don't move tasks the wrong way in an imbalance condition.

Also, remove the cache coldness requirement from the calculation - it seems to induce sharp cutoff points where behaviour will suddenly change on some workloads if the load creeps slightly over or under some point. It is good for periodic balancing because in that case we otherwise have no other context to determine what task to move.

But also make a minor tweak to "wake balancing" - the imbalance tolerance is now set at half the domain's imbalance, so we get the opportunity to do wake balancing before the more random periodic rebalancing gets performed.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
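To put rough numbers on the "half the domain's imbalance" tolerance (an editorial illustration, not part of the patch): with SCHED_LOAD_SCALE at 128, as in kernels of this era, and a hypothetical domain imbalance_pct of 125, the patched code computes imbalance = 100 + (125 - 100)/2 = 112, so passive wake balancing can pull the task to the waking CPU once the task's previous CPU is about 12% busier than it, roughly half the 25% the domain would otherwise tolerate. The standalone sketch below just evaluates the two tests from the patched hunk with made-up load figures; the constants are assumptions, and plain load stands in for target_load(cpu, idx) in the first clause of the affine test.

#include <stdio.h>

/*
 * Editorial sketch, not kernel code: evaluates the two wakeup tests from
 * the patched hunk with assumed constants and made-up load figures.
 * SCHED_LOAD_SCALE is 128 in kernels of this era; 125 is a typical
 * sched-domain imbalance_pct; `load` stands in for target_load(cpu, idx).
 */
#define SCHED_LOAD_SCALE 128UL

int main(void)
{
	unsigned int imbalance_pct = 125;
	/* Half-way between 100 and imbalance_pct, as the patch introduces. */
	unsigned int imbalance = 100 + (imbalance_pct - 100) / 2;

	unsigned long load = 256;	/* load of the CPU the task last ran on */
	unsigned long this_load = 200;	/* load of the CPU doing the wakeup */
	unsigned long tl = this_load;	/* a sync wakeup would subtract SCHED_LOAD_SCALE */

	/* Affine wakeup: either this CPU is no busier and both loads fit in
	 * one SCHED_LOAD_SCALE, or even after adding a full task here the
	 * previous CPU would still be ahead by the imbalance tolerance. */
	int wake_affine = (tl <= load && tl + load <= SCHED_LOAD_SCALE) ||
			  100 * (tl + SCHED_LOAD_SCALE) <= imbalance * load;

	/* Passive wake balancing: fires at half the imbalance_pct limit. */
	int wake_balance = imbalance * this_load <= 100 * load;

	printf("imbalance tolerance = %u%%\n", imbalance);
	printf("wake_affine = %d, wake_balance = %d\n", wake_affine, wake_balance);
	return 0;
}

With these particular numbers it prints an imbalance tolerance of 112% and shows the passive-balance test passing while the affine test does not.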
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	57
1 files changed, 32 insertions, 25 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index b597b07e7911..5ae3568eed0b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1016,38 +1016,45 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 		int idx = this_sd->wake_idx;
 		unsigned int imbalance;
 
+		imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
+
 		load = source_load(cpu, idx);
 		this_load = target_load(this_cpu, idx);
 
-		/*
-		 * If sync wakeup then subtract the (maximum possible) effect of
-		 * the currently running task from the load of the current CPU:
-		 */
-		if (sync)
-			this_load -= SCHED_LOAD_SCALE;
-
-		/* Don't pull the task off an idle CPU to a busy one */
-		if (load < SCHED_LOAD_SCALE/2 && this_load > SCHED_LOAD_SCALE/2)
-			goto out_set_cpu;
-
 		new_cpu = this_cpu; /* Wake to this CPU if we can */
 
-		if ((this_sd->flags & SD_WAKE_AFFINE) &&
-			!task_hot(p, rq->timestamp_last_tick, this_sd)) {
-			/*
-			 * This domain has SD_WAKE_AFFINE and p is cache cold
-			 * in this domain.
-			 */
-			schedstat_inc(this_sd, ttwu_move_affine);
-			goto out_set_cpu;
-		} else if ((this_sd->flags & SD_WAKE_BALANCE) &&
-				imbalance*this_load <= 100*load) {
+		if (this_sd->flags & SD_WAKE_AFFINE) {
+			unsigned long tl = this_load;
 			/*
-			 * This domain has SD_WAKE_BALANCE and there is
-			 * an imbalance.
+			 * If sync wakeup then subtract the (maximum possible)
+			 * effect of the currently running task from the load
+			 * of the current CPU:
 			 */
-			schedstat_inc(this_sd, ttwu_move_balance);
-			goto out_set_cpu;
+			if (sync)
+				tl -= SCHED_LOAD_SCALE;
+
+			if ((tl <= load &&
+				tl + target_load(cpu, idx) <= SCHED_LOAD_SCALE) ||
+				100*(tl + SCHED_LOAD_SCALE) <= imbalance*load) {
+				/*
+				 * This domain has SD_WAKE_AFFINE and
+				 * p is cache cold in this domain, and
+				 * there is no bad imbalance.
+				 */
+				schedstat_inc(this_sd, ttwu_move_affine);
+				goto out_set_cpu;
+			}
+		}
+
+		/*
+		 * Start passive balancing when half the imbalance_pct
+		 * limit is reached.
+		 */
+		if (this_sd->flags & SD_WAKE_BALANCE) {
+			if (imbalance*this_load <= 100*load) {
+				schedstat_inc(this_sd, ttwu_move_balance);
+				goto out_set_cpu;
+			}
 		}
 	}
 
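Read as ordinary control flow, the patched hunk amounts to a three-way choice of wakeup CPU. The sketch below is a minimal userspace restatement of that flow, not the kernel code: choose_wake_cpu, the flag values, and the target parameter (standing in for target_load(cpu, idx)) are made-up names, SCHED_LOAD_SCALE of 128 is assumed, and the final fallback to the task's previous CPU reflects what the surrounding function does when neither test fires, which lies outside this hunk.

#include <stdbool.h>

#define SCHED_LOAD_SCALE 128UL
#define SD_WAKE_AFFINE   0x01	/* illustrative flag values, not the kernel's */
#define SD_WAKE_BALANCE  0x02

/*
 * Editorial sketch of the patched decision flow in try_to_wake_up():
 * returns the CPU the task should be woken on.
 */
static int choose_wake_cpu(int cpu, int this_cpu, unsigned int sd_flags,
			   unsigned long load, unsigned long this_load,
			   unsigned long target, unsigned int imbalance_pct,
			   bool sync)
{
	/* Half-way between 100 and imbalance_pct. */
	unsigned int imbalance = 100 + (imbalance_pct - 100) / 2;

	if (sd_flags & SD_WAKE_AFFINE) {
		unsigned long tl = this_load;

		/* A sync waker is about to sleep, so discount its weight. */
		if (sync)
			tl -= SCHED_LOAD_SCALE;

		/* Pull the task here if this CPU is no busier and both loads
		 * are light, or if even with one more full task here the
		 * previous CPU stays ahead by the imbalance tolerance. */
		if ((tl <= load && tl + target <= SCHED_LOAD_SCALE) ||
		    100 * (tl + SCHED_LOAD_SCALE) <= imbalance * load)
			return this_cpu;	/* ttwu_move_affine path */
	}

	/* Passive balancing once half the imbalance_pct limit is reached. */
	if ((sd_flags & SD_WAKE_BALANCE) && imbalance * this_load <= 100 * load)
		return this_cpu;		/* ttwu_move_balance path */

	return cpu;	/* otherwise wake on the CPU the task last ran on */
}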