-rw-r--r--   kernel/sched.c   57
1 files changed, 32 insertions, 25 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index b597b07e7911..5ae3568eed0b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1016,38 +1016,45 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 		int idx = this_sd->wake_idx;
 		unsigned int imbalance;
 
+		imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
+
 		load = source_load(cpu, idx);
 		this_load = target_load(this_cpu, idx);
 
-		/*
-		 * If sync wakeup then subtract the (maximum possible) effect of
-		 * the currently running task from the load of the current CPU:
-		 */
-		if (sync)
-			this_load -= SCHED_LOAD_SCALE;
-
-		/* Don't pull the task off an idle CPU to a busy one */
-		if (load < SCHED_LOAD_SCALE/2 && this_load > SCHED_LOAD_SCALE/2)
-			goto out_set_cpu;
-
 		new_cpu = this_cpu; /* Wake to this CPU if we can */
 
-		if ((this_sd->flags & SD_WAKE_AFFINE) &&
-			!task_hot(p, rq->timestamp_last_tick, this_sd)) {
-			/*
-			 * This domain has SD_WAKE_AFFINE and p is cache cold
-			 * in this domain.
-			 */
-			schedstat_inc(this_sd, ttwu_move_affine);
-			goto out_set_cpu;
-		} else if ((this_sd->flags & SD_WAKE_BALANCE) &&
-				imbalance*this_load <= 100*load) {
+		if (this_sd->flags & SD_WAKE_AFFINE) {
+			unsigned long tl = this_load;
 			/*
-			 * This domain has SD_WAKE_BALANCE and there is
-			 * an imbalance.
+			 * If sync wakeup then subtract the (maximum possible)
+			 * effect of the currently running task from the load
+			 * of the current CPU:
 			 */
-			schedstat_inc(this_sd, ttwu_move_balance);
-			goto out_set_cpu;
+			if (sync)
+				tl -= SCHED_LOAD_SCALE;
+
+			if ((tl <= load &&
+				tl + target_load(cpu, idx) <= SCHED_LOAD_SCALE) ||
+				100*(tl + SCHED_LOAD_SCALE) <= imbalance*load) {
+				/*
+				 * This domain has SD_WAKE_AFFINE and
+				 * p is cache cold in this domain, and
+				 * there is no bad imbalance.
+				 */
+				schedstat_inc(this_sd, ttwu_move_affine);
+				goto out_set_cpu;
+			}
+		}
+
+		/*
+		 * Start passive balancing when half the imbalance_pct
+		 * limit is reached.
+		 */
+		if (this_sd->flags & SD_WAKE_BALANCE) {
+			if (imbalance*this_load <= 100*load) {
+				schedstat_inc(this_sd, ttwu_move_balance);
+				goto out_set_cpu;
+			}
 		}
 	}
 
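The new condition can be read as follows: imbalance is half of the domain's imbalance_pct margin (100 + (imbalance_pct - 100)/2, so 112 when imbalance_pct is 125), the sync discount is applied to a local copy tl of this CPU's load rather than to this_load itself, and the task is pulled when either both CPUs together fit within one SCHED_LOAD_SCALE unit or the resulting imbalance stays below that halved limit; SD_WAKE_BALANCE passive balancing is then checked separately against the undiscounted this_load. The sketch below is a standalone re-statement of that arithmetic, not kernel code: wake_affine_ok() is a made-up name, and SCHED_LOAD_SCALE = 128 and imbalance_pct = 125 are assumed values typical of kernels from this period.

/*
 * Standalone illustration of the decision added by this patch.
 * NOT kernel code: wake_affine_ok() is a hypothetical helper, and the
 * SCHED_LOAD_SCALE / imbalance_pct values below are assumptions.
 */
#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL

/*
 * load:      source_load() of the task's previous CPU
 * this_load: target_load() of the waking CPU
 * target:    target_load() of the task's previous CPU
 * Returns nonzero when the affine wakeup would be allowed.
 */
static int wake_affine_ok(unsigned long load, unsigned long this_load,
			  unsigned long target, unsigned int imbalance_pct,
			  int sync)
{
	/* Same formula as the patch: half of the imbalance_pct margin. */
	unsigned int imbalance = 100 + (imbalance_pct - 100) / 2;
	unsigned long tl = this_load;

	/* Discount the (maximum possible) load of the waker on a sync wakeup. */
	if (sync)
		tl -= SCHED_LOAD_SCALE;

	/*
	 * Pull the task here if this CPU is no busier than its old CPU and
	 * both together fit in one load unit, or if the resulting imbalance
	 * stays within the halved limit.
	 */
	return (tl <= load && tl + target <= SCHED_LOAD_SCALE) ||
	       100 * (tl + SCHED_LOAD_SCALE) <= imbalance * load;
}

int main(void)
{
	/* Busy old CPU, this CPU only running the (sync) waker: pull -> 1 */
	printf("%d\n", wake_affine_ok(2 * SCHED_LOAD_SCALE, SCHED_LOAD_SCALE,
				      0, 125, 1));
	/* This CPU far busier than the task's old CPU: leave it -> 0 */
	printf("%d\n", wake_affine_ok(SCHED_LOAD_SCALE / 2, 4 * SCHED_LOAD_SCALE,
				      2 * SCHED_LOAD_SCALE, 125, 0));
	return 0;
}

In the first call the sync discount removes the waker's share, so the wakee lands on the waking CPU; in the second the waking CPU is already far busier than the task's old CPU and neither clause holds, so new_cpu is left pointing at the old CPU, mirroring what the patched try_to_wake_up() would do.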