author		Ingo Molnar <mingo@elte.hu>	2008-03-16 16:21:47 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-03-18 23:27:53 -0400
commit		f48273860edfca2306236d0f0de609aab3f773d4
tree		2ee68d85fa99e54488d25780234e3e3fb2c0b982 /kernel
parent		ac192d3921a14e2c9080799e16959b4bd56f49d6
sched: clean up wakeup balancing, code flow
Clean up the code flow. No code changed:
kernel/sched.o:
text data bss dec hex filename
42521 2858 232 45611 b22b sched.o.before
42521 2858 232 45611 b22b sched.o.after
md5:
09b31c44e9aff8666f72773dc433e2df sched.o.before.asm
09b31c44e9aff8666f72773dc433e2df sched.o.after.asm
Signed-off-by: Ingo Molnar <mingo@elte.hu>
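For context: the size table above matches the output format of the binutils size(1) tool, and the md5 comparison is presumably taken over disassembly listings of the object file. A plausible reconstruction of the invariance check, where the exact objdump invocation and file handling are assumptions rather than part of the commit:

	size sched.o.before sched.o.after
	objdump -d sched.o.before > sched.o.before.asm
	objdump -d sched.o.after  > sched.o.after.asm
	md5sum sched.o.before.asm sched.o.after.asm

Matching md5 sums over the two disassemblies confirm that the refactoring left the generated code identical.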
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c	25
1 file changed, 10 insertions(+), 15 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2d2be02b8e3b..b5a357396b49 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1040,7 +1040,7 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	new_cpu = prev_cpu;
 
 	if (prev_cpu == this_cpu)
-		goto out_set_cpu;
+		goto out;
 
 	/*
 	 * 'this_sd' is the first domain that both
@@ -1054,13 +1054,13 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	}
 
 	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
-		goto out_set_cpu;
+		goto out;
 
 	/*
 	 * Check for affine wakeup and passive balancing possibilities.
 	 */
 	if (!this_sd)
-		goto out_keep_cpu;
+		goto out;
 
 	idx = this_sd->wake_idx;
 
@@ -1069,11 +1069,11 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	load = source_load(prev_cpu, idx);
 	this_load = target_load(this_cpu, idx);
 
-	new_cpu = this_cpu; /* Wake to this CPU if we can */
-
 	if (wake_affine(rq, this_sd, p, prev_cpu, this_cpu, sync, idx,
-			load, this_load, imbalance))
-		goto out_set_cpu;
+			load, this_load, imbalance)) {
+		new_cpu = this_cpu;
+		goto out;
+	}
 
 	/*
 	 * Start passive balancing when half the imbalance_pct
@@ -1083,17 +1083,12 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 		if (imbalance*this_load <= 100*load) {
 			schedstat_inc(this_sd, ttwu_move_balance);
 			schedstat_inc(p, se.nr_wakeups_passive);
-			goto out_set_cpu;
+			new_cpu = this_cpu;
+			goto out;
 		}
 	}
 
-out_keep_cpu:
-	/*
-	 * Could not wake to this_cpu.
-	 * Wake to the previous cpu instead:
-	 */
-	new_cpu = prev_cpu;
-out_set_cpu:
+out:
 	return wake_idle(new_cpu, p);
 }
 #endif /* CONFIG_SMP */
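The shape of the result: the old three-way exit (out_keep_cpu, its fall-through new_cpu = prev_cpu assignment, and out_set_cpu) collapses into a single out: label. new_cpu defaults to prev_cpu at the top of the function, and each path that decides to pull the task overwrites it with this_cpu just before jumping. A minimal self-contained sketch of that idiom, using a hypothetical pick_cpu() stand-in rather than the kernel code itself:

	#include <stdio.h>

	/*
	 * Stand-in for the wakeup path: new_cpu starts at the fallback
	 * (prev_cpu) and is only overwritten on the paths that decide to
	 * wake on this_cpu, so every early exit can share one label.
	 */
	static int pick_cpu(int prev_cpu, int this_cpu, int affine, int passive)
	{
		int new_cpu = prev_cpu;	/* default: wake on the previous CPU */

		if (prev_cpu == this_cpu)
			goto out;

		if (affine) {		/* stand-in for the wake_affine() test */
			new_cpu = this_cpu;
			goto out;
		}

		if (passive) {		/* stand-in for the imbalance test */
			new_cpu = this_cpu;
			goto out;
		}
	out:
		return new_cpu;
	}

	int main(void)
	{
		printf("%d\n", pick_cpu(2, 0, 1, 0));	/* affine wakeup -> 0 */
		printf("%d\n", pick_cpu(2, 0, 0, 0));	/* no pull -> keeps 2 */
		return 0;
	}

With a single exit label there is no label whose semantics are "keep the old value", which is what made the original flow easy to misread.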