author     Ingo Molnar <mingo@elte.hu>  2008-03-16 15:36:10 -0400
committer  Ingo Molnar <mingo@elte.hu>  2008-03-18 23:27:52 -0400
commit     098fb9db2c74cfd6ffdbf61eb026a0c21abc5f75
tree       1b7d4d35ce9d375f7078937518f92b60f4fda000 /kernel/sched_fair.c
parent     f920bb6f5fe21047e669381fe4dd346f6a9d3562
sched: clean up wakeup balancing, move wake_affine()
split out the affine-wakeup bits.

No code changed:

kernel/sched.o:

   text    data     bss     dec     hex filename
  42521    2858     232   45611    b22b sched.o.before
  42521    2858     232   45611    b22b sched.o.after

md5:
  9d76738f1272aa82f0b7affd2f51df6b  sched.o.before.asm
  09b31c44e9aff8666f72773dc433e2df  sched.o.after.asm

(the md5's changed because stack slots changed and some registers get
scheduled by gcc in a different order - but otherwise the before and
after assembly is instruction for instruction equivalent.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
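To make the diff below easier to follow, here is a simplified, self-contained C sketch of the affine-wakeup heuristic this patch factors out into wake_affine(). The wake_ctx struct, its field names and the sample numbers are invented for illustration only; the real kernel code operates on struct rq, struct sched_domain and schedstat state, and uses source_load()/target_load() rather than a single pre-computed load value.

/*
 * Simplified sketch of the affine-wakeup decision moved into wake_affine().
 * All names and numbers here are stand-ins for illustration, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct wake_ctx {
        unsigned long load;          /* load on the task's previous CPU */
        unsigned long this_load;     /* load on the CPU doing the wakeup */
        unsigned long tl_per_task;   /* avg load per task on the waking CPU */
        unsigned long task_weight;   /* weight of the task being woken (p) */
        unsigned long waker_weight;  /* weight of the currently running waker */
        unsigned int imbalance;      /* 100 + (imbalance_pct - 100) / 2 */
        bool sync;                   /* sync wakeup: waker is about to sleep */
        bool task_cache_hot;         /* p is still cache-hot on its old CPU */
        bool sd_wake_affine;         /* domain has SD_WAKE_AFFINE set */
};

/* Return true when the task should be pulled to the waking CPU. */
static bool wake_affine_sketch(const struct wake_ctx *c)
{
        unsigned long tl = c->this_load;

        if (!c->sd_wake_affine)
                return false;

        /* Attract cache-cold tasks on sync wakeups. */
        if (c->sync && !c->task_cache_hot)
                return true;

        /*
         * On a sync wakeup, discount the (maximum possible) effect of the
         * currently running waker from the waking CPU's load, since it is
         * about to go to sleep.
         */
        if (c->sync)
                tl -= c->waker_weight;

        /*
         * Pull the task if the waking CPU is no busier than the previous one
         * and has room for another average task, or if the move stays within
         * the domain's imbalance limit.
         */
        return (tl <= c->load && tl + c->load <= c->tl_per_task) ||
               100 * (tl + c->task_weight) <= (unsigned long)c->imbalance * c->load;
}

int main(void)
{
        struct wake_ctx c = {
                .load = 2048, .this_load = 1024, .tl_per_task = 1024,
                .task_weight = 1024, .waker_weight = 1024,
                .imbalance = 112, .sync = true,
                .task_cache_hot = true, .sd_wake_affine = true,
        };

        printf("wake affine? %s\n", wake_affine_sketch(&c) ? "yes" : "no");
        return 0;
}

With these made-up numbers the sync discount empties the waking CPU's load, the imbalance test passes, and the sketch answers "yes" - the same shape of decision the extracted wake_affine() makes below.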
Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 134
 1 file changed, 75 insertions(+), 59 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f2cc59080efa..70679b266693 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -980,12 +980,59 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 #endif
 
 #ifdef CONFIG_SMP
+
+static int
+wake_affine(struct rq *rq, struct sched_domain *this_sd, struct task_struct *p,
+            int cpu, int this_cpu, int sync, int idx,
+            unsigned long load, unsigned long this_load,
+            unsigned int imbalance)
+{
+        unsigned long tl = this_load;
+        unsigned long tl_per_task;
+
+        if (!(this_sd->flags & SD_WAKE_AFFINE))
+                return 0;
+
+        /*
+         * Attract cache-cold tasks on sync wakeups:
+         */
+        if (sync && !task_hot(p, rq->clock, this_sd))
+                return 1;
+
+        schedstat_inc(p, se.nr_wakeups_affine_attempts);
+        tl_per_task = cpu_avg_load_per_task(this_cpu);
+
+        /*
+         * If sync wakeup then subtract the (maximum possible)
+         * effect of the currently running task from the load
+         * of the current CPU:
+         */
+        if (sync)
+                tl -= current->se.load.weight;
+
+        if ((tl <= load && tl + target_load(cpu, idx) <= tl_per_task) ||
+                        100*(tl + p->se.load.weight) <= imbalance*load) {
+                /*
+                 * This domain has SD_WAKE_AFFINE and
+                 * p is cache cold in this domain, and
+                 * there is no bad imbalance.
+                 */
+                schedstat_inc(this_sd, ttwu_move_affine);
+                schedstat_inc(p, se.nr_wakeups_affine);
+
+                return 1;
+        }
+        return 0;
+}
+
 static int select_task_rq_fair(struct task_struct *p, int sync)
 {
-        int cpu, this_cpu;
-        struct rq *rq;
         struct sched_domain *sd, *this_sd = NULL;
-        int new_cpu;
+        unsigned long load, this_load;
+        int cpu, this_cpu, new_cpu;
+        unsigned int imbalance;
+        struct rq *rq;
+        int idx;
 
         cpu = task_cpu(p);
         rq = task_rq(p);
@@ -1008,66 +1055,35 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
         /*
          * Check for affine wakeup and passive balancing possibilities.
          */
-        if (this_sd) {
-                int idx = this_sd->wake_idx;
-                unsigned int imbalance;
-                unsigned long load, this_load;
-
-                imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
-
-                load = source_load(cpu, idx);
-                this_load = target_load(this_cpu, idx);
-
-                new_cpu = this_cpu; /* Wake to this CPU if we can */
-
-                if (this_sd->flags & SD_WAKE_AFFINE) {
-                        unsigned long tl = this_load;
-                        unsigned long tl_per_task;
-
-                        /*
-                         * Attract cache-cold tasks on sync wakeups:
-                         */
-                        if (sync && !task_hot(p, rq->clock, this_sd))
-                                goto out_set_cpu;
-
-                        schedstat_inc(p, se.nr_wakeups_affine_attempts);
-                        tl_per_task = cpu_avg_load_per_task(this_cpu);
-
-                        /*
-                         * If sync wakeup then subtract the (maximum possible)
-                         * effect of the currently running task from the load
-                         * of the current CPU:
-                         */
-                        if (sync)
-                                tl -= current->se.load.weight;
-
-                        if ((tl <= load &&
-                                tl + target_load(cpu, idx) <= tl_per_task) ||
-                               100*(tl + p->se.load.weight) <= imbalance*load) {
-                                /*
-                                 * This domain has SD_WAKE_AFFINE and
-                                 * p is cache cold in this domain, and
-                                 * there is no bad imbalance.
-                                 */
-                                schedstat_inc(this_sd, ttwu_move_affine);
-                                schedstat_inc(p, se.nr_wakeups_affine);
-                                goto out_set_cpu;
-                        }
-                }
+        if (!this_sd)
+                goto out_keep_cpu;
 
-                /*
-                 * Start passive balancing when half the imbalance_pct
-                 * limit is reached.
-                 */
-                if (this_sd->flags & SD_WAKE_BALANCE) {
-                        if (imbalance*this_load <= 100*load) {
-                                schedstat_inc(this_sd, ttwu_move_balance);
-                                schedstat_inc(p, se.nr_wakeups_passive);
-                                goto out_set_cpu;
-                        }
+        idx = this_sd->wake_idx;
+
+        imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
+
+        load = source_load(cpu, idx);
+        this_load = target_load(this_cpu, idx);
+
+        new_cpu = this_cpu; /* Wake to this CPU if we can */
+
+        if (wake_affine(rq, this_sd, p, cpu, this_cpu, sync, idx,
+                                load, this_load, imbalance))
+                goto out_set_cpu;
+
+        /*
+         * Start passive balancing when half the imbalance_pct
+         * limit is reached.
+         */
+        if (this_sd->flags & SD_WAKE_BALANCE) {
+                if (imbalance*this_load <= 100*load) {
+                        schedstat_inc(this_sd, ttwu_move_balance);
+                        schedstat_inc(p, se.nr_wakeups_passive);
+                        goto out_set_cpu;
                 }
         }
 
+out_keep_cpu:
         new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
 out_set_cpu:
         return wake_idle(new_cpu, p);
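As a quick sanity check of the passive-balancing branch kept above ("Start passive balancing when half the imbalance_pct limit is reached"), here is a worked example of the threshold arithmetic. The imbalance_pct value of 125 is an assumption (a common setting for CPU-level domains), and the load figures are made up; only the comparison mirrors the kernel code.

/*
 * Worked example of the passive-balancing threshold: with imbalance_pct = 125
 * (assumed), imbalance becomes 112, so the wakeup is pulled to this CPU only
 * when the task's previous CPU is roughly 12% busier than the waking CPU.
 */
#include <stdio.h>

int main(void)
{
        unsigned int imbalance_pct = 125;                         /* assumed domain setting */
        unsigned int imbalance = 100 + (imbalance_pct - 100) / 2; /* 112: half the 25% limit */
        unsigned long load = 2048;       /* load on the task's previous CPU */
        unsigned long this_load = 1024;  /* load on the CPU doing the wakeup */

        /* Same comparison as the kernel's: if (imbalance*this_load <= 100*load) */
        if (imbalance * this_load <= 100 * load)
                printf("passive balance: wake on this CPU (%u * %lu <= 100 * %lu)\n",
                       imbalance, this_load, load);
        else
                printf("keep previous CPU: it is not ~12%% busier than this one\n");

        return 0;
}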