summaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
author: Vincent Guittot <vincent.guittot@linaro.org> 2013-10-18 07:52:21 -0400
committer: Ingo Molnar <mingo@kernel.org> 2013-11-27 07:50:54 -0500
commit: c44f2a020072d75d6b0cbf9f139a09719cda9367 (patch)
tree: 365080b93f4de0173edf06d921ef2031034dfc76 /kernel/sched
parent: 192301e70af3f6803c6354a464ebfa742da738ae (diff)
sched/fair: Move load idx selection in find_idlest_group
load_idx is used in find_idlest_group but is initialized in select_task_rq_fair even when it is not used. The load_idx initialization is moved into find_idlest_group, and sd_flag replaces it in the function's arguments. Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org> Cc: len.brown@intel.com Cc: amit.kucheria@linaro.org Cc: pjt@google.com Cc: l.majewski@samsung.com Cc: Morten.Rasmussen@arm.com Cc: cmetcalf@tilera.com Cc: tony.luck@intel.com Cc: alex.shi@intel.com Cc: preeti@linux.vnet.ibm.com Cc: linaro-kernel@lists.linaro.org Cc: rjw@sisk.pl Cc: paulmck@linux.vnet.ibm.com Cc: corbet@lwn.net Cc: arjan@linux.intel.com Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1382097147-30088-8-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r-- kernel/sched/fair.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e8b652ebe027..6cb36c7ea391 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4110,12 +4110,16 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
4110 */ 4110 */
4111static struct sched_group * 4111static struct sched_group *
4112find_idlest_group(struct sched_domain *sd, struct task_struct *p, 4112find_idlest_group(struct sched_domain *sd, struct task_struct *p,
4113 int this_cpu, int load_idx) 4113 int this_cpu, int sd_flag)
4114{ 4114{
4115 struct sched_group *idlest = NULL, *group = sd->groups; 4115 struct sched_group *idlest = NULL, *group = sd->groups;
4116 unsigned long min_load = ULONG_MAX, this_load = 0; 4116 unsigned long min_load = ULONG_MAX, this_load = 0;
4117 int load_idx = sd->forkexec_idx;
4117 int imbalance = 100 + (sd->imbalance_pct-100)/2; 4118 int imbalance = 100 + (sd->imbalance_pct-100)/2;
4118 4119
4120 if (sd_flag & SD_BALANCE_WAKE)
4121 load_idx = sd->wake_idx;
4122
4119 do { 4123 do {
4120 unsigned long load, avg_load; 4124 unsigned long load, avg_load;
4121 int local_group; 4125 int local_group;
@@ -4283,7 +4287,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
4283 } 4287 }
4284 4288
4285 while (sd) { 4289 while (sd) {
4286 int load_idx = sd->forkexec_idx;
4287 struct sched_group *group; 4290 struct sched_group *group;
4288 int weight; 4291 int weight;
4289 4292
@@ -4292,10 +4295,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
4292 continue; 4295 continue;
4293 } 4296 }
4294 4297
4295 if (sd_flag & SD_BALANCE_WAKE) 4298 group = find_idlest_group(sd, p, cpu, sd_flag);
4296 load_idx = sd->wake_idx;
4297
4298 group = find_idlest_group(sd, p, cpu, load_idx);
4299 if (!group) { 4299 if (!group) {
4300 sd = sd->child; 4300 sd = sd->child;
4301 continue; 4301 continue;