author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2013-04-23 04:27:40 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-04-24 02:52:44 -0400
commit		d31980846f9688db3ee3e5863525c6ff8ace4c7c (patch)
tree		92ec80802dd11061e5b1515a0bf41d9b9be22cb1 /kernel/sched
parent		cfc03118047172f5bdc58d63c607d16d33ce5305 (diff)
sched: Move up affinity check to mitigate useless redoing overhead
Currently, LBF_ALL_PINNED is cleared only after the affinity check has passed. So if task migration is skipped in move_tasks() because of a small load value or a small imbalance value, we never clear LBF_ALL_PINNED and end up triggering 'redo' in load_balance(). The imbalance value is often so small that no task can be moved to another cpu, and this situation may well persist even after we change the target cpu. This patch therefore moves the affinity check up and clears LBF_ALL_PINNED before evaluating the load value, in order to mitigate the useless redo overhead. In addition, it reorders some comments so they are correct.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Jason Low <jason.low2@hp.com>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1366705662-3587-5-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
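For reference, the 'redo' path mentioned above lives in load_balance() and is not part of this diff. It looks roughly like the following (abridged from the fair.c of this era, with the surrounding bookkeeping omitted; treat it as a sketch rather than the exact hunk). A spuriously-set LBF_ALL_PINNED is what sends the balancer down this retry path:

	/* All tasks on this runqueue were pinned by CPU affinity */
	if (unlikely(env.flags & LBF_ALL_PINNED)) {
		/* drop the busiest cpu from the candidate mask and retry */
		cpumask_clear_cpu(cpu_of(busiest), cpus);
		if (!cpumask_empty(cpus)) {
			env.loop = 0;
			env.loop_break = sched_nr_migrate_break;
			goto redo;
		}
		goto out_balanced;
	}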
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	16
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index dfa92b7b3dec..b8ef321641df 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3896,10 +3896,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	int tsk_cache_hot = 0;
 	/*
 	 * We do not migrate tasks that are:
-	 * 1) running (obviously), or
+	 * 1) throttled_lb_pair, or
 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
-	 * 3) are cache-hot on their current CPU.
+	 * 3) running (obviously), or
+	 * 4) are cache-hot on their current CPU.
 	 */
+	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
+		return 0;
+
 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
 		int new_dst_cpu;
 
@@ -3967,9 +3971,6 @@ static int move_one_task(struct lb_env *env)
 	struct task_struct *p, *n;
 
 	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
-		if (throttled_lb_pair(task_group(p), env->src_rq->cpu, env->dst_cpu))
-			continue;
-
 		if (!can_migrate_task(p, env))
 			continue;
 
@@ -4021,7 +4022,7 @@ static int move_tasks(struct lb_env *env)
 			break;
 		}
 
-		if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
+		if (!can_migrate_task(p, env))
 			goto next;
 
 		load = task_h_load(p);
@@ -4032,9 +4033,6 @@ static int move_tasks(struct lb_env *env)
 		if ((load / 2) > env->imbalance)
 			goto next;
 
-		if (!can_migrate_task(p, env))
-			goto next;
-
 		move_task(p, env);
 		pulled++;
 		env->imbalance -= load;
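The net effect on the move_tasks() scan, condensed into a simplified sketch of the patched loop (loop-break and statistics bookkeeping omitted, so this is illustrative rather than the exact source):

	while (!list_empty(tasks)) {
		p = list_first_entry(tasks, struct task_struct, se.group_node);

		/*
		 * Affinity/throttling check first: a task that is merely too
		 * heavy for the remaining imbalance still clears
		 * LBF_ALL_PINNED inside can_migrate_task(), so load_balance()
		 * will not needlessly redo the pass.
		 */
		if (!can_migrate_task(p, env))
			goto next;

		load = task_h_load(p);
		if ((load / 2) > env->imbalance)
			goto next;

		move_task(p, env);
		pulled++;
		env->imbalance -= load;
next:
		list_move_tail(&p->se.group_node, tasks);
	}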