author		Vincent Guittot <vincent.guittot@linaro.org>	2014-08-26 07:06:44 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-09-19 06:35:24 -0400
commit		afdeee0510db918b31bb4aba47452df2ddbdbcf2
tree		f0c307eade01eaf4a2a6d069a2152d67bd090022	/kernel/sched/fair.c
parent		0d9e26329b0c9263d4d9e0422d80a0e73268c52f
sched: Fix imbalance flag reset
The imbalance flag can stay set even though there is no imbalance.

Assume that we have 3 tasks running on a dual core / dual cluster system. We will have some idle load balances, which are triggered during the tick. Unfortunately, the tick is also used to queue background work, so we can reach a situation where a short work item has been queued on a CPU which already runs a task. The load balancer will detect this imbalance (2 tasks on 1 CPU and an idle CPU) and will try to pull the waiting task onto the idle CPU. The waiting task is a worker thread that is pinned to its CPU, so an imbalance due to pinned tasks is detected and the imbalance flag is set.

From then on, we will not be able to clear the flag because we have at most 1 task on each CPU, yet the imbalance flag will trigger useless active load balancing between the idle CPU and the busy CPU.

We need to reset the imbalance flag as soon as we have reached a balanced state. If all tasks are pinned, we don't consider that a balanced state and leave the imbalance flag set.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: riel@redhat.com
Cc: Morten.Rasmussen@arm.com
Cc: efault@gmx.de
Cc: nicolas.pitre@linaro.org
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1409051215-16788-2-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
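For illustration only, below is a minimal, self-contained C sketch of the control flow this patch establishes, not the kernel code itself: the group imbalance flag is set while pinned tasks still leave a real imbalance, cleared once a balanced state is reached (out_balanced), and left untouched when every task is pinned (out_all_pinned) so the parent level can still act. The names lb_env_sketch, balance_tail(), some_pinned and all_pinned are hypothetical stand-ins; only LBF_SOME_PINNED, LBF_ALL_PINNED and the group imbalance flag correspond to real identifiers in fair.c.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant bits of struct lb_env. */
struct lb_env_sketch {
	bool some_pinned;	/* like LBF_SOME_PINNED: a pinned task was skipped */
	bool all_pinned;	/* like LBF_ALL_PINNED: every candidate task was pinned */
	long imbalance;		/* remaining imbalance after trying to pull tasks */
};

/* Sketch of the tail of the balance pass after this patch. */
static void balance_tail(struct lb_env_sketch *env, int *group_imbalance)
{
	/* Pinned tasks prevented a pull and imbalance remains: flag the group. */
	if (env->some_pinned && env->imbalance > 0)
		*group_imbalance = 1;

	if (env->all_pinned) {
		/*
		 * out_all_pinned: nothing can be migrated at this level, so
		 * the flag is left set for the parent level to act on.
		 */
		return;
	}

	if (env->imbalance == 0) {
		/*
		 * out_balanced: balance was reached despite any affinity
		 * constraints, so a stale flag must be cleared here.
		 */
		*group_imbalance = 0;
	}
}

int main(void)
{
	int flag = 1;	/* stale flag left over from an earlier, pinned pass */
	struct lb_env_sketch env = { .some_pinned = true, .all_pinned = false,
				     .imbalance = 0 };

	balance_tail(&env, &flag);
	printf("group imbalance flag after a balanced pass: %d\n", flag);	/* prints 0 */
	return 0;
}

Running the sketch shows the stale flag being cleared on a balanced pass, which is exactly the behaviour the pre-patch code could no longer reach once every CPU ran at most one task.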
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	23
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9807a991dc0e..01856a8bcd4c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6765,10 +6765,8 @@ more_balance:
 		if (sd_parent) {
 			int *group_imbalance = &sd_parent->groups->sgc->imbalance;
 
-			if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
+			if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
 				*group_imbalance = 1;
-			} else if (*group_imbalance)
-				*group_imbalance = 0;
 		}
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
@@ -6779,7 +6777,7 @@ more_balance:
 				env.loop_break = sched_nr_migrate_break;
 				goto redo;
 			}
-			goto out_balanced;
+			goto out_all_pinned;
 		}
 	}
 
@@ -6853,6 +6851,23 @@ more_balance:
 	goto out;
 
 out_balanced:
+	/*
+	 * We reach balance although we may have faced some affinity
+	 * constraints. Clear the imbalance flag if it was set.
+	 */
+	if (sd_parent) {
+		int *group_imbalance = &sd_parent->groups->sgc->imbalance;
+
+		if (*group_imbalance)
+			*group_imbalance = 0;
+	}
+
+out_all_pinned:
+	/*
+	 * We reach balance because all tasks are pinned at this level, so
+	 * we can't migrate them. Leave the imbalance flag set so the parent
+	 * level can try to migrate them.
+	 */
 	schedstat_inc(sd, lb_balanced[idle]);
 
 	sd->nr_balance_failed = 0;