path: root/kernel/sched/fair.c
author	Peter Zijlstra <peterz@infradead.org>	2016-05-11 10:10:34 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-05-12 03:55:31 -0400
commit	b5179ac70de85ef477cedf8b026a57913754cf1e (patch)
tree	9175a80de5ea94bc0948ef239c8eaf0825226d2a /kernel/sched/fair.c
parent	c58d25f371f5e4b2dfbec3a7bd6f3c24dd79095b (diff)
sched/fair: Prepare to fix fairness problems on migration
Mike reported that our recent attempt to fix migration problems:

  3a47d5124a95 ("sched/fair: Fix fairness issue on migration")

broke interactivity and the signal starve test. We reverted that commit
and now let's try it again more carefully, with some other underlying
problems fixed first.

One problem is that I assumed ENQUEUE_WAKING was only set when we do a
cross-cpu wakeup (migration), which isn't true. This means we now
destroy the vruntime history of tasks and wakeup-preemption suffers.

Cure this by making my assumption true, only call
sched_class::task_waking() when we do a cross-cpu wakeup. This avoids
the indirect call in the case we do a local wakeup.

Reported-by: Mike Galbraith <mgalbraith@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Hunter <ahh@google.com>
Cc: Ben Segall <bsegall@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Paul Turner <pjt@google.com>
Cc: Pavan Kondeti <pkondeti@codeaurora.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: byungchul.park@lge.com
Cc: linux-kernel@vger.kernel.org
Fixes: 3a47d5124a95 ("sched/fair: Fix fairness issue on migration")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
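
The rule the message describes is that the ->task_waking() hook (and hence ENQUEUE_WAKING) should only come into play when the wakeup actually moves the task to another CPU; the wakeup-path change itself is not part of the fair.c hunks shown below. The following standalone C program is only a hedged sketch of that condition, not kernel code: toy_task, toy_task_waking and toy_try_to_wake_up are invented names for illustration.

/*
 * Hedged sketch, not the actual patch: models the rule "only run the
 * waking hook on a cross-CPU wakeup".  All names here are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_task {
	int cpu;		/* CPU the task last ran on           */
	bool waking_called;	/* did the waking hook run this time? */
};

static void toy_task_waking(struct toy_task *p)
{
	p->waking_called = true;	/* stands in for vruntime -= min_vruntime */
}

static void toy_try_to_wake_up(struct toy_task *p, int target_cpu)
{
	p->waking_called = false;

	if (p->cpu != target_cpu) {
		/* cross-CPU wakeup: normalize vruntime before migrating */
		toy_task_waking(p);
		p->cpu = target_cpu;
	}
	/* local wakeup: skip the indirect call, keep the vruntime history */
}

int main(void)
{
	struct toy_task p = { .cpu = 0, .waking_called = false };

	toy_try_to_wake_up(&p, 0);	/* local wakeup  */
	printf("local wakeup : waking hook called = %d\n", p.waking_called);

	toy_try_to_wake_up(&p, 2);	/* remote wakeup */
	printf("remote wakeup: waking hook called = %d\n", p.waking_called);
	return 0;
}

In the local case the hook never runs, so the task's absolute vruntime (its history) is left untouched; only the remote case pays for the indirect call and the normalization.
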
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	37
1 file changed, 37 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 48633a1c3b46..445bcd2d7ee1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3254,6 +3254,37 @@ static inline void check_schedstat_required(void)
 #endif
 }
 
+
+/*
+ * MIGRATION
+ *
+ *	dequeue
+ *	  update_curr()
+ *	    update_min_vruntime()
+ *	  vruntime -= min_vruntime
+ *
+ *	enqueue
+ *	  update_curr()
+ *	    update_min_vruntime()
+ *	  vruntime += min_vruntime
+ *
+ * this way the vruntime transition between RQs is done when both
+ * min_vruntime are up-to-date.
+ *
+ * WAKEUP (remote)
+ *
+ *	->task_waking_fair()
+ *	  vruntime -= min_vruntime
+ *
+ *	enqueue
+ *	  update_curr()
+ *	    update_min_vruntime()
+ *	  vruntime += min_vruntime
+ *
+ * this way we don't have the most up-to-date min_vruntime on the originating
+ * CPU and an up-to-date min_vruntime on the destination CPU.
+ */
+
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -4810,6 +4841,12 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 	return 0;
 }
 
+/*
+ * Called to migrate a waking task; as blocked tasks retain absolute vruntime
+ * the migration needs to deal with this by subtracting the old and adding the
+ * new min_vruntime -- the latter is done by enqueue_entity() when placing
+ * the task on the new runqueue.
+ */
 static void task_waking_fair(struct task_struct *p)
 {
 	struct sched_entity *se = &p->se;
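
The MIGRATION / WAKEUP (remote) comment added above describes a purely arithmetic protocol: make vruntime relative by subtracting the source runqueue's min_vruntime, then re-base it by adding the destination's. The standalone C program below is a minimal model of that round-trip, not kernel code; toy_cfs_rq, toy_se and toy_migrate are invented stand-ins for cfs_rq, sched_entity and the task_waking_fair()/enqueue_entity() pair.

/* Hedged sketch: toy model of the vruntime hand-off between runqueues. */
#include <stdio.h>
#include <stdint.h>

struct toy_cfs_rq { uint64_t min_vruntime; };	/* stands in for cfs_rq       */
struct toy_se     { uint64_t vruntime; };	/* stands in for sched_entity */

/* remote wakeup / migration: make vruntime relative, then re-base it */
static void toy_migrate(struct toy_se *se,
			struct toy_cfs_rq *src, struct toy_cfs_rq *dst)
{
	se->vruntime -= src->min_vruntime;	/* ->task_waking_fair() side        */
	se->vruntime += dst->min_vruntime;	/* enqueue_entity() on the new rq   */
}

int main(void)
{
	struct toy_cfs_rq cpu0 = { .min_vruntime = 1000 };
	struct toy_cfs_rq cpu1 = { .min_vruntime = 5000 };
	struct toy_se se = { .vruntime = 1200 };	/* 200 ahead of cpu0's min */

	toy_migrate(&se, &cpu0, &cpu1);
	printf("vruntime after migration: %llu (still 200 ahead of cpu1's min)\n",
	       (unsigned long long)se.vruntime);
	return 0;
}

Running it prints 5200: the task keeps its 200-unit lag relative to the new runqueue's min_vruntime, which is exactly the history the commit message says must not be destroyed on a local wakeup, where neither subtraction nor addition should happen.
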