Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c	11
1 file changed, 9 insertions, 2 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 62b057603f07..8763bee6b661 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -473,19 +473,26 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
 		vruntime += sched_vslice(cfs_rq)/2;
 
+	/*
+	 * The 'current' period is already promised to the current tasks,
+	 * however the extra weight of the new task will slow them down a
+	 * little, place the new task so that it fits in the slot that
+	 * stays open at the end.
+	 */
 	if (initial && sched_feat(START_DEBIT))
 		vruntime += sched_vslice_add(cfs_rq, se);
 
 	if (!initial) {
+		/* sleeps upto a single latency don't count. */
 		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
 				task_of(se)->policy != SCHED_BATCH)
 			vruntime -= sysctl_sched_latency;
 
-		vruntime = max_t(s64, vruntime, se->vruntime);
+		/* ensure we never gain time by being placed backwards. */
+		vruntime = max_vruntime(se->vruntime, vruntime);
 	}
 
 	se->vruntime = vruntime;
-
 }
 
 static void
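
Note on the max_t() -> max_vruntime() change: the open-coded max_t(s64, ...) clamp is replaced by the max_vruntime() helper. The sketch below shows that helper under the assumption it matches the one already defined near the top of kernel/sched_fair.c in this era of the tree; it compares through a signed delta, the usual kernel idiom for comparisons that must stay correct as the unsigned vruntime counters wrap.

/*
 * Sketch of the assumed max_vruntime() helper (not part of this patch):
 * compare via a signed delta so the result is still sane across u64
 * wraparound of the vruntime values.
 */
static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);

	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}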