diff options
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 8 |
1 file changed, 6 insertions, 2 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 72e25c7a3a18..6c091d6e159d 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -520,7 +520,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) | |||
520 | 520 | ||
521 | if (!initial) { | 521 | if (!initial) { |
522 | /* sleeps upto a single latency don't count. */ | 522 | /* sleeps upto a single latency don't count. */ |
523 | if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se)) | 523 | if (sched_feat(NEW_FAIR_SLEEPERS)) |
524 | vruntime -= sysctl_sched_latency; | 524 | vruntime -= sysctl_sched_latency; |
525 | 525 | ||
526 | /* ensure we never gain time by being placed backwards. */ | 526 | /* ensure we never gain time by being placed backwards. */ |
@@ -1106,7 +1106,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) | |||
1106 | } | 1106 | } |
1107 | 1107 | ||
1108 | gran = sysctl_sched_wakeup_granularity; | 1108 | gran = sysctl_sched_wakeup_granularity; |
1109 | if (unlikely(se->load.weight != NICE_0_LOAD)) | 1109 | /* |
1110 | * More easily preempt - nice tasks, while not making | ||
1111 | * it harder for + nice tasks. | ||
1112 | */ | ||
1113 | if (unlikely(se->load.weight > NICE_0_LOAD)) | ||
1110 | gran = calc_delta_fair(gran, &se->load); | 1114 | gran = calc_delta_fair(gran, &se->load); |
1111 | 1115 | ||
1112 | if (pse->vruntime + gran < se->vruntime) | 1116 | if (pse->vruntime + gran < se->vruntime) |