Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	32
1 files changed, 15 insertions, 17 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0c4bcac54761..a0aa38b10fdd 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -336,7 +336,7 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 #endif
 
 /*
- * delta *= w / rw
+ * delta *= P[w / rw]
  */
 static inline unsigned long
 calc_delta_weight(unsigned long delta, struct sched_entity *se)
@@ -350,15 +350,13 @@ calc_delta_weight(unsigned long delta, struct sched_entity *se)
 }
 
 /*
- * delta *= rw / w
+ * delta /= w
  */
 static inline unsigned long
 calc_delta_fair(unsigned long delta, struct sched_entity *se)
 {
-	for_each_sched_entity(se) {
-		delta = calc_delta_mine(delta,
-				cfs_rq_of(se)->load.weight, &se->load);
-	}
+	if (unlikely(se->load.weight != NICE_0_LOAD))
+		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
 
 	return delta;
 }
@@ -388,26 +386,26 @@ static u64 __sched_period(unsigned long nr_running)
  * We calculate the wall-time slice from the period by taking a part
  * proportional to the weight.
  *
- * s = p*w/rw
+ * s = p*P[w/rw]
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
+	unsigned long nr_running = cfs_rq->nr_running;
+
+	if (unlikely(!se->on_rq))
+		nr_running++;
+
+	return calc_delta_weight(__sched_period(nr_running), se);
 }
 
 /*
  * We calculate the vruntime slice of a to be inserted task
  *
- * vs = s*rw/w = p
+ * vs = s/w
  */
-static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	unsigned long nr_running = cfs_rq->nr_running;
-
-	if (!se->on_rq)
-		nr_running++;
-
-	return __sched_period(nr_running);
+	return calc_delta_fair(sched_slice(cfs_rq, se), se);
 }
 
 /*
@@ -629,7 +627,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
	 * stays open at the end.
	 */
 	if (initial && sched_feat(START_DEBIT))
-		vruntime += sched_vslice_add(cfs_rq, se);
+		vruntime += sched_vslice(cfs_rq, se);
 
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
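
For orientation, a minimal user-space sketch of the arithmetic the changed comments describe: sched_slice() hands out s = p*P[w/rw], a share of the period proportional to the entity's weight (P[] presumably denoting the product over the group hierarchy), and sched_vslice() converts that wall-time slice to vruntime by scaling with NICE_0_LOAD/w, which is what calc_delta_fair() now computes. The code below is not part of the patch; example_slice(), example_vslice() and the numbers are invented for illustration, and plain 64-bit division stands in for the kernel's fixed-point calc_delta_mine().

/*
 * Illustrative sketch only, assuming a single (non-grouped) level of
 * the hierarchy; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define NICE_0_LOAD 1024ULL

/* s = p * w / rw: share of the period proportional to this entity's weight */
static uint64_t example_slice(uint64_t period_ns, uint64_t w, uint64_t rw)
{
	return period_ns * w / rw;
}

/* vs = s * NICE_0_LOAD / w: heavier entities advance vruntime more slowly */
static uint64_t example_vslice(uint64_t slice_ns, uint64_t w)
{
	return slice_ns * NICE_0_LOAD / w;
}

int main(void)
{
	uint64_t period = 20000000ULL;	/* 20 ms scheduling period, in ns */
	uint64_t w = 2048;		/* this entity's load weight */
	uint64_t rw = 3072;		/* total runqueue load weight */

	uint64_t s = example_slice(period, w, rw);

	printf("slice = %llu ns, vslice = %llu ns\n",
	       (unsigned long long)s,
	       (unsigned long long)example_vslice(s, w));
	return 0;
}

With these made-up numbers (w = 2048, rw = 3072, 20 ms period), the entity gets roughly a 13.3 ms wall-time slice, which corresponds to about 6.7 ms of vruntime.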