about summary refs log tree commit diff stats
path: root/kernel/sched_fair.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- kernel/sched_fair.c | 14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6b0974c3fb67..c578370cd693 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -306,6 +306,8 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
306 delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec); 306 delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec);
307 delta = calc_delta_mine(delta, curr->load.weight, lw); 307 delta = calc_delta_mine(delta, curr->load.weight, lw);
308 delta = min((u64)delta, cfs_rq->sleeper_bonus); 308 delta = min((u64)delta, cfs_rq->sleeper_bonus);
309 delta = min(delta, (unsigned long)(
310 (long)sysctl_sched_runtime_limit - curr->wait_runtime));
309 cfs_rq->sleeper_bonus -= delta; 311 cfs_rq->sleeper_bonus -= delta;
310 delta_mine -= delta; 312 delta_mine -= delta;
311 } 313 }
@@ -493,6 +495,13 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
493 unsigned long load = cfs_rq->load.weight, delta_fair; 495 unsigned long load = cfs_rq->load.weight, delta_fair;
494 long prev_runtime; 496 long prev_runtime;
495 497
498 /*
499 * Do not boost sleepers if there's too much bonus 'in flight'
500 * already:
501 */
502 if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
503 return;
504
496 if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG) 505 if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
497 load = rq_of(cfs_rq)->cpu_load[2]; 506 load = rq_of(cfs_rq)->cpu_load[2];
498 507
@@ -512,16 +521,13 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
512 521
513 prev_runtime = se->wait_runtime; 522 prev_runtime = se->wait_runtime;
514 __add_wait_runtime(cfs_rq, se, delta_fair); 523 __add_wait_runtime(cfs_rq, se, delta_fair);
524 schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
515 delta_fair = se->wait_runtime - prev_runtime; 525 delta_fair = se->wait_runtime - prev_runtime;
516 526
517 /* 527 /*
518 * Track the amount of bonus we've given to sleepers: 528 * Track the amount of bonus we've given to sleepers:
519 */ 529 */
520 cfs_rq->sleeper_bonus += delta_fair; 530 cfs_rq->sleeper_bonus += delta_fair;
521 if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
522 cfs_rq->sleeper_bonus = sysctl_sched_runtime_limit;
523
524 schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
525} 531}
526 532
527static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) 533static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)