diff options
| author | Ingo Molnar <mingo@elte.hu> | 2007-08-24 14:39:10 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2007-08-24 14:39:10 -0400 |
| commit | b2133c8b1e270b4a7c36f70e29be8738d09e850b (patch) | |
| tree | 9075055664141cc1b91a0b3eac915e6241e38801 | |
| parent | 98fbc798533339be802c6dcd48c2293c712e87db (diff) | |
sched: tidy up and simplify the bonus balance
make the bonus balance more consistent: do not hand out a bonus if
there's too much in flight already, and only deduct as much from a
runner as it has capacity for. This makes the bonus engine a zero-sum
game (as intended).
this also simplifies the code:
text data bss dec hex filename
34770 2998 24 37792 93a0 sched.o.before
34749 2998 24 37771 938b sched.o.after
and it also avoids overscheduling in sleep-happy workloads like
hackbench.c.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| -rw-r--r-- | kernel/sched_fair.c | 14 |
1 files changed, 10 insertions, 4 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 6b0974c3fb67..c578370cd693 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
| @@ -306,6 +306,8 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr) | |||
| 306 | delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec); | 306 | delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec); |
| 307 | delta = calc_delta_mine(delta, curr->load.weight, lw); | 307 | delta = calc_delta_mine(delta, curr->load.weight, lw); |
| 308 | delta = min((u64)delta, cfs_rq->sleeper_bonus); | 308 | delta = min((u64)delta, cfs_rq->sleeper_bonus); |
| 309 | delta = min(delta, (unsigned long)( | ||
| 310 | (long)sysctl_sched_runtime_limit - curr->wait_runtime)); | ||
| 309 | cfs_rq->sleeper_bonus -= delta; | 311 | cfs_rq->sleeper_bonus -= delta; |
| 310 | delta_mine -= delta; | 312 | delta_mine -= delta; |
| 311 | } | 313 | } |
| @@ -493,6 +495,13 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
| 493 | unsigned long load = cfs_rq->load.weight, delta_fair; | 495 | unsigned long load = cfs_rq->load.weight, delta_fair; |
| 494 | long prev_runtime; | 496 | long prev_runtime; |
| 495 | 497 | ||
| 498 | /* | ||
| 499 | * Do not boost sleepers if there's too much bonus 'in flight' | ||
| 500 | * already: | ||
| 501 | */ | ||
| 502 | if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit)) | ||
| 503 | return; | ||
| 504 | |||
| 496 | if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG) | 505 | if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG) |
| 497 | load = rq_of(cfs_rq)->cpu_load[2]; | 506 | load = rq_of(cfs_rq)->cpu_load[2]; |
| 498 | 507 | ||
| @@ -512,16 +521,13 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
| 512 | 521 | ||
| 513 | prev_runtime = se->wait_runtime; | 522 | prev_runtime = se->wait_runtime; |
| 514 | __add_wait_runtime(cfs_rq, se, delta_fair); | 523 | __add_wait_runtime(cfs_rq, se, delta_fair); |
| 524 | schedstat_add(cfs_rq, wait_runtime, se->wait_runtime); | ||
| 515 | delta_fair = se->wait_runtime - prev_runtime; | 525 | delta_fair = se->wait_runtime - prev_runtime; |
| 516 | 526 | ||
| 517 | /* | 527 | /* |
| 518 | * Track the amount of bonus we've given to sleepers: | 528 | * Track the amount of bonus we've given to sleepers: |
| 519 | */ | 529 | */ |
| 520 | cfs_rq->sleeper_bonus += delta_fair; | 530 | cfs_rq->sleeper_bonus += delta_fair; |
| 521 | if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit)) | ||
| 522 | cfs_rq->sleeper_bonus = sysctl_sched_runtime_limit; | ||
| 523 | |||
| 524 | schedstat_add(cfs_rq, wait_runtime, se->wait_runtime); | ||
| 525 | } | 531 | } |
| 526 | 532 | ||
| 527 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | 533 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) |
