author		Mike Galbraith <efault@gmx.de>	2009-01-02 06:16:42 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-02 11:10:43 -0500
commit		0a582440ff546e2c6610d1acec325e91b4efd313
tree		161a0943091c53bb2154121480b3e26ce5df1769
parent		b58602a4bac012b5f4fc12fe6b46ab237b610d5d
sched: fix sched_slice()
Impact: fix bad-interactivity buglet
Fix sched_slice() to emit a sane result whether a task is currently
enqueued or not.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Tested-by: Jayson King <dev@jaysonking.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
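
Why the old code could emit an insane slice: when the task was not yet on the runqueue, nr_running was bumped (stretching the period), but the weight proportion was then taken against a cfs_rq load that did not include that task's weight, so the task's share was overstated. The patch below folds the missing weight into a local copy of the load before scaling. The following is a minimal sketch of the proportion being computed, not kernel code: toy_sched_period(), toy_sched_slice(), the 20 ms / 4 ms tunable values, and the plain 64-bit division are assumptions made for clarity; the kernel instead scales through calc_delta_mine() with inverse-weight fixed-point math and walks the group hierarchy with for_each_sched_entity().

#include <stdint.h>

typedef uint64_t u64;

/* Hypothetical stand-in for __sched_period(): stretch the latency
 * target once more tasks are runnable than it can cover. Values are
 * assumed defaults (20 ms latency, 4 ms minimum granularity). */
static u64 toy_sched_period(unsigned long nr_running)
{
	const u64 latency  = 20000000ULL;   /* 20 ms in ns (assumed) */
	const u64 min_gran =  4000000ULL;   /*  4 ms in ns (assumed) */
	const unsigned long nr_latency = latency / min_gran;

	if (nr_running > nr_latency)
		return (u64)nr_running * min_gran;
	return latency;
}

/* Hypothetical stand-in for sched_slice():
 *   slice = period * se_weight / rq_weight
 * If the task is not enqueued, rq_weight does not yet contain
 * se_weight, so its share must be taken against rq_weight + se_weight,
 * which is the same correction the patch makes with update_load_add()
 * on a local copy of the load. */
static u64 toy_sched_slice(unsigned long nr_running, u64 rq_weight,
			   u64 se_weight, int se_on_rq)
{
	u64 slice = toy_sched_period(nr_running + !se_on_rq);

	if (!se_on_rq)
		rq_weight += se_weight;	/* account for the not-yet-enqueued task */

	return slice * se_weight / rq_weight;
}

For example, a nice-0 task (weight 1024) being placed on a runqueue that already carries weight 3072 from 3 runnable tasks gets toy_sched_slice(3, 3072, 1024, 0) = 20 ms * 1024 / 4096 = 5 ms, whereas dividing by the stale 3072 would have claimed roughly 6.7 ms.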
kernel/sched_fair.c | 30 ++++++++++++------------------
1 file changed, 12 insertions(+), 18 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5ad4440f0fc4..b808563f4f19 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -386,20 +386,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 #endif
 
 /*
- * delta *= P[w / rw]
- */
-static inline unsigned long
-calc_delta_weight(unsigned long delta, struct sched_entity *se)
-{
-	for_each_sched_entity(se) {
-		delta = calc_delta_mine(delta,
-				se->load.weight, &cfs_rq_of(se)->load);
-	}
-
-	return delta;
-}
-
-/*
  * delta /= w
  */
 static inline unsigned long
@@ -440,12 +426,20 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	unsigned long nr_running = cfs_rq->nr_running;
-
-	if (unlikely(!se->on_rq))
-		nr_running++;
-
-	return calc_delta_weight(__sched_period(nr_running), se);
+	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
+
+	for_each_sched_entity(se) {
+		struct load_weight *load = &cfs_rq->load;
+
+		if (unlikely(!se->on_rq)) {
+			struct load_weight lw = cfs_rq->load;
+
+			update_load_add(&lw, se->load.weight);
+			load = &lw;
+		}
+		slice = calc_delta_mine(slice, se->load.weight, load);
+	}
+	return slice;
 }
 
 /*
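
Design note: the not-enqueued correction is now applied inside the per-entity loop rather than up front, so with group scheduling enabled the weight proportion is recomputed at each level that for_each_sched_entity() walks. The missing weight is folded in via update_load_add() on a stack-local struct load_weight, leaving the live cfs_rq load untouched while the slice is computed.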