author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-06-27 07:41:12 -0400
committer	Ingo Molnar <mingo@elte.hu>			2008-06-27 08:31:28 -0400
commit		c9c294a630e28eec5f2865f028ecfc58d45c0a5a (patch)
tree		62fcfc395e98ff7a4ff6bee3364d0bb86024b171 /kernel/sched_fair.c
parent		a7be37ac8e1565e00880531f4e2aff421a21c803 (diff)
sched: fix calc_delta_asym()
calc_delta_asym() is supposed to do the same as calc_delta_fair(), except that
it linearly shrinks the result for negative-nice processes - this gives them a
smaller preemption threshold so that they are preempted more easily.
The problem is that, for task groups, se->load.weight is only the per-cpu
share of the actual task-group weight; take that into account.
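To make the scaling concrete, a worked example with illustrative numbers (not
taken from the patch): suppose tg->shares = 512 and this cpu's
se->load.weight = 256, i.e. half of the group's shares sit on this cpu. Since
lw is initialised to NICE_0_LOAD just above the context shown in the first
hunk, the patched path computes

	lw.weight = NICE_0_LOAD * se->load.weight / tg->shares
	          = 1024 * 256 / 512
	          = 512

which is the per-cpu share scaled up as if the group as a whole had weighed
NICE_0_LOAD.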
Also provide a debug switch to disable the asymmetry (which I still don't
like - but it does greatly benefit some workloads).
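The switch shows up as sched_feat(ASYM_GRAN) in the second hunk below; its
declaration falls outside this diffstat, which is limited to
kernel/sched_fair.c. A sketch of what the feature-list addition would look
like, assuming the SCHED_FEAT() convention of this era rather than quoting the
actual hunk:

	/* default-enabled; the asymmetry can be switched off at runtime */
	SCHED_FEAT(ASYM_GRAN, 1)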
This would explain the interactivity issues reported against group scheduling.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	28
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 496500988ce5..2268e634812b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -430,6 +430,29 @@ calc_delta_asym(unsigned long delta, struct sched_entity *se)
 	for_each_sched_entity(se) {
 		struct load_weight *se_lw = &se->load;
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+		struct cfs_rq *cfs_rq = se->my_q;
+		struct task_group *tg = NULL;
+
+		if (cfs_rq)
+			tg = cfs_rq->tg;
+
+		if (tg && tg->shares < NICE_0_LOAD) {
+			/*
+			 * scale shares to what it would have been had
+			 * tg->weight been NICE_0_LOAD:
+			 *
+			 *   weight = 1024 * shares / tg->weight
+			 */
+			lw.weight *= se->load.weight;
+			lw.weight /= tg->shares;
+
+			lw.inv_weight = 0;
+
+			se_lw = &lw;
+		} else
+#endif
+
 		if (se->load.weight < NICE_0_LOAD)
 			se_lw = &lw;
 
@@ -1154,7 +1177,10 @@ static unsigned long wakeup_gran(struct sched_entity *se)
 	 * More easily preempt - nice tasks, while not making it harder for
 	 * + nice tasks.
 	 */
-	gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
+	if (sched_feat(ASYM_GRAN))
+		gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
+	else
+		gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
 
 	return gran;
 }
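Because the asymmetry now sits behind sched_feat(), it can be compared against
the plain calc_delta_fair() path at runtime - writing NO_ASYM_GRAN to the
sched_features debugfs file should disable it without a rebuild - which makes
it straightforward to check whether the reported interactivity issues are in
fact down to this code.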