path: root/kernel/sched_fair.c
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-04-19 13:45:00 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-19 13:45:00 -0400
commit	ac884dec6d4a7df252150af875cffddf8f1d9c15 (patch)
tree	6ba7140a8b6e7b332fd687d24de45d2f6ded8035 /kernel/sched_fair.c
parent	58d6c2d72f8628f39e8689fbde8aa177fcf00a37 (diff)
sched: fair-group scheduling vs latency
Currently FAIR_GROUP sched grows the scheduler latency outside of sysctl_sched_latency; invert this so that it stays within.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
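The mechanism of the fix, visible in the diff below: the rewritten sched_slice() walks the group hierarchy with for_each_sched_entity() and scales the period by se->load.weight / cfs_rq->load.weight at every level. Each factor is at most 1, so the resulting slice can no longer grow past the period derived from sysctl_sched_latency. A minimal userspace sketch of that walk follows; the two-level hierarchy, the 20 ms period, the weights, and the names struct level / slice_of are illustrative only, not kernel code.

/* Userspace sketch of the hierarchical slice walk; all values invented. */
#include <stdio.h>
#include <stdint.h>

struct level {
	unsigned long se_weight;	/* weight of the entity at this level */
	unsigned long rq_weight;	/* total weight of its cfs_rq */
};

/* Scale the period by se_weight/rq_weight per level, as the patched
 * sched_slice() does under for_each_sched_entity(). */
static uint64_t slice_of(uint64_t period, const struct level *path, int depth)
{
	uint64_t slice = period;
	int i;

	for (i = 0; i < depth; i++) {
		slice *= path[i].se_weight;
		slice /= path[i].rq_weight;	/* do_div() in the kernel */
	}
	return slice;
}

int main(void)
{
	/* a nice-0 task inside a group: the task is half of its group's
	 * load, the group is a third of the root runqueue's load */
	struct level path[] = {
		{ 1024, 2048 },	/* task level */
		{ 1024, 3072 },	/* group level */
	};

	printf("slice = %llu ns\n",
	       (unsigned long long)slice_of(20000000ULL, path, 2));
	return 0;
}

This prints slice = 3333333 ns, i.e. 20 ms x 1/2 x 1/3, well inside the period. Before the patch, sched_slice() scaled by the task's own cfs_rq only, so slices inside a group were not discounted by the group's share of the parent runqueue and the summed latency could exceed sysctl_sched_latency.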
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	44
1 file changed, 31 insertions(+), 13 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b89fec93a23..9e301a2bab6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -362,29 +362,47 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	return calc_delta_mine(__sched_period(cfs_rq->nr_running),
-			       se->load.weight, &cfs_rq->load);
+	u64 slice = __sched_period(cfs_rq->nr_running);
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		slice *= se->load.weight;
+		do_div(slice, cfs_rq->load.weight);
+	}
+
+
+	return slice;
 }
 
 /*
- * We calculate the vruntime slice.
+ * We calculate the vruntime slice of a to be inserted task
  *
  * vs = s/w = p/rw
  */
-static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
+static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	u64 vslice = __sched_period(nr_running);
+	unsigned long nr_running = cfs_rq->nr_running;
+	unsigned long weight;
+	u64 vslice;
+
+	if (!se->on_rq)
+		nr_running++;
 
-	vslice *= NICE_0_LOAD;
-	do_div(vslice, rq_weight);
+	vslice = __sched_period(nr_running);
 
-	return vslice;
-}
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
 
-static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
-			      cfs_rq->nr_running + 1);
+		weight = cfs_rq->load.weight;
+		if (!se->on_rq)
+			weight += se->load.weight;
+
+		vslice *= NICE_0_LOAD;
+		do_div(vslice, weight);
+	}
+
+	return vslice;
 }
 
 /*
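The new sched_vslice_add() applies the same per-level walk in vruntime units: following the "vs = s/w = p/rw" comment, each level multiplies by NICE_0_LOAD and divides by the runqueue weight, counting the about-to-be-enqueued entity's weight when !se->on_rq. A standalone sketch of just that arithmetic, with invented numbers; the per-level weights below are assumed to already include the new entity.

#include <stdio.h>
#include <stdint.h>

#define NICE_0_LOAD 1024UL	/* load weight of a nice-0 task */

int main(void)
{
	uint64_t vslice = 20000000;	/* stand-in for __sched_period(nr_running) */
	/* cfs_rq->load.weight per level, assumed here to already contain
	 * the !se->on_rq adjustment (+ se->load.weight) from the patch */
	unsigned long rq_weight[] = { 2048, 3072 };
	int i;

	for (i = 0; i < 2; i++) {
		/* per level: vs = p * NICE_0_LOAD / rw */
		vslice *= NICE_0_LOAD;
		vslice /= rq_weight[i];	/* do_div() in the kernel */
	}

	printf("vslice = %llu ns\n", (unsigned long long)vslice);
	return 0;
}

With the same weights as the earlier sketch this also yields 3333333 ns. Expressing the slice in NICE_0_LOAD units makes it directly comparable to vruntime, which advances at weight-normalized speed; that is the s/w of the comment.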