path: root/kernel/sched_fair.c
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2007-10-15 11:00:10 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:10 -0400
commit	67e9fb2a39a1d454218d50383094940982be138f (patch)
tree	3b981b8d43d6c23f070df73033d731a6f00f0b0b /kernel/sched_fair.c
parent	1aa4731eff7dab7bd01747b46f654f449f1cfc2c (diff)
sched: add vslice
add vslice: the load-dependent "virtual slice" a task should ideally run, so that
the observed latency stays within the sched_latency window.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
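For illustration only (not part of the patch): the virtual slice is simply the scheduling period divided by the number of runnable tasks, so each task's ideal slice shrinks as load grows while the overall latency window stays bounded. The standalone userspace sketch below mimics that arithmetic; the constants and the period-stretching rule are assumptions chosen for the example, not values taken from this commit.

/*
 * Illustrative userspace sketch, NOT kernel code: shows how a
 * load-dependent "virtual slice" falls out of dividing the scheduling
 * period by the number of runnable tasks.  The constants below
 * (20 ms latency window, period stretching beyond 5 tasks) are
 * assumptions for the example only.
 */
#include <stdio.h>
#include <stdint.h>

#define SCHED_LATENCY_NS	20000000ULL	/* assumed ~20 ms latency target */
#define SCHED_NR_LATENCY	5		/* assumed task count before the period stretches */

/* period: the window within which every runnable task should run once */
static uint64_t sched_period(unsigned long nr_running)
{
	uint64_t period = SCHED_LATENCY_NS;

	if (nr_running > SCHED_NR_LATENCY) {
		period *= nr_running;
		period /= SCHED_NR_LATENCY;
	}
	return period;
}

/* vslice: each task's ideal share of that period */
static uint64_t sched_vslice(unsigned long nr_running)
{
	return sched_period(nr_running) / nr_running;
}

int main(void)
{
	for (unsigned long nr = 1; nr <= 8; nr++)
		printf("nr_running=%lu  vslice=%llu ns\n",
		       nr, (unsigned long long)sched_vslice(nr));
	return 0;
}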
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	44
1 file changed, 29 insertions(+), 15 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5db7bd18e818..87acc5cedd2d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -243,6 +243,15 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return period;
 }
 
+static u64 __sched_vslice(unsigned long nr_running)
+{
+	u64 period = __sched_period(nr_running);
+
+	do_div(period, nr_running);
+
+	return period;
+}
+
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
@@ -441,32 +450,33 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
-	u64 min_runtime, latency;
+	u64 vruntime;
 
-	min_runtime = cfs_rq->min_vruntime;
+	vruntime = cfs_rq->min_vruntime;
 
 	if (sched_feat(USE_TREE_AVG)) {
 		struct sched_entity *last = __pick_last_entity(cfs_rq);
 		if (last) {
-			min_runtime = __pick_next_entity(cfs_rq)->vruntime;
-			min_runtime += last->vruntime;
-			min_runtime >>= 1;
+			vruntime += last->vruntime;
+			vruntime >>= 1;
 		}
-	} else if (sched_feat(APPROX_AVG))
-		min_runtime += sysctl_sched_latency/2;
+	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
+		vruntime += __sched_vslice(cfs_rq->nr_running)/2;
 
 	if (initial && sched_feat(START_DEBIT))
-		min_runtime += sched_slice(cfs_rq, se);
+		vruntime += __sched_vslice(cfs_rq->nr_running + 1);
 
 	if (!initial && sched_feat(NEW_FAIR_SLEEPERS)) {
-		latency = sysctl_sched_latency;
-		if (min_runtime > latency)
-			min_runtime -= latency;
+		s64 latency = cfs_rq->min_vruntime - se->last_min_vruntime;
+		if (latency < 0 || !cfs_rq->nr_running)
+			latency = 0;
 		else
-			min_runtime = 0;
+			latency = min_t(s64, latency, sysctl_sched_latency);
+		vruntime -= latency;
 	}
 
-	se->vruntime = max(se->vruntime, min_runtime);
+	se->vruntime = vruntime;
+
 }
 
 static void
@@ -478,6 +488,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	update_curr(cfs_rq);
 
 	if (wakeup) {
+		/* se->vruntime += cfs_rq->min_vruntime; */
 		place_entity(cfs_rq, se, 0);
 		enqueue_sleeper(cfs_rq, se);
 	}
@@ -492,8 +503,8 @@ static void
492dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep) 503dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
493{ 504{
494 update_stats_dequeue(cfs_rq, se); 505 update_stats_dequeue(cfs_rq, se);
495#ifdef CONFIG_SCHEDSTATS
496 if (sleep) { 506 if (sleep) {
507#ifdef CONFIG_SCHEDSTATS
497 if (entity_is_task(se)) { 508 if (entity_is_task(se)) {
498 struct task_struct *tsk = task_of(se); 509 struct task_struct *tsk = task_of(se);
499 510
@@ -502,8 +513,11 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 			if (tsk->state & TASK_UNINTERRUPTIBLE)
 				se->block_start = rq_of(cfs_rq)->clock;
 		}
-	}
 #endif
+		/* se->vruntime = entity_key(cfs_rq, se); */
+		se->last_min_vruntime = cfs_rq->min_vruntime;
+	}
+
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	account_entity_dequeue(cfs_rq, se);