author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2007-10-15 11:00:10 -0400
committer  Ingo Molnar <mingo@elte.hu>              2007-10-15 11:00:10 -0400
commit     67e9fb2a39a1d454218d50383094940982be138f (patch)
tree       3b981b8d43d6c23f070df73033d731a6f00f0b0b
parent     1aa4731eff7dab7bd01747b46f654f449f1cfc2c (diff)
sched: add vslice
add vslice: the load-dependent "virtual slice" a task should ideally run for,
so that the observed latency stays within the sched_latency window.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  include/linux/sched.h |  1
-rw-r--r--  kernel/sched.c        |  2
-rw-r--r--  kernel/sched_fair.c   | 44
3 files changed, 32 insertions, 15 deletions
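To make the "virtual slice" in the commit message concrete: the new __sched_vslice() is simply the scheduling period divided evenly among the runnable tasks, so that each task gets its turn within the sched_latency window. The standalone C sketch below illustrates that arithmetic; the fixed 20 ms period and the sched_period()/sched_vslice() helpers are illustrative assumptions, not the kernel's __sched_period()/__sched_vslice() (the real period also stretches when many tasks are runnable).

#include <stdint.h>
#include <stdio.h>

/* illustrative value: 20 ms expressed in nanoseconds (assumed, not the kernel default) */
static const uint64_t sched_latency_ns = 20000000ULL;

/* assumption: a fixed latency window; the kernel's __sched_period() also
   stretches the window once nr_running grows large */
static uint64_t sched_period(unsigned long nr_running)
{
	(void)nr_running;
	return sched_latency_ns;
}

/* the vslice idea: split the period evenly per runnable task */
static uint64_t sched_vslice(unsigned long nr_running)
{
	return sched_period(nr_running) / nr_running;
}

int main(void)
{
	/* with more runnable tasks, each task's slice shrinks so the
	   observed latency stays within the latency window */
	for (unsigned long nr = 1; nr <= 4; nr++)
		printf("nr_running=%lu  vslice=%llu ns\n",
		       nr, (unsigned long long)sched_vslice(nr));
	return 0;
}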
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2c33227b0f82..d74830cc51eb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -908,6 +908,7 @@ struct sched_entity {
 	u64			sum_exec_runtime;
 	u64			vruntime;
 	u64			prev_sum_exec_runtime;
+	u64			last_min_vruntime;
 
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
diff --git a/kernel/sched.c b/kernel/sched.c
index 5004dff91850..fe1165b226a1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1615,6 +1615,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
+	p->se.last_min_vruntime		= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start		= 0;
@@ -6495,6 +6496,7 @@ static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	cfs_rq->rq = rq;
 #endif
+	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 }
 
 void __init sched_init(void)
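A side note on the min_vruntime initialization added above: (u64)(-(1LL << 20)) places min_vruntime roughly a million units below the 64-bit wrap point, presumably so that vruntime wraparound is exercised shortly after boot instead of only after very long uptimes. The snippet below is only an illustration of why such an initial value is safe when ordering is decided on signed differences; vruntime_before() is a hypothetical helper, not the kernel's comparison code.

#include <stdint.h>
#include <stdio.h>

/* ordering on the signed difference stays correct across the u64 wrap */
static int vruntime_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t start = (uint64_t)(-(1LL << 20));	/* the patch's initial min_vruntime */
	uint64_t later = start + (2ULL << 20);		/* advanced past the 64-bit wrap */

	/* prints 1: "start" is still ordered before "later" despite the wrap */
	printf("start before later: %d\n", vruntime_before(start, later));
	return 0;
}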
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5db7bd18e818..87acc5cedd2d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -243,6 +243,15 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return period;
 }
 
+static u64 __sched_vslice(unsigned long nr_running)
+{
+	u64 period = __sched_period(nr_running);
+
+	do_div(period, nr_running);
+
+	return period;
+}
+
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
@@ -441,32 +450,33 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
-	u64 min_runtime, latency;
+	u64 vruntime;
 
-	min_runtime = cfs_rq->min_vruntime;
+	vruntime = cfs_rq->min_vruntime;
 
 	if (sched_feat(USE_TREE_AVG)) {
 		struct sched_entity *last = __pick_last_entity(cfs_rq);
 		if (last) {
-			min_runtime = __pick_next_entity(cfs_rq)->vruntime;
-			min_runtime += last->vruntime;
-			min_runtime >>= 1;
+			vruntime += last->vruntime;
+			vruntime >>= 1;
 		}
-	} else if (sched_feat(APPROX_AVG))
-		min_runtime += sysctl_sched_latency/2;
+	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
+		vruntime += __sched_vslice(cfs_rq->nr_running)/2;
 
 	if (initial && sched_feat(START_DEBIT))
-		min_runtime += sched_slice(cfs_rq, se);
+		vruntime += __sched_vslice(cfs_rq->nr_running + 1);
 
 	if (!initial && sched_feat(NEW_FAIR_SLEEPERS)) {
-		latency = sysctl_sched_latency;
-		if (min_runtime > latency)
-			min_runtime -= latency;
-		else
-			min_runtime = 0;
+		s64 latency = cfs_rq->min_vruntime - se->last_min_vruntime;
+		if (latency < 0 || !cfs_rq->nr_running)
+			latency = 0;
+		else
+			latency = min_t(s64, latency, sysctl_sched_latency);
+		vruntime -= latency;
 	}
 
-	se->vruntime = max(se->vruntime, min_runtime);
+	se->vruntime = vruntime;
+
 }
 
 static void
@@ -478,6 +488,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	update_curr(cfs_rq);
 
 	if (wakeup) {
+		/* se->vruntime += cfs_rq->min_vruntime; */
 		place_entity(cfs_rq, se, 0);
 		enqueue_sleeper(cfs_rq, se);
 	}
@@ -492,8 +503,8 @@ static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
 	update_stats_dequeue(cfs_rq, se);
-#ifdef CONFIG_SCHEDSTATS
 	if (sleep) {
+#ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);
 
@@ -502,8 +513,11 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 			if (tsk->state & TASK_UNINTERRUPTIBLE)
 				se->block_start = rq_of(cfs_rq)->clock;
 		}
-	}
 #endif
+		/* se->vruntime = entity_key(cfs_rq, se); */
+		se->last_min_vruntime = cfs_rq->min_vruntime;
+	}
+
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	account_entity_dequeue(cfs_rq, se);
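For reference, the sleeper handling spread across dequeue_entity() and place_entity() above amounts to: a task going to sleep records cfs_rq->min_vruntime in last_min_vruntime, and on wakeup it is placed at the current min_vruntime minus the vruntime that passed while it slept, with that credit capped at sysctl_sched_latency. The standalone C sketch below only approximates that arithmetic; place_sleeper() and the 20 ms cap are illustrative, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* illustrative cap: 20 ms in nanoseconds (assumed) */
static const int64_t sched_latency_ns = 20000000LL;

/* approximate the wakeup placement: credit the sleeper for the vruntime
   that advanced while it was off the queue, bounded by the latency cap */
static uint64_t place_sleeper(uint64_t min_vruntime, uint64_t last_min_vruntime,
			      unsigned long nr_running)
{
	int64_t credit = (int64_t)(min_vruntime - last_min_vruntime);

	if (credit < 0 || !nr_running)
		credit = 0;
	else if (credit > sched_latency_ns)
		credit = sched_latency_ns;

	return min_vruntime - credit;
}

int main(void)
{
	/* short sleep: full credit, the task is placed back where it left off */
	printf("short sleep: %llu\n",
	       (unsigned long long)place_sleeper(1000000, 400000, 3));
	/* long sleep: credit capped, so the waker cannot hog the CPU on wakeup */
	printf("long sleep:  %llu\n",
	       (unsigned long long)place_sleeper(900000000, 100000, 3));
	return 0;
}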