author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2007-10-15 11:00:05 -0400
committer  Ingo Molnar <mingo@elte.hu>                2007-10-15 11:00:05 -0400
commit     94dfb5e75ef59068a8cf68fa6e18f25ebdcd20b9
tree       3ff5c162ffbc0e796db250f8a0562292118829f3 /kernel
parent     28a1f6fa2f7ecec7e5da28b03a24abbecbd2e864
sched: add tree based averages
add support for tree-based vruntime averages.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
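
A quick standalone sketch of what "tree based average" means here, before reading the patch: when the feature is enabled, an entity being placed gets a vruntime at the midpoint between the smallest (leftmost) and largest (rightmost) vruntime currently in the CFS rbtree. The types and helper names below are simplified stand-ins for illustration, not the kernel's own; only the arithmetic mirrors the USE_TREE_AVG branch added in sched_fair.c.

    /*
     * Toy illustration of the tree-based average: midpoint of the
     * leftmost and rightmost queued vruntime.  Simplified types, not
     * the kernel's; builds with any C compiler.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct toy_entity { uint64_t vruntime; };

    /* average of the two tree extremes, as in the USE_TREE_AVG branch */
    static uint64_t tree_avg(const struct toy_entity *leftmost,
                             const struct toy_entity *rightmost)
    {
            uint64_t v = leftmost->vruntime;

            v += rightmost->vruntime;
            v >>= 1;                /* shift-by-one == divide by two */
            return v;
    }

    int main(void)
    {
            struct toy_entity next = { 100 }, last = { 180 };

            /* a newly placed entity would start around vruntime 140 */
            printf("placed at vruntime %llu\n",
                   (unsigned long long)tree_avg(&next, &last));
            return 0;
    }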
Diffstat (limited to 'kernel')
 kernel/sched.c      |  6
 kernel/sched_fair.c | 20
 2 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index bf85b4b281c5..198b07a6d038 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -399,6 +399,8 @@ enum {
         SCHED_FEAT_SLEEPER_AVG          = 4,
         SCHED_FEAT_SLEEPER_LOAD_AVG     = 8,
         SCHED_FEAT_START_DEBIT          = 16,
+        SCHED_FEAT_USE_TREE_AVG         = 32,
+        SCHED_FEAT_APPROX_AVG           = 64,
 };

 const_debug unsigned int sysctl_sched_features =
@@ -406,7 +408,9 @@ const_debug unsigned int sysctl_sched_features =
                 SCHED_FEAT_NEW_FAIR_SLEEPERS    *1 |
                 SCHED_FEAT_SLEEPER_AVG          *0 |
                 SCHED_FEAT_SLEEPER_LOAD_AVG     *1 |
-                SCHED_FEAT_START_DEBIT          *1;
+                SCHED_FEAT_START_DEBIT          *1 |
+                SCHED_FEAT_USE_TREE_AVG         *0 |
+                SCHED_FEAT_APPROX_AVG           *0;

 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

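Both new feature bits default to off (the *0 multipliers), so behaviour only changes once they are flipped in sysctl_sched_features at runtime; sched_feat(x) just tests the corresponding bit. A minimal standalone sketch of that bitmask pattern, using toy names rather than the kernel's:

    /*
     * Each feature constant is a power of two; multiplying by 1 or 0
     * includes or excludes it from the default mask, mirroring the
     * hunk above.  Toy illustration only.
     */
    #include <stdio.h>

    enum {
            FEAT_START_DEBIT    = 16,
            FEAT_USE_TREE_AVG   = 32,
            FEAT_APPROX_AVG     = 64,
    };

    static unsigned int features =
                    FEAT_START_DEBIT    *1 |
                    FEAT_USE_TREE_AVG   *0 |        /* new, default off */
                    FEAT_APPROX_AVG     *0;         /* new, default off */

    #define feat(x) (features & FEAT_##x)

    int main(void)
    {
            printf("START_DEBIT=%d USE_TREE_AVG=%d APPROX_AVG=%d\n",
                   !!feat(START_DEBIT), !!feat(USE_TREE_AVG), !!feat(APPROX_AVG));
            return 0;
    }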
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c8c6b0561391..86e5e8c5d101 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -547,16 +547,22 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
-        struct sched_entity *last = __pick_last_entity(cfs_rq);
         u64 min_runtime, latency;

         min_runtime = cfs_rq->min_vruntime;
-        if (last) {
-                min_runtime += last->vruntime;
-                min_runtime >>= 1;
-                if (initial && sched_feat(START_DEBIT))
-                        min_runtime += sysctl_sched_latency/2;
-        }
+
+        if (sched_feat(USE_TREE_AVG)) {
+                struct sched_entity *last = __pick_last_entity(cfs_rq);
+                if (last) {
+                        min_runtime = __pick_next_entity(cfs_rq)->vruntime;
+                        min_runtime += last->vruntime;
+                        min_runtime >>= 1;
+                }
+        } else if (sched_feat(APPROX_AVG))
+                min_runtime += sysctl_sched_latency/2;
+
+        if (initial && sched_feat(START_DEBIT))
+                min_runtime += sched_slice(cfs_rq, se);

         if (!initial && sched_feat(NEW_FAIR_SLEEPERS)) {
                 latency = sysctl_sched_latency;
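
Two reading notes on the place_entity() hunk above (my interpretation of the diff, not wording from the changelog). First, the averaging against the rightmost entity, previously done unconditionally against cfs_rq->min_vruntime whenever the tree was non-empty, is now gated behind USE_TREE_AVG and pairs the leftmost and rightmost entities instead, with APPROX_AVG offering a cheaper approximation that simply adds half a latency period. Second, the START_DEBIT penalty moves out of that branch and changes from a fixed sysctl_sched_latency/2 to sched_slice(cfs_rq, se), i.e. roughly the entity's own weighted share of one scheduling period, so the initial debit now scales with the load on the runqueue rather than being a constant.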