author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-03-21 11:43:47 -0400
committer	Ingo Molnar <mingo@elte.hu>			2008-03-21 11:43:47 -0400
commit		2070ee01d314ecec8a570c07647ccf4ced6340bb
tree		e7a3c95e9c8ae06917e00a40fcb6cae1c4a9c924
parent		ae51801ba5ca27c2c571eb508daa99b392e79bd4
sched: cleanup old and rarely used 'debug' features.
TREE_AVG and APPROX_AVG are initial task placement policies that have been
disabled for a long while; time to remove them.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
CC: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
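
[Editor's context, not part of the commit: the scheduler features touched
below are plain power-of-two bits, tested through the kernel's
sched_feat() mask macro. A minimal standalone C sketch of that pattern,
using the post-commit bit values; main() and the printf output are
illustrative only:]

/* Standalone sketch of the scheduler feature-bit pattern.
 * In the kernel these live in kernel/sched.c; this compiles on its own. */
#include <stdio.h>

enum {
	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
	SCHED_FEAT_WAKEUP_PREEMPT	= 2,
	SCHED_FEAT_START_DEBIT		= 4,
	SCHED_FEAT_HRTICK		= 8,	/* was 32 before this commit */
	SCHED_FEAT_DOUBLE_TICK		= 16,	/* was 64 before this commit */
};

/* In the kernel this is const_debug; each "* 1" / "* 0" factor turns the
 * corresponding bit on or off in the default feature mask. */
static unsigned int sysctl_sched_features =
	SCHED_FEAT_NEW_FAIR_SLEEPERS	* 1 |
	SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
	SCHED_FEAT_START_DEBIT		* 1 |
	SCHED_FEAT_HRTICK		* 1 |
	SCHED_FEAT_DOUBLE_TICK		* 0;

/* sched_feat(x) expands to a mask test against sysctl_sched_features. */
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

int main(void)
{
	printf("HRTICK enabled:      %d\n", !!sched_feat(HRTICK));
	printf("DOUBLE_TICK enabled: %d\n", !!sched_feat(DOUBLE_TICK));
	return 0;
}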
-rw-r--r--	kernel/sched.c      |  8 ++------
-rw-r--r--	kernel/sched_fair.c | 14 --------------
2 files changed, 2 insertions, 20 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 3f7c5eb254e2..366a90923a3b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -594,18 +594,14 @@ enum {
 	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
 	SCHED_FEAT_WAKEUP_PREEMPT	= 2,
 	SCHED_FEAT_START_DEBIT		= 4,
-	SCHED_FEAT_TREE_AVG		= 8,
-	SCHED_FEAT_APPROX_AVG		= 16,
-	SCHED_FEAT_HRTICK		= 32,
-	SCHED_FEAT_DOUBLE_TICK		= 64,
+	SCHED_FEAT_HRTICK		= 8,
+	SCHED_FEAT_DOUBLE_TICK		= 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
 	SCHED_FEAT_NEW_FAIR_SLEEPERS	* 1 |
 	SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
 	SCHED_FEAT_START_DEBIT		* 1 |
-	SCHED_FEAT_TREE_AVG		* 0 |
-	SCHED_FEAT_APPROX_AVG		* 0 |
 	SCHED_FEAT_HRTICK		* 1 |
 	SCHED_FEAT_DOUBLE_TICK		* 0;
 
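[Editor's note: with the TREE_AVG and APPROX_AVG bits gone, HRTICK and
DOUBLE_TICK drop from 32/64 to 8/16. Since the flags are only ever tested
by masking sysctl_sched_features against these constants, the renumbering
is consistent within a build; any feature-mask value saved externally
would, however, mean something different after this commit.]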
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b85cac4b5e25..86a93376282c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -302,11 +302,6 @@ static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
 	return vslice;
 }
 
-static u64 sched_vslice(struct cfs_rq *cfs_rq)
-{
-	return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
-}
-
 static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
@@ -504,15 +499,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	} else
 		vruntime = cfs_rq->min_vruntime;
 
-	if (sched_feat(TREE_AVG)) {
-		struct sched_entity *last = __pick_last_entity(cfs_rq);
-		if (last) {
-			vruntime += last->vruntime;
-			vruntime >>= 1;
-		}
-	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
-		vruntime += sched_vslice(cfs_rq)/2;
-
 	/*
 	 * The 'current' period is already promised to the current tasks,
 	 * however the extra weight of the new task will slow them down a
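
[Editor's reference, not part of the commit: the two heuristics deleted
above, restated as standalone C functions. The function names are
hypothetical; in the kernel this logic sat inline in place_entity(), with
sched_vslice() supplying the vslice value, which is why the first hunk
could also drop sched_vslice(), its last caller:]

#include <stdint.h>
#include <stdio.h>

/* TREE_AVG: place the new entity at the midpoint of its base vruntime
 * and the vruntime of the rightmost (last) queued entity. */
static uint64_t place_tree_avg(uint64_t vruntime, uint64_t last_vruntime)
{
	vruntime += last_vruntime;
	vruntime >>= 1;		/* arithmetic mean of the two */
	return vruntime;
}

/* APPROX_AVG: push the new entity forward by half its virtual slice. */
static uint64_t place_approx_avg(uint64_t vruntime, uint64_t vslice)
{
	return vruntime + vslice / 2;
}

int main(void)
{
	/* Illustrative numbers only. */
	printf("TREE_AVG:   %llu\n",
	       (unsigned long long)place_tree_avg(1000, 3000));
	printf("APPROX_AVG: %llu\n",
	       (unsigned long long)place_approx_avg(1000, 500));
	return 0;
}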