author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2007-10-15 11:00:14 -0400
committer	Ingo Molnar <mingo@elte.hu>			2007-10-15 11:00:14 -0400
commit		ce6c131131df442f0d49d064129ecc52d9fe8ca9 (patch)
tree		14b93a32144e7270dd821901ff247f506270a3a3 /kernel
parent		e62dd02ed0af35631c6ca473e50758c9594773cf (diff)
sched: disable forced preemption by default
Implement feature bit to disable forced preemption. This way
it can be checked whether a workload is overscheduling or not.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c		|  4
-rw-r--r--	kernel/sched_fair.c	| 24
2 files changed, 16 insertions(+), 12 deletions(-)
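The sched.c hunk below relies on a small bitmask idiom: every feature is a distinct power of two, the compile-time default is assembled by multiplying each bit by a literal 1 or 0, and the sched_feat(x) macro tests the bit at run time. The following user-space sketch mirrors that idiom with the same names purely for illustration; it is not the kernel code, just a compilable model of the mechanism:

#include <stdio.h>

/* Each feature is a distinct power of two, as in the enum in kernel/sched.c. */
enum {
	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
	SCHED_FEAT_START_DEBIT		= 2,
	SCHED_FEAT_TREE_AVG		= 4,
	SCHED_FEAT_APPROX_AVG		= 8,
	SCHED_FEAT_WAKEUP_PREEMPT	= 16,
};

/* Multiplying by 1 keeps a bit in the default mask, by 0 drops it. */
static unsigned int sysctl_sched_features =
		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
		SCHED_FEAT_START_DEBIT		*1 |
		SCHED_FEAT_TREE_AVG		*0 |
		SCHED_FEAT_APPROX_AVG		*0 |
		SCHED_FEAT_WAKEUP_PREEMPT	*1;

/* Token pasting turns sched_feat(WAKEUP_PREEMPT) into a bit test. */
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

int main(void)
{
	printf("default mask: %u\n", sysctl_sched_features);	/* 19 */
	printf("wakeup preemption %s\n",
	       sched_feat(WAKEUP_PREEMPT) ? "enabled" : "disabled");

	/* Clearing the bit is what "disabling forced preemption" amounts to. */
	sysctl_sched_features &= ~SCHED_FEAT_WAKEUP_PREEMPT;
	printf("wakeup preemption %s\n",
	       sched_feat(WAKEUP_PREEMPT) ? "enabled" : "disabled");
	return 0;
}

The *1/*0 trick keeps enabling or disabling a default to a one-character edit while the whole initializer still folds to a compile-time constant.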
diff --git a/kernel/sched.c b/kernel/sched.c
index b7dff36c7c8c..0bd8f2c0fb40 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -444,13 +444,15 @@ enum {
 	SCHED_FEAT_START_DEBIT		= 2,
 	SCHED_FEAT_TREE_AVG		= 4,
 	SCHED_FEAT_APPROX_AVG		= 8,
+	SCHED_FEAT_WAKEUP_PREEMPT	= 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
 		SCHED_FEAT_START_DEBIT		*1 |
 		SCHED_FEAT_TREE_AVG		*0 |
-		SCHED_FEAT_APPROX_AVG		*0;
+		SCHED_FEAT_APPROX_AVG		*0 |
+		SCHED_FEAT_WAKEUP_PREEMPT	*1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
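Because sysctl_sched_features is declared const_debug, it remains writable on CONFIG_SCHED_DEBUG builds, and kernels of this era appear to export it as a plain integer sysctl. The sketch below assumes that interface is /proc/sys/kernel/sched_features (an assumption, not something this patch confirms) and clears the WAKEUP_PREEMPT bit so a workload can be re-run without forced wakeup preemption:

/* Hypothetical user-space helper: clears the WAKEUP_PREEMPT bit (16) in the
 * scheduler feature mask.  Assumes a CONFIG_SCHED_DEBUG kernel that exposes
 * sysctl_sched_features as a plain unsigned integer under
 * /proc/sys/kernel/sched_features; path and format are assumptions. */
#include <stdio.h>

#define SCHED_FEAT_WAKEUP_PREEMPT 16

int main(void)
{
	const char *path = "/proc/sys/kernel/sched_features";
	unsigned int mask;
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%u", &mask) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%u\n", mask & ~SCHED_FEAT_WAKEUP_PREEMPT);
	fclose(f);
	return 0;
}

Comparing a workload's behaviour with the bit set and then cleared is the overscheduling check the changelog describes.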
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3ac096e74faf..3843ec71aad5 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -626,7 +626,7 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	 */
 	update_curr(cfs_rq);
 
-	if (cfs_rq->nr_running > 1)
+	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
 		check_preempt_tick(cfs_rq, curr);
 }
 
@@ -828,18 +828,20 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		return;
 	}
 
-	while (!is_same_group(se, pse)) {
-		se = parent_entity(se);
-		pse = parent_entity(pse);
-	}
+	if (sched_feat(WAKEUP_PREEMPT)) {
+		while (!is_same_group(se, pse)) {
+			se = parent_entity(se);
+			pse = parent_entity(pse);
+		}
 
-	delta = se->vruntime - pse->vruntime;
-	gran = sysctl_sched_wakeup_granularity;
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		gran = calc_delta_fair(gran, &se->load);
+		delta = se->vruntime - pse->vruntime;
+		gran = sysctl_sched_wakeup_granularity;
+		if (unlikely(se->load.weight != NICE_0_LOAD))
+			gran = calc_delta_fair(gran, &se->load);
 
-	if (delta > gran)
-		resched_task(curr);
+		if (delta > gran)
+			resched_task(curr);
+	}
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
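To see the gated decision in isolation, here is a stand-alone model of the wakeup-preemption check that the new if (sched_feat(WAKEUP_PREEMPT)) block guards. It is deliberately simplified: the group-hierarchy walk (is_same_group()/parent_entity()) is dropped, calc_delta_fair() is replaced by a plain proportional scaling, and all types, values and the single feature bit are illustrative rather than the kernel's:

#include <stdio.h>

#define NICE_0_LOAD 1024UL

struct sched_entity {
	unsigned long long vruntime;	/* weighted runtime, in ns */
	unsigned long load_weight;	/* NICE_0_LOAD for a nice-0 task */
};

static unsigned int features = 1;	/* bit 0 stands in for WAKEUP_PREEMPT */
static unsigned long long wakeup_granularity = 10000000ULL;	/* 10 ms */

/* Simplified stand-in for calc_delta_fair(): scale gran by NICE_0_LOAD/weight. */
static unsigned long long scale_gran(unsigned long long gran, unsigned long weight)
{
	return gran * NICE_0_LOAD / weight;
}

/* Returns 1 if the waking entity 'pse' should preempt the running 'se'. */
static int wakeup_preempt(const struct sched_entity *se,
			  const struct sched_entity *pse)
{
	unsigned long long gran = wakeup_granularity;

	if (!(features & 1))	/* feature bit cleared: never force preemption */
		return 0;

	if (se->load_weight != NICE_0_LOAD)
		gran = scale_gran(gran, se->load_weight);

	/* Preempt only if the running task is ahead by more than one granule. */
	return se->vruntime > pse->vruntime &&
	       se->vruntime - pse->vruntime > gran;
}

int main(void)
{
	struct sched_entity curr  = { .vruntime = 50000000, .load_weight = NICE_0_LOAD };
	struct sched_entity woken = { .vruntime = 20000000, .load_weight = NICE_0_LOAD };

	printf("preempt with feature on:  %d\n", wakeup_preempt(&curr, &woken)); /* 1 */
	features = 0;
	printf("preempt with feature off: %d\n", wakeup_preempt(&curr, &woken)); /* 0 */
	return 0;
}

With the bit cleared the check returns immediately, so preemption of the running task falls back to the tick path, which the entity_tick() hunk above now enters unconditionally when WAKEUP_PREEMPT is off.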