commit 6bc912b71b6f33b041cfde93ca3f019cbaa852bc
tree f6128f946bb275aa68f814f5f6a9c08df43965b7
parent cce7ade803699463ecc62a065ca522004f7ccb3d
author Peter Zijlstra <a.p.zijlstra@chello.nl> 2009-01-15 08:53:38 -0500
committer Ingo Molnar <mingo@elte.hu> 2009-01-15 09:07:29 -0500
sched: SCHED_OTHER vs SCHED_IDLE isolation
Stronger SCHED_IDLE isolation:
- no SCHED_IDLE buddies
- never let SCHED_IDLE preempt on wakeup
- always preempt SCHED_IDLE on wakeup
- limit SLEEPER fairness for SCHED_IDLE
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
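
For reference, a task opts into this class through sched_setscheduler(2); a minimal userspace sketch (not part of this patch):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* SCHED_IDLE requires a static priority of 0. */
	struct sched_param param = { .sched_priority = 0 };

	if (sched_setscheduler(0, SCHED_IDLE, &param) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	/* From here on, this task yields to any SCHED_OTHER work. */
	return 0;
}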
Diffstat (limited to 'kernel')
 kernel/sched_fair.c | 30 ++++++++++++++++++++++++--------
 1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8e1352c75557..cdebd8089cb0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -677,9 +677,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		unsigned long thresh = sysctl_sched_latency;
 
 		/*
-		 * convert the sleeper threshold into virtual time
+		 * Convert the sleeper threshold into virtual time.
+		 * SCHED_IDLE is a special sub-class. We care about
+		 * fairness only relative to other SCHED_IDLE tasks,
+		 * all of which have the same weight.
 		 */
-		if (sched_feat(NORMALIZED_SLEEPER))
+		if (sched_feat(NORMALIZED_SLEEPER) &&
+				task_of(se)->policy != SCHED_IDLE)
 			thresh = calc_delta_fair(thresh, se);
 
 		vruntime -= thresh;
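
The policy check added above matters because calc_delta_fair() scales the threshold by roughly NICE_0_LOAD / se->load.weight, and SCHED_IDLE entities carry a tiny fixed weight (WEIGHT_IDLEPRIO, 3 after the parent commit), so normalizing against it would inflate the sleeper credit enormously. A userspace sketch of that arithmetic, with assumed era-default constants:

#include <stdio.h>

int main(void)
{
	/* Assumed constants: NICE_0_LOAD = 1024, sysctl_sched_latency = 20ms,
	 * SCHED_IDLE weight (WEIGHT_IDLEPRIO) = 3, nice +10 weight = 110. */
	const unsigned long long nice_0_load = 1024;
	const unsigned long long latency_ns = 20000000;
	const unsigned long long weights[] = { 1024, 110, 3 };

	/* thresh = calc_delta_fair(latency, se) ~= latency * NICE_0_LOAD / weight */
	for (int i = 0; i < 3; i++)
		printf("weight %4llu -> normalized thresh %llu ns\n",
		       weights[i], latency_ns * nice_0_load / weights[i]);
	return 0;
}

For a weight of 3 that works out to roughly 6.8 seconds of vruntime credit per wakeup, which is why the patch leaves SCHED_IDLE at the raw threshold.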
@@ -1340,14 +1344,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-	for_each_sched_entity(se)
-		cfs_rq_of(se)->last = se;
+	if (likely(task_of(se)->policy != SCHED_IDLE)) {
+		for_each_sched_entity(se)
+			cfs_rq_of(se)->last = se;
+	}
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-	for_each_sched_entity(se)
-		cfs_rq_of(se)->next = se;
+	if (likely(task_of(se)->policy != SCHED_IDLE)) {
+		for_each_sched_entity(se)
+			cfs_rq_of(se)->next = se;
+	}
 }
 
 /*
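
The buddy hunks deny SCHED_IDLE the fast path in pick_next_entity(), which prefers a marked buddy over the leftmost entity as long as the buddy has not fallen too far behind in vruntime. A simplified, runnable sketch of that preference (assumed shape; the kernel's version uses wakeup_preempt_entity() for the distance test):

#include <stdio.h>

struct entity { const char *name; long long vruntime; };
struct runq { struct entity *leftmost, *next, *last; };

static struct entity *pick_next(struct runq *rq, long long gran)
{
	struct entity *se = rq->leftmost;	/* smallest vruntime wins by default */

	/* Prefer the wakee buddy if it is within one granularity... */
	if (rq->next && rq->next->vruntime - se->vruntime < gran)
		se = rq->next;
	/* ...else the last-preempted task, to preserve cache footprint. */
	else if (rq->last && rq->last->vruntime - se->vruntime < gran)
		se = rq->last;

	return se;
}

int main(void)
{
	struct entity a = { "leftmost", 100 }, b = { "buddy", 102 };
	struct runq rq = { .leftmost = &a, .next = &b, .last = NULL };

	/* The buddy jumps the queue despite its larger vruntime. */
	printf("picked: %s\n", pick_next(&rq, 5)->name);
	return 0;
}

With this patch a SCHED_IDLE task is never installed as a buddy in the first place, so it can never jump the queue this way.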
@@ -1393,12 +1401,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 		return;
 
 	/*
-	 * Batch tasks do not preempt (their preemption is driven by
+	 * Batch and idle tasks do not preempt (their preemption is driven by
 	 * the tick):
 	 */
-	if (unlikely(p->policy == SCHED_BATCH))
+	if (unlikely(p->policy != SCHED_NORMAL))
 		return;
 
+	/* Idle tasks are by definition preempted by everybody. */
+	if (unlikely(curr->policy == SCHED_IDLE)) {
+		resched_task(curr);
+		return;
+	}
+
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
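
Taken together, check_preempt_wakeup() now applies two asymmetric policy checks before any vruntime comparison. A sketch distilling just those checks (stand-in names; the real function goes on to consult WAKEUP_PREEMPT, granularity and buddies):

#include <stdbool.h>
#include <stdio.h>

enum policy { POL_NORMAL, POL_BATCH, POL_IDLE };	/* stand-ins for SCHED_* */

/* May a waking task of policy @p preempt a running task of policy @curr? */
static bool wakeup_may_preempt(enum policy p, enum policy curr)
{
	if (p != POL_NORMAL)		/* batch/idle wakers never preempt */
		return false;
	if (curr == POL_IDLE)		/* everybody preempts SCHED_IDLE */
		return true;
	/* Normal vs normal: the real code falls through to vruntime checks;
	 * assume preemption is possible here for the sketch. */
	return true;
}

int main(void)
{
	printf("idle wakes normal: %d\n", wakeup_may_preempt(POL_IDLE, POL_NORMAL));	/* 0 */
	printf("normal wakes idle: %d\n", wakeup_may_preempt(POL_NORMAL, POL_IDLE));	/* 1 */
	printf("batch wakes idle:  %d\n", wakeup_may_preempt(POL_BATCH, POL_IDLE));	/* 0: tick-driven */
	return 0;
}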