diff options
author | Mike Galbraith <efault@gmx.de> | 2007-10-15 11:00:14 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2007-10-15 11:00:14 -0400 |
commit | 95938a35c5562afa7af7252821e44132391a3db8 (patch) | |
tree | 548cda134d007105f3a819a438ab384ab9f04e27 /kernel/sched_fair.c | |
parent | ce6c131131df442f0d49d064129ecc52d9fe8ca9 (diff) |
sched: prevent wakeup over-scheduling
Prevent wakeup over-scheduling. Once a task has been preempted by a
task of the same or lower priority, it becomes ineligible for repeated
preemption by the same task until it has been ticked or has slept. Instead, the
task is marked for preemption at the next tick. Tasks of higher
priority still preempt immediately.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 14 |
1 file changed, 11 insertions, 3 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 3843ec71aad5..f819f943fb86 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -526,6 +526,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep) | |||
526 | 526 | ||
527 | update_stats_dequeue(cfs_rq, se); | 527 | update_stats_dequeue(cfs_rq, se); |
528 | if (sleep) { | 528 | if (sleep) { |
529 | se->peer_preempt = 0; | ||
529 | #ifdef CONFIG_SCHEDSTATS | 530 | #ifdef CONFIG_SCHEDSTATS |
530 | if (entity_is_task(se)) { | 531 | if (entity_is_task(se)) { |
531 | struct task_struct *tsk = task_of(se); | 532 | struct task_struct *tsk = task_of(se); |
@@ -553,8 +554,10 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | |||
553 | 554 | ||
554 | ideal_runtime = sched_slice(cfs_rq, curr); | 555 | ideal_runtime = sched_slice(cfs_rq, curr); |
555 | delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; | 556 | delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; |
556 | if (delta_exec > ideal_runtime) | 557 | if (delta_exec > ideal_runtime || |
558 | (sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt)) | ||
557 | resched_task(rq_of(cfs_rq)->curr); | 559 | resched_task(rq_of(cfs_rq)->curr); |
560 | curr->peer_preempt = 0; | ||
558 | } | 561 | } |
559 | 562 | ||
560 | static void | 563 | static void |
@@ -839,8 +842,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) | |||
839 | if (unlikely(se->load.weight != NICE_0_LOAD)) | 842 | if (unlikely(se->load.weight != NICE_0_LOAD)) |
840 | gran = calc_delta_fair(gran, &se->load); | 843 | gran = calc_delta_fair(gran, &se->load); |
841 | 844 | ||
842 | if (delta > gran) | 845 | if (delta > gran) { |
843 | resched_task(curr); | 846 | int now = !sched_feat(PREEMPT_RESTRICT); |
847 | |||
848 | if (now || p->prio < curr->prio || !se->peer_preempt++) | ||
849 | resched_task(curr); | ||
850 | } | ||
844 | } | 851 | } |
845 | } | 852 | } |
846 | 853 | ||
@@ -1034,6 +1041,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p) | |||
1034 | check_spread(cfs_rq, curr); | 1041 | check_spread(cfs_rq, curr); |
1035 | __enqueue_entity(cfs_rq, se); | 1042 | __enqueue_entity(cfs_rq, se); |
1036 | account_entity_enqueue(cfs_rq, se); | 1043 | account_entity_enqueue(cfs_rq, se); |
1044 | se->peer_preempt = 0; | ||
1037 | resched_task(rq->curr); | 1045 | resched_task(rq->curr); |
1038 | } | 1046 | } |
1039 | 1047 | ||