author     Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-01-25 10:30:03 -0500
committer  Ingo Molnar <mingo@elte.hu>	2011-01-26 06:33:22 -0500
commit     a8941d7ec81678fb69aea7183338175f112f3e0d
tree       a55aabbbdb060f1d320b47b697b4e8e0b0c2ec38	/kernel/sched_idletask.c
parent     414bee9ba613adb3804965e2d84db32d0599f9c6
sched: Simplify the idle scheduling class
Since commit 48c5ccae88dcd (sched: Simplify cpu-hot-unplug task
migration), this should no longer happen, so remove the code.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_idletask.c')
-rw-r--r--  kernel/sched_idletask.c | 23 ++++-------------------
1 file changed, 4 insertions(+), 19 deletions(-)
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 9fa0f402c87c..41eb62a0808b 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -52,31 +52,16 @@ static void set_curr_task_idle(struct rq *rq)
 {
 }
 
-static void switched_to_idle(struct rq *rq, struct task_struct *p,
-			     int running)
+static void
+switched_to_idle(struct rq *rq, struct task_struct *p, int running)
 {
-	/* Can this actually happen?? */
-	if (running)
-		resched_task(rq->curr);
-	else
-		check_preempt_curr(rq, p, 0);
+	BUG();
 }
 
 static void prio_changed_idle(struct rq *rq, struct task_struct *p,
 			      int oldprio, int running)
 {
-	/* This can happen for hot plug CPUS */
-
-	/*
-	 * Reschedule if we are currently running on this runqueue and
-	 * our priority decreased, or if we are not currently running on
-	 * this runqueue and our priority is higher than the current's
-	 */
-	if (running) {
-		if (p->prio > oldprio)
-			resched_task(rq->curr);
-	} else
-		check_preempt_curr(rq, p, 0);
+	BUG();
 }
 
 static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
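
For readability, here is a minimal sketch of how the two callbacks read once
this patch is applied (reconstructed from the hunk above; the rest of
kernel/sched_idletask.c is elided):

/*
 * Idle-class callbacks after this patch (taken from the hunk above).
 * Since 48c5ccae88dcd the scheduler should never switch a task to the
 * idle class at runtime, nor change the priority of an idle-class
 * task, so both paths now trap instead of trying to reschedule.
 */
static void
switched_to_idle(struct rq *rq, struct task_struct *p, int running)
{
	BUG();
}

static void prio_changed_idle(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	BUG();
}

The BUG() calls encode the assumption that, after 48c5ccae88dcd, neither path
is reachable; if one is ever hit again, the kernel traps loudly instead of
silently rescheduling or checking for preemption.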