author		Steven Rostedt <rostedt@goodmis.org>	2008-01-25 15:08:22 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:22 -0500
commit		cb46984504048db946cd551c261df4e70d59a8ea (patch)
tree		e07343cc5967f74370c6b0290b67a225d868a99d /kernel/sched.c
parent		9a897c5a6701bcb6f099f7ca20194999102729fd (diff)
sched: RT-balance, add new methods to sched_class
Dmitry Adamushko found that the current implementation of the RT
balancing code left out changes to sched_setscheduler() and
rt_mutex_setprio().
This patch addresses the issue by adding methods to the scheduler
classes to handle a task being switched out of (switched_from) and
switched into (switched_to) a sched_class. A method for priority
changes (prio_changed) is also added.
This patch also removes some duplicate logic between rt_mutex_setprio and
sched_setscheduler.
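
For reference, the three new hooks take roughly the following shape in
struct sched_class (a sketch of the include/linux/sched.h side of the
change, which is outside this diffstat; exact signatures are
illustrative):

	struct sched_class {
		/* ... existing methods ... */

		/* invoked on the old class when a task leaves it (optional) */
		void (*switched_from) (struct rq *this_rq, struct task_struct *task,
				       int running);
		/* invoked on the new class when a task joins it */
		void (*switched_to) (struct rq *this_rq, struct task_struct *task,
				     int running);
		/* invoked when a task's priority changes within its class */
		void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
				      int oldprio, int running);
	};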
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	42
1 file changed, 20 insertions(+), 22 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 2368a0d882e3..5834c7fb79a5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1152,6 +1152,18 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 #endif
 }
 
+static inline void check_class_changed(struct rq *rq, struct task_struct *p,
+				       const struct sched_class *prev_class,
+				       int oldprio, int running)
+{
+	if (prev_class != p->sched_class) {
+		if (prev_class->switched_from)
+			prev_class->switched_from(rq, p, running);
+		p->sched_class->switched_to(rq, p, running);
+	} else
+		p->sched_class->prio_changed(rq, p, oldprio, running);
+}
+
 #ifdef CONFIG_SMP
 
 /*
@@ -4017,6 +4029,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	unsigned long flags;
 	int oldprio, on_rq, running;
 	struct rq *rq;
+	const struct sched_class *prev_class = p->sched_class;
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
@@ -4042,18 +4055,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	if (on_rq) {
 		if (running)
 			p->sched_class->set_curr_task(rq);
+
 		enqueue_task(rq, p, 0);
-		/*
-		 * Reschedule if we are currently running on this runqueue and
-		 * our priority decreased, or if we are not currently running on
-		 * this runqueue and our priority is higher than the current's
-		 */
-		if (running) {
-			if (p->prio > oldprio)
-				resched_task(rq->curr);
-		} else {
-			check_preempt_curr(rq, p);
-		}
+
+		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
 	task_rq_unlock(rq, &flags);
 }
@@ -4253,6 +4258,7 @@ int sched_setscheduler(struct task_struct *p, int policy,
 {
 	int retval, oldprio, oldpolicy = -1, on_rq, running;
 	unsigned long flags;
+	const struct sched_class *prev_class = p->sched_class;
 	struct rq *rq;
 
 	/* may grab non-irq protected spin_locks */
@@ -4346,18 +4352,10 @@ recheck:
 	if (on_rq) {
 		if (running)
 			p->sched_class->set_curr_task(rq);
+
 		activate_task(rq, p, 0);
-		/*
-		 * Reschedule if we are currently running on this runqueue and
-		 * our priority decreased, or if we are not currently running on
-		 * this runqueue and our priority is higher than the current's
-		 */
-		if (running) {
-			if (p->prio > oldprio)
-				resched_task(rq->curr);
-		} else {
-			check_preempt_curr(rq, p);
-		}
+
+		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
 	__task_rq_unlock(rq);
 	spin_unlock_irqrestore(&p->pi_lock, flags);
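
The reschedule logic deleted above does not vanish: each scheduling
class now supplies it through its prio_changed method. As an
illustrative sketch (the CFS side of this series, not part of the
kernel/sched.c diff shown here), the fair-class version carries the
old open-coded comparison:

	static void prio_changed_fair(struct rq *rq, struct task_struct *p,
				      int oldprio, int running)
	{
		/*
		 * Reschedule if we are currently running on this runqueue and
		 * our priority decreased, or if we are not currently running
		 * on this runqueue and our priority is higher than the
		 * current's.
		 */
		if (running) {
			if (p->prio > oldprio)
				resched_task(rq->curr);
		} else
			check_preempt_curr(rq, p);
	}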