author		Steven Rostedt <rostedt@goodmis.org>	2008-01-25 15:08:22 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:22 -0500
commit		cb46984504048db946cd551c261df4e70d59a8ea (patch)
tree		e07343cc5967f74370c6b0290b67a225d868a99d /kernel/sched_fair.c
parent		9a897c5a6701bcb6f099f7ca20194999102729fd (diff)
sched: RT-balance, add new methods to sched_class
Dmitry Adamushko found that the current implementation of the RT
balancing code left out changes to sched_setscheduler and
rt_mutex_setprio.

This patch addresses the issue by adding methods to the scheduling
classes to handle being switched out of (switched_from) and switched
into (switched_to) a sched_class. A method for handling priority
changes (prio_changed) is also added.

This patch also removes some duplicate logic between rt_mutex_setprio
and sched_setscheduler.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
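
To make the new hooks concrete, here is a minimal user-space sketch of the dispatch pattern the commit message describes: when a task changes class, the core calls switched_from on the old class and switched_to on the new one; when it stays in the same class, only prio_changed is called. The helper name check_class_changed(), the stub types, and the printf bodies are illustrative assumptions, not kernel code; the real fair-class implementations are in the diff below.

/*
 * Minimal user-space sketch of the hook dispatch described above.
 * The struct layout, the helper name check_class_changed(), and the
 * simplified types are assumptions for illustration only; just the
 * method names (switched_from, switched_to, prio_changed) and their
 * roles come from the commit message and diff.
 */
#include <stdio.h>

struct rq;		/* stand-in for the per-CPU runqueue */
struct sched_class;

struct task_struct {	/* stand-in: just enough state for the demo */
	int prio;
	const struct sched_class *sched_class;
};

struct sched_class {
	const char *name;
	void (*switched_from)(struct rq *rq, struct task_struct *p, int running);
	void (*switched_to)(struct rq *rq, struct task_struct *p, int running);
	void (*prio_changed)(struct rq *rq, struct task_struct *p,
			     int oldprio, int running);
};

/* Illustrative fair-class hooks; they just report what would happen. */
static void switched_to_fair(struct rq *rq, struct task_struct *p, int running)
{
	printf("fair: switched_to (running=%d)\n", running);
}

static void prio_changed_fair(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	printf("fair: prio %d -> %d (running=%d)\n", oldprio, p->prio, running);
}

static const struct sched_class fair_sched_class = {
	.name		= "fair",
	.switched_to	= switched_to_fair,
	.prio_changed	= prio_changed_fair,
};

/*
 * The logic rt_mutex_setprio()/sched_setscheduler() can now share:
 * one helper picks the right hook instead of duplicating the checks.
 */
static void check_class_changed(struct rq *rq, struct task_struct *p,
				const struct sched_class *prev_class,
				int oldprio, int running)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p, running);
		if (p->sched_class->switched_to)
			p->sched_class->switched_to(rq, p, running);
	} else if (p->sched_class->prio_changed) {
		p->sched_class->prio_changed(rq, p, oldprio, running);
	}
}

int main(void)
{
	struct task_struct p = { .prio = 120, .sched_class = &fair_sched_class };
	int oldprio = p.prio;

	p.prio = 100;	/* priority boost within the same class */
	check_class_changed(NULL, &p, &fair_sched_class, oldprio, 1);
	return 0;
}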
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	39
1 file changed, 39 insertions, 0 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 10aa6e1ae3dd..dfa18d55561d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1280,6 +1280,42 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	resched_task(rq->curr);
 }
 
+/*
+ * Priority of the task has changed. Check to see if we preempt
+ * the current task.
+ */
+static void prio_changed_fair(struct rq *rq, struct task_struct *p,
+			      int oldprio, int running)
+{
+	/*
+	 * Reschedule if we are currently running on this runqueue and
+	 * our priority decreased, or if we are not currently running on
+	 * this runqueue and our priority is higher than the current's
+	 */
+	if (running) {
+		if (p->prio > oldprio)
+			resched_task(rq->curr);
+	} else
+		check_preempt_curr(rq, p);
+}
+
+/*
+ * We switched to the sched_fair class.
+ */
+static void switched_to_fair(struct rq *rq, struct task_struct *p,
+			     int running)
+{
+	/*
+	 * We were most likely switched from sched_rt, so
+	 * kick off the schedule if running, otherwise just see
+	 * if we can still preempt the current task.
+	 */
+	if (running)
+		resched_task(rq->curr);
+	else
+		check_preempt_curr(rq, p);
+}
+
 /* Account for a task changing its policy or group.
  *
  * This routine is mostly called to set cfs_rq->curr field when a task
@@ -1318,6 +1354,9 @@ static const struct sched_class fair_sched_class = {
 	.set_curr_task		= set_curr_task_fair,
 	.task_tick		= task_tick_fair,
 	.task_new		= task_new_fair,
+
+	.prio_changed		= prio_changed_fair,
+	.switched_to		= switched_to_fair,
 };
 
 #ifdef CONFIG_SCHED_DEBUG