path: root/kernel/sched_idletask.c
author     Steven Rostedt <rostedt@goodmis.org>    2008-01-25 15:08:22 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-01-25 15:08:22 -0500
commit     cb46984504048db946cd551c261df4e70d59a8ea (patch)
tree       e07343cc5967f74370c6b0290b67a225d868a99d /kernel/sched_idletask.c
parent     9a897c5a6701bcb6f099f7ca20194999102729fd (diff)
sched: RT-balance, add new methods to sched_class
Dmitry Adamushko found that the current implementation of the RT balancing code left out changes to sched_setscheduler and rt_mutex_setprio.

This patch addresses the issue by adding methods to the scheduling classes to handle being switched out of (switched_from) and being switched into (switched_to) a sched_class. A method for priority changes (prio_changed) is added as well.

This patch also removes some duplicate logic between rt_mutex_setprio and sched_setscheduler.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
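For orientation, here is a sketch of how the new callbacks look as members of struct sched_class. The switched_to and prio_changed signatures follow the idle-class implementations in the diff below; switched_from is only mentioned in the commit message, so its prototype and the exact member placement are assumptions, not a quote from kernel/sched.h or kernel/sched.c.

    struct sched_class {
            /* ... existing members (enqueue_task, pick_next_task, ...) ... */

            /* task is being switched out of this class
             * (e.g. via sched_setscheduler()) -- prototype assumed */
            void (*switched_from)(struct rq *this_rq, struct task_struct *task,
                                  int running);
            /* task has just been switched into this class */
            void (*switched_to)(struct rq *this_rq, struct task_struct *task,
                                int running);
            /* task stays in this class but its priority changed */
            void (*prio_changed)(struct rq *this_rq, struct task_struct *task,
                                 int oldprio, int running);
    };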
Diffstat (limited to 'kernel/sched_idletask.c')
-rw-r--r--  kernel/sched_idletask.c  31
1 file changed, 31 insertions, 0 deletions
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index ca5374860ae..ef7a2661fa1 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -69,6 +69,33 @@ static void set_curr_task_idle(struct rq *rq)
 {
 }
 
+static void switched_to_idle(struct rq *rq, struct task_struct *p,
+			     int running)
+{
+	/* Can this actually happen?? */
+	if (running)
+		resched_task(rq->curr);
+	else
+		check_preempt_curr(rq, p);
+}
+
+static void prio_changed_idle(struct rq *rq, struct task_struct *p,
+			      int oldprio, int running)
+{
+	/* This can happen for hot plug CPUS */
+
+	/*
+	 * Reschedule if we are currently running on this runqueue and
+	 * our priority decreased, or if we are not currently running on
+	 * this runqueue and our priority is higher than the current's
+	 */
+	if (running) {
+		if (p->prio > oldprio)
+			resched_task(rq->curr);
+	} else
+		check_preempt_curr(rq, p);
+}
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -94,5 +121,9 @@ const struct sched_class idle_sched_class = {
 
 	.set_curr_task		= set_curr_task_idle,
 	.task_tick		= task_tick_idle,
+
+	.prio_changed		= prio_changed_idle,
+	.switched_to		= switched_to_idle,
+
 	/* no .task_new for idle tasks */
 };