author     Steven Rostedt <srostedt@redhat.com>    2008-01-25 15:08:05 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-01-25 15:08:05 -0500
commit     e8fa136262e1121288bb93befe2295928ffd240d (patch)
tree       5df829adde9b43efee39275c05751c99bf46eb2f /kernel/sched.c
parent     764a9d6fe4b52995c8aba277e3634385699354f4 (diff)
sched: add RT task pushing
This patch adds an algorithm to push extra RT tasks off a run queue to other CPU runqueues.

When more than one RT task is added to a run queue, this algorithm takes an assertive approach to push the RT tasks that are not running onto other run queues that have lower priority. The way this works is that the highest RT task that is not running is looked at, and we examine the runqueues of the CPUs in that task's affinity mask. We find the runqueue with the lowest prio in the CPU affinity of the picked task, and if it is lower in prio than the picked task, we push the task onto that CPU runqueue.

We continue pushing RT tasks off the current runqueue until we don't push any more. The algorithm stops when the next highest RT task can't preempt any other processes on other CPUs.

TODO: The algorithm may stop while there are still RT tasks that could be migrated. Specifically, if the highest non-running RT task's CPU affinity is restricted to CPUs that are running higher priority tasks, there may be a lower priority task queued that has an affinity with a CPU running a lower priority task to which it could be migrated. This patch set does not address this issue.

Note: checkpatch reveals two lines over 80 characters. I'm not sure that breaking them up will help visually, so I left them as is.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
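[ Editorial illustration, not part of the patch: below is a minimal userspace C sketch of the push decision described above: scan the CPUs in the task's affinity mask for the runqueue with the lowest priority and push only if that priority is lower than the task's. NR_CPUS_SIM, struct sim_rq and find_lowest_rq_sim are hypothetical names invented for this sketch; the real code operates on struct rq and cpumasks under runqueue locks. ]

/*
 * Illustration only: a userspace sketch of the push decision.
 * As in the kernel, a lower numeric prio value means a higher priority.
 */
#include <stdio.h>

#define NR_CPUS_SIM 4

struct sim_rq {
	int highest_prio;	/* prio of the highest-priority task on this rq */
};

/*
 * Find the CPU in the affinity mask whose runqueue has the lowest priority
 * (numerically largest prio).  Return -1 if no runqueue in the mask is
 * running at a lower priority than the task we want to push.
 */
static int find_lowest_rq_sim(const struct sim_rq *rqs, unsigned int affinity,
			      int task_prio)
{
	int best_cpu = -1;
	int best_prio = task_prio;	/* must beat the task's own prio */

	for (int cpu = 0; cpu < NR_CPUS_SIM; cpu++) {
		if (!(affinity & (1u << cpu)))
			continue;	/* task is not allowed on this CPU */
		if (rqs[cpu].highest_prio > best_prio) {
			best_prio = rqs[cpu].highest_prio;
			best_cpu = cpu;
		}
	}
	return best_cpu;
}

int main(void)
{
	/* CPU0 runs prio 10, CPU1 prio 50, CPU2 prio 90, CPU3 prio 99 */
	struct sim_rq rqs[NR_CPUS_SIM] = { { 10 }, { 50 }, { 90 }, { 99 } };
	int task_prio = 40;		/* highest RT task on this rq that is not running */
	unsigned int affinity = 0x7;	/* task may run on CPUs 0-2 only */

	int target = find_lowest_rq_sim(rqs, affinity, task_prio);
	if (target >= 0)
		printf("push task (prio %d) to CPU %d (rq prio %d)\n",
		       task_prio, target, rqs[target].highest_prio);
	else
		printf("no lower-priority runqueue in the affinity mask\n");
	return 0;
}

[ Running the sketch reports that the prio-40 task would be pushed to CPU 2, the lowest-priority runqueue within its affinity mask. ]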
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6185fa080ec8..97cab609fc31 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1952,6 +1952,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	prev_state = prev->state;
 	finish_arch_switch(prev);
 	finish_lock_switch(rq, prev);
+	schedule_tail_balance_rt(rq);
+
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)
 		mmdrop(mm);
@@ -2185,11 +2187,13 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 /*
  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
  */
-static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(this_rq->lock)
 	__acquires(busiest->lock)
 	__acquires(this_rq->lock)
 {
+	int ret = 0;
+
 	if (unlikely(!irqs_disabled())) {
 		/* printk() doesn't work good under rq->lock */
 		spin_unlock(&this_rq->lock);
@@ -2200,9 +2204,11 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
 			spin_lock(&this_rq->lock);
+			ret = 1;
 		} else
 			spin_lock(&busiest->lock);
 	}
+	return ret;
 }
 
 /*