author    David S. Miller <davem@davemloft.net>    2011-05-20 16:10:22 -0400
committer David S. Miller <davem@davemloft.net>    2011-05-20 16:10:22 -0400
commit    90d3ac15e5c637d45849e83c828ed78c62886737 (patch)
tree      c5568365f32386559d2710e8981ed41e5fe0eb12 /kernel/sched_rt.c
parent    9fafbd806198eb690c9a9f9fe35a879db93a1b8d (diff)
parent    317f394160e9beb97d19a84c39b7e5eb3d7815a8 (diff)
Merge commit '317f394160e9beb97d19a84c39b7e5eb3d7815a8'
Conflicts:
        arch/sparc/kernel/smp_32.c

With merge conflict help from Daniel Hellstrom.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--    kernel/sched_rt.c    54
1 file changed, 34 insertions(+), 20 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index e7cebdc65f82..19ecb3127379 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -977,13 +977,23 @@ static void yield_task_rt(struct rq *rq)
 static int find_lowest_rq(struct task_struct *task);
 
 static int
-select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
+        struct task_struct *curr;
+        struct rq *rq;
+        int cpu;
+
         if (sd_flag != SD_BALANCE_WAKE)
                 return smp_processor_id();
 
+        cpu = task_cpu(p);
+        rq = cpu_rq(cpu);
+
+        rcu_read_lock();
+        curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+
         /*
-         * If the current task is an RT task, then
+         * If the current task on @p's runqueue is an RT task, then
          * try to see if we can wake this RT task up on another
          * runqueue. Otherwise simply start this RT task
          * on its current runqueue.
@@ -997,21 +1007,25 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
          * lock?
          *
          * For equal prio tasks, we just let the scheduler sort it out.
+         *
+         * Otherwise, just let it ride on the affined RQ and the
+         * post-schedule router will push the preempted task away
+         *
+         * This test is optimistic, if we get it wrong the load-balancer
+         * will have to sort it out.
          */
-        if (unlikely(rt_task(rq->curr)) &&
-            (rq->curr->rt.nr_cpus_allowed < 2 ||
-             rq->curr->prio < p->prio) &&
+        if (curr && unlikely(rt_task(curr)) &&
+            (curr->rt.nr_cpus_allowed < 2 ||
+             curr->prio < p->prio) &&
             (p->rt.nr_cpus_allowed > 1)) {
-                int cpu = find_lowest_rq(p);
+                int target = find_lowest_rq(p);
 
-                return (cpu == -1) ? task_cpu(p) : cpu;
+                if (target != -1)
+                        cpu = target;
         }
+        rcu_read_unlock();
 
-        /*
-         * Otherwise, just let it ride on the affined RQ and the
-         * post-schedule router will push the preempted task away
-         */
-        return task_cpu(p);
+        return cpu;
 }
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
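
For orientation, this is roughly how select_task_rq_rt() reads once the two hunks above are applied. It is reconstructed directly from the diff and is only a sketch; the added comments paraphrase the ones in the patch, and find_lowest_rq() is the helper declared in the first context line.

static int
select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
        struct task_struct *curr;
        struct rq *rq;
        int cpu;

        /* Only wakeup balancing tries to pick a different CPU. */
        if (sd_flag != SD_BALANCE_WAKE)
                return smp_processor_id();

        cpu = task_cpu(p);
        rq = cpu_rq(cpu);

        rcu_read_lock();
        curr = ACCESS_ONCE(rq->curr); /* unlocked access */

        /*
         * If the task currently running on @p's runqueue is an RT task
         * that either cannot migrate or would not be preempted by @p,
         * optimistically look for a lower-priority runqueue for @p; a
         * wrong guess is later corrected by the load balancer.
         */
        if (curr && unlikely(rt_task(curr)) &&
            (curr->rt.nr_cpus_allowed < 2 ||
             curr->prio < p->prio) &&
            (p->rt.nr_cpus_allowed > 1)) {
                int target = find_lowest_rq(p);

                if (target != -1)
                        cpu = target;
        }
        rcu_read_unlock();

        return cpu;
}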
@@ -1136,7 +1150,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
          * The previous task needs to be made eligible for pushing
          * if it is still active
          */
-        if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+        if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
                 enqueue_pushable_task(rq, p);
 }
 
@@ -1287,7 +1301,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                              !cpumask_test_cpu(lowest_rq->cpu,
                                                &task->cpus_allowed) ||
                              task_running(rq, task) ||
-                             !task->se.on_rq)) {
+                             !task->on_rq)) {
 
                         raw_spin_unlock(&lowest_rq->lock);
                         lowest_rq = NULL;
@@ -1321,7 +1335,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
         BUG_ON(task_current(rq, p));
         BUG_ON(p->rt.nr_cpus_allowed <= 1);
 
-        BUG_ON(!p->se.on_rq);
+        BUG_ON(!p->on_rq);
         BUG_ON(!rt_task(p));
 
         return p;
@@ -1467,7 +1481,7 @@ static int pull_rt_task(struct rq *this_rq)
                  */
                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
                         WARN_ON(p == src_rq->curr);
-                        WARN_ON(!p->se.on_rq);
+                        WARN_ON(!p->on_rq);
 
                         /*
                          * There's a chance that p is higher in priority
@@ -1538,7 +1552,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
          * Update the migration status of the RQ if we have an RT task
          * which is running AND changing its weight value.
          */
-        if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
+        if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
                 struct rq *rq = task_rq(p);
 
                 if (!task_current(rq, p)) {
@@ -1608,7 +1622,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
          * we may need to handle the pulling of RT tasks
          * now.
          */
-        if (p->se.on_rq && !rq->rt.rt_nr_running)
+        if (p->on_rq && !rq->rt.rt_nr_running)
                 pull_rt_task(rq);
 }
 
@@ -1638,7 +1652,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
          * If that current running task is also an RT task
          * then see if we can move to another run queue.
          */
-        if (p->se.on_rq && rq->curr != p) {
+        if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
                 if (rq->rt.overloaded && push_rt_task(rq) &&
                     /* Don't resched if we changed runqueues */
@@ -1657,7 +1671,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 static void
 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-        if (!p->se.on_rq)
+        if (!p->on_rq)
                 return;
 
         if (rq->curr == p) {
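
The remaining hunks are all one-line conversions: tests of p->se.on_rq become tests of p->on_rq (or, in put_prev_task_rt(), on_rt_rq(&p->rt)), so the queued state is read from the task_struct itself rather than from its embedded sched_entity. A minimal sketch of the resulting pattern follows; task_is_queued() is a hypothetical name used only for illustration and is not part of the patch.

/* Hypothetical helper, for illustration only; not introduced by this patch. */
static inline int task_is_queued(struct task_struct *p)
{
        return p->on_rq;        /* non-zero while the task sits on a runqueue */
}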