path: root/kernel/sched_rt.c
author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-05 11:23:44 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-04-14 02:52:35 -0400
commit		fd2f4419b4cbe8fe90796df9617c355762afd6a4 (patch)
tree		9eefa7f46c7163f00adb416ff4b9db97653d2665 /kernel/sched_rt.c
parent		d7c01d27ab767a30d672d1fd657aa8336ebdcbca (diff)
sched: Provide p->on_rq
Provide a generic p->on_rq because the p->se.on_rq semantics are
unfavourable for lockless wakeups but needed for sched_fair.

In particular, p->on_rq is only cleared when we actually dequeue the task in
schedule() and not on any random dequeue as done by things like
__migrate_task() and __sched_setscheduler().

This also allows us to remove p->se usage from !sched_fair code.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110405152728.949545047@chello.nl
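For readers following the diff below, here is a minimal, self-contained user-space
sketch of the distinction the message draws: the class-specific se.on_rq flag flips
on every dequeue (migration, setscheduler, ...), while the generic p->on_rq is meant
to be cleared only by the dequeue that schedule() itself performs, which is what
lockless wakeup paths want to test. All structures and helper names below are
simplified stand-ins for illustration, not the real <linux/sched.h> definitions.

#include <stdio.h>

/*
 * Illustrative stand-ins only: fields and helpers are simplified,
 * not the actual kernel definitions.
 */
struct sched_entity {
	int on_rq;		/* class-specific: toggled by every (de)queue */
};

struct task_struct {
	int on_rq;		/* generic: cleared only by the dequeue done in schedule() */
	struct sched_entity se;
};

/* Any dequeue (e.g. a temporary one during migration) clears se.on_rq. */
static void dequeue_task(struct task_struct *p)
{
	p->se.on_rq = 0;
}

/*
 * Only the dequeue performed on behalf of schedule() also clears the
 * generic flag, so a concurrent (lockless) wakeup can still see the task
 * as queued while it is merely being moved between runqueues.
 */
static void schedule_dequeue(struct task_struct *p)
{
	dequeue_task(p);
	p->on_rq = 0;
}

int main(void)
{
	struct task_struct p = { .on_rq = 1, .se.on_rq = 1 };

	dequeue_task(&p);	/* e.g. __migrate_task() dequeues temporarily */
	printf("after migration dequeue: se.on_rq=%d on_rq=%d\n",
	       p.se.on_rq, p.on_rq);

	schedule_dequeue(&p);	/* the dequeue done by schedule() itself */
	printf("after schedule dequeue:  se.on_rq=%d on_rq=%d\n",
	       p.se.on_rq, p.on_rq);
	return 0;
}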
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	16
1 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index e7cebdc65f82..9ca4f5f879c4 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1136,7 +1136,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1287,7 +1287,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			     !cpumask_test_cpu(lowest_rq->cpu,
 					       &task->cpus_allowed) ||
 			     task_running(rq, task) ||
-			     !task->se.on_rq)) {
+			     !task->on_rq)) {
 
 			raw_spin_unlock(&lowest_rq->lock);
 			lowest_rq = NULL;
@@ -1321,7 +1321,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 	BUG_ON(task_current(rq, p));
 	BUG_ON(p->rt.nr_cpus_allowed <= 1);
 
-	BUG_ON(!p->se.on_rq);
+	BUG_ON(!p->on_rq);
 	BUG_ON(!rt_task(p));
 
 	return p;
@@ -1467,7 +1467,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 */
 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
-			WARN_ON(!p->se.on_rq);
+			WARN_ON(!p->on_rq);
 
 			/*
 			 * There's a chance that p is higher in priority
@@ -1538,7 +1538,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	 * Update the migration status of the RQ if we have an RT task
 	 * which is running AND changing its weight value.
 	 */
-	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
+	if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
 		struct rq *rq = task_rq(p);
 
 		if (!task_current(rq, p)) {
@@ -1608,7 +1608,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (p->se.on_rq && !rq->rt.rt_nr_running)
+	if (p->on_rq && !rq->rt.rt_nr_running)
 		pull_rt_task(rq);
 }
 
@@ -1638,7 +1638,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 * If that current running task is also an RT task
 	 * then see if we can move to another run queue.
 	 */
-	if (p->se.on_rq && rq->curr != p) {
+	if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (rq->rt.overloaded && push_rt_task(rq) &&
 		    /* Don't resched if we changed runqueues */
@@ -1657,7 +1657,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 static void
 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (!p->se.on_rq)
+	if (!p->on_rq)
 		return;
 
 	if (rq->curr == p) {