author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-04-05 11:23:44 -0400
committer  Ingo Molnar <mingo@elte.hu>                2011-04-14 02:52:35 -0400
commit     fd2f4419b4cbe8fe90796df9617c355762afd6a4
tree       9eefa7f46c7163f00adb416ff4b9db97653d2665
parent     d7c01d27ab767a30d672d1fd657aa8336ebdcbca
sched: Provide p->on_rq
Provide a generic p->on_rq because the p->se.on_rq semantics are
unfavourable for lockless wakeups but needed for sched_fair.

In particular, p->on_rq is only cleared when we actually dequeue the
task in schedule() and not on any random dequeue as done by things
like __migrate_task() and __sched_setscheduler().

This also allows us to remove p->se usage from !sched_fair code.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110405152728.949545047@chello.nl
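Illustration only, not part of the patch: a minimal user-space sketch of the semantic
difference the changelog describes. The struct and helper names below are hypothetical
stand-ins, not kernel code; the point is that an se.on_rq-style flag toggles across every
transient dequeue/enqueue pair (priority or policy change, migration), while an
on_rq-style flag stays set across such requeues and is only cleared when the task
actually blocks in schedule(), which is what makes it safe to consult from a lockless
wakeup path.

    /* Toy model -- names are illustrative, not kernel API. */
    #include <stdio.h>

    struct task {
            int se_on_rq;   /* cleared on every dequeue, even a transient requeue */
            int on_rq;      /* cleared only when the task actually blocks */
    };

    /* A transient dequeue/enqueue pair, as done by e.g. a setscheduler-style path. */
    static void requeue_for_prio_change(struct task *p)
    {
            p->se_on_rq = 0;        /* brief window: still runnable, but se_on_rq == 0 */
            /* ... change priority, pick a new queue position ... */
            p->se_on_rq = 1;
    }

    /* The task really goes to sleep: both flags drop. */
    static void block_in_schedule(struct task *p)
    {
            p->se_on_rq = 0;
            p->on_rq = 0;
    }

    int main(void)
    {
            struct task p = { .se_on_rq = 1, .on_rq = 1 };

            requeue_for_prio_change(&p);
            /* A lockless wakeup that tested se_on_rq inside the requeue window would
             * wrongly treat the task as not queued; on_rq never dropped, so the
             * "already on a runqueue, nothing to do" shortcut stays correct. */
            printf("after requeue: se_on_rq=%d on_rq=%d\n", p.se_on_rq, p.on_rq);

            block_in_schedule(&p);
            printf("after sleep:   se_on_rq=%d on_rq=%d\n", p.se_on_rq, p.on_rq);
            return 0;
    }

Run as an ordinary C program, the sketch shows both flags still set after the requeue
and both cleared only after the simulated sleep.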
-rw-r--r--  include/linux/sched.h    |  1
-rw-r--r--  kernel/sched.c           | 38
-rw-r--r--  kernel/sched_debug.c     |  2
-rw-r--r--  kernel/sched_rt.c        | 16
-rw-r--r--  kernel/sched_stoptask.c  |  2
5 files changed, 31 insertions(+), 28 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 173850479e2c..b33a700652dc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1202,6 +1202,7 @@ struct task_struct {
 #ifdef CONFIG_SMP
         int on_cpu;
 #endif
+        int on_rq;
 
         int prio, static_prio, normal_prio;
         unsigned int rt_priority;
diff --git a/kernel/sched.c b/kernel/sched.c
index 4481638f9178..dece28e505c9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1785,7 +1785,6 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
         update_rq_clock(rq);
         sched_info_queued(p);
         p->sched_class->enqueue_task(rq, p, flags);
-        p->se.on_rq = 1;
 }
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1793,7 +1792,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
         update_rq_clock(rq);
         sched_info_dequeued(p);
         p->sched_class->dequeue_task(rq, p, flags);
-        p->se.on_rq = 0;
 }
 
 /*
@@ -2128,7 +2126,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
          * A queue event has occurred, and we're going to schedule. In
          * this case, we can save a useless back to back clock update.
          */
-        if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
+        if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
                 rq->skip_clock_update = 1;
 }
 
@@ -2203,7 +2201,7 @@ static bool migrate_task(struct task_struct *p, struct rq *rq)
          * If the task is not on a runqueue (and not running), then
          * the next wake-up will properly place the task.
          */
-        return p->se.on_rq || task_running(rq, p);
+        return p->on_rq || task_running(rq, p);
 }
 
 /*
@@ -2263,7 +2261,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                 rq = task_rq_lock(p, &flags);
                 trace_sched_wait_task(p);
                 running = task_running(rq, p);
-                on_rq = p->se.on_rq;
+                on_rq = p->on_rq;
                 ncsw = 0;
                 if (!match_state || p->state == match_state)
                         ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -2444,6 +2442,7 @@ ttwu_stat(struct rq *rq, struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
         activate_task(rq, p, en_flags);
+        p->on_rq = 1;
 
         /* if a worker is waking up, notify workqueue */
         if (p->flags & PF_WQ_WORKER)
@@ -2506,7 +2505,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
         cpu = task_cpu(p);
 
-        if (p->se.on_rq)
+        if (p->on_rq)
                 goto out_running;
 
         orig_cpu = cpu;
@@ -2583,7 +2582,7 @@ static void try_to_wake_up_local(struct task_struct *p)
         if (!(p->state & TASK_NORMAL))
                 return;
 
-        if (!p->se.on_rq)
+        if (!p->on_rq)
                 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
         ttwu_post_activation(p, rq, 0);
@@ -2620,19 +2619,21 @@ int wake_up_state(struct task_struct *p, unsigned int state)
  */
 static void __sched_fork(struct task_struct *p)
 {
+        p->on_rq = 0;
+
+        p->se.on_rq = 0;
         p->se.exec_start = 0;
         p->se.sum_exec_runtime = 0;
         p->se.prev_sum_exec_runtime = 0;
         p->se.nr_migrations = 0;
         p->se.vruntime = 0;
+        INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_SCHEDSTATS
         memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
         INIT_LIST_HEAD(&p->rt.run_list);
-        p->se.on_rq = 0;
-        INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
         INIT_HLIST_HEAD(&p->preempt_notifiers);
@@ -2750,6 +2751,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 
         rq = task_rq_lock(p, &flags);
         activate_task(rq, p, 0);
+        p->on_rq = 1;
         trace_sched_wakeup_new(p, true);
         check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -4051,7 +4053,7 @@ static inline void schedule_debug(struct task_struct *prev)
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-        if (prev->se.on_rq)
+        if (prev->on_rq)
                 update_rq_clock(rq);
         prev->sched_class->put_prev_task(rq, prev);
 }
@@ -4126,7 +4128,9 @@ need_resched:
                         if (to_wakeup)
                                 try_to_wake_up_local(to_wakeup);
                 }
+
                 deactivate_task(rq, prev, DEQUEUE_SLEEP);
+                prev->on_rq = 0;
 
                 /*
                  * If we are going to sleep and we have plugged IO queued, make
@@ -4695,7 +4699,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
         trace_sched_pi_setprio(p, prio);
         oldprio = p->prio;
         prev_class = p->sched_class;
-        on_rq = p->se.on_rq;
+        on_rq = p->on_rq;
         running = task_current(rq, p);
         if (on_rq)
                 dequeue_task(rq, p, 0);
@@ -4743,7 +4747,7 @@ void set_user_nice(struct task_struct *p, long nice)
                 p->static_prio = NICE_TO_PRIO(nice);
                 goto out_unlock;
         }
-        on_rq = p->se.on_rq;
+        on_rq = p->on_rq;
         if (on_rq)
                 dequeue_task(rq, p, 0);
 
@@ -4877,8 +4881,6 @@ static struct task_struct *find_process_by_pid(pid_t pid)
 static void
 __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 {
-        BUG_ON(p->se.on_rq);
-
         p->policy = policy;
         p->rt_priority = prio;
         p->normal_prio = normal_prio(p);
@@ -5044,7 +5046,7 @@ recheck:
                 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
                 goto recheck;
         }
-        on_rq = p->se.on_rq;
+        on_rq = p->on_rq;
         running = task_current(rq, p);
         if (on_rq)
                 deactivate_task(rq, p, 0);
@@ -5965,7 +5967,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
          * If we're not on a rq, the next wake-up will ensure we're
          * placed properly.
          */
-        if (p->se.on_rq) {
+        if (p->on_rq) {
                 deactivate_task(rq_src, p, 0);
                 set_task_cpu(p, dest_cpu);
                 activate_task(rq_dest, p, 0);
@@ -8339,7 +8341,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
         int old_prio = p->prio;
         int on_rq;
 
-        on_rq = p->se.on_rq;
+        on_rq = p->on_rq;
         if (on_rq)
                 deactivate_task(rq, p, 0);
         __setscheduler(rq, p, SCHED_NORMAL, 0);
@@ -8682,7 +8684,7 @@ void sched_move_task(struct task_struct *tsk)
         rq = task_rq_lock(tsk, &flags);
 
         running = task_current(rq, tsk);
-        on_rq = tsk->se.on_rq;
+        on_rq = tsk->on_rq;
 
         if (on_rq)
                 dequeue_task(rq, tsk, 0);
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 7bacd83a4158..3669bec6e130 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -152,7 +152,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
         read_lock_irqsave(&tasklist_lock, flags);
 
         do_each_thread(g, p) {
-                if (!p->se.on_rq || task_cpu(p) != rq_cpu)
+                if (!p->on_rq || task_cpu(p) != rq_cpu)
                         continue;
 
                 print_task(m, rq, p);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index e7cebdc65f82..9ca4f5f879c4 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1136,7 +1136,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
          * The previous task needs to be made eligible for pushing
          * if it is still active
          */
-        if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+        if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
                 enqueue_pushable_task(rq, p);
 }
 
@@ -1287,7 +1287,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                              !cpumask_test_cpu(lowest_rq->cpu,
                                                &task->cpus_allowed) ||
                              task_running(rq, task) ||
-                             !task->se.on_rq)) {
+                             !task->on_rq)) {
 
                         raw_spin_unlock(&lowest_rq->lock);
                         lowest_rq = NULL;
@@ -1321,7 +1321,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
         BUG_ON(task_current(rq, p));
         BUG_ON(p->rt.nr_cpus_allowed <= 1);
 
-        BUG_ON(!p->se.on_rq);
+        BUG_ON(!p->on_rq);
         BUG_ON(!rt_task(p));
 
         return p;
@@ -1467,7 +1467,7 @@ static int pull_rt_task(struct rq *this_rq)
                  */
                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
                         WARN_ON(p == src_rq->curr);
-                        WARN_ON(!p->se.on_rq);
+                        WARN_ON(!p->on_rq);
 
                         /*
                          * There's a chance that p is higher in priority
@@ -1538,7 +1538,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
          * Update the migration status of the RQ if we have an RT task
          * which is running AND changing its weight value.
          */
-        if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
+        if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
                 struct rq *rq = task_rq(p);
 
                 if (!task_current(rq, p)) {
@@ -1608,7 +1608,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
          * we may need to handle the pulling of RT tasks
          * now.
          */
-        if (p->se.on_rq && !rq->rt.rt_nr_running)
+        if (p->on_rq && !rq->rt.rt_nr_running)
                 pull_rt_task(rq);
 }
 
@@ -1638,7 +1638,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
          * If that current running task is also an RT task
          * then see if we can move to another run queue.
          */
-        if (p->se.on_rq && rq->curr != p) {
+        if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
                 if (rq->rt.overloaded && push_rt_task(rq) &&
                     /* Don't resched if we changed runqueues */
@@ -1657,7 +1657,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 static void
 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-        if (!p->se.on_rq)
+        if (!p->on_rq)
                 return;
 
         if (rq->curr == p) {
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
index 1ba2bd40fdac..f607de42e6fc 100644
--- a/kernel/sched_stoptask.c
+++ b/kernel/sched_stoptask.c
@@ -26,7 +26,7 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
 {
         struct task_struct *stop = rq->stop;
 
-        if (stop && stop->se.on_rq)
+        if (stop && stop->on_rq)
                 return stop;
 
         return NULL;