author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-05 11:23:44 -0400
committer	Ingo Molnar <mingo@elte.hu>			2011-04-14 02:52:35 -0400
commit		fd2f4419b4cbe8fe90796df9617c355762afd6a4 (patch)
tree		9eefa7f46c7163f00adb416ff4b9db97653d2665 /kernel/sched.c
parent		d7c01d27ab767a30d672d1fd657aa8336ebdcbca (diff)
sched: Provide p->on_rq
Provide a generic p->on_rq because the p->se.on_rq semantics are
unfavourable for lockless wakeups but needed for sched_fair.

In particular, p->on_rq is only cleared when we actually dequeue the task
in schedule() and not on any random dequeue as done by things like
__migrate_task() and __sched_setscheduler().

This also allows us to remove p->se usage from !sched_fair code.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110405152728.949545047@chello.nl
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	38
1 file changed, 20 insertions(+), 18 deletions(-)
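
As a rough illustration of the semantics described in the commit message above, here is a minimal user-space sketch (not kernel code; the struct and helper names are invented for this example): the generic flag is set when a task is genuinely activated, cleared only when schedule() dequeues a sleeping task, and deliberately left untouched by the transient dequeue/enqueue pairs that callers like __migrate_task() and __sched_setscheduler() perform, which is what makes it usable for lockless wakeups.

#include <stdio.h>

/* Invented, simplified task model used only for this illustration. */
struct task {
	int on_rq;	/* the generic flag this patch introduces */
};

/* Transient dequeue/enqueue as performed while changing a task's
 * priority or migrating it: deliberately does not touch ->on_rq. */
static void transient_requeue(struct task *p)
{
	(void)p;	/* class-specific queues would be updated here */
}

/* Wakeup path: set the flag once the task is genuinely runnable
 * (compare the ttwu_activate() and wake_up_new_task() hunks below). */
static void activate(struct task *p)
{
	p->on_rq = 1;
}

/* schedule() putting a task to sleep: the only place the flag is cleared
 * (compare the deactivate_task() + prev->on_rq = 0 hunk below). */
static void sleep_in_schedule(struct task *p)
{
	p->on_rq = 0;
}

int main(void)
{
	struct task p = { 0 };

	activate(&p);
	transient_requeue(&p);	/* e.g. __sched_setscheduler() at work */
	printf("after requeue:  on_rq=%d\n", p.on_rq);	/* still 1 */

	sleep_in_schedule(&p);
	printf("after schedule: on_rq=%d\n", p.on_rq);	/* now 0 */
	return 0;
}

A lockless waker that observes on_rq == 1 can thus skip the full activation path, regardless of any requeueing going on elsewhere.
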
diff --git a/kernel/sched.c b/kernel/sched.c
index 4481638f9178..dece28e505c9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1785,7 +1785,6 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 	update_rq_clock(rq);
 	sched_info_queued(p);
 	p->sched_class->enqueue_task(rq, p, flags);
-	p->se.on_rq = 1;
 }
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1793,7 +1792,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	update_rq_clock(rq);
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, flags);
-	p->se.on_rq = 0;
 }
 
 /*
@@ -2128,7 +2126,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule. In
 	 * this case, we can save a useless back to back clock update.
 	 */
-	if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
+	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
 		rq->skip_clock_update = 1;
 }
 
@@ -2203,7 +2201,7 @@ static bool migrate_task(struct task_struct *p, struct rq *rq)
 	 * If the task is not on a runqueue (and not running), then
 	 * the next wake-up will properly place the task.
 	 */
-	return p->se.on_rq || task_running(rq, p);
+	return p->on_rq || task_running(rq, p);
 }
 
 /*
@@ -2263,7 +2261,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		rq = task_rq_lock(p, &flags);
 		trace_sched_wait_task(p);
 		running = task_running(rq, p);
-		on_rq = p->se.on_rq;
+		on_rq = p->on_rq;
 		ncsw = 0;
 		if (!match_state || p->state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -2444,6 +2442,7 @@ ttwu_stat(struct rq *rq, struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
 	activate_task(rq, p, en_flags);
+	p->on_rq = 1;
 
 	/* if a worker is waking up, notify workqueue */
 	if (p->flags & PF_WQ_WORKER)
@@ -2506,7 +2505,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 	cpu = task_cpu(p);
 
-	if (p->se.on_rq)
+	if (p->on_rq)
 		goto out_running;
 
 	orig_cpu = cpu;
@@ -2583,7 +2582,7 @@ static void try_to_wake_up_local(struct task_struct *p)
 	if (!(p->state & TASK_NORMAL))
 		return;
 
-	if (!p->se.on_rq)
+	if (!p->on_rq)
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
 	ttwu_post_activation(p, rq, 0);
@@ -2620,19 +2619,21 @@ int wake_up_state(struct task_struct *p, unsigned int state)
  */
 static void __sched_fork(struct task_struct *p)
 {
+	p->on_rq = 0;
+
+	p->se.on_rq = 0;
 	p->se.exec_start = 0;
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
 	p->se.nr_migrations = 0;
 	p->se.vruntime = 0;
+	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
 	INIT_LIST_HEAD(&p->rt.run_list);
-	p->se.on_rq = 0;
-	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	INIT_HLIST_HEAD(&p->preempt_notifiers);
@@ -2750,6 +2751,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 
 	rq = task_rq_lock(p, &flags);
 	activate_task(rq, p, 0);
+	p->on_rq = 1;
 	trace_sched_wakeup_new(p, true);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -4051,7 +4053,7 @@ static inline void schedule_debug(struct task_struct *prev)
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	if (prev->se.on_rq)
+	if (prev->on_rq)
 		update_rq_clock(rq);
 	prev->sched_class->put_prev_task(rq, prev);
 }
@@ -4126,7 +4128,9 @@ need_resched:
 				if (to_wakeup)
 					try_to_wake_up_local(to_wakeup);
 			}
+
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
+			prev->on_rq = 0;
 
 			/*
 			 * If we are going to sleep and we have plugged IO queued, make
@@ -4695,7 +4699,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
 	prev_class = p->sched_class;
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
 		dequeue_task(rq, p, 0);
@@ -4743,7 +4747,7 @@ void set_user_nice(struct task_struct *p, long nice)
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	if (on_rq)
 		dequeue_task(rq, p, 0);
 
@@ -4877,8 +4881,6 @@ static struct task_struct *find_process_by_pid(pid_t pid)
 static void
 __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 {
-	BUG_ON(p->se.on_rq);
-
 	p->policy = policy;
 	p->rt_priority = prio;
 	p->normal_prio = normal_prio(p);
@@ -5044,7 +5046,7 @@ recheck:
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		goto recheck;
 	}
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
 		deactivate_task(rq, p, 0);
@@ -5965,7 +5967,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * If we're not on a rq, the next wake-up will ensure we're
 	 * placed properly.
 	 */
-	if (p->se.on_rq) {
+	if (p->on_rq) {
 		deactivate_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
 		activate_task(rq_dest, p, 0);
@@ -8339,7 +8341,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 	int old_prio = p->prio;
 	int on_rq;
 
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	if (on_rq)
 		deactivate_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
@@ -8682,7 +8684,7 @@ void sched_move_task(struct task_struct *tsk)
 	rq = task_rq_lock(tsk, &flags);
 
 	running = task_current(rq, tsk);
-	on_rq = tsk->se.on_rq;
+	on_rq = tsk->on_rq;
 
 	if (on_rq)
 		dequeue_task(rq, tsk, 0);