author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-05 11:23:40 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-04-14 02:52:32 -0400
commit	3ca7a440da394808571dad32d33d3bc0389982e6 (patch)
tree	ed749666030d201249f62a8dcb93224271d43241 /kernel/sched.c
parent	184748cc50b2dceb8287f9fb657eda48ff8fcfe7 (diff)
sched: Always provide p->on_cpu
Always provide p->on_cpu so that we can determine if it's on a CPU
without having to lock the rq.
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152728.785452014@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
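[Editor's illustration, not part of this patch: a minimal caller-side sketch, kernel context assumed. With ->on_cpu now maintained whenever CONFIG_SMP is set, task_running() can answer "is this task currently running on a CPU?" without taking rq->lock; for example, a path that must wait for a task to be scheduled out could poll the flag. The helper name wait_task_off_cpu() is hypothetical and does not appear in this commit.]

/*
 * Hypothetical helper, for illustration only: busy-wait until @p has
 * been scheduled off its CPU.  ->on_cpu is set in prepare_lock_switch()
 * and cleared in finish_lock_switch(), so the check needs no rq->lock.
 */
static void wait_task_off_cpu(struct task_struct *p)
{
	while (task_running(task_rq(p), p))
		cpu_relax();
}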
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	46
1 file changed, 29 insertions(+), 17 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a187c3fe027b..cd2593e1a3ec 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -838,18 +838,39 @@ static inline int task_current(struct rq *rq, struct task_struct *p)
 	return rq->curr == p;
 }
 
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline int task_running(struct rq *rq, struct task_struct *p)
 {
+#ifdef CONFIG_SMP
+	return p->on_cpu;
+#else
 	return task_current(rq, p);
+#endif
 }
 
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->on_cpu = 1;
+#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->on_cpu = 0;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
 	rq->lock.owner = current;
@@ -865,15 +886,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
-	return p->oncpu;
-#else
-	return task_current(rq, p);
-#endif
-}
-
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP
@@ -882,7 +894,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 	 * SMP rebalancing from interrupt is the only thing that cares
 	 * here.
 	 */
-	next->oncpu = 1;
+	next->on_cpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	raw_spin_unlock_irq(&rq->lock);
@@ -895,12 +907,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
 	/*
-	 * After ->oncpu is cleared, the task can be moved to a different CPU.
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
 	 * We must ensure this doesn't happen until the switch is completely
 	 * finished.
 	 */
 	smp_wmb();
-	prev->oncpu = 0;
+	prev->on_cpu = 0;
 #endif
 #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
@@ -2686,8 +2698,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-	p->oncpu = 0;
+#if defined(CONFIG_SMP)
+	p->on_cpu = 0;
 #endif
 #ifdef CONFIG_PREEMPT
 	/* Want to start with kernel preemption disabled. */
@@ -5776,8 +5788,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	rcu_read_unlock();
 
 	rq->curr = rq->idle = idle;
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-	idle->oncpu = 1;
+#if defined(CONFIG_SMP)
+	idle->on_cpu = 1;
 #endif
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 