 include/linux/sched.h |  4 +---
 kernel/sched.c        | 46 +++++++++++++++++++++++++++++-----------------
 2 files changed, 30 insertions(+), 20 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 758e27afcda5..3435837e89ff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1200,9 +1200,7 @@ struct task_struct {
         int lock_depth;         /* BKL lock depth */
 
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-        int oncpu;
-#endif
+        int on_cpu;
 #endif
 
         int prio, static_prio, normal_prio;
diff --git a/kernel/sched.c b/kernel/sched.c
index a187c3fe027b..cd2593e1a3ec 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -838,18 +838,39 @@ static inline int task_current(struct rq *rq, struct task_struct *p)
         return rq->curr == p;
 }
 
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline int task_running(struct rq *rq, struct task_struct *p)
 {
+#ifdef CONFIG_SMP
+        return p->on_cpu;
+#else
         return task_current(rq, p);
+#endif
 }
 
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
+#ifdef CONFIG_SMP
+        /*
+         * We can optimise this out completely for !SMP, because the
+         * SMP rebalancing from interrupt is the only thing that cares
+         * here.
+         */
+        next->on_cpu = 1;
+#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
+#ifdef CONFIG_SMP
+        /*
+         * After ->on_cpu is cleared, the task can be moved to a different CPU.
+         * We must ensure this doesn't happen until the switch is completely
+         * finished.
+         */
+        smp_wmb();
+        prev->on_cpu = 0;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
         /* this is a valid case when another task releases the spinlock */
         rq->lock.owner = current;
@@ -865,15 +886,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
-        return p->oncpu;
-#else
-        return task_current(rq, p);
-#endif
-}
-
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP
@@ -882,7 +894,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
          * SMP rebalancing from interrupt is the only thing that cares
          * here.
          */
-        next->oncpu = 1;
+        next->on_cpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
         raw_spin_unlock_irq(&rq->lock);
@@ -895,12 +907,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
         /*
-         * After ->oncpu is cleared, the task can be moved to a different CPU.
+         * After ->on_cpu is cleared, the task can be moved to a different CPU.
          * We must ensure this doesn't happen until the switch is completely
          * finished.
          */
         smp_wmb();
-        prev->oncpu = 0;
+        prev->on_cpu = 0;
 #endif
 #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
         local_irq_enable();
@@ -2686,8 +2698,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
         if (likely(sched_info_on()))
                 memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-        p->oncpu = 0;
+#if defined(CONFIG_SMP)
+        p->on_cpu = 0;
 #endif
 #ifdef CONFIG_PREEMPT
         /* Want to start with kernel preemption disabled. */
@@ -5776,8 +5788,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         rcu_read_unlock();
 
         rq->curr = rq->idle = idle;
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-        idle->oncpu = 1;
+#if defined(CONFIG_SMP)
+        idle->on_cpu = 1;
 #endif
         raw_spin_unlock_irqrestore(&rq->lock, flags);
 
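The ->on_cpu handoff this patch makes unconditional is easiest to see outside the kernel. Below is a minimal userspace sketch of the protocol, not kernel code: all names (struct task, prepare_switch, finish_switch, the migrator thread) are hypothetical stand-ins, and C11 atomics plus POSIX threads model the scheduler's primitives. A release store plays the role of the smp_wmb() followed by the plain store to prev->on_cpu, and a spinning acquire load models a waker polling task_running() before it may migrate the task.

/* Illustrative sketch only; names and structure are hypothetical. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct task {
        atomic_int on_cpu;      /* 1 while the task still runs on its current CPU */
        int cpu;                /* the "CPU" the task currently lives on */
        int scratch;            /* state the running task writes before switching out */
};

static struct task t = { .cpu = 0 };

/* Analogue of prepare_lock_switch(): mark the incoming task as on a CPU. */
static void prepare_switch(struct task *next)
{
        atomic_store_explicit(&next->on_cpu, 1, memory_order_relaxed);
}

/*
 * Analogue of finish_lock_switch(): the release store keeps every prior
 * write by the task ordered before on_cpu drops to 0, mirroring the
 * smp_wmb(); prev->on_cpu = 0; pair in the patch.
 */
static void finish_switch(struct task *prev)
{
        atomic_store_explicit(&prev->on_cpu, 0, memory_order_release);
}

/* Analogue of the new SMP task_running(): just read ->on_cpu. */
static int task_running(struct task *p)
{
        return atomic_load_explicit(&p->on_cpu, memory_order_acquire);
}

/* "CPU 0": run the task for a while, then switch it out. */
static void *cpu0_thread(void *arg)
{
        (void)arg;
        t.scratch = 42;         /* work done while on_cpu == 1 */
        finish_switch(&t);
        return NULL;
}

/* A would-be migrator: may not move the task while it is still on a CPU. */
static void *migrator_thread(void *arg)
{
        (void)arg;
        while (task_running(&t))
                ;               /* spin, as the wake-up/migration paths do */
        t.cpu = 1;              /* safe: the old CPU is completely done with it */
        return NULL;
}

int main(void)
{
        pthread_t running, migrator;

        prepare_switch(&t);     /* switch the task in on "CPU 0" first */
        pthread_create(&running, NULL, cpu0_thread, NULL);
        pthread_create(&migrator, NULL, migrator_thread, NULL);
        pthread_join(running, NULL);
        pthread_join(migrator, NULL);

        printf("task on cpu %d, scratch %d\n", t.cpu, t.scratch);
        return 0;
}

The point of the in-code comment is preserved here: nothing may observe on_cpu == 0 until every write the outgoing task made on its old CPU is visible, which is why the clear is ordered after the preceding stores rather than performed first.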