author     Peter Zijlstra <peterz@infradead.org>      2014-09-23 11:06:41 -0400
committer  Ingo Molnar <mingo@kernel.org>             2014-09-24 08:47:05 -0400
commit     c55f5158f5606f8a62e694b7e009f59b92ac6258 (patch)
tree       f51365aca7822ecf82a03c65c919b84274ff86a2
parent     5bd96ab6fef66ec6b9f54134364e618fd0f8f2f3 (diff)
sched, mips, ia64: Remove __ARCH_WANT_UNLOCKED_CTXSW
Kirill found a subtle race in the __ARCH_WANT_UNLOCKED_CTXSW
code; instead of fixing it, remove the entire exception, since
neither arch that uses it appears to still require it.
Boot tested on mips64el (qemu) only.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kirill Tkhai <tkhai@yandex.ru>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Davidlohr Bueso <davidlohr@hp.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Qais Yousef <qais.yousef@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: oleg@redhat.com
Cc: linux@roeck-us.net
Cc: linux-ia64@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-mips@linux-mips.org
Link: http://lkml.kernel.org/r/20140923150641.GH3312@worktop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 arch/ia64/include/asm/processor.h |  1 -
 arch/mips/include/asm/processor.h |  6 ------
 kernel/sched/core.c               |  6 ------
 kernel/sched/sched.h              | 30 ------------------------------
 4 files changed, 0 insertions(+), 43 deletions(-)
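For readers new to this code: the exception being removed changed which side of a context switch held the runqueue lock. Below is a minimal userspace sketch of the two conventions, assuming a C11 spinlock as a stand-in for rq->lock; every name in it is illustrative, not kernel API.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for rq->lock; not the kernel's lock type. */
static atomic_flag rq_lock = ATOMIC_FLAG_INIT;

static void lock(void)   { while (atomic_flag_test_and_set(&rq_lock)) ; }
static void unlock(void) { atomic_flag_clear(&rq_lock); }

/* The register/stack switch itself; a no-op in this model. */
static void switch_to(void) { }

/* Remaining convention: the lock is taken by prev, held across the
 * switch, and released by next in finish_lock_switch(). */
static void context_switch_locked(void)
{
        lock();           /* taken in schedule(), on the prev side */
        switch_to();      /* lock still held across the switch     */
        unlock();         /* released by next, on the other side   */
}

/* Removed __ARCH_WANT_UNLOCKED_CTXSW convention: the lock is dropped
 * in prepare_lock_switch(), so the switch itself runs unlocked and
 * only disabled IRQs/preemption close the window. */
static void context_switch_unlocked(void)
{
        lock();
        unlock();         /* dropped before the switch */
        switch_to();      /* runs without rq->lock     */
}

int main(void)
{
        context_switch_locked();
        context_switch_unlocked();
        puts("modeled both locking conventions");
        return 0;
}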
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index c7367130ab14..ce53c50d0ba4 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -19,7 +19,6 @@
 #include <asm/ptrace.h>
 #include <asm/ustack.h>
 
-#define __ARCH_WANT_UNLOCKED_CTXSW
 #define ARCH_HAS_PREFETCH_SWITCH_STACK
 
 #define IA64_NUM_PHYS_STACK_REG 96
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 05f08438a7c4..f1df4cb4a286 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -397,12 +397,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define ARCH_HAS_PREFETCHW
 #define prefetchw(x) __builtin_prefetch((x), 1, 1)
 
-/*
- * See Documentation/scheduler/sched-arch.txt; prevents deadlock on SMP
- * systems.
- */
-#define __ARCH_WANT_UNLOCKED_CTXSW
-
 #endif
 
 #endif /* _ASM_PROCESSOR_H */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d65566d07fcf..5b0eac9f4e78 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2331,10 +2331,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
         */
        post_schedule(rq);
 
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-       /* In this case, finish_task_switch does not reenable preemption */
-       preempt_enable();
-#endif
        if (current->set_child_tid)
                put_user(task_pid_vnr(current), current->set_child_tid);
 }
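The hunk above deletes the one spot where the preempt_disable()/preempt_enable() pairing crossed the switch boundary. A toy counter, with local stand-ins for the kernel names, shows the invariant that becomes self-contained again:

#include <assert.h>
#include <stdio.h>

/* Local stand-ins; not the kernel's preempt machinery. */
static int preempt_count;

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; assert(preempt_count >= 0); }

/* Old unlocked-ctxsw flow: finish_task_switch() returned with
 * preemption still disabled, so schedule_tail() had to drop it --
 * the #ifdef'ed preempt_enable() removed above. */
static void schedule_tail_old(void)
{
        preempt_disable();        /* taken before the switch            */
        preempt_enable();         /* the extra call schedule_tail() had */
}

/* Remaining flow: the pair is balanced inside the switch path and
 * schedule_tail() has nothing left to undo. */
static void schedule_tail_new(void)
{
        preempt_disable();
        preempt_enable();         /* done by finish_task_switch() itself */
}

int main(void)
{
        schedule_tail_old();
        schedule_tail_new();
        assert(preempt_count == 0);
        puts("preempt count balanced either way");
        return 0;
}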
@@ -2377,9 +2373,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
         * of the scheduler it's an obvious special-case), so we
         * do an early lockdep release here:
         */
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-#endif
 
        context_tracking_task_switch(prev, next);
        /* Here we just switch the register state and the stack. */
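The spin_release() above exists because rq->lock is acquired by prev but released by next, and a strict owner-checking lock cannot express that handoff. A small userspace sketch of the same pattern, using a C11 atomic_flag precisely because pthread_mutex_t forbids unlock by a non-owner; all names here are made up for the model:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Cross-thread lock handoff, as rq->lock does across a context
 * switch: "prev" acquires, "next" releases. This mismatch is the
 * userspace analogue of why lockdep needs the early spin_release()
 * annotation above. */
static atomic_flag handoff_lock = ATOMIC_FLAG_INIT;
static atomic_int  switched;

static void *next_task(void *arg)
{
        (void)arg;
        while (!atomic_load(&switched))   /* wait for the "switch" */
                ;
        /* finish_lock_switch(): release a lock this thread never took */
        atomic_flag_clear(&handoff_lock);
        return NULL;
}

int main(void)
{
        pthread_t next;

        pthread_create(&next, NULL, next_task, NULL);

        /* prev side: acquire, then "switch" without releasing */
        while (atomic_flag_test_and_set(&handoff_lock))
                ;
        atomic_store(&switched, 1);

        pthread_join(next, NULL);
        puts("lock acquired by prev, released by next");
        return 0;
}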
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 16e1ca9cb7e8..6130251de280 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -975,7 +975,6 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 # define finish_arch_post_lock_switch() do { } while (0)
 #endif
 
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP
@@ -1013,35 +1012,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
        raw_spin_unlock_irq(&rq->lock);
 }
 
-#else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
-       /*
-        * We can optimise this out completely for !SMP, because the
-        * SMP rebalancing from interrupt is the only thing that cares
-        * here.
-        */
-       next->on_cpu = 1;
-#endif
-       raw_spin_unlock(&rq->lock);
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
-       /*
-        * After ->on_cpu is cleared, the task can be moved to a different CPU.
-        * We must ensure this doesn't happen until the switch is completely
-        * finished.
-        */
-       smp_wmb();
-       prev->on_cpu = 0;
-#endif
-       local_irq_enable();
-}
-#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
-
 /*
  * wake flags
  */
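The smp_wmb(); prev->on_cpu = 0; pattern visible in the removed branch survives in the remaining finish_lock_switch(). Its effect maps onto a C11 release/acquire pair, sketched below as a userspace model; on_cpu, saved_state, and migrating_cpu are illustrative names, not kernel symbols:

#include <pthread.h>
#include <stdatomic.h>
#include <assert.h>
#include <stdio.h>

/* Userspace model of the ->on_cpu handshake: the clearing store is a
 * release (standing in for smp_wmb() + plain store), and the CPU that
 * wants to migrate the task spins on on_cpu with an acquire load. */
static atomic_int on_cpu = 1;   /* task still running on the prev CPU   */
static int saved_state;         /* stands in for state saved by the switch */

static void *migrating_cpu(void *arg)
{
        (void)arg;
        /* migration path: wait until the switch is completely finished */
        while (atomic_load_explicit(&on_cpu, memory_order_acquire))
                ;
        /* the acquire pairs with the release below, so this read is safe */
        assert(saved_state == 42);
        return NULL;
}

int main(void)
{
        pthread_t cpu;

        pthread_create(&cpu, NULL, migrating_cpu, NULL);

        saved_state = 42;       /* stores done by the context switch */
        /* finish_lock_switch(): smp_wmb(); prev->on_cpu = 0; */
        atomic_store_explicit(&on_cpu, 0, memory_order_release);

        pthread_join(cpu, NULL);
        puts("task migrated only after the switch completed");
        return 0;
}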