author	Peter Zijlstra <peterz@infradead.org>	2014-03-13 14:00:35 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-04-18 08:20:46 -0400
commit	d00a569284b1340c16fe2c148099e077ea09ebc9 (patch)
tree	5e44a663929cdd1288989150ca9a7304a29cabd9
parent	ce3609f93445846f7b5a5b4bacb236a9bdc35216 (diff)
arch,x86: Convert smp_mb__*()
x86 is strongly ordered and all its atomic ops imply a full barrier.
Implement the two new primitives as the old ones were.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-knswsr5mldkr0w1lrdxvc81w@git.kernel.org
Cc: Dave Jones <davej@redhat.com>
Cc: Jesse Brandeburg <jesse.brandeburg@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michel Lespinasse <walken@google.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 arch/x86/include/asm/atomic.h      | 7 +------
 arch/x86/include/asm/barrier.h     | 4 ++++
 arch/x86/include/asm/bitops.h      | 6 ++----
 arch/x86/include/asm/sync_bitops.h | 2 +-
 arch/x86/kernel/apic/hw_nmi.c      | 2 +-
 5 files changed, 9 insertions(+), 12 deletions(-)
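
To make the conversion concrete before the diff itself, here is a minimal caller-side sketch of the pattern the new primitives serve; the reference count and function below are hypothetical and not part of this patch. On x86 both macros reduce to barrier(), as the barrier.h hunk defines, because the LOCK-prefixed read-modify-write already acts as a full memory barrier.

#include <linux/atomic.h>

static atomic_t example_refs;	/* hypothetical reference count */

static void example_put(void)
{
	/* Order earlier stores before the decrement becomes visible... */
	smp_mb__before_atomic();
	atomic_dec(&example_refs);
	/* ...and order the decrement against whatever follows. */
	smp_mb__after_atomic();
}
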
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index b17f4f48ecd7..6dd1c7dd0473 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -7,6 +7,7 @@
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
 #include <asm/rmwcc.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -243,12 +244,6 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
243 : : "r" ((unsigned)(mask)), "m" (*(addr)) \ 244 : : "r" ((unsigned)(mask)), "m" (*(addr)) \
244 : "memory") 245 : "memory")
245 246
246/* Atomic operations are already serializing on x86 */
247#define smp_mb__before_atomic_dec() barrier()
248#define smp_mb__after_atomic_dec() barrier()
249#define smp_mb__before_atomic_inc() barrier()
250#define smp_mb__after_atomic_inc() barrier()
251
252#ifdef CONFIG_X86_32 247#ifdef CONFIG_X86_32
253# include <asm/atomic64_32.h> 248# include <asm/atomic64_32.h>
254#else 249#else
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 69bbb4845020..5c7198cca5ed 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -137,6 +137,10 @@ do { \
 
 #endif
 
+/* Atomic operations are already serializing on x86 */
+#define smp_mb__before_atomic()	barrier()
+#define smp_mb__after_atomic()	barrier()
+
 /*
  * Stop RDTSC speculation. This is needed when you need to use RDTSC
  * (or get_cycles or vread that possibly accesses the TSC) in a defined
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 9fc1af74dc83..afcd35d331de 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h>
 #include <asm/rmwcc.h>
+#include <asm/barrier.h>
 
 #if BITS_PER_LONG == 32
 # define _BITOPS_LONG_SHIFT 5
@@ -102,7 +103,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
  *
  * clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __always_inline void
@@ -156,9 +157,6 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 	__clear_bit(nr, addr);
 }
 
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
 /**
  * __change_bit - Toggle a bit in memory
  * @nr: the bit to change
diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h
index 05af3b31d522..f28a24b51dc7 100644
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -41,7 +41,7 @@ static inline void sync_set_bit(long nr, volatile unsigned long *addr)
  *
  * sync_clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index a698d7165c96..eab67047dec3 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -57,7 +57,7 @@ void arch_trigger_all_cpu_backtrace(void)
 	}
 
 	clear_bit(0, &backtrace_flag);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static int __kprobes
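
As a follow-up to the clear_bit() comments updated above, a hedged sketch of the unlock-style use they describe; the busy bit, structure, and function are hypothetical and only illustrate the documented rule that the caller supplies the barrier.

#include <linux/bitops.h>

#define EXAMPLE_BUSY_BIT	0	/* hypothetical busy bit */

struct example_dev {
	unsigned long state;		/* hypothetical state word */
};

static void example_dev_release(struct example_dev *dev)
{
	/*
	 * clear_bit() is atomic but has no barrier semantics of its own,
	 * so the caller orders the critical section's stores before the
	 * bit is dropped, which is exactly the case the updated comments
	 * describe.
	 */
	smp_mb__before_atomic();
	clear_bit(EXAMPLE_BUSY_BIT, &dev->state);
}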