 arch/x86/include/asm/atomic.h      | 7 +------
 arch/x86/include/asm/barrier.h     | 4 ++++
 arch/x86/include/asm/bitops.h      | 6 ++----
 arch/x86/include/asm/sync_bitops.h | 2 +-
 arch/x86/kernel/apic/hw_nmi.c      | 2 +-
 5 files changed, 9 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index b17f4f48ecd7..6dd1c7dd0473 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -7,6 +7,7 @@
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
 #include <asm/rmwcc.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -243,12 +244,6 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
 	: : "r" ((unsigned)(mask)), "m" (*(addr)) \
 	: "memory")
 
-/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 #else
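Note on the atomic.h hunk above: the four per-operation macros (smp_mb__{before,after}_atomic_{dec,inc}) are removed in favor of the generic pair that barrier.h now provides (see the barrier.h change below). A minimal caller-side sketch of the conversion; struct foo and foo_put() are illustrative names, not part of this patch:

#include <linux/atomic.h>

struct foo {
	atomic_t refs;
};

static void foo_put(struct foo *p)
{
	/* was: smp_mb__before_atomic_dec(); */
	smp_mb__before_atomic();	/* order earlier stores before the dec */
	atomic_dec(&p->refs);
}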
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 69bbb4845020..5c7198cca5ed 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -137,6 +137,10 @@ do { \
 
 #endif
 
+/* Atomic operations are already serializing on x86 */
+#define smp_mb__before_atomic()	barrier()
+#define smp_mb__after_atomic()	barrier()
+
 /*
  * Stop RDTSC speculation. This is needed when you need to use RDTSC
  * (or get_cycles or vread that possibly accesses the TSC) in a defined
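The new helpers can be plain compiler barriers here because every x86 atomic read-modify-write is a LOCK-prefixed instruction, which already acts as a full memory barrier; only compiler reordering remains to be suppressed. Weakly ordered architectures define the same pair as real fences. A usage sketch of the producer-side pairing (shared, DATA_READY, and the surrounding structure are illustrative, not from this patch):

/* Publish data before raising the flag; on x86 the helper compiles to
 * a compiler barrier only, since set_bit()'s LOCK prefix does the
 * ordering in hardware.
 */
shared->data = val;
smp_mb__before_atomic();
set_bit(DATA_READY, &shared->flags);

Value-returning operations such as test_and_set_bit() already imply full ordering, so they need neither helper.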
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 9fc1af74dc83..afcd35d331de 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h>
 #include <asm/rmwcc.h>
+#include <asm/barrier.h>
 
 #if BITS_PER_LONG == 32
 # define _BITOPS_LONG_SHIFT 5
@@ -102,7 +103,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
  *
  * clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __always_inline void
@@ -156,9 +157,6 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 	__clear_bit(nr, addr);
 }
 
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
 /**
  * __change_bit - Toggle a bit in memory
  * @nr: the bit to change
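The comment fix above tracks the rename, and the old smp_mb__{before,after}_clear_bit() definitions go away with it; the identical comment in sync_bitops.h below gets the same treatment. The locking idiom the comment describes, sketched with illustrative names (my_unlock() is not from this patch):

static inline void my_unlock(unsigned long *lockword)
{
	/* make critical-section stores visible before the bit clears */
	smp_mb__before_atomic();
	clear_bit(0, lockword);
}

In practice clear_bit_unlock(), whose non-atomic sibling is visible in the context of the third hunk, bundles exactly this release ordering.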
diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h
index 05af3b31d522..f28a24b51dc7 100644
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -41,7 +41,7 @@ static inline void sync_set_bit(long nr, volatile unsigned long *addr)
  *
  * sync_clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index a698d7165c96..eab67047dec3 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -57,7 +57,7 @@ void arch_trigger_all_cpu_backtrace(void)
 	}
 
 	clear_bit(0, &backtrace_flag);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static int __kprobes
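This is the only caller converted by the patch. The barrier pairs with the flag's acquisition earlier in the same function, which in mainline takes the form of a test_and_set_bit() guard (paraphrased below; not part of this hunk):

	/* only one backtrace can run at a time */
	if (test_and_set_bit(0, &backtrace_flag))
		return;

test_and_set_bit() is fully ordered on its own, so it needs no helper; on the release side, clear_bit() does not imply a barrier, and smp_mb__after_atomic() orders the flag clear against whatever follows.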
