Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/atomic.h | 5
-rw-r--r--  arch/alpha/include/asm/bitops.h | 3
-rw-r--r--  arch/arc/include/asm/atomic.h | 5
-rw-r--r--  arch/arc/include/asm/bitops.h | 5
-rw-r--r--  arch/arm/include/asm/atomic.h | 5
-rw-r--r--  arch/arm/include/asm/barrier.h | 3
-rw-r--r--  arch/arm/include/asm/bitops.h | 4
-rw-r--r--  arch/arm64/include/asm/atomic.h | 5
-rw-r--r--  arch/arm64/include/asm/barrier.h | 3
-rw-r--r--  arch/arm64/include/asm/bitops.h | 9
-rw-r--r--  arch/avr32/include/asm/atomic.h | 5
-rw-r--r--  arch/avr32/include/asm/bitops.h | 9
-rw-r--r--  arch/blackfin/include/asm/barrier.h | 3
-rw-r--r--  arch/blackfin/include/asm/bitops.h | 14
-rw-r--r--  arch/c6x/include/asm/bitops.h | 8
-rw-r--r--  arch/cris/include/asm/atomic.h | 8
-rw-r--r--  arch/cris/include/asm/bitops.h | 9
-rw-r--r--  arch/frv/include/asm/atomic.h | 7
-rw-r--r--  arch/frv/include/asm/bitops.h | 6
-rw-r--r--  arch/hexagon/include/asm/atomic.h | 6
-rw-r--r--  arch/hexagon/include/asm/bitops.h | 4
-rw-r--r--  arch/ia64/include/asm/atomic.h | 7
-rw-r--r--  arch/ia64/include/asm/barrier.h | 3
-rw-r--r--  arch/ia64/include/asm/bitops.h | 9
-rw-r--r--  arch/ia64/include/uapi/asm/cmpxchg.h | 9
-rw-r--r--  arch/m32r/include/asm/atomic.h | 7
-rw-r--r--  arch/m32r/include/asm/bitops.h | 6
-rw-r--r--  arch/m68k/include/asm/atomic.h | 8
-rw-r--r--  arch/m68k/include/asm/bitops.h | 7
-rw-r--r--  arch/metag/include/asm/atomic.h | 6
-rw-r--r--  arch/metag/include/asm/barrier.h | 3
-rw-r--r--  arch/metag/include/asm/bitops.h | 6
-rw-r--r--  arch/mips/include/asm/atomic.h | 9
-rw-r--r--  arch/mips/include/asm/barrier.h | 3
-rw-r--r--  arch/mips/include/asm/bitops.h | 11
-rw-r--r--  arch/mips/kernel/irq.c | 4
-rw-r--r--  arch/mn10300/include/asm/atomic.h | 7
-rw-r--r--  arch/mn10300/include/asm/bitops.h | 4
-rw-r--r--  arch/mn10300/mm/tlb-smp.c | 4
-rw-r--r--  arch/openrisc/include/asm/bitops.h | 9
-rw-r--r--  arch/parisc/include/asm/atomic.h | 6
-rw-r--r--  arch/parisc/include/asm/bitops.h | 4
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 6
-rw-r--r--  arch/powerpc/include/asm/barrier.h | 3
-rw-r--r--  arch/powerpc/include/asm/bitops.h | 6
-rw-r--r--  arch/powerpc/kernel/crash.c | 2
-rw-r--r--  arch/s390/include/asm/atomic.h | 5
-rw-r--r--  arch/s390/include/asm/barrier.h | 5
-rw-r--r--  arch/score/include/asm/bitops.h | 7
-rw-r--r--  arch/sh/include/asm/atomic.h | 6
-rw-r--r--  arch/sh/include/asm/bitops.h | 7
-rw-r--r--  arch/sparc/include/asm/atomic_32.h | 7
-rw-r--r--  arch/sparc/include/asm/atomic_64.h | 7
-rw-r--r--  arch/sparc/include/asm/barrier_64.h | 3
-rw-r--r--  arch/sparc/include/asm/bitops_32.h | 3
-rw-r--r--  arch/sparc/include/asm/bitops_64.h | 4
-rw-r--r--  arch/tile/include/asm/atomic_32.h | 10
-rw-r--r--  arch/tile/include/asm/atomic_64.h | 6
-rw-r--r--  arch/tile/include/asm/barrier.h | 14
-rw-r--r--  arch/tile/include/asm/bitops.h | 1
-rw-r--r--  arch/tile/include/asm/bitops_32.h | 8
-rw-r--r--  arch/tile/include/asm/bitops_64.h | 4
-rw-r--r--  arch/x86/include/asm/atomic.h | 7
-rw-r--r--  arch/x86/include/asm/barrier.h | 4
-rw-r--r--  arch/x86/include/asm/bitops.h | 6
-rw-r--r--  arch/x86/include/asm/sync_bitops.h | 2
-rw-r--r--  arch/x86/kernel/apic/hw_nmi.c | 2
-rw-r--r--  arch/xtensa/include/asm/atomic.h | 7
-rw-r--r--  arch/xtensa/include/asm/barrier.h | 3
-rw-r--r--  arch/xtensa/include/asm/bitops.h | 4
70 files changed, 110 insertions, 297 deletions
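
This series replaces the per-operation barrier macros (smp_mb__before_clear_bit(), smp_mb__after_clear_bit(), smp_mb__before_atomic_dec(), smp_mb__after_atomic_inc(), and friends) with one generic pair, smp_mb__before_atomic()/smp_mb__after_atomic(), defined once per architecture in <asm/barrier.h>. A minimal sketch of the caller-side conversion, taken from the mips/kernel/irq.c and mn10300/mm/tlb-smp.c hunks below:

	/* before: per-operation barriers around an atomic bitop */
	smp_mb__before_clear_bit();
	clear_bit(irq, irq_map);
	smp_mb__after_clear_bit();

	/* after: one generic pair covers all atomic RMW operations */
	smp_mb__before_atomic();
	clear_bit(irq, irq_map);
	smp_mb__after_atomic();
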
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 78b03ef39f6f..ed60a1ee1ed3 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -292,9 +292,4 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))
 
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
 #endif /* _ALPHA_ATOMIC_H */
diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
index a19ba5efea4c..4bdfbd444e63 100644
--- a/arch/alpha/include/asm/bitops.h
+++ b/arch/alpha/include/asm/bitops.h
@@ -53,9 +53,6 @@ __set_bit(unsigned long nr, volatile void * addr)
 	*m |= 1 << (nr & 31);
 }
 
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-
 static inline void
 clear_bit(unsigned long nr, volatile void * addr)
 {
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 03e494f695d1..83f03ca6caf6 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -190,11 +190,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 
 #endif /* !CONFIG_ARC_HAS_LLSC */
 
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 /**
  * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 647a83a8e756..ebc0cf3164dc 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -19,6 +19,7 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <asm/barrier.h>
 
 /*
  * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
@@ -496,10 +497,6 @@ static inline __attribute__ ((const)) int __ffs(unsigned long word)
  */
 #define ffz(x) __ffs(~(x))
 
-/* TODO does this affect uni-processor code */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9a92fd7864a8..3040359094d9 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -241,11 +241,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
 
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
 	long long counter;
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 2f59f7443396..c6a3e73a6e24 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -79,5 +79,8 @@ do { \
 
 #define set_mb(var, value) do { var = value; smp_mb(); } while (0)
 
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_BARRIER_H */
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index b2e298a90d76..56380995f4c3 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -25,9 +25,7 @@
 
 #include <linux/compiler.h>
 #include <linux/irqflags.h>
-
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
+#include <asm/barrier.h>
 
 /*
  * These functions are the basis of our bit ops.
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 0237f0867e37..57e8cb49824c 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -152,11 +152,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
 
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
 /*
  * 64-bit atomic operations.
  */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 66eb7648043b..48b9e704af7c 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -98,6 +98,9 @@ do { \
 #define set_mb(var, value) do { var = value; smp_mb(); } while (0)
 #define nop() asm volatile("nop");
 
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_BARRIER_H */
diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
index aa5b59d6ba43..9c19594ce7cb 100644
--- a/arch/arm64/include/asm/bitops.h
+++ b/arch/arm64/include/asm/bitops.h
@@ -17,17 +17,8 @@
 #define __ASM_BITOPS_H
 
 #include <linux/compiler.h>
-
 #include <asm/barrier.h>
 
-/*
- * clear_bit may not imply a memory barrier
- */
-#ifndef smp_mb__before_clear_bit
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-#endif
-
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 61407279208a..0780f3f2415b 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -183,9 +183,4 @@ static inline int atomic_sub_if_positive(int i, atomic_t *v)
 
 #define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
 
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* __ASM_AVR32_ATOMIC_H */
diff --git a/arch/avr32/include/asm/bitops.h b/arch/avr32/include/asm/bitops.h
index ebe7ad3f490b..910d5374ce59 100644
--- a/arch/avr32/include/asm/bitops.h
+++ b/arch/avr32/include/asm/bitops.h
@@ -13,12 +13,7 @@
 #endif
 
 #include <asm/byteorder.h>
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
+#include <asm/barrier.h>
 
 /*
  * set_bit - Atomically set a bit in memory
@@ -67,7 +62,7 @@ static inline void set_bit(int nr, volatile void * addr)
  *
  * clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static inline void clear_bit(int nr, volatile void * addr)
diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h
index 19283a16ac08..420006877998 100644
--- a/arch/blackfin/include/asm/barrier.h
+++ b/arch/blackfin/include/asm/barrier.h
@@ -27,6 +27,9 @@
 
 #endif /* !CONFIG_SMP */
 
+#define smp_mb__before_atomic() barrier()
+#define smp_mb__after_atomic() barrier()
+
 #include <asm-generic/barrier.h>
 
 #endif /* _BLACKFIN_BARRIER_H */
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index 0ca40dd44724..b298b654a26f 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -27,21 +27,17 @@
 
 #include <asm-generic/bitops/ext2-atomic.h>
 
+#include <asm/barrier.h>
+
 #ifndef CONFIG_SMP
 #include <linux/irqflags.h>
-
 /*
  * clear_bit may not imply a memory barrier
  */
-#ifndef smp_mb__before_clear_bit
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-#endif
 #include <asm-generic/bitops/atomic.h>
 #include <asm-generic/bitops/non-atomic.h>
 #else
 
-#include <asm/barrier.h>
 #include <asm/byteorder.h> /* swab32 */
 #include <linux/linkage.h>
 
@@ -101,12 +97,6 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 	return __raw_bit_test_toggle_asm(a, nr & 0x1f);
 }
 
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
 #define test_bit __skip_test_bit
 #include <asm-generic/bitops/non-atomic.h>
 #undef test_bit
diff --git a/arch/c6x/include/asm/bitops.h b/arch/c6x/include/asm/bitops.h
index 0bec7e5036a8..f0ab012401b6 100644
--- a/arch/c6x/include/asm/bitops.h
+++ b/arch/c6x/include/asm/bitops.h
@@ -14,14 +14,8 @@
 #ifdef __KERNEL__
 
 #include <linux/bitops.h>
-
 #include <asm/byteorder.h>
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit()  barrier()
+#include <asm/barrier.h>
 
 /*
  * We are lucky, DSP is perfect for bitops: do it in 3 cycles
diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h
index 1056a5dfe04f..aa429baebaf9 100644
--- a/arch/cris/include/asm/atomic.h
+++ b/arch/cris/include/asm/atomic.h
@@ -7,6 +7,8 @@
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
 #include <arch/atomic.h>
+#include <arch/system.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -151,10 +153,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return ret;
 }
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif
diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h
index 053c17b36559..bd49a546f4f5 100644
--- a/arch/cris/include/asm/bitops.h
+++ b/arch/cris/include/asm/bitops.h
@@ -21,6 +21,7 @@
 #include <arch/bitops.h>
 #include <linux/atomic.h>
 #include <linux/compiler.h>
+#include <asm/barrier.h>
 
 /*
  * set_bit - Atomically set a bit in memory
@@ -42,7 +43,7 @@
  *
  * clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 
@@ -84,12 +85,6 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
 	return retval;
 }
 
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
 /**
  * test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index b86329d0e316..f6c3a1690101 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -17,6 +17,7 @@
17#include <linux/types.h> 17#include <linux/types.h>
18#include <asm/spr-regs.h> 18#include <asm/spr-regs.h>
19#include <asm/cmpxchg.h> 19#include <asm/cmpxchg.h>
20#include <asm/barrier.h>
20 21
21#ifdef CONFIG_SMP 22#ifdef CONFIG_SMP
22#error not SMP safe 23#error not SMP safe
@@ -29,12 +30,6 @@
  * We do not have SMP systems, so we don't have to deal with that.
  */
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #define ATOMIC_INIT(i) { (i) }
 #define atomic_read(v) (*(volatile int *)&(v)->counter)
 #define atomic_set(v, i) (((v)->counter) = (i))
diff --git a/arch/frv/include/asm/bitops.h b/arch/frv/include/asm/bitops.h
index 57bf85db893f..96de220ef131 100644
--- a/arch/frv/include/asm/bitops.h
+++ b/arch/frv/include/asm/bitops.h
@@ -25,12 +25,6 @@
 
 #include <asm-generic/bitops/ffz.h>
 
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
 #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
 static inline
 unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 17dc63780c06..de916b11bff5 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -24,6 +24,7 @@
 
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #define ATOMIC_INIT(i) { (i) }
 
@@ -176,9 +177,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic_inc_return(v) (atomic_add_return(1, v))
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 9b1e4afbab3c..5e4a59b3ec1b 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -25,12 +25,10 @@
 #include <linux/compiler.h>
 #include <asm/byteorder.h>
 #include <asm/atomic.h>
+#include <asm/barrier.h>
 
 #ifdef __KERNEL__
 
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
 /*
  * The offset calculations for these are based on BITS_PER_LONG == 32
  * (i.e. I get to shift by #5-2 (32 bits per long, 4 bytes per access),
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 6e6fe1839f5d..0f8bf48dadf3 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
 
 
 #define ATOMIC_INIT(i) { (i) }
@@ -208,10 +209,4 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_inc(v) atomic64_add(1, (v))
 #define atomic64_dec(v) atomic64_sub(1, (v))
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* _ASM_IA64_ATOMIC_H */
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index d0a69aa35e27..a48957c7b445 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -55,6 +55,9 @@
 
 #endif
 
+#define smp_mb__before_atomic() barrier()
+#define smp_mb__after_atomic() barrier()
+
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index c27eccd33349..71e8145243ee 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -16,6 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -65,12 +66,6 @@ __set_bit (int nr, volatile void *addr)
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
 
-/*
- * clear_bit() has "acquire" semantics.
- */
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() do { /* skip */; } while (0)
-
 /**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
@@ -78,7 +73,7 @@ __set_bit (int nr, volatile void *addr)
  *
  * clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 4f37dbbb8640..f35109b1d907 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -118,6 +118,15 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
 #define cmpxchg_rel(ptr, o, n) \
 	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
 
+/*
+ * Worse still - early processor implementations actually just ignored
+ * the acquire/release and did a full fence all the time. Unfortunately
+ * this meant a lot of badly written code that used .acq when they really
+ * wanted .rel became legacy out in the wild - so when we made a cpu
+ * that strictly did the .acq or .rel ... all that code started breaking - so
+ * we had to back-pedal and keep the "legacy" behavior of a full fence :-(
+ */
+
 /* for compatibility with other platforms: */
 #define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
 #define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 0d81697c326c..8ad0ed4182a5 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -13,6 +13,7 @@
 #include <asm/assembler.h>
 #include <asm/cmpxchg.h>
 #include <asm/dcache_clear.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -308,10 +309,4 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
 	local_irq_restore(flags);
 }
 
-/* Atomic operations are already serializing on m32r */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* _ASM_M32R_ATOMIC_H */
diff --git a/arch/m32r/include/asm/bitops.h b/arch/m32r/include/asm/bitops.h
index d3dea9ac7d4e..86ba2b42a6cf 100644
--- a/arch/m32r/include/asm/bitops.h
+++ b/arch/m32r/include/asm/bitops.h
@@ -21,6 +21,7 @@
 #include <asm/byteorder.h>
 #include <asm/dcache_clear.h>
 #include <asm/types.h>
+#include <asm/barrier.h>
 
 /*
  * These have to be done with inline assembly: that way the bit-setting
@@ -73,7 +74,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
  *
  * clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void clear_bit(int nr, volatile void * addr)
@@ -103,9 +104,6 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
 	local_irq_restore(flags);
 }
 
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
 /**
  * change_bit - Toggle a bit in memory
  * @nr: Bit to clear
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index f4e32de263a7..55695212a2ae 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/irqflags.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -209,11 +210,4 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* __ARCH_M68K_ATOMIC __ */
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index c6baa913592a..b4a9b0d5928d 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -13,6 +13,7 @@
 #endif
 
 #include <linux/compiler.h>
+#include <asm/barrier.h>
 
 /*
  * Bit access functions vary across the ColdFire and 68k families.
@@ -67,12 +68,6 @@ static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
 #define __set_bit(nr, vaddr) set_bit(nr, vaddr)
 
 
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
 static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
 {
 	char *p = (char *)vaddr + (nr ^ 31) / 8;
diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
index 307ecd2bd9a1..470e365f04ea 100644
--- a/arch/metag/include/asm/atomic.h
+++ b/arch/metag/include/asm/atomic.h
@@ -4,6 +4,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #if defined(CONFIG_METAG_ATOMICITY_IRQSOFF)
 /* The simple UP case. */
@@ -39,11 +40,6 @@
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif
 
 #define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 2d6f0de77325..c7591e80067c 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -100,4 +100,7 @@ do { \
 	___p1; \
 })
 
+#define smp_mb__before_atomic() barrier()
+#define smp_mb__after_atomic() barrier()
+
 #endif /* _ASM_METAG_BARRIER_H */
diff --git a/arch/metag/include/asm/bitops.h b/arch/metag/include/asm/bitops.h
index c0d0df0d1378..2671134ee745 100644
--- a/arch/metag/include/asm/bitops.h
+++ b/arch/metag/include/asm/bitops.h
@@ -5,12 +5,6 @@
 #include <asm/barrier.h>
 #include <asm/global_lock.h>
 
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
 #ifdef CONFIG_SMP
 /*
  * These functions are the basis of our bit ops.
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index e8eb3d53a241..37b2befe651a 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -761,13 +761,4 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #endif /* CONFIG_64BIT */
 
-/*
- * atomic*_return operations are serializing but not the non-*_return
- * versions.
- */
-#define smp_mb__before_atomic_dec() smp_mb__before_llsc()
-#define smp_mb__after_atomic_dec() smp_llsc_mb()
-#define smp_mb__before_atomic_inc() smp_mb__before_llsc()
-#define smp_mb__after_atomic_inc() smp_llsc_mb()
-
 #endif /* _ASM_ATOMIC_H */
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index e1aa4e4c2984..d0101dd0575e 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -195,4 +195,7 @@ do { \
 	___p1; \
 })
 
+#define smp_mb__before_atomic() smp_mb__before_llsc()
+#define smp_mb__after_atomic() smp_llsc_mb()
+
 #endif /* __ASM_BARRIER_H */
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 6a65d49e2c0d..7c8816f7b7c4 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -38,13 +38,6 @@
 #endif
 
 /*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() smp_mb__before_llsc()
-#define smp_mb__after_clear_bit() smp_llsc_mb()
-
-
-/*
  * These are the "slower" versions of the functions and are in bitops.c.
  * These functions call raw_local_irq_{save,restore}().
  */
@@ -120,7 +113,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
  *
  * clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
@@ -175,7 +168,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
  */
 static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(nr, addr);
 }
 
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index d1fea7a054be..1818da4dbb85 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -62,9 +62,9 @@ void __init alloc_legacy_irqno(void)
 
 void free_irqno(unsigned int irq)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(irq, irq_map);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 /*
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 975e1841ca64..cadeb1e2cdfc 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -13,6 +13,7 @@
 
 #include <asm/irqflags.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #ifndef CONFIG_SMP
 #include <asm-generic/atomic.h>
@@ -234,12 +235,6 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
 #endif
 }
 
-/* Atomic operations are already serializing on MN10300??? */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* __KERNEL__ */
 #endif /* CONFIG_SMP */
 #endif /* _ASM_ATOMIC_H */
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index 596bb2706d81..fe6f8e2c3617 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -18,9 +18,7 @@
 #define __ASM_BITOPS_H
 
 #include <asm/cpu-regs.h>
-
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
+#include <asm/barrier.h>
 
 /*
  * set bit
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
index 3e57faf04083..e5d0ef722bfa 100644
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -78,9 +78,9 @@ void smp_flush_tlb(void *unused)
 	else
 		local_flush_tlb_page(flush_mm, flush_va);
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	cpumask_clear_cpu(cpu_id, &flush_cpumask);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 out:
 	put_cpu();
 }
diff --git a/arch/openrisc/include/asm/bitops.h b/arch/openrisc/include/asm/bitops.h
index 2c64f2228dc7..3003cdad561b 100644
--- a/arch/openrisc/include/asm/bitops.h
+++ b/arch/openrisc/include/asm/bitops.h
@@ -27,14 +27,7 @@
 
 #include <linux/irqflags.h>
 #include <linux/compiler.h>
-
-/*
- * clear_bit may not imply a memory barrier
- */
-#ifndef smp_mb__before_clear_bit
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-#endif
+#include <asm/barrier.h>
 
 #include <asm/bitops/__ffs.h>
 #include <asm-generic/bitops/ffz.h>
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 472886ceab1d..0be2db2c7d44 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -7,6 +7,7 @@
 
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -143,11 +144,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define ATOMIC_INIT(i) { (i) }
 
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
 #ifdef CONFIG_64BIT
 
 #define ATOMIC64_INIT(i) { (i) }
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index 8c9b631d2a78..3f9406d9b9d6 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -8,6 +8,7 @@
 #include <linux/compiler.h>
 #include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
 #include <asm/byteorder.h>
+#include <asm/barrier.h>
 #include <linux/atomic.h>
 
 /*
@@ -19,9 +20,6 @@
 #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
 
 
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-
 /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
  * on use of volatile and __*_bit() (set/clear/change):
  * *_bit() want use of volatile.
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index e3b1d41c89be..28992d012926 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -8,6 +8,7 @@
8#ifdef __KERNEL__ 8#ifdef __KERNEL__
9#include <linux/types.h> 9#include <linux/types.h>
10#include <asm/cmpxchg.h> 10#include <asm/cmpxchg.h>
11#include <asm/barrier.h>
11 12
12#define ATOMIC_INIT(i) { (i) } 13#define ATOMIC_INIT(i) { (i) }
13 14
@@ -270,11 +271,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
270} 271}
271#define atomic_dec_if_positive atomic_dec_if_positive 272#define atomic_dec_if_positive atomic_dec_if_positive
272 273
273#define smp_mb__before_atomic_dec() smp_mb()
274#define smp_mb__after_atomic_dec() smp_mb()
275#define smp_mb__before_atomic_inc() smp_mb()
276#define smp_mb__after_atomic_inc() smp_mb()
277
278#ifdef __powerpc64__ 274#ifdef __powerpc64__
279 275
280#define ATOMIC64_INIT(i) { (i) } 276#define ATOMIC64_INIT(i) { (i) }
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index f89da808ce31..bab79a110c7b 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -84,4 +84,7 @@ do { \
84 ___p1; \ 84 ___p1; \
85}) 85})
86 86
87#define smp_mb__before_atomic() smp_mb()
88#define smp_mb__after_atomic() smp_mb()
89
87#endif /* _ASM_POWERPC_BARRIER_H */ 90#endif /* _ASM_POWERPC_BARRIER_H */
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index a5e9a7d494d8..bd3bd573d0ae 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -51,11 +51,7 @@
51#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit)) 51#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
52#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) 52#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
53 53
54/* 54#include <asm/barrier.h>
55 * clear_bit doesn't imply a memory barrier
56 */
57#define smp_mb__before_clear_bit() smp_mb()
58#define smp_mb__after_clear_bit() smp_mb()
59 55
60/* Macro for generating the ***_bits() functions */ 56/* Macro for generating the ***_bits() functions */
61#define DEFINE_BITOP(fn, op, prefix) \ 57#define DEFINE_BITOP(fn, op, prefix) \
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 18d7c80ddeb9..51dbace3269b 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -81,7 +81,7 @@ void crash_ipi_callback(struct pt_regs *regs)
 	}
 
 	atomic_inc(&cpus_in_crash);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 
 	/*
 	 * Starting the kdump boot.
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 1d4706114a45..fa934fe080c1 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -412,9 +412,4 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
 #endif /* __ARCH_S390_ATOMIC__ */
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 578680f6207a..19ff956b752b 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -27,8 +27,9 @@
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
 #define smp_read_barrier_depends() read_barrier_depends()
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
+
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
 
 #define set_mb(var, value) do { var = value; mb(); } while (0)
 
diff --git a/arch/score/include/asm/bitops.h b/arch/score/include/asm/bitops.h
index a304096b1894..c1bf8d6d0fb0 100644
--- a/arch/score/include/asm/bitops.h
+++ b/arch/score/include/asm/bitops.h
@@ -2,12 +2,7 @@
 #define _ASM_SCORE_BITOPS_H
 
 #include <asm/byteorder.h> /* swab32 */
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
+#include <asm/barrier.h>
 
 #include <asm-generic/bitops.h>
 #include <asm-generic/bitops/__fls.h>
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index f4c1c20bcdf6..f57b8a6743b3 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #define ATOMIC_INIT(i) { (i) }
 
@@ -62,9 +63,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
 #endif /* __ASM_SH_ATOMIC_H */
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index ea8706d94f08..fc8e652cf173 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -9,6 +9,7 @@
 
 /* For __swab32 */
 #include <asm/byteorder.h>
+#include <asm/barrier.h>
 
 #ifdef CONFIG_GUSA_RB
 #include <asm/bitops-grb.h>
@@ -22,12 +23,6 @@
 #include <asm-generic/bitops/non-atomic.h>
 #endif
 
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-
 #ifdef CONFIG_SUPERH32
 static inline unsigned long ffz(unsigned long word)
 {
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 905832aa9e9e..f08fe51b264d 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 #include <asm-generic/atomic64.h>
 
 
@@ -52,10 +53,4 @@ extern void atomic_set(atomic_t *, int);
 #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index be56a244c9cf..8b2f1bde2889 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -9,6 +9,7 @@
 
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #define ATOMIC_INIT(i) { (i) }
 #define ATOMIC64_INIT(i) { (i) }
@@ -108,10 +109,4 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 extern long atomic64_dec_if_positive(atomic64_t *v);
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index b5aad964558e..305dcc3dc721 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -68,4 +68,7 @@ do { \
 	___p1; \
 })
 
+#define smp_mb__before_atomic() barrier()
+#define smp_mb__after_atomic() barrier()
+
 #endif /* !(__SPARC64_BARRIER_H) */
diff --git a/arch/sparc/include/asm/bitops_32.h b/arch/sparc/include/asm/bitops_32.h
index 25a676653d45..88c9a962502c 100644
--- a/arch/sparc/include/asm/bitops_32.h
+++ b/arch/sparc/include/asm/bitops_32.h
@@ -90,9 +90,6 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 
 #include <asm-generic/bitops/non-atomic.h>
 
-#define smp_mb__before_clear_bit() do { } while(0)
-#define smp_mb__after_clear_bit() do { } while(0)
-
 #include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/sched.h>
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index 29011cc0e4be..f1a051ca301a 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -13,6 +13,7 @@
 
 #include <linux/compiler.h>
 #include <asm/byteorder.h>
+#include <asm/barrier.h>
 
 extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
 extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
@@ -23,9 +24,6 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 #include <asm-generic/bitops/non-atomic.h>
 
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 1ad4a1f7d42b..1b109fad9fff 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -169,16 +169,6 @@ static inline void atomic64_set(atomic64_t *v, long long n)
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
 
-/*
- * We need to barrier before modifying the word, since the _atomic_xxx()
- * routines just tns the lock and then read/modify/write of the word.
- * But after the word is updated, the routine issues an "mf" before returning,
- * and since it's a function call, we don't even need a compiler barrier.
- */
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_dec() do { } while (0)
-#define smp_mb__after_atomic_inc() do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index ad220eed05fc..7b11c5fadd42 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -105,12 +105,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-/* Atomic dec and inc don't implement barrier, so provide them if needed. */
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
 /* Define this to indicate that cmpxchg is an efficient operation. */
 #define __HAVE_ARCH_CMPXCHG
 
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index b5a05d050a8f..96a42ae79f4d 100644
--- a/arch/tile/include/asm/barrier.h
+++ b/arch/tile/include/asm/barrier.h
@@ -72,6 +72,20 @@ mb_incoherent(void)
 #define mb() fast_mb()
 #define iob() fast_iob()
 
+#ifndef __tilegx__ /* 32 bit */
+/*
+ * We need to barrier before modifying the word, since the _atomic_xxx()
+ * routines just tns the lock and then read/modify/write of the word.
+ * But after the word is updated, the routine issues an "mf" before returning,
+ * and since it's a function call, we don't even need a compiler barrier.
+ */
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() do { } while (0)
+#else /* 64 bit */
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+#endif
+
 #include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index d5a206865036..20caa346ac06 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -17,6 +17,7 @@
 #define _ASM_TILE_BITOPS_H
 
 #include <linux/types.h>
+#include <asm/barrier.h>
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index 386865ad2f55..bbf7b666f21d 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -49,8 +49,8 @@ static inline void set_bit(unsigned nr, volatile unsigned long *addr)
  * restricted to acting on a single-word quantity.
  *
  * clear_bit() may not contain a memory barrier, so if it is used for
- * locking purposes, you should call smp_mb__before_clear_bit() and/or
- * smp_mb__after_clear_bit() to ensure changes are visible on other cpus.
+ * locking purposes, you should call smp_mb__before_atomic() and/or
+ * smp_mb__after_atomic() to ensure changes are visible on other cpus.
  */
 static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 {
@@ -121,10 +121,6 @@ static inline int test_and_change_bit(unsigned nr,
 	return (_atomic_xor(addr, mask) & mask) != 0;
 }
 
-/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	do {} while (0)
-
 #include <asm-generic/bitops/ext2-atomic.h>
 
 #endif /* _ASM_TILE_BITOPS_32_H */
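These barriers exist for the void bitops (set_bit(), clear_bit(), change_bit()), which unlike test_and_*_bit() imply no ordering of their own. A hedged sketch of the publish side of that idiom, with invented names:

	#include <linux/bitops.h>
	#include <linux/compiler.h>

	static unsigned long ready_flags;	/* invented names */
	static int announced;

	static void mark_ready(void)
	{
		set_bit(0, &ready_flags);	/* void bitop: no implicit barrier */
		smp_mb__after_atomic();		/* bit must be visible before the store below */
		ACCESS_ONCE(announced) = 1;
	}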
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index ad34cd056085..bb1a29221fcd 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -32,10 +32,6 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 	__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
 }
 
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
-
-
 static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 {
 	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index b17f4f48ecd7..6dd1c7dd0473 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -7,6 +7,7 @@
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
 #include <asm/rmwcc.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -243,12 +244,6 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
 	: : "r" ((unsigned)(mask)), "m" (*(addr)) \
 	: "memory")
 
-/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 #else
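"Already serializing" is shorthand for: the x86 atomic RMWs are built on lock-prefixed instructions, which are full memory barriers in hardware, so only compiler reordering is left to prevent — hence barrier(). A hypothetical call site (all names invented):

	#include <linux/atomic.h>
	#include <linux/compiler.h>

	static atomic_t seq = ATOMIC_INIT(0);	/* invented names */
	static int done;

	static void bump(void)
	{
		atomic_inc(&seq);		/* "lock incl": already a full barrier */
		smp_mb__after_atomic();		/* barrier(): compiler-only, emits no code */
		ACCESS_ONCE(done) = 1;		/* stays after the increment */
	}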
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 69bbb4845020..5c7198cca5ed 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -137,6 +137,10 @@ do { \
 
 #endif
 
+/* Atomic operations are already serializing on x86 */
+#define smp_mb__before_atomic()	barrier()
+#define smp_mb__after_atomic()	barrier()
+
 /*
  * Stop RDTSC speculation. This is needed when you need to use RDTSC
  * (or get_cycles or vread that possibly accesses the TSC) in a defined
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 9fc1af74dc83..afcd35d331de 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h>
 #include <asm/rmwcc.h>
+#include <asm/barrier.h>
 
 #if BITS_PER_LONG == 32
 # define _BITOPS_LONG_SHIFT 5
@@ -102,7 +103,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
  *
  * clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __always_inline void
@@ -156,9 +157,6 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 	__clear_bit(nr, addr);
 }
 
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
 /**
  * __change_bit - Toggle a bit in memory
  * @nr: the bit to change
diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h
index 05af3b31d522..f28a24b51dc7 100644
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -41,7 +41,7 @@ static inline void sync_set_bit(long nr, volatile unsigned long *addr)
  *
  * sync_clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index a698d7165c96..eab67047dec3 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -57,7 +57,7 @@ void arch_trigger_all_cpu_backtrace(void)
 	}
 
 	clear_bit(0, &backtrace_flag);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static int __kprobes
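For context, this call site pairs with a test_and_set_bit() earlier in arch_trigger_all_cpu_backtrace(): backtrace_flag acts as a crude bit lock around the cross-CPU dump. A hedged paraphrase of the surrounding flow (the middle is elided from this hunk):

	if (test_and_set_bit(0, &backtrace_flag))
		return;				/* a dump is already in progress */

	/* ... send NMIs and wait for each CPU to print its stack ... */

	clear_bit(0, &backtrace_flag);		/* release the flag (no implicit barrier) */
	smp_mb__after_atomic();			/* order the release against later accesses */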
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index e7fb447bce8e..e5103b47a8ce 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -19,6 +19,7 @@
 #ifdef __KERNEL__
 #include <asm/processor.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #define ATOMIC_INIT(i)	{ (i) }
 
@@ -387,12 +388,6 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 #endif
 }
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index 0a24b04d6b21..5b88774c75ab 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -13,6 +13,9 @@
 #define rmb() barrier()
 #define wmb() mb()
 
+#define smp_mb__before_atomic()		barrier()
+#define smp_mb__after_atomic()		barrier()
+
 #include <asm-generic/barrier.h>
 
 #endif /* _XTENSA_SYSTEM_H */
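Note the ordering above: the arch definitions come before the asm-generic include, because the companion change to asm-generic/barrier.h (outside this arch-only diffstat) plausibly supplies a guarded, conservative fallback along the lines sketched below — an assumption about that header, not a verbatim quote. Defining barrier() first lets xtensa override the heavier smp_mb() default:

	#ifndef smp_mb__before_atomic
	#define smp_mb__before_atomic()	smp_mb()
	#endif

	#ifndef smp_mb__after_atomic
	#define smp_mb__after_atomic()	smp_mb()
	#endif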
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 7b6873ae84c2..3f44fa2a53e9 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -21,9 +21,7 @@
 
 #include <asm/processor.h>
 #include <asm/byteorder.h>
-
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
+#include <asm/barrier.h>
 
 #include <asm-generic/bitops/non-atomic.h>
 