-rw-r--r--	arch/m32r/include/asm/atomic.h	| 7 +------
-rw-r--r--	arch/m32r/include/asm/bitops.h	| 6 ++----
2 files changed, 3 insertions(+), 10 deletions(-)
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 0d81697c326c..8ad0ed4182a5 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -13,6 +13,7 @@
 #include <asm/assembler.h>
 #include <asm/cmpxchg.h>
 #include <asm/dcache_clear.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -308,10 +309,4 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
 	local_irq_restore(flags);
 }
 
-/* Atomic operations are already serializing on m32r */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif	/* _ASM_M32R_ATOMIC_H */
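The four smp_mb__{before,after}_atomic_{dec,inc}() macros deleted above were m32r's per-operation barrier hooks; the generic smp_mb__before_atomic()/smp_mb__after_atomic() pair, pulled in through the new <asm/barrier.h> include, replaces all of them. A minimal call-site sketch of the migration, assuming a hypothetical struct with an atomic_t refcount (not part of this patch):

    /* Illustrative only: 'struct foo' and put_foo() are hypothetical names. */
    #include <linux/atomic.h>
    #include <asm/barrier.h>

    struct foo {
            atomic_t refs;
    };

    static void put_foo(struct foo *f)
    {
            /* Previously: smp_mb__before_atomic_dec(); */
            smp_mb__before_atomic();        /* order earlier accesses before the atomic op */
            atomic_dec(&f->refs);
    }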
diff --git a/arch/m32r/include/asm/bitops.h b/arch/m32r/include/asm/bitops.h
index d3dea9ac7d4e..86ba2b42a6cf 100644
--- a/arch/m32r/include/asm/bitops.h
+++ b/arch/m32r/include/asm/bitops.h
@@ -21,6 +21,7 @@
 #include <asm/byteorder.h>
 #include <asm/dcache_clear.h>
 #include <asm/types.h>
+#include <asm/barrier.h>
 
 /*
  * These have to be done with inline assembly: that way the bit-setting
@@ -73,7 +74,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void clear_bit(int nr, volatile void * addr)
@@ -103,9 +104,6 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
 	local_irq_restore(flags);
 }
 
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
 /**
  * change_bit - Toggle a bit in memory
  * @nr: Bit to clear
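The reworded clear_bit() comment documents the pattern this change targets: clear_bit() is atomic but implies no memory barrier, so when a bit doubles as a lock, the release path needs smp_mb__before_atomic() to make stores performed inside the critical section visible before the bit clears. A hedged sketch of that pattern; MY_LOCK_BIT and my_bit_unlock() are illustrative names, not kernel API:

    #include <linux/bitops.h>
    #include <asm/barrier.h>

    #define MY_LOCK_BIT     0       /* illustrative bit index */

    static void my_bit_unlock(unsigned long *word)
    {
            smp_mb__before_atomic();        /* clear_bit() implies no barrier */
            clear_bit(MY_LOCK_BIT, word);
    }

The acquire side typically uses test_and_set_bit(), which already implies a full memory barrier, so only the release side needs the explicit smp_mb__before_atomic().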