path: root/arch/ia64
author		Peter Zijlstra <peterz@infradead.org>	2014-03-13 14:00:36 -0400
committer	Ingo Molnar <mingo@kernel.org>		2014-04-18 08:20:35 -0400
commit		0cd64efb61f1e68be26bd5121ccff3c779dc488b (patch)
tree		4f5b7c1e0fd57f9b7f1652003264a996bb79429e /arch/ia64
parent		94cf42f823bc904305b0ee93a09bcd51ba380497 (diff)
arch,ia64: Convert smp_mb__*()
ia64 atomic ops are full barriers; implement the new
smp_mb__{before,after}_atomic().

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-hyp7yj68cmqz1nqbfpr541ca@git.kernel.org
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-ia64@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
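The rationale, restated: every ia64 atomic read-modify-write already has
full-barrier semantics, so the generic before/after hooks only need to
suppress compiler reordering. A minimal, userspace-compilable sketch of the
pattern (the refcount scenario is invented; the macro bodies mirror the
definitions this patch adds to barrier.h):

/*
 * Illustrative sketch, not part of the patch; barrier() is written out
 * the way the kernel defines it.
 */
#include <stdatomic.h>
#include <stdio.h>

#define barrier()		__asm__ __volatile__("" ::: "memory")
#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()

static atomic_int refcount = 2;
static int payload;

int main(void)
{
	payload = 42;			/* plain store */
	smp_mb__before_atomic();	/* keep the store before the RMW */
	atomic_fetch_sub(&refcount, 1);	/* the RMW orders fully on ia64 */
	smp_mb__after_atomic();		/* keep later accesses after it */
	printf("refcount now %d\n", atomic_load(&refcount));
	return 0;
}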
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/include/asm/atomic.h	| 7 +------
-rw-r--r--	arch/ia64/include/asm/barrier.h	| 3 +++
-rw-r--r--	arch/ia64/include/asm/bitops.h	| 6 ++----
3 files changed, 6 insertions(+), 10 deletions(-)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 6e6fe1839f5d..0f8bf48dadf3 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
 
 
 #define ATOMIC_INIT(i)		{ (i) }
@@ -208,10 +209,4 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_inc(v)			atomic64_add(1, (v))
 #define atomic64_dec(v)			atomic64_sub(1, (v))
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif /* _ASM_IA64_ATOMIC_H */
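For callers, the conversion this enables is mechanical: the four
operation-specific macros removed above collapse into one pair. A
hypothetical caller (the counter and function are invented; only the
barrier macros come from this patch):

#include <linux/atomic.h>

static atomic_t pending = ATOMIC_INIT(0);

static void mark_pending(void)
{
	/* was: smp_mb__before_atomic_inc(); */
	smp_mb__before_atomic();
	atomic_inc(&pending);
	/* was: smp_mb__after_atomic_inc(); */
	smp_mb__after_atomic();
}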
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index d0a69aa35e27..a48957c7b445 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -55,6 +55,9 @@
 
 #endif
 
+#define smp_mb__before_atomic()	barrier()
+#define smp_mb__after_atomic()	barrier()
+
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index feb8117ed06a..71e8145243ee 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -16,6 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -65,9 +66,6 @@ __set_bit (int nr, volatile void *addr)
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
 
-#define smp_mb__before_clear_bit()	barrier();
-#define smp_mb__after_clear_bit()	barrier();
-
 /**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
@@ -75,7 +73,7 @@ __set_bit (int nr, volatile void *addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void
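The clear_bit() comment above describes the classic bit-lock release; a
sketch of that pattern with the renamed barrier (the lock word and bit
number are invented for illustration):

#include <linux/bitops.h>

#define MY_LOCK_BIT	0
static unsigned long my_lock_word;

static void my_unlock(void)
{
	/* order the critical section's stores before the release... */
	smp_mb__before_atomic();
	clear_bit(MY_LOCK_BIT, &my_lock_word);
	/* ...which on ia64 costs only barrier(), since the atomic op is
	 * itself a full barrier per the changelog. */
}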