author		Michael S. Tsirkin <mst@redhat.com>	2015-12-27 08:04:42 -0500
committer	Michael S. Tsirkin <mst@redhat.com>	2016-01-12 13:46:55 -0500
commit		eebd1b927822f13429ec09d0a48fe92716b22840 (patch)
tree		e2a2454ce69bd21eb3077e73ad853f5548b68e4a /arch/ia64
parent		27f6cabc0ebf9e452c3251bf0511c41cf2c75dde (diff)
ia64: define __smp_xxx
This defines __smp_xxx barriers for ia64, for use by virtualization.

The smp_xxx barriers are removed, as they are defined correctly by
asm-generic/barrier.h. This reduces the amount of arch-specific
boilerplate code.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Tony Luck <tony.luck@intel.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
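For context, a simplified sketch (not the verbatim upstream header) of how
asm-generic/barrier.h is expected to consume the arch-provided __smp_xxx
definitions: the generic smp_xxx macros fall back to __smp_xxx only under
CONFIG_SMP, while the virt_xxx helpers used by virtualization map to
__smp_xxx unconditionally, since a UP guest may still talk to an SMP host.

	/* Simplified sketch of the asm-generic/barrier.h fallback wiring;
	 * not the verbatim upstream header. */
	#ifdef CONFIG_SMP
	#ifndef smp_mb
	#define smp_mb()	__smp_mb()	/* arch override, e.g. ia64's mb() */
	#endif
	#else	/* !CONFIG_SMP */
	#ifndef smp_mb
	#define smp_mb()	barrier()	/* UP needs only a compiler barrier */
	#endif
	#endif

	/* Virtualization code always gets the SMP-strength barrier. */
	#define virt_mb()	__smp_mb()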
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/include/asm/barrier.h	14
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 2f933480a764..588f1614cafc 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -42,28 +42,24 @@
 #define dma_rmb()	mb()
 #define dma_wmb()	mb()
 
-#ifdef CONFIG_SMP
-# define smp_mb()	mb()
-#else
-# define smp_mb()	barrier()
-#endif
+# define __smp_mb()	mb()
 
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
  */
 
-#define smp_store_release(p, v)					\
+#define __smp_store_release(p, v)					\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
 	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\