author     Peter Zijlstra <peterz@infradead.org>    2014-02-04 14:36:01 -0500
committer  Ingo Molnar <mingo@kernel.org>           2014-04-18 05:40:29 -0400
commit     e4f9bfb3feaeaca55cf177dadb7e3313836b10f4 (patch)
tree       14a1a121064fbc30e13176a792f62f8898178081 /arch/ia64/include
parent     81cef0fe19e086ff6abfd45e92246f68ffa0185f (diff)
ia64: Fix up smp_mb__{before,after}_clear_bit()
IA64 doesn't actually have acquire/release barriers, it's a lie!

Add a comment explaining this and fix up the bitop barriers.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-akevfh136um9dqvb1ohm55ca@git.kernel.org
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-ia64@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
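The barriers touched here are the ones generic code pairs with clear_bit(). A minimal sketch of that usage pattern, assuming a made-up flag word, bit number and function (none of them come from this patch):

#include <linux/bitops.h>
#include <linux/wait.h>

static unsigned long demo_flags;	/* hypothetical flag word */
#define DEMO_BIT_BUSY	0		/* hypothetical bit number */

static void demo_release(void)
{
	clear_bit(DEMO_BIT_BUSY, &demo_flags);
	/* Order the clear before waking waiters.  After this patch this is
	 * only a compiler barrier on ia64, because clear_bit() is built on
	 * cmpxchg.acq, which in practice already acts as a full fence (see
	 * the comment added to cmpxchg.h below). */
	smp_mb__after_clear_bit();
	wake_up_bit(&demo_flags, DEMO_BIT_BUSY);
}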
Diffstat (limited to 'arch/ia64/include')
-rw-r--r--  arch/ia64/include/asm/bitops.h        7
-rw-r--r--  arch/ia64/include/uapi/asm/cmpxchg.h  9
2 files changed, 11 insertions, 5 deletions
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index c27eccd33349..feb8117ed06a 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -65,11 +65,8 @@ __set_bit (int nr, volatile void *addr)
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
 
-/*
- * clear_bit() has "acquire" semantics.
- */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)
+#define smp_mb__before_clear_bit()	barrier();
+#define smp_mb__after_clear_bit()	barrier();
 
 /**
  * clear_bit - Clears a bit in memory
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 4f37dbbb8640..f35109b1d907 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -118,6 +118,15 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
 #define cmpxchg_rel(ptr, o, n)	\
 	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
 
+/*
+ * Worse still - early processor implementations actually just ignored
+ * the acquire/release and did a full fence all the time.  Unfortunately
+ * this meant a lot of badly written code that used .acq when they really
+ * wanted .rel became legacy out in the wild - so when we made a cpu
+ * that strictly did the .acq or .rel ... all that code started breaking - so
+ * we had to back-pedal and keep the "legacy" behavior of a full fence :-(
+ */
+
 /* for compatibility with other platforms: */
 #define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
 #define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
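The comment added to cmpxchg.h above spells out what .acq and .rel are supposed to mean. Purely as an illustration of that intent (the lock variable and helper names below are invented for this sketch, and per the same comment real parts end up doing a full fence either way):

#include <asm/cmpxchg.h>

static unsigned int demo_lock;	/* hypothetical: 0 = free, 1 = held */

static void demo_lock_acquire(void)
{
	/* .acq intent: accesses in the critical section must not be
	 * reordered before the lock is observed as taken. */
	while (cmpxchg_acq(&demo_lock, 0, 1) != 0)
		;	/* spin */
}

static void demo_lock_release(void)
{
	/* .rel intent: accesses in the critical section must be visible
	 * before the lock is seen as free again. */
	cmpxchg_rel(&demo_lock, 1, 0);
}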