path: root/arch/ia64/include
author    Linus Torvalds <torvalds@linux-foundation.org>  2014-06-03 15:57:53 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-06-03 15:57:53 -0400
commit    776edb59317ada867dfcddde40b55648beeb0078 (patch)
tree      f6a6136374642323cfefd7d6399ea429f9018ade  /arch/ia64/include
parent    59a3d4c3631e553357b7305dc09db1990aa6757c (diff)
parent    3cf2f34e1a3d4d5ff209d087925cf950e52f4805 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - reduced/streamlined smp_mb__*() interface that allows more usecases
     and makes the existing ones less buggy, especially in rarer
     architectures

   - add rwsem implementation comments

   - bump up lockdep limits"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  rwsem: Add comments to explain the meaning of the rwsem's count field
  lockdep: Increase static allocations
  arch: Mass conversion of smp_mb__*()
  arch,doc: Convert smp_mb__*()
  arch,xtensa: Convert smp_mb__*()
  arch,x86: Convert smp_mb__*()
  arch,tile: Convert smp_mb__*()
  arch,sparc: Convert smp_mb__*()
  arch,sh: Convert smp_mb__*()
  arch,score: Convert smp_mb__*()
  arch,s390: Convert smp_mb__*()
  arch,powerpc: Convert smp_mb__*()
  arch,parisc: Convert smp_mb__*()
  arch,openrisc: Convert smp_mb__*()
  arch,mn10300: Convert smp_mb__*()
  arch,mips: Convert smp_mb__*()
  arch,metag: Convert smp_mb__*()
  arch,m68k: Convert smp_mb__*()
  arch,m32r: Convert smp_mb__*()
  arch,ia64: Convert smp_mb__*()
  ...
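The ia64 hunks below are one architecture's slice of the "Mass conversion of smp_mb__*()" work: the per-operation barrier macros go away and two generic ones take their place. A minimal usage sketch of the conversion pattern, assuming a post-merge tree; put_ref() and refs are illustrative names, not code from this series:

#include <linux/atomic.h>

static atomic_t refs = ATOMIC_INIT(1);

static void put_ref(void)
{
	/* before this series: smp_mb__before_atomic_dec(); */
	smp_mb__before_atomic();	/* one generic macro for all atomic RMW ops */
	atomic_dec(&refs);
	/* before this series: smp_mb__after_atomic_dec(); */
	smp_mb__after_atomic();
}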
Diffstat (limited to 'arch/ia64/include')
-rw-r--r--  arch/ia64/include/asm/atomic.h        7
-rw-r--r--  arch/ia64/include/asm/barrier.h       3
-rw-r--r--  arch/ia64/include/asm/bitops.h        9
-rw-r--r--  arch/ia64/include/uapi/asm/cmpxchg.h  9
4 files changed, 15 insertions(+), 13 deletions(-)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 6e6fe1839f5d..0f8bf48dadf3 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
 
 
 #define ATOMIC_INIT(i)		{ (i) }
@@ -208,10 +209,4 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_inc(v)			atomic64_add(1, (v))
 #define atomic64_dec(v)			atomic64_sub(1, (v))
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif /* _ASM_IA64_ATOMIC_H */
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index d0a69aa35e27..a48957c7b445 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -55,6 +55,9 @@
 
 #endif
 
+#define smp_mb__before_atomic()	barrier()
+#define smp_mb__after_atomic()	barrier()
+
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
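On ia64 the new generic macros are plain compiler barriers, consistent with the comment removed from atomic.h above ("Atomic operations are already serializing"). A hedged sketch of how generic code pairs them with a non-value-returning RMW; post_work(), pending and payload_ready are hypothetical names:

#include <linux/atomic.h>

static atomic_t pending = ATOMIC_INIT(0);
static int payload_ready;

static void post_work(void)
{
	payload_ready = 1;		/* publish the payload ...            */
	smp_mb__before_atomic();	/* ... before the counter bump;       */
					/* expands to barrier() on ia64       */
	atomic_inc(&pending);		/* non-value-returning atomic RMW     */
	smp_mb__after_atomic();		/* order against later accesses       */
}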
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index c27eccd33349..71e8145243ee 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -16,6 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -65,12 +66,6 @@ __set_bit (int nr, volatile void *addr)
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
 
-/*
- * clear_bit() has "acquire" semantics.
- */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)
-
 /**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
@@ -78,7 +73,7 @@ __set_bit (int nr, volatile void *addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void
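A short sketch of the pattern the updated clear_bit() kerneldoc describes, assuming the post-merge macros; MY_LOCK_BIT, my_state and my_unlock_bit() are illustrative only:

#include <linux/bitops.h>

#define MY_LOCK_BIT	0
static unsigned long my_state;

static void my_unlock_bit(void)
{
	/*
	 * clear_bit() is atomic but carries no barrier; order the critical
	 * section's stores before releasing the bit, as the comment in
	 * bitops.h now advises.  smp_mb__before_atomic() is reachable here
	 * through the <asm/barrier.h> include this patch adds.
	 */
	smp_mb__before_atomic();
	clear_bit(MY_LOCK_BIT, &my_state);
}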
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 4f37dbbb8640..f35109b1d907 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -118,6 +118,15 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
 #define cmpxchg_rel(ptr, o, n)	\
 	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
 
+/*
+ * Worse still - early processor implementations actually just ignored
+ * the acquire/release and did a full fence all the time.  Unfortunately
+ * this meant a lot of badly written code that used .acq when they really
+ * wanted .rel became legacy out in the wild - so when we made a cpu
+ * that strictly did the .acq or .rel ... all that code started breaking - so
+ * we had to back-pedal and keep the "legacy" behavior of a full fence :-(
+ */
+
 /* for compatibility with other platforms: */
 #define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
 #define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
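To make the .acq/.rel pairing in the new comment concrete, here is a hedged test-and-set sketch; my_lock(), my_unlock() and lock_word are hypothetical, the kernel-side <asm/cmpxchg.h> include path and cpu_relax() are assumptions, and (as the comment warns) older parts issued a full fence regardless of the completer:

#include <asm/cmpxchg.h>
#include <asm/processor.h>	/* cpu_relax() */

static volatile unsigned int lock_word;

static void my_lock(void)
{
	/* .acq: later accesses may not be hoisted above the exchange */
	while (cmpxchg_acq(&lock_word, 0, 1) != 0)
		cpu_relax();
}

static void my_unlock(void)
{
	/*
	 * .rel: earlier accesses may not sink below the exchange; on the
	 * "legacy" parts described above this is a full fence anyway.
	 */
	(void)cmpxchg_rel(&lock_word, 1, 0);
}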