about summary refs log tree commit diff stats
path: root/arch/alpha/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-09-03 18:46:07 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-09-03 18:46:07 -0400
commitca520cab25e0e8da717c596ccaa2c2b3650cfa09 (patch)
tree883eb497642d98635817f9cf954ac98e043fb573 /arch/alpha/include
parent4c12ab7e5e2e892fa94df500f96001837918a281 (diff)
parentd420acd816c07c7be31bd19d09cbcb16e5572fa6 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and atomic updates from Ingo Molnar: "Main changes in this cycle are: - Extend atomic primitives with coherent logic op primitives (atomic_{or,and,xor}()) and deprecate the old partial APIs (atomic_{set,clear}_mask()) The old ops were incoherent with incompatible signatures across architectures and with incomplete support. Now every architecture supports the primitives consistently (by Peter Zijlstra) - Generic support for 'relaxed atomics': - _acquire/release/relaxed() flavours of xchg(), cmpxchg() and {add,sub}_return() - atomic_read_acquire() - atomic_set_release() This came out of porting qwrlock code to arm64 (by Will Deacon) - Clean up the fragile static_key APIs that were causing repeat bugs, by introducing a new one: DEFINE_STATIC_KEY_TRUE(name); DEFINE_STATIC_KEY_FALSE(name); which define a key of different types with an initial true/false value. Then allow: static_branch_likely() static_branch_unlikely() to take a key of either type and emit the right instruction for the case. To be able to know the 'type' of the static key we encode it in the jump entry (by Peter Zijlstra) - Static key self-tests (by Jason Baron) - qrwlock optimizations (by Waiman Long) - small futex enhancements (by Davidlohr Bueso) - ... 
and misc other changes" * 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits) jump_label/x86: Work around asm build bug on older/backported GCCs locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations locking, include/llist: Use linux/atomic.h instead of asm/cmpxchg.h locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics locking/qrwlock: Implement queue_write_unlock() using smp_store_release() locking/lockref: Remove homebrew cmpxchg64_relaxed() macro definition locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t' locking, asm-generic: Rework atomic-long.h to avoid bulk code duplication locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations locking, compiler.h: Cast away attributes in the WRITE_ONCE() magic locking/static_keys: Make verify_keys() static jump label, locking/static_keys: Update docs locking/static_keys: Provide a selftest jump_label: Provide a self-test s390/uaccess, locking/static_keys: employ static_branch_likely() x86, tsc, locking/static_keys: Employ static_branch_likely() locking/static_keys: Add selftest locking/static_keys: Add a new static_key interface locking/static_keys: Rework update logic locking/static_keys: Add static_key_{en,dis}able() helpers ...
Diffstat (limited to 'arch/alpha/include')
-rw-r--r-- arch/alpha/include/asm/atomic.h | 42
1 file changed, 27 insertions(+), 15 deletions(-)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 8f8eafbedd7c..e8c956098424 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -29,13 +29,13 @@
29 * branch back to restart the operation. 29 * branch back to restart the operation.
30 */ 30 */
31 31
32#define ATOMIC_OP(op) \ 32#define ATOMIC_OP(op, asm_op) \
33static __inline__ void atomic_##op(int i, atomic_t * v) \ 33static __inline__ void atomic_##op(int i, atomic_t * v) \
34{ \ 34{ \
35 unsigned long temp; \ 35 unsigned long temp; \
36 __asm__ __volatile__( \ 36 __asm__ __volatile__( \
37 "1: ldl_l %0,%1\n" \ 37 "1: ldl_l %0,%1\n" \
38 " " #op "l %0,%2,%0\n" \ 38 " " #asm_op " %0,%2,%0\n" \
39 " stl_c %0,%1\n" \ 39 " stl_c %0,%1\n" \
40 " beq %0,2f\n" \ 40 " beq %0,2f\n" \
41 ".subsection 2\n" \ 41 ".subsection 2\n" \
@@ -45,15 +45,15 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
45 :"Ir" (i), "m" (v->counter)); \ 45 :"Ir" (i), "m" (v->counter)); \
46} \ 46} \
47 47
48#define ATOMIC_OP_RETURN(op) \ 48#define ATOMIC_OP_RETURN(op, asm_op) \
49static inline int atomic_##op##_return(int i, atomic_t *v) \ 49static inline int atomic_##op##_return(int i, atomic_t *v) \
50{ \ 50{ \
51 long temp, result; \ 51 long temp, result; \
52 smp_mb(); \ 52 smp_mb(); \
53 __asm__ __volatile__( \ 53 __asm__ __volatile__( \
54 "1: ldl_l %0,%1\n" \ 54 "1: ldl_l %0,%1\n" \
55 " " #op "l %0,%3,%2\n" \ 55 " " #asm_op " %0,%3,%2\n" \
56 " " #op "l %0,%3,%0\n" \ 56 " " #asm_op " %0,%3,%0\n" \
57 " stl_c %0,%1\n" \ 57 " stl_c %0,%1\n" \
58 " beq %0,2f\n" \ 58 " beq %0,2f\n" \
59 ".subsection 2\n" \ 59 ".subsection 2\n" \
@@ -65,13 +65,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
65 return result; \ 65 return result; \
66} 66}
67 67
68#define ATOMIC64_OP(op) \ 68#define ATOMIC64_OP(op, asm_op) \
69static __inline__ void atomic64_##op(long i, atomic64_t * v) \ 69static __inline__ void atomic64_##op(long i, atomic64_t * v) \
70{ \ 70{ \
71 unsigned long temp; \ 71 unsigned long temp; \
72 __asm__ __volatile__( \ 72 __asm__ __volatile__( \
73 "1: ldq_l %0,%1\n" \ 73 "1: ldq_l %0,%1\n" \
74 " " #op "q %0,%2,%0\n" \ 74 " " #asm_op " %0,%2,%0\n" \
75 " stq_c %0,%1\n" \ 75 " stq_c %0,%1\n" \
76 " beq %0,2f\n" \ 76 " beq %0,2f\n" \
77 ".subsection 2\n" \ 77 ".subsection 2\n" \
@@ -81,15 +81,15 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
81 :"Ir" (i), "m" (v->counter)); \ 81 :"Ir" (i), "m" (v->counter)); \
82} \ 82} \
83 83
84#define ATOMIC64_OP_RETURN(op) \ 84#define ATOMIC64_OP_RETURN(op, asm_op) \
85static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ 85static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
86{ \ 86{ \
87 long temp, result; \ 87 long temp, result; \
88 smp_mb(); \ 88 smp_mb(); \
89 __asm__ __volatile__( \ 89 __asm__ __volatile__( \
90 "1: ldq_l %0,%1\n" \ 90 "1: ldq_l %0,%1\n" \
91 " " #op "q %0,%3,%2\n" \ 91 " " #asm_op " %0,%3,%2\n" \
92 " " #op "q %0,%3,%0\n" \ 92 " " #asm_op " %0,%3,%0\n" \
93 " stq_c %0,%1\n" \ 93 " stq_c %0,%1\n" \
94 " beq %0,2f\n" \ 94 " beq %0,2f\n" \
95 ".subsection 2\n" \ 95 ".subsection 2\n" \
@@ -101,15 +101,27 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
101 return result; \ 101 return result; \
102} 102}
103 103
104#define ATOMIC_OPS(opg) \ 104#define ATOMIC_OPS(op) \
105 ATOMIC_OP(opg) \ 105 ATOMIC_OP(op, op##l) \
106 ATOMIC_OP_RETURN(opg) \ 106 ATOMIC_OP_RETURN(op, op##l) \
107 ATOMIC64_OP(opg) \ 107 ATOMIC64_OP(op, op##q) \
108 ATOMIC64_OP_RETURN(opg) 108 ATOMIC64_OP_RETURN(op, op##q)
109 109
110ATOMIC_OPS(add) 110ATOMIC_OPS(add)
111ATOMIC_OPS(sub) 111ATOMIC_OPS(sub)
112 112
113#define atomic_andnot atomic_andnot
114#define atomic64_andnot atomic64_andnot
115
116ATOMIC_OP(and, and)
117ATOMIC_OP(andnot, bic)
118ATOMIC_OP(or, bis)
119ATOMIC_OP(xor, xor)
120ATOMIC64_OP(and, and)
121ATOMIC64_OP(andnot, bic)
122ATOMIC64_OP(or, bis)
123ATOMIC64_OP(xor, xor)
124
113#undef ATOMIC_OPS 125#undef ATOMIC_OPS
114#undef ATOMIC64_OP_RETURN 126#undef ATOMIC64_OP_RETURN
115#undef ATOMIC64_OP 127#undef ATOMIC64_OP