author		Linus Torvalds <torvalds@linux-foundation.org>	2016-07-25 15:41:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-25 15:41:29 -0400
commit		c86ad14d305d2429c3da19462440bac50c183def (patch)
tree		bd794cd72476661faf82c440063c217bb978ce44 /arch/alpha
parent		a2303849a6b4b7ba59667091e00d6bb194071d9a (diff)
parent		f06628638cf6e75f179742b6c1b35076965b9fdd (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The locking tree was busier in this cycle than the usual pattern - a
  couple of major projects happened to coincide.

  The main changes are:

   - implement the atomic_fetch_{add,sub,and,or,xor}() API natively
     across all SMP architectures (Peter Zijlstra)

   - add atomic_fetch_{inc/dec}() as well, using the generic primitives
     (Davidlohr Bueso)

   - optimize various aspects of rwsems (Jason Low, Davidlohr Bueso,
     Waiman Long)

   - optimize smp_cond_load_acquire() on arm64 and implement LSE based
     atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
     on arm64 (Will Deacon)

   - introduce smp_acquire__after_ctrl_dep() and fix various barrier
     mis-uses and bugs (Peter Zijlstra)

   - after discovering ancient spin_unlock_wait() barrier bugs in its
     implementation and usage, strengthen its semantics and update/fix
     usage sites (Peter Zijlstra)

   - optimize mutex_trylock() fastpath (Peter Zijlstra)

   - ... misc fixes and cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
  locking/atomic: Introduce inc/dec variants for the atomic_fetch_$op() API
  locking/barriers, arch/arm64: Implement LDXR+WFE based smp_cond_load_acquire()
  locking/static_keys: Fix non static symbol Sparse warning
  locking/qspinlock: Use __this_cpu_dec() instead of full-blown this_cpu_dec()
  locking/atomic, arch/tile: Fix tilepro build
  locking/atomic, arch/m68k: Remove comment
  locking/atomic, arch/arc: Fix build
  locking/Documentation: Clarify limited control-dependency scope
  locking/atomic, arch/rwsem: Employ atomic_long_fetch_add()
  locking/atomic, arch/qrwlock: Employ atomic_fetch_add_acquire()
  locking/atomic, arch/mips: Convert to _relaxed atomics
  locking/atomic, arch/alpha: Convert to _relaxed atomics
  locking/atomic: Remove the deprecated atomic_{set,clear}_mask() functions
  locking/atomic: Remove linux/atomic.h:atomic_fetch_or()
  locking/atomic: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
  locking/atomic: Fix atomic64_relaxed() bits
  locking/atomic, arch/xtensa: Implement atomic_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/x86: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/sparc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  ...
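As a quick orientation for the API named above: atomic_fetch_*() returns the value the counter held before the operation, while the existing atomic_*_return() family returns the value after it, and both come in fully ordered, _relaxed, _acquire and _release flavours. A minimal sketch follows (the atomic_t API is the real kernel one; the example function and counter are hypothetical):

        #include <linux/atomic.h>

        static atomic_t nr_users = ATOMIC_INIT(0);

        void example(void)
        {
                int before, after;

                /* Returns the pre-increment value (0 on the first call). */
                before = atomic_fetch_add(1, &nr_users);

                /* Returns the post-increment value. */
                after = atomic_add_return(1, &nr_users);

                /* Same operation, but with no ordering guarantees. */
                before = atomic_fetch_add_relaxed(1, &nr_users);
        }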
Diffstat (limited to 'arch/alpha')
-rw-r--r--	arch/alpha/include/asm/atomic.h		87
-rw-r--r--	arch/alpha/include/asm/rwsem.h		68
-rw-r--r--	arch/alpha/include/asm/spinlock.h	 9
3 files changed, 92 insertions(+), 72 deletions(-)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 572b228c44c7..498933a7df97 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -46,10 +46,9 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
 } \
 
 #define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
 { \
 	long temp, result; \
-	smp_mb(); \
 	__asm__ __volatile__( \
 	"1:	ldl_l %0,%1\n" \
 	"	" #asm_op " %0,%3,%2\n" \
@@ -61,7 +60,23 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_mb(); \
+	return result; \
+}
+
+#define ATOMIC_FETCH_OP(op, asm_op) \
+static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+	long temp, result; \
+	__asm__ __volatile__( \
+	"1:	ldl_l %2,%1\n" \
+	"	" #asm_op " %2,%3,%0\n" \
+	"	stl_c %0,%1\n" \
+	"	beq %0,2f\n" \
+	".subsection 2\n" \
+	"2:	br 1b\n" \
+	".previous" \
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
+	:"Ir" (i), "m" (v->counter) : "memory"); \
 	return result; \
 }
 
@@ -82,10 +97,9 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 } \
 
 #define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
 { \
 	long temp, result; \
-	smp_mb(); \
 	__asm__ __volatile__( \
 	"1:	ldq_l %0,%1\n" \
 	"	" #asm_op " %0,%3,%2\n" \
@@ -97,34 +111,77 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_mb(); \
+	return result; \
+}
+
+#define ATOMIC64_FETCH_OP(op, asm_op) \
+static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+{ \
+	long temp, result; \
+	__asm__ __volatile__( \
+	"1:	ldq_l %2,%1\n" \
+	"	" #asm_op " %2,%3,%0\n" \
+	"	stq_c %0,%1\n" \
+	"	beq %0,2f\n" \
+	".subsection 2\n" \
+	"2:	br 1b\n" \
+	".previous" \
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
+	:"Ir" (i), "m" (v->counter) : "memory"); \
 	return result; \
 }
 
 #define ATOMIC_OPS(op) \
 	ATOMIC_OP(op, op##l) \
 	ATOMIC_OP_RETURN(op, op##l) \
+	ATOMIC_FETCH_OP(op, op##l) \
 	ATOMIC64_OP(op, op##q) \
-	ATOMIC64_OP_RETURN(op, op##q)
+	ATOMIC64_OP_RETURN(op, op##q) \
+	ATOMIC64_FETCH_OP(op, op##q)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define atomic_add_return_relaxed	atomic_add_return_relaxed
+#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+
+#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+
 #define atomic_andnot atomic_andnot
 #define atomic64_andnot atomic64_andnot
 
-ATOMIC_OP(and, and)
-ATOMIC_OP(andnot, bic)
-ATOMIC_OP(or, bis)
-ATOMIC_OP(xor, xor)
-ATOMIC64_OP(and, and)
-ATOMIC64_OP(andnot, bic)
-ATOMIC64_OP(or, bis)
-ATOMIC64_OP(xor, xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, asm) \
+	ATOMIC_OP(op, asm) \
+	ATOMIC_FETCH_OP(op, asm) \
+	ATOMIC64_OP(op, asm) \
+	ATOMIC64_FETCH_OP(op, asm)
+
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(andnot, bic)
+ATOMIC_OPS(or, bis)
+ATOMIC_OPS(xor, xor)
+
+#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
+#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
+#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
+
+#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
+#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
+#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
 
 #undef ATOMIC_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
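Note why the explicit smp_mb() calls could be dropped above: Alpha now supplies only the _relaxed primitives, and the generic <linux/atomic.h> layer builds the fully ordered variants by bracketing the relaxed op with barriers. A simplified sketch of that generic pattern (not the Alpha code; macro details vary between kernel versions):

        /* Roughly how <linux/atomic.h> derives a fully ordered op from a _relaxed one. */
        #define __atomic_op_fence(op, args...)                          \
        ({                                                              \
                typeof(op##_relaxed(args)) __ret;                       \
                smp_mb__before_atomic();                                \
                __ret = op##_relaxed(args);                             \
                smp_mb__after_atomic();                                 \
                __ret;                                                  \
        })

        /* e.g. the fully ordered atomic_add_return() then becomes: */
        #define atomic_add_return(...)                                  \
                __atomic_op_fence(atomic_add_return, __VA_ARGS__)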
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 0131a7058778..77873d0ad293 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -25,8 +25,8 @@ static inline void __down_read(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
-	oldcount = sem->count;
-	sem->count += RWSEM_ACTIVE_READ_BIAS;
+	oldcount = sem->count.counter;
+	sem->count.counter += RWSEM_ACTIVE_READ_BIAS;
 #else
 	long temp;
 	__asm__ __volatile__(
@@ -52,13 +52,13 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
 	long old, new, res;
 
-	res = sem->count;
+	res = atomic_long_read(&sem->count);
 	do {
 		new = res + RWSEM_ACTIVE_READ_BIAS;
 		if (new <= 0)
 			break;
 		old = res;
-		res = cmpxchg(&sem->count, old, new);
+		res = atomic_long_cmpxchg(&sem->count, old, new);
 	} while (res != old);
 	return res >= 0 ? 1 : 0;
 }
@@ -67,8 +67,8 @@ static inline long ___down_write(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
-	oldcount = sem->count;
-	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
+	oldcount = sem->count.counter;
+	sem->count.counter += RWSEM_ACTIVE_WRITE_BIAS;
 #else
 	long temp;
 	__asm__ __volatile__(
@@ -106,7 +106,7 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+	long ret = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
 			   RWSEM_ACTIVE_WRITE_BIAS);
 	if (ret == RWSEM_UNLOCKED_VALUE)
 		return 1;
@@ -117,8 +117,8 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
-	oldcount = sem->count;
-	sem->count -= RWSEM_ACTIVE_READ_BIAS;
+	oldcount = sem->count.counter;
+	sem->count.counter -= RWSEM_ACTIVE_READ_BIAS;
 #else
 	long temp;
 	__asm__ __volatile__(
@@ -142,8 +142,8 @@ static inline void __up_write(struct rw_semaphore *sem)
 {
 	long count;
 #ifndef	CONFIG_SMP
-	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
-	count = sem->count;
+	sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS;
+	count = sem->count.counter;
 #else
 	long temp;
 	__asm__ __volatile__(
@@ -171,8 +171,8 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
-	oldcount = sem->count;
-	sem->count -= RWSEM_WAITING_BIAS;
+	oldcount = sem->count.counter;
+	sem->count.counter -= RWSEM_WAITING_BIAS;
 #else
 	long temp;
 	__asm__ __volatile__(
@@ -191,47 +191,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 		rwsem_downgrade_wake(sem);
 }
 
-static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
-{
-#ifndef	CONFIG_SMP
-	sem->count += val;
-#else
-	long temp;
-	__asm__ __volatile__(
-	"1:	ldq_l	%0,%1\n"
-	"	addq	%0,%2,%0\n"
-	"	stq_c	%0,%1\n"
-	"	beq	%0,2f\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (sem->count)
-	:"Ir" (val), "m" (sem->count));
-#endif
-}
-
-static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
-{
-#ifndef	CONFIG_SMP
-	sem->count += val;
-	return sem->count;
-#else
-	long ret, temp;
-	__asm__ __volatile__(
-	"1:	ldq_l	%0,%1\n"
-	"	addq	%0,%3,%2\n"
-	"	addq	%0,%3,%0\n"
-	"	stq_c	%2,%1\n"
-	"	beq	%2,2f\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	:"=&r" (ret), "=m" (sem->count), "=&r" (temp)
-	:"Ir" (val), "m" (sem->count));
-
-	return ret;
-#endif
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ALPHA_RWSEM_H */
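The deleted rwsem_atomic_add()/rwsem_atomic_update() helpers need no per-arch replacement because sem->count is now an atomic_long_t that the generic rwsem code manipulates directly; conceptually the old helpers reduce to the following (illustrative sketch, not code from this patch):

        /* was: rwsem_atomic_add(val, sem); */
        atomic_long_add(val, &sem->count);

        /* was: newcount = rwsem_atomic_update(val, sem); */
        newcount = atomic_long_add_return(val, &sem->count);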
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index fed9c6f44c19..a40b9fc0c6c3 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -3,6 +3,8 @@
 
 #include <linux/kernel.h>
 #include <asm/current.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -13,8 +15,11 @@
 
 #define arch_spin_lock_flags(lock, flags)  arch_spin_lock(lock)
 #define arch_spin_is_locked(x)	((x)->lock != 0)
-#define arch_spin_unlock_wait(x) \
-		do { cpu_relax(); } while ((x)->lock)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, !VAL);
+}
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
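For reference, smp_cond_load_acquire() used above spins until the supplied condition on VAL becomes true and then provides ACQUIRE ordering, which is what backs the strengthened spin_unlock_wait() semantics mentioned in the changelog. The architecture-independent fallback looks roughly like this (sketch; the exact definition lives in <asm-generic/barrier.h> and may differ between kernel versions):

        #define smp_cond_load_acquire(ptr, cond_expr) ({                \
                typeof(ptr) __PTR = (ptr);                              \
                typeof(*ptr) VAL;                                       \
                for (;;) {                                              \
                        VAL = READ_ONCE(*__PTR);                        \
                        if (cond_expr)                                  \
                                break;                                  \
                        cpu_relax();                                    \
                }                                                       \
                smp_acquire__after_ctrl_dep();                          \
                VAL;                                                    \
        })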