aboutsummaryrefslogtreecommitdiffstats
path: root/arch/alpha/include
diff options
context:
space:
mode:
authorJason Low <jason.low2@hpe.com>2016-05-16 20:38:02 -0400
committerIngo Molnar <mingo@kernel.org>2016-06-08 09:16:59 -0400
commitd157bd860f1c828593730dca594d0ce51956833b (patch)
treee0d9bc3d73909ee33912016094b76118869b7b60 /arch/alpha/include
parent8ee62b1870be8e630158701632a533d0378e15b8 (diff)
locking/rwsem: Remove rwsem_atomic_add() and rwsem_atomic_update()
The rwsem-xadd count has been converted to an atomic variable and the rwsem code now directly uses atomic_long_add() and atomic_long_add_return(), so we can remove the arch implementations of rwsem_atomic_add() and rwsem_atomic_update().

Signed-off-by: Jason Low <jason.low2@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Lameter <cl@linux.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Jason Low <jason.low2@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Terry Rudd <terry.rudd@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Waiman Long <Waiman.Long@hpe.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/alpha/include')
-rw-r--r-- arch/alpha/include/asm/rwsem.h | 42
1 file changed, 0 insertions, 42 deletions
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index b40021aabb9f..77873d0ad293 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -191,47 +191,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
191 	rwsem_downgrade_wake(sem);
192 }
193 
194static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
195{
196#ifndef CONFIG_SMP
197 sem->count += val;
198#else
199 long temp;
200 __asm__ __volatile__(
201 "1: ldq_l %0,%1\n"
202 " addq %0,%2,%0\n"
203 " stq_c %0,%1\n"
204 " beq %0,2f\n"
205 ".subsection 2\n"
206 "2: br 1b\n"
207 ".previous"
208 :"=&r" (temp), "=m" (sem->count)
209 :"Ir" (val), "m" (sem->count));
210#endif
211}
212
213static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
214{
215#ifndef CONFIG_SMP
216 sem->count += val;
217 return sem->count;
218#else
219 long ret, temp;
220 __asm__ __volatile__(
221 "1: ldq_l %0,%1\n"
222 " addq %0,%3,%2\n"
223 " addq %0,%3,%0\n"
224 " stq_c %2,%1\n"
225 " beq %2,2f\n"
226 ".subsection 2\n"
227 "2: br 1b\n"
228 ".previous"
229 :"=&r" (ret), "=m" (sem->count), "=&r" (temp)
230 :"Ir" (val), "m" (sem->count));
231
232 return ret;
233#endif
234}
235
236 #endif /* __KERNEL__ */
237 #endif /* _ALPHA_RWSEM_H */