aboutsummaryrefslogtreecommitdiffstats
path: root/arch/alpha
diff options
context:
space:
mode:
authorJason Low <jason.low2@hpe.com>2016-06-04 01:26:02 -0400
committerIngo Molnar <mingo@kernel.org>2016-06-08 09:16:42 -0400
commit8ee62b1870be8e630158701632a533d0378e15b8 (patch)
tree18f68e695ee974f059703e75957e201695049cb9 /arch/alpha
parent055ce0fd1b86c204430cbc0887165599d6e15090 (diff)
locking/rwsem: Convert sem->count to 'atomic_long_t'
Convert the rwsem count variable to an atomic_long_t since we use it as an atomic variable. This also allows us to remove the rwsem_atomic_{add,update}() "abstraction" which would now be an unnecesary level of indirection. In follow up patches, we also remove the rwsem_atomic_{add,update}() definitions across the various architectures. Suggested-by: Peter Zijlstra <peterz@infradead.org> Signed-off-by: Jason Low <jason.low2@hpe.com> [ Build warning fixes on various architectures. ] Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Jason Low <jason.low2@hp.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Hurley <peter@hurleysoftware.com> Cc: Terry Rudd <terry.rudd@hpe.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tim Chen <tim.c.chen@linux.intel.com> Cc: Tony Luck <tony.luck@intel.com> Cc: Waiman Long <Waiman.Long@hpe.com> Link: http://lkml.kernel.org/r/1465017963-4839-2-git-send-email-jason.low2@hpe.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/alpha')
-rw-r--r--arch/alpha/include/asm/rwsem.h26
1 file changed, 13 insertions, 13 deletions
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 0131a7058778..b40021aabb9f 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -25,8 +25,8 @@ static inline void __down_read(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
-	oldcount = sem->count;
-	sem->count += RWSEM_ACTIVE_READ_BIAS;
+	oldcount = sem->count.counter;
+	sem->count.counter += RWSEM_ACTIVE_READ_BIAS;
 #else
 	long temp;
 	__asm__ __volatile__(
@@ -52,13 +52,13 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
 	long old, new, res;

-	res = sem->count;
+	res = atomic_long_read(&sem->count);
 	do {
 		new = res + RWSEM_ACTIVE_READ_BIAS;
 		if (new <= 0)
 			break;
 		old = res;
-		res = cmpxchg(&sem->count, old, new);
+		res = atomic_long_cmpxchg(&sem->count, old, new);
 	} while (res != old);
 	return res >= 0 ? 1 : 0;
 }
@@ -67,8 +67,8 @@ static inline long ___down_write(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
-	oldcount = sem->count;
-	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
+	oldcount = sem->count.counter;
+	sem->count.counter += RWSEM_ACTIVE_WRITE_BIAS;
 #else
 	long temp;
 	__asm__ __volatile__(
@@ -106,7 +106,7 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+	long ret = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
 			   RWSEM_ACTIVE_WRITE_BIAS);
 	if (ret == RWSEM_UNLOCKED_VALUE)
 		return 1;
@@ -117,8 +117,8 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
-	oldcount = sem->count;
-	sem->count -= RWSEM_ACTIVE_READ_BIAS;
+	oldcount = sem->count.counter;
+	sem->count.counter -= RWSEM_ACTIVE_READ_BIAS;
 #else
 	long temp;
 	__asm__ __volatile__(
@@ -142,8 +142,8 @@ static inline void __up_write(struct rw_semaphore *sem)
 {
 	long count;
 #ifndef	CONFIG_SMP
-	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
-	count = sem->count;
+	sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS;
+	count = sem->count.counter;
 #else
 	long temp;
 	__asm__ __volatile__(
@@ -171,8 +171,8 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
-	oldcount = sem->count;
-	sem->count -= RWSEM_WAITING_BIAS;
+	oldcount = sem->count.counter;
+	sem->count.counter -= RWSEM_WAITING_BIAS;
 #else
 	long temp;
 	__asm__ __volatile__(