author	Thomas Gleixner <tglx@linutronix.de>	2011-01-26 15:05:53 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2011-01-27 06:30:38 -0500
commit	bde11efbc21ea84c3351464a422b467eaefabb9a (patch)
tree	52e7f7a0e884d1c900276bcc57126a9403e63c8a /arch
parent	c16a87ce063f79e0ec7d25ce2950e1bc6db03c72 (diff)
x86: Cleanup rwsem_count_t typedef
Remove the typedef which has no real reason to be there.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David Miller <davem@davemloft.net>
Cc: Chris Zankel <chris@zankel.net>
LKML-Reference: <20110126195833.580335506@linutronix.de>
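In miniature, the patch replaces the one-off alias with the plain type it stood for; a before/after sketch condensed from the diff below (other fields abbreviated):

	/* before: an alias that adds no information */
	typedef signed long rwsem_count_t;

	struct rw_semaphore {
		rwsem_count_t		count;
		/* ... */
	};

	/* after: the underlying type, spelled directly */
	struct rw_semaphore {
		long			count;
		/* ... */
	};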
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/rwsem.h	25
1 file changed, 10 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index a626cff86041..c30206c2bbf9 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -68,10 +68,8 @@ extern asmregparm struct rw_semaphore *
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-typedef signed long rwsem_count_t;
-
 struct rw_semaphore {
-	rwsem_count_t		count;
+	long			count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
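For context on the bias constants kept above: the count word packs the active and waiting holder counts, and the 0xffff0001 constants in the asm comments further down correspond to the 32-bit values sketched here (an assumption inferred from those comments; the real definitions sit earlier in rwsem.h and are wider on 64-bit):

	#define RWSEM_UNLOCKED_VALUE	0x00000000L	/* free: no holders, no waiters */
	#define RWSEM_ACTIVE_BIAS	0x00000001L	/* one active reader or writer */
	#define RWSEM_WAITING_BIAS	(-0x0000ffffL - 1)	/* 0xffff0000 as 32 bits */
	/* write bias = WAITING + ACTIVE = 0xffff0001, matching the asm comments below */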
@@ -127,7 +125,7 @@ static inline void __down_read(struct rw_semaphore *sem)
  */
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-	rwsem_count_t result, tmp;
+	long result, tmp;
 	asm volatile("# beginning __down_read_trylock\n\t"
 		     " mov %0,%1\n\t"
 		     "1:\n\t"
@@ -149,7 +147,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-	rwsem_count_t tmp;
+	long tmp;
 	asm volatile("# beginning down_write\n\t"
 		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* adds 0xffff0001, returns the old value */
@@ -174,9 +172,8 @@ static inline void __down_write(struct rw_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	rwsem_count_t ret = cmpxchg(&sem->count,
-				    RWSEM_UNLOCKED_VALUE,
-				    RWSEM_ACTIVE_WRITE_BIAS);
+	long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+			   RWSEM_ACTIVE_WRITE_BIAS);
 	if (ret == RWSEM_UNLOCKED_VALUE)
 		return 1;
 	return 0;
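The trylock collapses to a single compare-and-swap: take the lock only if the count is still RWSEM_UNLOCKED_VALUE. A hedged portable sketch with GCC's __atomic builtins (illustration only; the function name and the 32-bit constant are assumptions, not the kernel's cmpxchg):

	static inline int down_write_trylock_sketch(long *count)
	{
		long expected = 0x00000000L;	/* RWSEM_UNLOCKED_VALUE */

		/* Succeeds only when nobody holds or waits on the lock;
		 * on success the count becomes the write bias. */
		return __atomic_compare_exchange_n(count, &expected,
						   (long)0xffff0001,	/* 32-bit write bias */
						   0, __ATOMIC_ACQUIRE,
						   __ATOMIC_RELAXED);
	}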
@@ -187,7 +184,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-	rwsem_count_t tmp;
+	long tmp;
 	asm volatile("# beginning __up_read\n\t"
 		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* subtracts 1, returns the old value */
@@ -205,7 +202,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	rwsem_count_t tmp;
+	long tmp;
 	asm volatile("# beginning __up_write\n\t"
 		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* subtracts 0xffff0001, returns the old value */
@@ -241,8 +238,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(rwsem_count_t delta,
-				    struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 {
 	asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
 		     : "+m" (sem->count)
@@ -252,10 +248,9 @@ static inline void rwsem_atomic_add(rwsem_count_t delta,
 /*
  * implement exchange and add functionality
  */
-static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
-						struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-	rwsem_count_t tmp = delta;
+	long tmp = delta;
 
 	asm volatile(LOCK_PREFIX "xadd %0,%1"
 		     : "+r" (tmp), "+m" (sem->count)
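Both helpers reduce to one locked instruction: rwsem_atomic_add is a locked add, while rwsem_atomic_update uses xadd, which leaves the old count in tmp while adding delta to memory. A hedged __atomic sketch of the same exchange-and-add (assuming the function reports the updated count, as the xadd result plus delta):

	static inline long rwsem_atomic_update_sketch(long delta, long *count)
	{
		/* Atomic fetch-add returns the value before the addition,
		 * so add delta once more to yield the new count. */
		return __atomic_fetch_add(count, delta, __ATOMIC_SEQ_CST) + delta;
	}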