author     Linus Torvalds <torvalds@linux-foundation.org>  2010-01-12 20:57:35 -0500
committer  H. Peter Anvin <hpa@zytor.com>  2010-01-14 01:38:51 -0500
commit     5d0b7235d83eefdafda300656e97d368afcafc9a (patch)
tree       4d6d7036b4f89780dcce1b39ce17cc47038b2a6c
parent     3bef444797f7624f8fbd27f4e0334ce96a108725 (diff)
x86: clean up rwsem type system
The fast version of the rwsems (the code that uses xadd) has
traditionally only worked on x86-32, and as a result it mixes
different kinds of types wildly - they just all happen to be 32-bit.
We have "long", we have "__s32", and we have "int".

To make it work on x86-64, the types suddenly matter a lot more. The
counter can be either a 32-bit or a 64-bit signed type, and both work
(with the caveat that a 32-bit counter has only 15 bits of effective
write counter, so it's limited to 32767 users). But whatever type you
choose, it needs to be used consistently.

This introduces a new 'rwsem_count_t', which is a 32-bit signed type.
For a 64-bit type, you'd need to also update the BIAS values.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <alpine.LFD.2.00.1001121755220.17145@localhost.localdomain>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
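As a sketch of what that 64-bit extension might look like (not part of
this patch - the CONFIG_X86_64 branch and the 64-bit mask width below
are assumptions for illustration; RWSEM_ACTIVE_MASK and
RWSEM_WAITING_BIAS already exist in this header):

/*
 * Hypothetical sketch, not in this patch: widening rwsem_count_t to
 * 64 bits also means widening the active mask and bias values, since
 * the waiting bias must sit above the active-count field.
 */
#ifdef CONFIG_X86_64
typedef signed long rwsem_count_t;              /* 64-bit counter (assumed) */
# define RWSEM_ACTIVE_MASK      0xffffffffL     /* 32 bits of active count */
#else
typedef signed int rwsem_count_t;               /* 32-bit, as introduced here */
# define RWSEM_ACTIVE_MASK      0x0000ffffL     /* 16 bits of active count */
#endif
#define RWSEM_UNLOCKED_VALUE    0x00000000L
#define RWSEM_ACTIVE_BIAS       0x00000001L
#define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

With the 32-bit layout, the active count lives in the low 16 bits of a
signed word, which is where the 15 effective bits and the 32767-user
limit mentioned above come from.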
-rw-r--r--  arch/x86/include/asm/rwsem.h | 25 +++++++++++++++----------
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 413620024768..5f9af3081d66 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -55,6 +55,9 @@ extern asmregparm struct rw_semaphore *
 
 /*
  * the semaphore definition
+ *
+ * The bias values and the counter type need to be extended to 64 bits
+ * if we want to have more than 32767 potential readers/writers
  */
 
 #define RWSEM_UNLOCKED_VALUE		0x00000000
@@ -64,8 +67,10 @@ extern asmregparm struct rw_semaphore *
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
+typedef signed int rwsem_count_t;
+
 struct rw_semaphore {
-	signed long		count;
+	rwsem_count_t		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -121,7 +126,7 @@ static inline void __down_read(struct rw_semaphore *sem)
  */
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-	__s32 result, tmp;
+	rwsem_count_t result, tmp;
 	asm volatile("# beginning __down_read_trylock\n\t"
 		     "  mov          %0,%1\n\t"
 		     "1:\n\t"
@@ -143,7 +148,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-	int tmp;
+	rwsem_count_t tmp;
 
 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
 	asm volatile("# beginning down_write\n\t"
@@ -170,9 +175,9 @@ static inline void __down_write(struct rw_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	signed long ret = cmpxchg(&sem->count,
-				  RWSEM_UNLOCKED_VALUE,
-				  RWSEM_ACTIVE_WRITE_BIAS);
+	rwsem_count_t ret = cmpxchg(&sem->count,
+				    RWSEM_UNLOCKED_VALUE,
+				    RWSEM_ACTIVE_WRITE_BIAS);
 	if (ret == RWSEM_UNLOCKED_VALUE)
 		return 1;
 	return 0;
@@ -183,7 +188,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
+	rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
 	asm volatile("# beginning __up_read\n\t"
 		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
 		     /* subtracts 1, returns the old value */
@@ -201,7 +206,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	unsigned long tmp;
+	rwsem_count_t tmp;
 	asm volatile("# beginning __up_write\n\t"
 		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
 		     /* tries to transition
@@ -245,9 +250,9 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 /*
  * implement exchange and add functionality
  */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline rwsem_count_t rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 {
-	int tmp = delta;
+	rwsem_count_t tmp = delta;
 
 	asm volatile(LOCK_PREFIX "xadd %0,%1"
 		     : "+r" (tmp), "+m" (sem->count)
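
To see the counter arithmetic in action, here is a small hypothetical
userspace demo (not kernel code) built on the 32-bit constants this
header uses; it only illustrates how a single bias update encodes
reader and writer state in one signed word - in the real header the
update is done with LOCK_PREFIX xadd so the read-modify-write is
atomic:

#include <stdio.h>

typedef signed int rwsem_count_t;	/* the type this patch introduces */

#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	rwsem_count_t count = RWSEM_UNLOCKED_VALUE;

	/* a writer adds ACTIVE_WRITE_BIAS: the active count becomes 1
	 * and the whole word goes negative, which readers test for */
	count += RWSEM_ACTIVE_WRITE_BIAS;
	printf("write-locked: %#x active=%d negative=%d\n",
	       (unsigned int)count, count & RWSEM_ACTIVE_MASK, count < 0);

	/* the writer drops the lock; a reader adds ACTIVE_READ_BIAS:
	 * the active count is 1 but the word stays non-negative */
	count -= RWSEM_ACTIVE_WRITE_BIAS;
	count += RWSEM_ACTIVE_READ_BIAS;
	printf("read-locked:  %#x active=%d negative=%d\n",
	       (unsigned int)count, count & RWSEM_ACTIVE_MASK, count < 0);
	return 0;
}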