Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/rwsem.h	25
1 file changed, 10 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index a626cff86041..c30206c2bbf9 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -68,10 +68,8 @@ extern asmregparm struct rw_semaphore *
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-typedef signed long rwsem_count_t;
-
 struct rw_semaphore {
-	rwsem_count_t		count;
+	long			count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -127,7 +125,7 @@ static inline void __down_read(struct rw_semaphore *sem)
  */
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-	rwsem_count_t result, tmp;
+	long result, tmp;
 	asm volatile("# beginning __down_read_trylock\n\t"
 		     " mov %0,%1\n\t"
 		     "1:\n\t"
@@ -149,7 +147,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-	rwsem_count_t tmp;
+	long tmp;
 	asm volatile("# beginning down_write\n\t"
 		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* adds 0xffff0001, returns the old value */
@@ -174,9 +172,8 @@ static inline void __down_write(struct rw_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	rwsem_count_t ret = cmpxchg(&sem->count,
-				    RWSEM_UNLOCKED_VALUE,
-				    RWSEM_ACTIVE_WRITE_BIAS);
+	long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+			   RWSEM_ACTIVE_WRITE_BIAS);
 	if (ret == RWSEM_UNLOCKED_VALUE)
 		return 1;
 	return 0;
@@ -187,7 +184,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-	rwsem_count_t tmp;
+	long tmp;
 	asm volatile("# beginning __up_read\n\t"
 		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* subtracts 1, returns the old value */
@@ -205,7 +202,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	rwsem_count_t tmp;
+	long tmp;
 	asm volatile("# beginning __up_write\n\t"
 		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* subtracts 0xffff0001, returns the old value */
@@ -241,8 +238,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(rwsem_count_t delta,
-				    struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 {
 	asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
 		     : "+m" (sem->count)
@@ -252,10 +248,9 @@ static inline void rwsem_atomic_add(rwsem_count_t delta,
 /*
  * implement exchange and add functionality
  */
-static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
-						struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-	rwsem_count_t tmp = delta;
+	long tmp = delta;
 
 	asm volatile(LOCK_PREFIX "xadd %0,%1"
 		     : "+r" (tmp), "+m" (sem->count)
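
Note: the conversion above is mechanical (rwsem_count_t was typedef'd to signed long, so substituting long everywhere preserves behavior), but the exchange-and-add hunk is the one worth unpacking. As a rough illustration only, not part of the patch, the portable user-space sketch below mimics rwsem_atomic_update() using GCC's __atomic_fetch_add() builtin, which compiles to the same lock-prefixed xadd on x86; demo_rwsem_atomic_update() is a hypothetical stand-in name, not kernel API.

	/*
	 * Illustration only: xadd leaves the old value in the source
	 * operand, so the updated count is old value + delta, matching
	 * the new count that rwsem_atomic_update() returns to callers.
	 */
	#include <stdio.h>

	static long demo_rwsem_atomic_update(long delta, long *count)
	{
		long old = __atomic_fetch_add(count, delta, __ATOMIC_SEQ_CST);
		return old + delta;	/* the new value of the count */
	}

	int main(void)
	{
		long count = 0;	/* RWSEM_UNLOCKED_VALUE */
		/* a reader acquires: adds RWSEM_ACTIVE_READ_BIAS (+1) */
		printf("down_read -> %ld\n", demo_rwsem_atomic_update(1, &count));
		/* the reader releases: subtracts it again */
		printf("up_read   -> %ld\n", demo_rwsem_atomic_update(-1, &count));
		return 0;
	}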