 arch/x86/include/asm/rwsem.h | 53 ++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 40 insertions(+), 13 deletions(-)
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 5f9af3081d66..10204a25bf93 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -41,6 +41,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
+#include <asm/asm.h>
 
 struct rwsem_waiter;
 
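The new <asm/asm.h> include supplies the _ASM_INC and _ASM_ADD macros used in the fast paths below: they expand to the mnemonic with the natural operand-size suffix for the build, so the same asm template assembles as incl/addl on 32-bit and incq/addq on 64-bit. A minimal sketch of the mechanism, written from memory of that era's header rather than quoted verbatim:

    /*
     * Simplified sketch (assumed, not verbatim) of the size-selection
     * helpers in <asm/asm.h>; each macro stringifies to the suffixed
     * mnemonic surrounded by spaces, e.g. " incl " or " incq ".
     */
    #ifdef CONFIG_X86_32
    # define __ASM_SEL(a, b)	" " #a " "	/* 32-bit: take the "l" form */
    #else
    # define __ASM_SEL(a, b)	" " #b " "	/* 64-bit: take the "q" form */
    #endif
    #define __ASM_SIZE(inst)	__ASM_SEL(inst##l, inst##q)
    #define _ASM_INC		__ASM_SIZE(inc)
    #define _ASM_ADD		__ASM_SIZE(add)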
@@ -56,18 +57,24 @@ extern asmregparm struct rw_semaphore *
 /*
  * the semaphore definition
  *
- * The bias values and the counter type needs to be extended to 64 bits
- * if we want to have more than 32767 potential readers/writers
+ * The bias values and the counter type limit the number of
+ * potential readers/writers to 32767 for 32 bits and 2147483647
+ * for 64 bits.
  */
 
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
+#ifdef CONFIG_X86_64
+# define RWSEM_ACTIVE_MASK		0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK		0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE		0x00000000L
+#define RWSEM_ACTIVE_BIAS		0x00000001L
+#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-typedef signed int rwsem_count_t;
+typedef signed long rwsem_count_t;
 
 struct rw_semaphore {
 	rwsem_count_t		count;
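With these definitions the whole lock state lives in one signed word: the low bits (RWSEM_ACTIVE_MASK) count active holders, while writers and queued waiters also add the negative RWSEM_WAITING_BIAS, so the count is negative exactly when a writer holds the lock or someone is waiting. That sign is what the jns fast-path tests below rely on. A standalone userspace sketch (not kernel code) of the resulting 64-bit values:

    #include <stdio.h>

    /* same values the 64-bit branch above defines */
    #define RWSEM_ACTIVE_MASK	0xffffffffL
    #define RWSEM_ACTIVE_BIAS	0x00000001L
    #define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK-1)	/* -2^32 */
    #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

    int main(void)
    {
    	long readers = 3 * RWSEM_ACTIVE_BIAS;	/* three active readers */
    	long writer  = RWSEM_ACTIVE_WRITE_BIAS;	/* one active writer */

    	/* readers leave the count non-negative: prints 0x3 */
    	printf("3 readers: %#lx (negative: %d)\n",
    	       (unsigned long)readers, readers < 0);
    	/* a writer makes it negative: 0xffffffff00000001, active = 1 */
    	printf("1 writer:  %#lx (negative: %d, active: %#lx)\n",
    	       (unsigned long)writer, writer < 0,
    	       (unsigned long)(writer & RWSEM_ACTIVE_MASK));
    	return 0;
    }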
@@ -110,7 +117,7 @@ do { \
 static inline void __down_read(struct rw_semaphore *sem)
 {
 	asm volatile("# beginning down_read\n\t"
-		     LOCK_PREFIX " inc%z0 (%1)\n\t"
+		     LOCK_PREFIX _ASM_INC "(%1)\n\t"
 		     /* adds 0x00000001, returns the old value */
 		     " jns 1f\n"
 		     " call call_rwsem_down_read_failed\n"
@@ -225,8 +232,25 @@ static inline void __up_write(struct rw_semaphore *sem)
  */
 static inline void __downgrade_write(struct rw_semaphore *sem)
 {
+#ifdef CONFIG_X86_64
+# if RWSEM_WAITING_BIAS != -0x100000000
+#  error "This code assumes RWSEM_WAITING_BIAS == -2^32"
+# endif
+
+	/* 64-bit immediates are special and expensive, and not needed here */
+	asm volatile("# beginning __downgrade_write\n\t"
+		     LOCK_PREFIX "incl 4(%1)\n\t"
+		     /* transitions 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 */
+		     " jns 1f\n\t"
+		     " call call_rwsem_downgrade_wake\n"
+		     "1:\n\t"
+		     "# ending __downgrade_write\n"
+		     : "+m" (sem->count)
+		     : "a" (sem)
+		     : "memory", "cc");
+#else
 	asm volatile("# beginning __downgrade_write\n\t"
-		     LOCK_PREFIX " add%z0 %2,(%1)\n\t"
+		     LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
 		     /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
 		     " jns 1f\n\t"
 		     " call call_rwsem_downgrade_wake\n"
@@ -235,22 +259,25 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 		     : "+m" (sem->count)
 		     : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
 		     : "memory", "cc");
+#endif
 }
 
 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(rwsem_count_t delta,
+				    struct rw_semaphore *sem)
 {
-	asm volatile(LOCK_PREFIX "add%z0 %1,%0"
+	asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
 		     : "+m" (sem->count)
-		     : "ir" (delta));
+		     : "er" (delta));
 }
 
 /*
  * implement exchange and add functionality
  */
-static inline rwsem_count_t rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
+						struct rw_semaphore *sem)
 {
 	rwsem_count_t tmp = delta;
 
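The constraint change from "ir" to "er" in rwsem_atomic_add is the 64-bit counterpart of the same immediate-size rule: "i" would allow any constant, but addq cannot encode a full 64-bit immediate, and "e" restricts the compiler to 32-bit sign-extended constants, with "r" as the register fallback. The tail of rwsem_atomic_update is cut off by this hunk; judging from the visible tmp = delta and the "exchange and add" comment, it is built on xadd, roughly as in this sketch (not the file's verbatim code):

    /* xadd atomically does: old = *count; *count += tmp; tmp = old */
    static inline long xadd_update_sketch(long delta, long *count)
    {
    	long tmp = delta;

    	asm volatile("lock; xadd %0,%1"
    		     : "+r" (tmp), "+m" (*count)
    		     : : "memory");
    	return tmp + delta;	/* old value + delta == new value */
    }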