-rw-r--r--	arch/x86/include/asm/rwsem.h	25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 413620024768..5f9af3081d66 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -55,6 +55,9 @@ extern asmregparm struct rw_semaphore *
 
 /*
  * the semaphore definition
+ *
+ * The bias values and the counter type needs to be extended to 64 bits
+ * if we want to have more than 32767 potential readers/writers
  */
 
 #define RWSEM_UNLOCKED_VALUE		0x00000000
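The 32767 figure in the new comment falls out of how the 32-bit count is packed: the low 16 bits hold the number of active holders, while the upper bits carry the negative waiter/writer bias. A minimal, standalone user-space sketch of that arithmetic; the RWSEM_ACTIVE_BIAS, RWSEM_ACTIVE_MASK, and RWSEM_WAITING_BIAS values come from the unchanged parts of this header (not shown in the hunks here), so treat them as an assumption of the sketch:

    /* Sketch of the 32-bit rwsem count packing; not part of this patch.
     * Bias constants assumed to match the surrounding header. */
    #include <assert.h>

    #define RWSEM_ACTIVE_BIAS	0x00000001
    #define RWSEM_ACTIVE_MASK	0x0000ffff
    #define RWSEM_WAITING_BIAS	(-0x00010000)
    #define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
    #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

    int main(void)
    {
    	signed int count = 0;	/* RWSEM_UNLOCKED_VALUE */

    	/* Each down_read adds 1; 32767 (0x7fff) active readers still fit
    	 * in the signed 16-bit active field without spilling into the
    	 * waiter bits -- which is where the comment's limit comes from. */
    	count += 32767 * RWSEM_ACTIVE_READ_BIAS;
    	assert((count & RWSEM_ACTIVE_MASK) == 32767);

    	/* A writer adds one active holder plus the waiting bias, driving
    	 * the whole count negative (the "contended" sign). */
    	signed int wcount = RWSEM_ACTIVE_WRITE_BIAS;
    	assert(wcount < 0 && (wcount & RWSEM_ACTIVE_MASK) == 1);
    	return 0;
    }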
@@ -64,8 +67,10 @@ extern asmregparm struct rw_semaphore *
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
+typedef signed int rwsem_count_t;
+
 struct rw_semaphore {
-	signed long		count;
+	rwsem_count_t		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
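Routing the counter through one typedef keeps the 64-bit extension mentioned in the comment localized: only the typedef and the bias values would need to change, not every helper below. Purely as a hypothetical sketch of such a follow-up (not part of this patch; the mask and bias values here are illustrative):

    /* Hypothetical widening, NOT in this patch: flip the typedef and the
     * bias values; every rwsem_count_t user below picks it up for free. */
    #ifdef CONFIG_X86_64
    typedef signed long rwsem_count_t;	/* 64-bit count */
    # define RWSEM_ACTIVE_MASK	0xffffffffL
    # define RWSEM_WAITING_BIAS	(-0x100000000L)
    #else
    typedef signed int rwsem_count_t;	/* this patch's 32-bit count */
    # define RWSEM_ACTIVE_MASK	0x0000ffff
    # define RWSEM_WAITING_BIAS	(-0x00010000)
    #endif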
@@ -121,7 +126,7 @@ static inline void __down_read(struct rw_semaphore *sem)
  */
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-	__s32 result, tmp;
+	rwsem_count_t result, tmp;
 	asm volatile("# beginning __down_read_trylock\n\t"
 		     " mov %0,%1\n\t"
 		     "1:\n\t"
@@ -143,7 +148,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-	int tmp;
+	rwsem_count_t tmp;
 
 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
 	asm volatile("# beginning down_write\n\t"
@@ -170,9 +175,9 @@ static inline void __down_write(struct rw_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	signed long ret = cmpxchg(&sem->count,
-				  RWSEM_UNLOCKED_VALUE,
-				  RWSEM_ACTIVE_WRITE_BIAS);
+	rwsem_count_t ret = cmpxchg(&sem->count,
+				    RWSEM_UNLOCKED_VALUE,
+				    RWSEM_ACTIVE_WRITE_BIAS);
 	if (ret == RWSEM_UNLOCKED_VALUE)
 		return 1;
 	return 0;
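The trylock path is a single compare-and-exchange: it installs the write bias only if the count is still RWSEM_UNLOCKED_VALUE, so any active reader or writer makes it fail without blocking. The same logic in a user-space sketch, with the GCC __sync builtin standing in for the kernel's cmpxchg() (a stand-in, not the same primitive; typedef and constants as in the header above):

    typedef signed int rwsem_count_t;
    #define RWSEM_UNLOCKED_VALUE	0x00000000
    #define RWSEM_ACTIVE_WRITE_BIAS	(-0x00010000 + 0x00000001)

    /* User-space analogue of __down_write_trylock's cmpxchg step. */
    static inline int down_write_trylock_sketch(rwsem_count_t *count)
    {
    	rwsem_count_t old = __sync_val_compare_and_swap(count,
    					RWSEM_UNLOCKED_VALUE,
    					RWSEM_ACTIVE_WRITE_BIAS);
    	/* Succeeds only if no reader or writer got there first. */
    	return old == RWSEM_UNLOCKED_VALUE;
    }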
@@ -183,7 +188,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
+	rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
 	asm volatile("# beginning __up_read\n\t"
 		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* subtracts 1, returns the old value */
@@ -201,7 +206,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	unsigned long tmp;
+	rwsem_count_t tmp;
 	asm volatile("# beginning __up_write\n\t"
 		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* tries to transition
@@ -245,9 +250,9 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 /*
  * implement exchange and add functionality
  */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline rwsem_count_t rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 {
-	int tmp = delta;
+	rwsem_count_t tmp = delta;
 
 	asm volatile(LOCK_PREFIX "xadd %0,%1"
 		     : "+r" (tmp), "+m" (sem->count)
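This helper is an exchange-and-add: LOCK xadd leaves the pre-add count in tmp, and adding delta back gives the caller the post-add value. Equivalent semantics in a C11-atomics sketch (an illustration of the behavior, not the kernel implementation):

    #include <stdatomic.h>

    typedef signed int rwsem_count_t;	/* as in the header above */

    /* Sketch of rwsem_atomic_update's semantics: the atomic fetch-add
     * returns the old count, so old + delta is the freshly written
     * value handed back to the caller. */
    static inline rwsem_count_t
    rwsem_atomic_update_sketch(rwsem_count_t delta, _Atomic rwsem_count_t *count)
    {
    	rwsem_count_t old = atomic_fetch_add(count, delta);
    	return old + delta;
    }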
