Diffstat (limited to 'include/asm-x86/rwsem.h')
-rw-r--r--   include/asm-x86/rwsem.h   169
1 file changed, 86 insertions, 83 deletions
diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h
index 520a379f4b80..750f2a3542b3 100644
--- a/include/asm-x86/rwsem.h
+++ b/include/asm-x86/rwsem.h
@@ -56,14 +56,16 @@ extern asmregparm struct rw_semaphore *
 /*
  * the semaphore definition
  */
-struct rw_semaphore {
-	signed long count;
+
 #define RWSEM_UNLOCKED_VALUE		0x00000000
 #define RWSEM_ACTIVE_BIAS		0x00000001
 #define RWSEM_ACTIVE_MASK		0x0000ffff
 #define RWSEM_WAITING_BIAS		(-0x00010000)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+struct rw_semaphore {
+	signed long count;
 	spinlock_t wait_lock;
 	struct list_head wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -78,11 +80,13 @@ struct rw_semaphore {
 #endif
 
 
 #define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+{ \
+	RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+	LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
+}
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
 
 extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
@@ -100,16 +104,16 @@ do { \
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-	__asm__ __volatile__(
-		"# beginning down_read\n\t"
-LOCK_PREFIX	" incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
+	asm volatile("# beginning down_read\n\t"
+		     LOCK_PREFIX " incl (%%eax)\n\t"
+		     /* adds 0x00000001, returns the old value */
 		     " jns 1f\n"
 		     " call call_rwsem_down_read_failed\n"
 		     "1:\n\t"
 		     "# ending down_read\n\t"
 		     : "+m" (sem->count)
 		     : "a" (sem)
 		     : "memory", "cc");
 }
 
 /*
@@ -118,21 +122,20 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
 	__s32 result, tmp;
-	__asm__ __volatile__(
-		"# beginning __down_read_trylock\n\t"
-		" movl %0,%1\n\t"
-		"1:\n\t"
-		" movl %1,%2\n\t"
-		" addl %3,%2\n\t"
-		" jle 2f\n\t"
-LOCK_PREFIX	" cmpxchgl %2,%0\n\t"
-		" jnz 1b\n\t"
-		"2:\n\t"
-		"# ending __down_read_trylock\n\t"
-		: "+m" (sem->count), "=&a" (result), "=&r" (tmp)
-		: "i" (RWSEM_ACTIVE_READ_BIAS)
-		: "memory", "cc");
-	return result>=0 ? 1 : 0;
+	asm volatile("# beginning __down_read_trylock\n\t"
+		     " movl %0,%1\n\t"
+		     "1:\n\t"
+		     " movl %1,%2\n\t"
+		     " addl %3,%2\n\t"
+		     " jle 2f\n\t"
+		     LOCK_PREFIX " cmpxchgl %2,%0\n\t"
+		     " jnz 1b\n\t"
+		     "2:\n\t"
+		     "# ending __down_read_trylock\n\t"
+		     : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+		     : "i" (RWSEM_ACTIVE_READ_BIAS)
+		     : "memory", "cc");
+	return result >= 0 ? 1 : 0;
 }
 
 /*
@@ -143,17 +146,18 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 	int tmp;
 
 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
-	__asm__ __volatile__(
-		"# beginning down_write\n\t"
-LOCK_PREFIX	" xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
-		" testl %%edx,%%edx\n\t" /* was the count 0 before? */
-		" jz 1f\n"
-		" call call_rwsem_down_write_failed\n"
-		"1:\n"
-		"# ending down_write"
-		: "+m" (sem->count), "=d" (tmp)
-		: "a" (sem), "1" (tmp)
-		: "memory", "cc");
+	asm volatile("# beginning down_write\n\t"
+		     LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
+		     /* subtract 0x0000ffff, returns the old value */
+		     " testl %%edx,%%edx\n\t"
+		     /* was the count 0 before? */
+		     " jz 1f\n"
+		     " call call_rwsem_down_write_failed\n"
+		     "1:\n"
+		     "# ending down_write"
+		     : "+m" (sem->count), "=d" (tmp)
+		     : "a" (sem), "1" (tmp)
+		     : "memory", "cc");
 }
 
 static inline void __down_write(struct rw_semaphore *sem)
@@ -167,7 +171,7 @@ static inline void __down_write(struct rw_semaphore *sem)
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
 	signed long ret = cmpxchg(&sem->count,
-				  RWSEM_UNLOCKED_VALUE,
-				  RWSEM_ACTIVE_WRITE_BIAS);
+				  RWSEM_UNLOCKED_VALUE,
+				  RWSEM_ACTIVE_WRITE_BIAS);
 	if (ret == RWSEM_UNLOCKED_VALUE)
 		return 1;
@@ -180,16 +184,16 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 static inline void __up_read(struct rw_semaphore *sem)
 {
 	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
-	__asm__ __volatile__(
-		"# beginning __up_read\n\t"
-LOCK_PREFIX	" xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
+	asm volatile("# beginning __up_read\n\t"
+		     LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
+		     /* subtracts 1, returns the old value */
 		     " jns 1f\n\t"
 		     " call call_rwsem_wake\n"
 		     "1:\n"
 		     "# ending __up_read\n"
 		     : "+m" (sem->count), "=d" (tmp)
 		     : "a" (sem), "1" (tmp)
 		     : "memory", "cc");
 }
 
 /*
@@ -197,17 +201,18 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	__asm__ __volatile__(
-		"# beginning __up_write\n\t"
-		" movl %2,%%edx\n\t"
-LOCK_PREFIX	" xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
-		" jz 1f\n"
-		" call call_rwsem_wake\n"
-		"1:\n\t"
-		"# ending __up_write\n"
-		: "+m" (sem->count)
-		: "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
-		: "memory", "cc", "edx");
+	asm volatile("# beginning __up_write\n\t"
+		     " movl %2,%%edx\n\t"
+		     LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
+		     /* tries to transition
+			0xffff0001 -> 0x00000000 */
+		     " jz 1f\n"
+		     " call call_rwsem_wake\n"
+		     "1:\n\t"
+		     "# ending __up_write\n"
+		     : "+m" (sem->count)
+		     : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
+		     : "memory", "cc", "edx");
 }
 
 /*
@@ -215,16 +220,16 @@ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
  */
 static inline void __downgrade_write(struct rw_semaphore *sem)
 {
-	__asm__ __volatile__(
-		"# beginning __downgrade_write\n\t"
-LOCK_PREFIX	" addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+	asm volatile("# beginning __downgrade_write\n\t"
+		     LOCK_PREFIX " addl %2,(%%eax)\n\t"
+		     /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
 		     " jns 1f\n\t"
 		     " call call_rwsem_downgrade_wake\n"
 		     "1:\n\t"
 		     "# ending __downgrade_write\n"
 		     : "+m" (sem->count)
 		     : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
 		     : "memory", "cc");
 }
 
 /*
@@ -232,10 +237,9 @@ LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
  */
 static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %1,%0"
-		: "+m" (sem->count)
-		: "ir" (delta));
+	asm volatile(LOCK_PREFIX "addl %1,%0"
+		     : "+m" (sem->count)
+		     : "ir" (delta));
 }
 
 /*
@@ -245,12 +249,11 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 {
 	int tmp = delta;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "xadd %0,%1"
-		: "+r" (tmp), "+m" (sem->count)
-		: : "memory");
+	asm volatile(LOCK_PREFIX "xadd %0,%1"
+		     : "+r" (tmp), "+m" (sem->count)
+		     : : "memory");
 
-	return tmp+delta;
+	return tmp + delta;
 }
 
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
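
The bias macros and the transition comments in the hunks above (for example __up_write's "tries to transition 0xffff0001 -> 0x00000000") describe how the count field encodes readers and writers, so a small userspace sketch can illustrate the arithmetic. This is not part of the patch: the macro values are copied from the header, while show(), main() and the printed labels are invented here purely for illustration.

/*
 * Userspace sketch only: models the 32-bit rwsem count arithmetic that the
 * incl/xadd/addl instructions above perform.  Macro values are taken from
 * the header; everything else is made up for the demonstration.
 */
#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

static void show(const char *what, int count)
{
	/* low 16 bits = active holders; negative count = writer held or waiters */
	printf("%-22s count=0x%08x active=%d%s\n", what, (unsigned)count,
	       (int)((unsigned)count & RWSEM_ACTIVE_MASK),
	       count < 0 ? " (writer held or waiters queued)" : "");
}

int main(void)
{
	int count = RWSEM_UNLOCKED_VALUE;	/* int stands in for the 32-bit count */

	count += RWSEM_ACTIVE_READ_BIAS;	/* __down_read: lock incl, count stays >= 0 */
	show("after down_read", count);
	count += RWSEM_ACTIVE_READ_BIAS;	/* a second reader just increments again */
	show("after 2nd down_read", count);
	count -= 2 * RWSEM_ACTIVE_READ_BIAS;	/* both readers drop the lock */
	show("after both up_read", count);

	count += RWSEM_ACTIVE_WRITE_BIAS;	/* __down_write: xadd of 0xffff0001 */
	show("after down_write", count);
	count -= RWSEM_ACTIVE_WRITE_BIAS;	/* __up_write: 0xffff0001 -> 0x00000000 */
	show("after up_write", count);
	return 0;
}

With one writer holding the lock the value is 0xffff0001, so __up_write's xaddl of -RWSEM_ACTIVE_WRITE_BIAS brings it back to 0x00000000 when nobody is waiting, which is exactly the transition named in the comment above.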