Diffstat (limited to 'lib/rwsem-spinlock.c')
-rw-r--r--	lib/rwsem-spinlock.c	66
1 file changed, 19 insertions(+), 47 deletions(-)
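In short: this change drops the homegrown RWSEM_DEBUG/rwsemtrace() instrumentation from the spinlock-based rwsem implementation and hooks the code into lockdep instead. init_rwsem() becomes __init_rwsem(), which takes a lock name and a struct lock_class_key, and __down_write() is reimplemented on top of a new __down_write_nested() that accepts a lockdep subclass.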
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 40ffde940a86..db4fed74b940 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,27 +17,22 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
-#if RWSEM_DEBUG
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-	if (sem->debug)
-		printk("[%d] %s({%d,%d})\n",
-		       current->pid, str, sem->activity,
-		       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#endif
-
 /*
  * initialise the semaphore
  */
-void fastcall init_rwsem(struct rw_semaphore *sem)
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
 {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key);
+#endif
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
 }
 
 /*
@@ -56,8 +51,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 	struct task_struct *tsk;
 	int woken;
 
-	rwsemtrace(sem, "Entering __rwsem_do_wake");
-
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
 	if (!wakewrite) {
@@ -104,7 +97,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 	sem->activity += woken;
 
  out:
-	rwsemtrace(sem, "Leaving __rwsem_do_wake");
 	return sem;
 }
 
@@ -138,8 +130,6 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_read");
-
 	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
@@ -171,9 +161,8 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	}
 
 	tsk->state = TASK_RUNNING;
-
  out:
-	rwsemtrace(sem, "Leaving __down_read");
+	;
 }
 
 /*
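A small C detail explains the odd-looking replacement above: a label must be followed by a statement, so once the rwsemtrace() call after `out:` is deleted, an empty statement `;` has to remain for the function to compile.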
@@ -184,7 +173,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	rwsemtrace(sem, "Entering __down_read_trylock");
 
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
@@ -196,7 +184,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_read_trylock");
 	return ret;
 }
 
@@ -204,13 +191,11 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
  * get a write lock on the semaphore
  * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __sched __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_write");
-
 	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -242,9 +227,13 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	}
 
 	tsk->state = TASK_RUNNING;
-
  out:
-	rwsemtrace(sem, "Leaving __down_write");
+	;
+}
+
+void fastcall __sched __down_write(struct rw_semaphore *sem)
+{
+	__down_write_nested(sem, 0);
 }
 
 /*
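The reason for the _nested variant: lockdep identifies locks by the class created at their initialization site, so taking two write locks from the same class at once would normally be reported as recursive locking. The subclass argument lets callers annotate legitimate same-class nesting. An illustrative sketch, assuming the usual down_write()/down_write_nested() wrappers and SINGLE_DEPTH_NESTING from the lockdep header (the parent/child naming is hypothetical):

	/* parent->sem and child->sem were initialized at the same site,
	 * so they share one lock class. */
	down_write(&parent->sem);				/* subclass 0 */
	down_write_nested(&child->sem, SINGLE_DEPTH_NESTING);	/* subclass 1 */

	/* ... operate on both objects ... */

	up_write(&child->sem);
	up_write(&parent->sem);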
@@ -255,8 +244,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	rwsemtrace(sem, "Entering __down_write_trylock");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -267,7 +254,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_write_trylock");
 	return ret;
 }
 
@@ -278,16 +264,12 @@ void fastcall __up_read(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __up_read");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __up_read");
 }
 
 /*
@@ -297,8 +279,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __up_write");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 0;
@@ -306,8 +286,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
 		sem = __rwsem_do_wake(sem, 1);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __up_write");
 }
 
 /*
@@ -318,8 +296,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __downgrade_write");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 1;
@@ -327,18 +303,14 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 		sem = __rwsem_do_wake(sem, 0);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __downgrade_write");
 }
 
-EXPORT_SYMBOL(init_rwsem);
+EXPORT_SYMBOL(__init_rwsem);
 EXPORT_SYMBOL(__down_read);
 EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write_nested);
 EXPORT_SYMBOL(__down_write);
 EXPORT_SYMBOL(__down_write_trylock);
 EXPORT_SYMBOL(__up_read);
 EXPORT_SYMBOL(__up_write);
 EXPORT_SYMBOL(__downgrade_write);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
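The payoff: with lockdep enabled, this rwsem flavour is now validated like any other lock, instead of relying on the ad-hoc trace output removed above. For example, an inconsistent acquisition order across two semaphores (names hypothetical) would be reported at runtime rather than surfacing as a rare deadlock:

	down_write(&sem_a);
	down_write(&sem_b);	/* lockdep records the order sem_a -> sem_b */
	up_write(&sem_b);
	up_write(&sem_a);

	down_write(&sem_b);
	down_write(&sem_a);	/* reversed order: lockdep reports a possible
				 * ABBA deadlock here */
	up_write(&sem_a);
	up_write(&sem_b);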