author     Ingo Molnar <mingo@elte.hu>             2006-07-03 03:24:29 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-07-03 18:27:01 -0400
commit     c4e05116a2c4d8187127dbf77ab790aa57a47388
tree       58f07aed47ee4d30e9588d5a820c245faa163b09  /lib/rwsem-spinlock.c
parent     8b3db9c542e18b71d4820da4dd9401ee030feacb
[PATCH] lockdep: clean up rwsems
Clean up rwsems.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'lib/rwsem-spinlock.c')
-rw-r--r--  lib/rwsem-spinlock.c | 46
1 file changed, 2 insertions(+), 44 deletions(-)
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 40ffde940a86..03b6097eb04e 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,16 +17,6 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE        0x00000002
 };
 
-#if RWSEM_DEBUG
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-        if (sem->debug)
-                printk("[%d] %s({%d,%d})\n",
-                       current->pid, str, sem->activity,
-                       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#endif
-
 /*
  * initialise the semaphore
  */
@@ -35,9 +25,6 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
         sem->activity = 0;
         spin_lock_init(&sem->wait_lock);
         INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-        sem->debug = 0;
-#endif
 }
 
 /*
@@ -56,8 +43,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
         struct task_struct *tsk;
         int woken;
 
-        rwsemtrace(sem, "Entering __rwsem_do_wake");
-
         waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
         if (!wakewrite) {
@@ -104,7 +89,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
         sem->activity += woken;
 
  out:
-        rwsemtrace(sem, "Leaving __rwsem_do_wake");
         return sem;
 }
 
@@ -138,8 +122,6 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
         struct rwsem_waiter waiter;
         struct task_struct *tsk;
 
-        rwsemtrace(sem, "Entering __down_read");
-
         spin_lock_irq(&sem->wait_lock);
 
         if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
@@ -171,9 +153,8 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
         }
 
         tsk->state = TASK_RUNNING;
-
  out:
-        rwsemtrace(sem, "Leaving __down_read");
+        ;
 }
 
 /*
@@ -184,7 +165,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
         unsigned long flags;
         int ret = 0;
 
-        rwsemtrace(sem, "Entering __down_read_trylock");
 
         spin_lock_irqsave(&sem->wait_lock, flags);
 
@@ -196,7 +176,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 
         spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-        rwsemtrace(sem, "Leaving __down_read_trylock");
         return ret;
 }
 
@@ -209,8 +188,6 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
         struct rwsem_waiter waiter;
         struct task_struct *tsk;
 
-        rwsemtrace(sem, "Entering __down_write");
-
         spin_lock_irq(&sem->wait_lock);
 
         if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -242,9 +219,8 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
         }
 
         tsk->state = TASK_RUNNING;
-
  out:
-        rwsemtrace(sem, "Leaving __down_write");
+        ;
 }
 
 /*
@@ -255,8 +231,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
         unsigned long flags;
         int ret = 0;
 
-        rwsemtrace(sem, "Entering __down_write_trylock");
-
         spin_lock_irqsave(&sem->wait_lock, flags);
 
         if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -267,7 +241,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 
         spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-        rwsemtrace(sem, "Leaving __down_write_trylock");
         return ret;
 }
 
@@ -278,16 +251,12 @@ void fastcall __up_read(struct rw_semaphore *sem)
 {
         unsigned long flags;
 
-        rwsemtrace(sem, "Entering __up_read");
-
         spin_lock_irqsave(&sem->wait_lock, flags);
 
         if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                 sem = __rwsem_wake_one_writer(sem);
 
         spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-        rwsemtrace(sem, "Leaving __up_read");
 }
 
 /*
@@ -297,8 +266,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
 {
         unsigned long flags;
 
-        rwsemtrace(sem, "Entering __up_write");
-
         spin_lock_irqsave(&sem->wait_lock, flags);
 
         sem->activity = 0;
@@ -306,8 +273,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
                 sem = __rwsem_do_wake(sem, 1);
 
         spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-        rwsemtrace(sem, "Leaving __up_write");
 }
 
 /*
@@ -318,8 +283,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
         unsigned long flags;
 
-        rwsemtrace(sem, "Entering __downgrade_write");
-
         spin_lock_irqsave(&sem->wait_lock, flags);
 
         sem->activity = 1;
@@ -327,8 +290,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
                 sem = __rwsem_do_wake(sem, 0);
 
         spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-        rwsemtrace(sem, "Leaving __downgrade_write");
 }
 
 EXPORT_SYMBOL(init_rwsem);
@@ -339,6 +300,3 @@ EXPORT_SYMBOL(__down_write_trylock);
 EXPORT_SYMBOL(__up_read);
 EXPORT_SYMBOL(__up_write);
 EXPORT_SYMBOL(__downgrade_write);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
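
For context: the functions exported above are the low-level, spinlock-based primitives behind the generic rwsem API in <linux/rwsem.h> (the implementation typically selected by CONFIG_RWSEM_GENERIC_SPINLOCK), and this patch only removes the ad-hoc RWSEM_DEBUG tracing, not any caller-visible behaviour. A minimal caller-side sketch, for illustration only; the cache_sem/cache_value names are hypothetical and not part of this patch:

#include <linux/rwsem.h>

static DECLARE_RWSEM(cache_sem);   /* statically initialised rw_semaphore */
static int cache_value;            /* hypothetical data guarded by cache_sem */

static int cache_read(void)
{
        int val;

        down_read(&cache_sem);     /* backed by __down_read() above */
        val = cache_value;
        up_read(&cache_sem);       /* backed by __up_read() above */
        return val;
}

static void cache_update(int val)
{
        down_write(&cache_sem);          /* backed by __down_write() above */
        cache_value = val;
        downgrade_write(&cache_sem);     /* write lock becomes a read lock */
        /* readers may now proceed concurrently with this section */
        up_read(&cache_sem);
}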