about summary refs log tree commit diff stats
path: root/lib
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2006-07-03 03:24:29 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-07-03 18:27:01 -0400
commitc4e05116a2c4d8187127dbf77ab790aa57a47388 (patch)
tree58f07aed47ee4d30e9588d5a820c245faa163b09 /lib
parent8b3db9c542e18b71d4820da4dd9401ee030feacb (diff)
[PATCH] lockdep: clean up rwsems
Clean up rwsems.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'lib')
-rw-r--r--lib/rwsem-spinlock.c46
-rw-r--r--lib/rwsem.c31
2 files changed, 2 insertions(+), 75 deletions(-)
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 40ffde940a86..03b6097eb04e 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,16 +17,6 @@ struct rwsem_waiter {
17#define RWSEM_WAITING_FOR_WRITE 0x00000002 17#define RWSEM_WAITING_FOR_WRITE 0x00000002
18}; 18};
19 19
20#if RWSEM_DEBUG
21void rwsemtrace(struct rw_semaphore *sem, const char *str)
22{
23 if (sem->debug)
24 printk("[%d] %s({%d,%d})\n",
25 current->pid, str, sem->activity,
26 list_empty(&sem->wait_list) ? 0 : 1);
27}
28#endif
29
30/* 20/*
31 * initialise the semaphore 21 * initialise the semaphore
32 */ 22 */
@@ -35,9 +25,6 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
35 sem->activity = 0; 25 sem->activity = 0;
36 spin_lock_init(&sem->wait_lock); 26 spin_lock_init(&sem->wait_lock);
37 INIT_LIST_HEAD(&sem->wait_list); 27 INIT_LIST_HEAD(&sem->wait_list);
38#if RWSEM_DEBUG
39 sem->debug = 0;
40#endif
41} 28}
42 29
43/* 30/*
@@ -56,8 +43,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
56 struct task_struct *tsk; 43 struct task_struct *tsk;
57 int woken; 44 int woken;
58 45
59 rwsemtrace(sem, "Entering __rwsem_do_wake");
60
61 waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); 46 waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
62 47
63 if (!wakewrite) { 48 if (!wakewrite) {
@@ -104,7 +89,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
104 sem->activity += woken; 89 sem->activity += woken;
105 90
106 out: 91 out:
107 rwsemtrace(sem, "Leaving __rwsem_do_wake");
108 return sem; 92 return sem;
109} 93}
110 94
@@ -138,8 +122,6 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
138 struct rwsem_waiter waiter; 122 struct rwsem_waiter waiter;
139 struct task_struct *tsk; 123 struct task_struct *tsk;
140 124
141 rwsemtrace(sem, "Entering __down_read");
142
143 spin_lock_irq(&sem->wait_lock); 125 spin_lock_irq(&sem->wait_lock);
144 126
145 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 127 if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
@@ -171,9 +153,8 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
171 } 153 }
172 154
173 tsk->state = TASK_RUNNING; 155 tsk->state = TASK_RUNNING;
174
175 out: 156 out:
176 rwsemtrace(sem, "Leaving __down_read"); 157 ;
177} 158}
178 159
179/* 160/*
@@ -184,7 +165,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
184 unsigned long flags; 165 unsigned long flags;
185 int ret = 0; 166 int ret = 0;
186 167
187 rwsemtrace(sem, "Entering __down_read_trylock");
188 168
189 spin_lock_irqsave(&sem->wait_lock, flags); 169 spin_lock_irqsave(&sem->wait_lock, flags);
190 170
@@ -196,7 +176,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
196 176
197 spin_unlock_irqrestore(&sem->wait_lock, flags); 177 spin_unlock_irqrestore(&sem->wait_lock, flags);
198 178
199 rwsemtrace(sem, "Leaving __down_read_trylock");
200 return ret; 179 return ret;
201} 180}
202 181
@@ -209,8 +188,6 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
209 struct rwsem_waiter waiter; 188 struct rwsem_waiter waiter;
210 struct task_struct *tsk; 189 struct task_struct *tsk;
211 190
212 rwsemtrace(sem, "Entering __down_write");
213
214 spin_lock_irq(&sem->wait_lock); 191 spin_lock_irq(&sem->wait_lock);
215 192
216 if (sem->activity == 0 && list_empty(&sem->wait_list)) { 193 if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -242,9 +219,8 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
242 } 219 }
243 220
244 tsk->state = TASK_RUNNING; 221 tsk->state = TASK_RUNNING;
245
246 out: 222 out:
247 rwsemtrace(sem, "Leaving __down_write"); 223 ;
248} 224}
249 225
250/* 226/*
@@ -255,8 +231,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
255 unsigned long flags; 231 unsigned long flags;
256 int ret = 0; 232 int ret = 0;
257 233
258 rwsemtrace(sem, "Entering __down_write_trylock");
259
260 spin_lock_irqsave(&sem->wait_lock, flags); 234 spin_lock_irqsave(&sem->wait_lock, flags);
261 235
262 if (sem->activity == 0 && list_empty(&sem->wait_list)) { 236 if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -267,7 +241,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
267 241
268 spin_unlock_irqrestore(&sem->wait_lock, flags); 242 spin_unlock_irqrestore(&sem->wait_lock, flags);
269 243
270 rwsemtrace(sem, "Leaving __down_write_trylock");
271 return ret; 244 return ret;
272} 245}
273 246
@@ -278,16 +251,12 @@ void fastcall __up_read(struct rw_semaphore *sem)
278{ 251{
279 unsigned long flags; 252 unsigned long flags;
280 253
281 rwsemtrace(sem, "Entering __up_read");
282
283 spin_lock_irqsave(&sem->wait_lock, flags); 254 spin_lock_irqsave(&sem->wait_lock, flags);
284 255
285 if (--sem->activity == 0 && !list_empty(&sem->wait_list)) 256 if (--sem->activity == 0 && !list_empty(&sem->wait_list))
286 sem = __rwsem_wake_one_writer(sem); 257 sem = __rwsem_wake_one_writer(sem);
287 258
288 spin_unlock_irqrestore(&sem->wait_lock, flags); 259 spin_unlock_irqrestore(&sem->wait_lock, flags);
289
290 rwsemtrace(sem, "Leaving __up_read");
291} 260}
292 261
293/* 262/*
@@ -297,8 +266,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
297{ 266{
298 unsigned long flags; 267 unsigned long flags;
299 268
300 rwsemtrace(sem, "Entering __up_write");
301
302 spin_lock_irqsave(&sem->wait_lock, flags); 269 spin_lock_irqsave(&sem->wait_lock, flags);
303 270
304 sem->activity = 0; 271 sem->activity = 0;
@@ -306,8 +273,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
306 sem = __rwsem_do_wake(sem, 1); 273 sem = __rwsem_do_wake(sem, 1);
307 274
308 spin_unlock_irqrestore(&sem->wait_lock, flags); 275 spin_unlock_irqrestore(&sem->wait_lock, flags);
309
310 rwsemtrace(sem, "Leaving __up_write");
311} 276}
312 277
313/* 278/*
@@ -318,8 +283,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
318{ 283{
319 unsigned long flags; 284 unsigned long flags;
320 285
321 rwsemtrace(sem, "Entering __downgrade_write");
322
323 spin_lock_irqsave(&sem->wait_lock, flags); 286 spin_lock_irqsave(&sem->wait_lock, flags);
324 287
325 sem->activity = 1; 288 sem->activity = 1;
@@ -327,8 +290,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
327 sem = __rwsem_do_wake(sem, 0); 290 sem = __rwsem_do_wake(sem, 0);
328 291
329 spin_unlock_irqrestore(&sem->wait_lock, flags); 292 spin_unlock_irqrestore(&sem->wait_lock, flags);
330
331 rwsemtrace(sem, "Leaving __downgrade_write");
332} 293}
333 294
334EXPORT_SYMBOL(init_rwsem); 295EXPORT_SYMBOL(init_rwsem);
@@ -339,6 +300,3 @@ EXPORT_SYMBOL(__down_write_trylock);
339EXPORT_SYMBOL(__up_read); 300EXPORT_SYMBOL(__up_read);
340EXPORT_SYMBOL(__up_write); 301EXPORT_SYMBOL(__up_write);
341EXPORT_SYMBOL(__downgrade_write); 302EXPORT_SYMBOL(__downgrade_write);
342#if RWSEM_DEBUG
343EXPORT_SYMBOL(rwsemtrace);
344#endif
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 62fa4eba9ffe..bae597284889 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -16,17 +16,6 @@ struct rwsem_waiter {
16#define RWSEM_WAITING_FOR_WRITE 0x00000002 16#define RWSEM_WAITING_FOR_WRITE 0x00000002
17}; 17};
18 18
19#if RWSEM_DEBUG
20#undef rwsemtrace
21void rwsemtrace(struct rw_semaphore *sem, const char *str)
22{
23 printk("sem=%p\n", sem);
24 printk("(sem)=%08lx\n", sem->count);
25 if (sem->debug)
26 printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
27}
28#endif
29
30/* 19/*
31 * handle the lock release when processes blocked on it that can now run 20 * handle the lock release when processes blocked on it that can now run
32 * - if we come here from up_xxxx(), then: 21 * - if we come here from up_xxxx(), then:
@@ -45,8 +34,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
45 struct list_head *next; 34 struct list_head *next;
46 signed long oldcount, woken, loop; 35 signed long oldcount, woken, loop;
47 36
48 rwsemtrace(sem, "Entering __rwsem_do_wake");
49
50 if (downgrading) 37 if (downgrading)
51 goto dont_wake_writers; 38 goto dont_wake_writers;
52 39
@@ -127,7 +114,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
127 next->prev = &sem->wait_list; 114 next->prev = &sem->wait_list;
128 115
129 out: 116 out:
130 rwsemtrace(sem, "Leaving __rwsem_do_wake");
131 return sem; 117 return sem;
132 118
133 /* undo the change to count, but check for a transition 1->0 */ 119 /* undo the change to count, but check for a transition 1->0 */
@@ -186,13 +172,9 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
186{ 172{
187 struct rwsem_waiter waiter; 173 struct rwsem_waiter waiter;
188 174
189 rwsemtrace(sem, "Entering rwsem_down_read_failed");
190
191 waiter.flags = RWSEM_WAITING_FOR_READ; 175 waiter.flags = RWSEM_WAITING_FOR_READ;
192 rwsem_down_failed_common(sem, &waiter, 176 rwsem_down_failed_common(sem, &waiter,
193 RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS); 177 RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
194
195 rwsemtrace(sem, "Leaving rwsem_down_read_failed");
196 return sem; 178 return sem;
197} 179}
198 180
@@ -204,12 +186,9 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
204{ 186{
205 struct rwsem_waiter waiter; 187 struct rwsem_waiter waiter;
206 188
207 rwsemtrace(sem, "Entering rwsem_down_write_failed");
208
209 waiter.flags = RWSEM_WAITING_FOR_WRITE; 189 waiter.flags = RWSEM_WAITING_FOR_WRITE;
210 rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS); 190 rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
211 191
212 rwsemtrace(sem, "Leaving rwsem_down_write_failed");
213 return sem; 192 return sem;
214} 193}
215 194
@@ -221,8 +200,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
221{ 200{
222 unsigned long flags; 201 unsigned long flags;
223 202
224 rwsemtrace(sem, "Entering rwsem_wake");
225
226 spin_lock_irqsave(&sem->wait_lock, flags); 203 spin_lock_irqsave(&sem->wait_lock, flags);
227 204
228 /* do nothing if list empty */ 205 /* do nothing if list empty */
@@ -231,8 +208,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
231 208
232 spin_unlock_irqrestore(&sem->wait_lock, flags); 209 spin_unlock_irqrestore(&sem->wait_lock, flags);
233 210
234 rwsemtrace(sem, "Leaving rwsem_wake");
235
236 return sem; 211 return sem;
237} 212}
238 213
@@ -245,8 +220,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
245{ 220{
246 unsigned long flags; 221 unsigned long flags;
247 222
248 rwsemtrace(sem, "Entering rwsem_downgrade_wake");
249
250 spin_lock_irqsave(&sem->wait_lock, flags); 223 spin_lock_irqsave(&sem->wait_lock, flags);
251 224
252 /* do nothing if list empty */ 225 /* do nothing if list empty */
@@ -255,7 +228,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
255 228
256 spin_unlock_irqrestore(&sem->wait_lock, flags); 229 spin_unlock_irqrestore(&sem->wait_lock, flags);
257 230
258 rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
259 return sem; 231 return sem;
260} 232}
261 233
@@ -263,6 +235,3 @@ EXPORT_SYMBOL(rwsem_down_read_failed);
263EXPORT_SYMBOL(rwsem_down_write_failed); 235EXPORT_SYMBOL(rwsem_down_write_failed);
264EXPORT_SYMBOL(rwsem_wake); 236EXPORT_SYMBOL(rwsem_wake);
265EXPORT_SYMBOL(rwsem_downgrade_wake); 237EXPORT_SYMBOL(rwsem_downgrade_wake);
266#if RWSEM_DEBUG
267EXPORT_SYMBOL(rwsemtrace);
268#endif