author		Ingo Molnar <mingo@elte.hu>		2006-07-03 03:24:29 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-03 18:27:01 -0400
commit		c4e05116a2c4d8187127dbf77ab790aa57a47388
tree		58f07aed47ee4d30e9588d5a820c245faa163b09
parent		8b3db9c542e18b71d4820da4dd9401ee030feacb
[PATCH] lockdep: clean up rwsems
Clean up rwsems: remove the RWSEM_DEBUG / rwsemtrace() debug code and add kernel/rwsem.c as a public, out-of-line implementation of the rwsem API.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 include/asm-i386/rwsem.h       |  17
 include/linux/rwsem-spinlock.h |  14
 include/linux/rwsem.h          |  24
 kernel/rwsem.c                 | 105
 lib/rwsem-spinlock.c           |  46
 lib/rwsem.c                    |  31
 6 files changed, 109 insertions(+), 128 deletions(-)
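For orientation, here is a minimal, hypothetical usage sketch (not part of the patch; example_sem, example_reader() and example_writer() are invented names) showing the rwsem API whose paths this patch touches:

/* Hypothetical caller, for illustration only -- not part of the patch. */
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);		/* built with __RWSEM_INITIALIZER() */

static void example_reader(void)
{
	down_read(&example_sem);		/* shared (read) acquisition */
	/* ... read the protected data ... */
	up_read(&example_sem);
}

static void example_writer(void)
{
	down_write(&example_sem);		/* exclusive (write) acquisition */
	/* ... modify the protected data ... */
	downgrade_write(&example_sem);		/* keep read access, let readers in */
	/* ... continue reading ... */
	up_read(&example_sem);
}

The patch below strips the rwsemtrace() tracing calls from these paths; the lockdep annotations visible in the new kernel/rwsem.c are presumably intended to take over the debugging role.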
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index be4ab859238e..558804e4a039 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -61,23 +61,11 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
-#if RWSEM_DEBUG
-	int			debug;
-#endif
 };
 
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT      , 0
-#else
-#define __RWSEM_DEBUG_INIT	/* */
-#endif
-
 #define __RWSEM_INITIALIZER(name) \
 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
-	__RWSEM_DEBUG_INIT }
+}
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -87,9 +75,6 @@ static inline void init_rwsem(struct rw_semaphore *sem)
 	sem->count = RWSEM_UNLOCKED_VALUE;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
 }
 
 /*
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index f30f805080ae..d68afcc36ac9 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -32,22 +32,10 @@ struct rw_semaphore {
 	__s32			activity;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
-#if RWSEM_DEBUG
-	int			debug;
-#endif
 };
 
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT      , 0
-#else
-#define __RWSEM_DEBUG_INIT	/* */
-#endif
-
 #define __RWSEM_INITIALIZER(name) \
-{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }
+{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f99fe90732ab..93581534b915 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -9,8 +9,6 @@
 
 #include <linux/linkage.h>
 
-#define RWSEM_DEBUG 0
-
 #ifdef __KERNEL__
 
 #include <linux/types.h>
@@ -26,23 +24,13 @@ struct rw_semaphore;
 #include <asm/rwsem.h> /* use an arch-specific implementation */
 #endif
 
-#ifndef rwsemtrace
-#if RWSEM_DEBUG
-extern void FASTCALL(rwsemtrace(struct rw_semaphore *sem, const char *str));
-#else
-#define rwsemtrace(SEM,FMT)
-#endif
-#endif
-
 /*
  * lock for reading
  */
 static inline void down_read(struct rw_semaphore *sem)
 {
 	might_sleep();
-	rwsemtrace(sem,"Entering down_read");
 	__down_read(sem);
-	rwsemtrace(sem,"Leaving down_read");
 }
 
 /*
@@ -51,9 +39,7 @@ static inline void down_read(struct rw_semaphore *sem)
 static inline int down_read_trylock(struct rw_semaphore *sem)
 {
 	int ret;
-	rwsemtrace(sem,"Entering down_read_trylock");
 	ret = __down_read_trylock(sem);
-	rwsemtrace(sem,"Leaving down_read_trylock");
 	return ret;
 }
 
@@ -63,9 +49,7 @@ static inline int down_read_trylock(struct rw_semaphore *sem)
 static inline void down_write(struct rw_semaphore *sem)
 {
 	might_sleep();
-	rwsemtrace(sem,"Entering down_write");
 	__down_write(sem);
-	rwsemtrace(sem,"Leaving down_write");
 }
 
 /*
@@ -74,9 +58,7 @@ static inline void down_write(struct rw_semaphore *sem)
 static inline int down_write_trylock(struct rw_semaphore *sem)
 {
 	int ret;
-	rwsemtrace(sem,"Entering down_write_trylock");
 	ret = __down_write_trylock(sem);
-	rwsemtrace(sem,"Leaving down_write_trylock");
 	return ret;
 }
 
@@ -85,9 +67,7 @@ static inline int down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void up_read(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem,"Entering up_read");
 	__up_read(sem);
-	rwsemtrace(sem,"Leaving up_read");
 }
 
 /*
@@ -95,9 +75,7 @@ static inline void up_read(struct rw_semaphore *sem)
  */
 static inline void up_write(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem,"Entering up_write");
 	__up_write(sem);
-	rwsemtrace(sem,"Leaving up_write");
 }
 
 /*
@@ -105,9 +83,7 @@ static inline void up_write(struct rw_semaphore *sem)
  */
 static inline void downgrade_write(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem,"Entering downgrade_write");
 	__downgrade_write(sem);
-	rwsemtrace(sem,"Leaving downgrade_write");
 }
 
 #endif /* __KERNEL__ */
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
new file mode 100644
index 000000000000..790a99bb25aa
--- /dev/null
+++ b/kernel/rwsem.c
@@ -0,0 +1,105 @@
+/* kernel/rwsem.c: R/W semaphores, public implementation
+ *
+ * Written by David Howells (dhowells@redhat.com).
+ * Derived from asm-i386/semaphore.h
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rwsem.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+
+/*
+ * lock for reading
+ */
+void down_read(struct rw_semaphore *sem)
+{
+	might_sleep();
+	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
+
+	__down_read(sem);
+}
+
+EXPORT_SYMBOL(down_read);
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+int down_read_trylock(struct rw_semaphore *sem)
+{
+	int ret = __down_read_trylock(sem);
+
+	if (ret == 1)
+		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
+	return ret;
+}
+
+EXPORT_SYMBOL(down_read_trylock);
+
+/*
+ * lock for writing
+ */
+void down_write(struct rw_semaphore *sem)
+{
+	might_sleep();
+	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
+
+	__down_write(sem);
+}
+
+EXPORT_SYMBOL(down_write);
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+int down_write_trylock(struct rw_semaphore *sem)
+{
+	int ret = __down_write_trylock(sem);
+
+	if (ret == 1)
+		rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
+	return ret;
+}
+
+EXPORT_SYMBOL(down_write_trylock);
+
+/*
+ * release a read lock
+ */
+void up_read(struct rw_semaphore *sem)
+{
+	rwsem_release(&sem->dep_map, 1, _RET_IP_);
+
+	__up_read(sem);
+}
+
+EXPORT_SYMBOL(up_read);
+
+/*
+ * release a write lock
+ */
+void up_write(struct rw_semaphore *sem)
+{
+	rwsem_release(&sem->dep_map, 1, _RET_IP_);
+
+	__up_write(sem);
+}
+
+EXPORT_SYMBOL(up_write);
+
+/*
+ * downgrade write lock to read lock
+ */
+void downgrade_write(struct rw_semaphore *sem)
+{
+	/*
+	 * lockdep: a downgraded write will live on as a write
+	 * dependency.
+	 */
+	__downgrade_write(sem);
+}
+
+EXPORT_SYMBOL(downgrade_write);
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 40ffde940a86..03b6097eb04e 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,16 +17,6 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
-#if RWSEM_DEBUG
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-	if (sem->debug)
-		printk("[%d] %s({%d,%d})\n",
-		       current->pid, str, sem->activity,
-		       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#endif
-
 /*
  * initialise the semaphore
  */
@@ -35,9 +25,6 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
 }
 
 /*
@@ -56,8 +43,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 	struct task_struct *tsk;
 	int woken;
 
-	rwsemtrace(sem, "Entering __rwsem_do_wake");
-
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
 	if (!wakewrite) {
@@ -104,7 +89,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 	sem->activity += woken;
 
 out:
-	rwsemtrace(sem, "Leaving __rwsem_do_wake");
 	return sem;
 }
 
@@ -138,8 +122,6 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_read");
-
 	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
@@ -171,9 +153,8 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	}
 
 	tsk->state = TASK_RUNNING;
-
 out:
-	rwsemtrace(sem, "Leaving __down_read");
+	;
 }
 
 /*
@@ -184,7 +165,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	rwsemtrace(sem, "Entering __down_read_trylock");
 
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
@@ -196,7 +176,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_read_trylock");
 	return ret;
 }
 
@@ -209,8 +188,6 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_write");
-
 	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -242,9 +219,8 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	}
 
 	tsk->state = TASK_RUNNING;
-
 out:
-	rwsemtrace(sem, "Leaving __down_write");
+	;
 }
 
 /*
@@ -255,8 +231,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	rwsemtrace(sem, "Entering __down_write_trylock");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -267,7 +241,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_write_trylock");
 	return ret;
 }
 
@@ -278,16 +251,12 @@ void fastcall __up_read(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __up_read");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __up_read");
 }
 
 /*
@@ -297,8 +266,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __up_write");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 0;
@@ -306,8 +273,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
 		sem = __rwsem_do_wake(sem, 1);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __up_write");
 }
 
 /*
@@ -318,8 +283,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __downgrade_write");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 1;
@@ -327,8 +290,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 		sem = __rwsem_do_wake(sem, 0);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __downgrade_write");
 }
 
 EXPORT_SYMBOL(init_rwsem);
@@ -339,6 +300,3 @@ EXPORT_SYMBOL(__down_write_trylock);
 EXPORT_SYMBOL(__up_read);
 EXPORT_SYMBOL(__up_write);
 EXPORT_SYMBOL(__downgrade_write);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 62fa4eba9ffe..bae597284889 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -16,17 +16,6 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
-#if RWSEM_DEBUG
-#undef rwsemtrace
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-	printk("sem=%p\n", sem);
-	printk("(sem)=%08lx\n", sem->count);
-	if (sem->debug)
-		printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
-}
-#endif
-
 /*
  * handle the lock release when processes blocked on it that can now run
  * - if we come here from up_xxxx(), then:
@@ -45,8 +34,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 	struct list_head *next;
 	signed long oldcount, woken, loop;
 
-	rwsemtrace(sem, "Entering __rwsem_do_wake");
-
 	if (downgrading)
 		goto dont_wake_writers;
 
@@ -127,7 +114,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 	next->prev = &sem->wait_list;
 
 out:
-	rwsemtrace(sem, "Leaving __rwsem_do_wake");
 	return sem;
 
 	/* undo the change to count, but check for a transition 1->0 */
@@ -186,13 +172,9 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 
-	rwsemtrace(sem, "Entering rwsem_down_read_failed");
-
 	waiter.flags = RWSEM_WAITING_FOR_READ;
 	rwsem_down_failed_common(sem, &waiter,
 				RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
-
-	rwsemtrace(sem, "Leaving rwsem_down_read_failed");
 	return sem;
 }
 
@@ -204,12 +186,9 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 
-	rwsemtrace(sem, "Entering rwsem_down_write_failed");
-
 	waiter.flags = RWSEM_WAITING_FOR_WRITE;
 	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
 
-	rwsemtrace(sem, "Leaving rwsem_down_write_failed");
 	return sem;
 }
 
@@ -221,8 +200,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering rwsem_wake");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
@@ -231,8 +208,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving rwsem_wake");
-
 	return sem;
 }
 
@@ -245,8 +220,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering rwsem_downgrade_wake");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
@@ -255,7 +228,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
 	return sem;
 }
 
@@ -263,6 +235,3 @@ EXPORT_SYMBOL(rwsem_down_read_failed);
 EXPORT_SYMBOL(rwsem_down_write_failed);
 EXPORT_SYMBOL(rwsem_wake);
 EXPORT_SYMBOL(rwsem_downgrade_wake);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif