author    Trond Myklebust <Trond.Myklebust@netapp.com>   2006-07-05 13:13:03 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com>   2006-07-05 13:13:03 -0400
commit    5e66dd6d66ffe758b39b6dcadf2330753ee1159b (patch)
tree      a72cdcff4448e4af9425cc213ddf56ab23e697fe /lib
parent    026477c1141b67e98e3bd8bdedb7d4b88a3ecd09 (diff)
parent    ca78f6baca863afe2e6a244a0fe94b3a70211d46 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                       127
-rw-r--r--  lib/Makefile                              3
-rw-r--r--  lib/debug_locks.c                        45
-rw-r--r--  lib/kernel_lock.c                         7
-rw-r--r--  lib/locking-selftest-hardirq.h            9
-rw-r--r--  lib/locking-selftest-mutex.h             11
-rw-r--r--  lib/locking-selftest-rlock-hardirq.h      2
-rw-r--r--  lib/locking-selftest-rlock-softirq.h      2
-rw-r--r--  lib/locking-selftest-rlock.h             14
-rw-r--r--  lib/locking-selftest-rsem.h              14
-rw-r--r--  lib/locking-selftest-softirq.h            9
-rw-r--r--  lib/locking-selftest-spin-hardirq.h       2
-rw-r--r--  lib/locking-selftest-spin-softirq.h       2
-rw-r--r--  lib/locking-selftest-spin.h              11
-rw-r--r--  lib/locking-selftest-wlock-hardirq.h      2
-rw-r--r--  lib/locking-selftest-wlock-softirq.h      2
-rw-r--r--  lib/locking-selftest-wlock.h             14
-rw-r--r--  lib/locking-selftest-wsem.h              14
-rw-r--r--  lib/locking-selftest.c                 1216
-rw-r--r--  lib/rwsem-spinlock.c                     66
-rw-r--r--  lib/rwsem.c                              51
-rw-r--r--  lib/spinlock_debug.c                     98
22 files changed, 1592 insertions, 129 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e4fcbd12cf6e..e5889b1a33ff 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -48,7 +48,7 @@ config DEBUG_KERNEL
48 | config LOG_BUF_SHIFT | 48 | config LOG_BUF_SHIFT |
49 | int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL | 49 | int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL |
50 | range 12 21 | 50 | range 12 21 |
51 | default 17 if S390 | 51 | default 17 if S390 || LOCKDEP |
52 | default 16 if X86_NUMAQ || IA64 | 52 | default 16 if X86_NUMAQ || IA64 |
53 | default 15 if SMP | 53 | default 15 if SMP |
54 | default 14 | 54 | default 14 |
@@ -107,7 +107,7 @@ config DEBUG_SLAB_LEAK
107 | 107 | ||
108 | config DEBUG_PREEMPT | 108 | config DEBUG_PREEMPT |
109 | bool "Debug preemptible kernel" | 109 | bool "Debug preemptible kernel" |
110 | depends on DEBUG_KERNEL && PREEMPT | 110 | depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT |
111 | default y | 111 | default y |
112 | help | 112 | help |
113 | If you say Y here then the kernel will use a debug variant of the | 113 | If you say Y here then the kernel will use a debug variant of the |
@@ -115,14 +115,6 @@ config DEBUG_PREEMPT
115 | if kernel code uses it in a preemption-unsafe way. Also, the kernel | 115 | if kernel code uses it in a preemption-unsafe way. Also, the kernel |
116 | will detect preemption count underflows. | 116 | will detect preemption count underflows. |
117 | 117 | ||
118 | config DEBUG_MUTEXES | ||
119 | bool "Mutex debugging, deadlock detection" | ||
120 | default n | ||
121 | depends on DEBUG_KERNEL | ||
122 | help | ||
123 | This allows mutex semantics violations and mutex related deadlocks | ||
124 | (lockups) to be detected and reported automatically. | ||
125 | |||
126 | config DEBUG_RT_MUTEXES | 118 | config DEBUG_RT_MUTEXES |
127 | bool "RT Mutex debugging, deadlock detection" | 119 | bool "RT Mutex debugging, deadlock detection" |
128 | depends on DEBUG_KERNEL && RT_MUTEXES | 120 | depends on DEBUG_KERNEL && RT_MUTEXES |
@@ -142,7 +134,7 @@ config RT_MUTEX_TESTER
142 | This option enables a rt-mutex tester. | 134 | This option enables a rt-mutex tester. |
143 | 135 | ||
144 | config DEBUG_SPINLOCK | 136 | config DEBUG_SPINLOCK |
145 | bool "Spinlock debugging" | 137 | bool "Spinlock and rw-lock debugging: basic checks" |
146 | depends on DEBUG_KERNEL | 138 | depends on DEBUG_KERNEL |
147 | help | 139 | help |
148 | Say Y here and build SMP to catch missing spinlock initialization | 140 | Say Y here and build SMP to catch missing spinlock initialization |
@@ -150,13 +142,122 @@ config DEBUG_SPINLOCK
150 | best used in conjunction with the NMI watchdog so that spinlock | 142 | best used in conjunction with the NMI watchdog so that spinlock |
151 | deadlocks are also debuggable. | 143 | deadlocks are also debuggable. |
152 | 144 | ||
145 | config DEBUG_MUTEXES | ||
146 | bool "Mutex debugging: basic checks" | ||
147 | depends on DEBUG_KERNEL | ||
148 | help | ||
149 | This feature allows mutex semantics violations to be detected and | ||
150 | reported. | ||
151 | |||
152 | config DEBUG_RWSEMS | ||
153 | bool "RW-sem debugging: basic checks" | ||
154 | depends on DEBUG_KERNEL | ||
155 | help | ||
156 | This feature allows read-write semaphore semantics violations to | ||
157 | be detected and reported. | ||
158 | |||
159 | config DEBUG_LOCK_ALLOC | ||
160 | bool "Lock debugging: detect incorrect freeing of live locks" | ||
161 | depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | ||
162 | select DEBUG_SPINLOCK | ||
163 | select DEBUG_MUTEXES | ||
164 | select DEBUG_RWSEMS | ||
165 | select LOCKDEP | ||
166 | help | ||
167 | This feature will check whether any held lock (spinlock, rwlock, | ||
168 | mutex or rwsem) is incorrectly freed by the kernel, via any of the | ||
169 | memory-freeing routines (kfree(), kmem_cache_free(), free_pages(), | ||
170 | vfree(), etc.), whether a live lock is incorrectly reinitialized via | ||
171 | spin_lock_init()/mutex_init()/etc., or whether there is any lock | ||
172 | held during task exit. | ||
173 | |||
174 | config PROVE_LOCKING | ||
175 | bool "Lock debugging: prove locking correctness" | ||
176 | depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | ||
177 | select LOCKDEP | ||
178 | select DEBUG_SPINLOCK | ||
179 | select DEBUG_MUTEXES | ||
180 | select DEBUG_RWSEMS | ||
181 | select DEBUG_LOCK_ALLOC | ||
182 | default n | ||
183 | help | ||
184 | This feature enables the kernel to prove that all locking | ||
185 | that occurs in the kernel runtime is mathematically | ||
186 | correct: that under no circumstance could an arbitrary (and | ||
187 | not yet triggered) combination of observed locking | ||
188 | sequences (on an arbitrary number of CPUs, running an | ||
189 | arbitrary number of tasks and interrupt contexts) cause a | ||
190 | deadlock. | ||
191 | |||
192 | In short, this feature enables the kernel to report locking | ||
193 | related deadlocks before they actually occur. | ||
194 | |||
195 | The proof does not depend on how hard and complex a | ||
196 | deadlock scenario would be to trigger: how many | ||
197 | participant CPUs, tasks and irq-contexts would be needed | ||
198 | for it to trigger. The proof also does not depend on | ||
199 | timing: if a race and a resulting deadlock is possible | ||
200 | theoretically (no matter how unlikely the race scenario | ||
201 | is), it will be proven so and will immediately be | ||
202 | reported by the kernel (once the event is observed that | ||
203 | makes the deadlock theoretically possible). | ||
204 | |||
205 | If a deadlock is impossible (i.e. the locking rules, as | ||
206 | observed by the kernel, are mathematically correct), the | ||
207 | kernel reports nothing. | ||
208 | |||
209 | NOTE: this feature can also be enabled for rwlocks, mutexes | ||
210 | and rwsems - in which case all dependencies between these | ||
211 | different locking variants are observed and mapped too, and | ||
212 | the proof of observed correctness is also maintained for an | ||
213 | arbitrary combination of these separate locking variants. | ||
214 | |||
215 | For more details, see Documentation/lockdep-design.txt. | ||
216 | |||
217 | config LOCKDEP | ||
218 | bool | ||
219 | depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | ||
220 | select STACKTRACE | ||
221 | select FRAME_POINTER | ||
222 | select KALLSYMS | ||
223 | select KALLSYMS_ALL | ||
224 | |||
225 | config DEBUG_LOCKDEP | ||
226 | bool "Lock dependency engine debugging" | ||
227 | depends on LOCKDEP | ||
228 | help | ||
229 | If you say Y here, the lock dependency engine will do | ||
230 | additional runtime checks to debug itself, at the price | ||
231 | of more runtime overhead. | ||
232 | |||
233 | config TRACE_IRQFLAGS | ||
234 | bool | ||
235 | default y | ||
236 | depends on TRACE_IRQFLAGS_SUPPORT | ||
237 | depends on PROVE_LOCKING | ||
238 | |||
153 | config DEBUG_SPINLOCK_SLEEP | 239 | config DEBUG_SPINLOCK_SLEEP |
154 | bool "Sleep-inside-spinlock checking" | 240 | bool "Spinlock debugging: sleep-inside-spinlock checking" |
155 | depends on DEBUG_KERNEL | 241 | depends on DEBUG_KERNEL |
156 | help | 242 | help |
157 | If you say Y here, various routines which may sleep will become very | 243 | If you say Y here, various routines which may sleep will become very |
158 | noisy if they are called with a spinlock held. | 244 | noisy if they are called with a spinlock held. |
159 | 245 | ||
246 | config DEBUG_LOCKING_API_SELFTESTS | ||
247 | bool "Locking API boot-time self-tests" | ||
248 | depends on DEBUG_KERNEL | ||
249 | help | ||
250 | Say Y here if you want the kernel to run a short self-test during | ||
251 | bootup. The self-test checks whether common types of locking bugs | ||
252 | are detected by debugging mechanisms or not. (if you disable | ||
253 | lock debugging then those bugs wont be detected of course.) | ||
254 | The following locking APIs are covered: spinlocks, rwlocks, | ||
255 | mutexes and rwsems. | ||
256 | |||
257 | config STACKTRACE | ||
258 | bool | ||
259 | depends on STACKTRACE_SUPPORT | ||
260 | |||
160 | config DEBUG_KOBJECT | 261 | config DEBUG_KOBJECT |
161 | bool "kobject debugging" | 262 | bool "kobject debugging" |
162 | depends on DEBUG_KERNEL | 263 | depends on DEBUG_KERNEL |
@@ -212,7 +313,7 @@ config DEBUG_VM
212 | 313 | ||
213 | config FRAME_POINTER | 314 | config FRAME_POINTER |
214 | bool "Compile the kernel with frame pointers" | 315 | bool "Compile the kernel with frame pointers" |
215 | depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML) | 316 | depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390) |
216 | default y if DEBUG_INFO && UML | 317 | default y if DEBUG_INFO && UML |
217 | help | 318 | help |
218 | If you say Y here the resulting kernel image will be slightly larger | 319 | If you say Y here the resulting kernel image will be slightly larger |
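To make the new PROVE_LOCKING option above more concrete, here is a minimal, hypothetical example of the kind of bug it reports: two code paths that take the same pair of spinlocks in opposite order. Lockdep flags the inconsistent ordering as soon as both orderings have been observed once, before any actual deadlock has to happen. The lock and function names below are illustrative and do not appear in the patch.

	static DEFINE_SPINLOCK(lock_a);		/* illustrative locks, not from the patch */
	static DEFINE_SPINLOCK(lock_b);

	static void path_one(void)
	{
		spin_lock(&lock_a);
		spin_lock(&lock_b);		/* records the ordering a -> b */
		spin_unlock(&lock_b);
		spin_unlock(&lock_a);
	}

	static void path_two(void)
	{
		spin_lock(&lock_b);
		spin_lock(&lock_a);		/* ordering b -> a: lockdep reports a
						 * possible circular locking dependency */
		spin_unlock(&lock_a);
		spin_unlock(&lock_b);
	}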
diff --git a/lib/Makefile b/lib/Makefile
index 10c13c9d7824..be9719ae82d0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,13 +11,14 @@ lib-$(CONFIG_SMP) += cpumask.o
11 | 11 | ||
12 | lib-y += kobject.o kref.o kobject_uevent.o klist.o | 12 | lib-y += kobject.o kref.o kobject_uevent.o klist.o |
13 | 13 | ||
14 | obj-y += sort.o parser.o halfmd4.o iomap_copy.o | 14 | obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o |
15 | 15 | ||
16 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 16 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
17 | CFLAGS_kobject.o += -DDEBUG | 17 | CFLAGS_kobject.o += -DDEBUG |
18 | CFLAGS_kobject_uevent.o += -DDEBUG | 18 | CFLAGS_kobject_uevent.o += -DDEBUG |
19 | endif | 19 | endif |
20 | 20 | ||
21 | obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o | ||
21 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o | 22 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o |
22 | lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o | 23 | lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o |
23 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o | 24 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o |
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
new file mode 100644
index 000000000000..0ef01d14727c
--- /dev/null
+++ b/lib/debug_locks.c
@@ -0,0 +1,45 @@
1 | /* | ||
2 | * lib/debug_locks.c | ||
3 | * | ||
4 | * Generic place for common debugging facilities for various locks: | ||
5 | * spinlocks, rwlocks, mutexes and rwsems. | ||
6 | * | ||
7 | * Started by Ingo Molnar: | ||
8 | * | ||
9 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
10 | */ | ||
11 | #include <linux/rwsem.h> | ||
12 | #include <linux/mutex.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/spinlock.h> | ||
15 | #include <linux/debug_locks.h> | ||
16 | |||
17 | /* | ||
18 | * We want to turn all lock-debugging facilities on/off at once, | ||
19 | * via a global flag. The reason is that once a single bug has been | ||
20 | * detected and reported, there might be cascade of followup bugs | ||
21 | * that would just muddy the log. So we report the first one and | ||
22 | * shut up after that. | ||
23 | */ | ||
24 | int debug_locks = 1; | ||
25 | |||
26 | /* | ||
27 | * The locking-testsuite uses <debug_locks_silent> to get a | ||
28 | * 'silent failure': nothing is printed to the console when | ||
29 | * a locking bug is detected. | ||
30 | */ | ||
31 | int debug_locks_silent; | ||
32 | |||
33 | /* | ||
34 | * Generic 'turn off all lock debugging' function: | ||
35 | */ | ||
36 | int debug_locks_off(void) | ||
37 | { | ||
38 | if (xchg(&debug_locks, 0)) { | ||
39 | if (!debug_locks_silent) { | ||
40 | console_verbose(); | ||
41 | return 1; | ||
42 | } | ||
43 | } | ||
44 | return 0; | ||
45 | } | ||
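A sketch of the intended calling convention for debug_locks_off(): the first caller that actually clears the flag gets a non-zero return (unless the testsuite has asked for silence) and prints its report; every later caller stays quiet. The helper below is illustrative only and is not part of the patch.

	static void report_lock_bug(const char *msg)
	{
		if (!debug_locks_off())		/* a bug was already reported */
			return;
		printk(KERN_EMERG "lock debugging: %s\n", msg);
		dump_stack();
	}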
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index e713e86811ae..e0fdfddb406e 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -177,7 +177,12 @@ static inline void __lock_kernel(void)
177 | 177 | ||
178 | static inline void __unlock_kernel(void) | 178 | static inline void __unlock_kernel(void) |
179 | { | 179 | { |
180 | spin_unlock(&kernel_flag); | 180 | /* |
181 | * the BKL is not covered by lockdep, so we open-code the | ||
182 | * unlocking sequence (and thus avoid the dep-chain ops): | ||
183 | */ | ||
184 | _raw_spin_unlock(&kernel_flag); | ||
185 | preempt_enable(); | ||
181 | } | 186 | } |
182 | 187 | ||
183 | /* | 188 | /* |
diff --git a/lib/locking-selftest-hardirq.h b/lib/locking-selftest-hardirq.h
new file mode 100644
index 000000000000..10d4a150b259
--- /dev/null
+++ b/lib/locking-selftest-hardirq.h
@@ -0,0 +1,9 @@
1 | #undef IRQ_DISABLE | ||
2 | #undef IRQ_ENABLE | ||
3 | #undef IRQ_ENTER | ||
4 | #undef IRQ_EXIT | ||
5 | |||
6 | #define IRQ_ENABLE HARDIRQ_ENABLE | ||
7 | #define IRQ_DISABLE HARDIRQ_DISABLE | ||
8 | #define IRQ_ENTER HARDIRQ_ENTER | ||
9 | #define IRQ_EXIT HARDIRQ_EXIT | ||
diff --git a/lib/locking-selftest-mutex.h b/lib/locking-selftest-mutex.h
new file mode 100644
index 000000000000..68601b6f584b
--- /dev/null
+++ b/lib/locking-selftest-mutex.h
@@ -0,0 +1,11 @@
1 | #undef LOCK | ||
2 | #define LOCK ML | ||
3 | |||
4 | #undef UNLOCK | ||
5 | #define UNLOCK MU | ||
6 | |||
7 | #undef RLOCK | ||
8 | #undef WLOCK | ||
9 | |||
10 | #undef INIT | ||
11 | #define INIT MI | ||
diff --git a/lib/locking-selftest-rlock-hardirq.h b/lib/locking-selftest-rlock-hardirq.h
new file mode 100644
index 000000000000..9f517ebcb786
--- /dev/null
+++ b/lib/locking-selftest-rlock-hardirq.h
@@ -0,0 +1,2 @@
1 | #include "locking-selftest-rlock.h" | ||
2 | #include "locking-selftest-hardirq.h" | ||
diff --git a/lib/locking-selftest-rlock-softirq.h b/lib/locking-selftest-rlock-softirq.h
new file mode 100644
index 000000000000..981455db7ff0
--- /dev/null
+++ b/lib/locking-selftest-rlock-softirq.h
@@ -0,0 +1,2 @@
1 | #include "locking-selftest-rlock.h" | ||
2 | #include "locking-selftest-softirq.h" | ||
diff --git a/lib/locking-selftest-rlock.h b/lib/locking-selftest-rlock.h
new file mode 100644
index 000000000000..6789044f4d0e
--- /dev/null
+++ b/lib/locking-selftest-rlock.h
@@ -0,0 +1,14 @@
1 | #undef LOCK | ||
2 | #define LOCK RL | ||
3 | |||
4 | #undef UNLOCK | ||
5 | #define UNLOCK RU | ||
6 | |||
7 | #undef RLOCK | ||
8 | #define RLOCK RL | ||
9 | |||
10 | #undef WLOCK | ||
11 | #define WLOCK WL | ||
12 | |||
13 | #undef INIT | ||
14 | #define INIT RWI | ||
diff --git a/lib/locking-selftest-rsem.h b/lib/locking-selftest-rsem.h
new file mode 100644
index 000000000000..62da886680c7
--- /dev/null
+++ b/lib/locking-selftest-rsem.h
@@ -0,0 +1,14 @@
1 | #undef LOCK | ||
2 | #define LOCK RSL | ||
3 | |||
4 | #undef UNLOCK | ||
5 | #define UNLOCK RSU | ||
6 | |||
7 | #undef RLOCK | ||
8 | #define RLOCK RSL | ||
9 | |||
10 | #undef WLOCK | ||
11 | #define WLOCK WSL | ||
12 | |||
13 | #undef INIT | ||
14 | #define INIT RWSI | ||
diff --git a/lib/locking-selftest-softirq.h b/lib/locking-selftest-softirq.h
new file mode 100644
index 000000000000..a83de2a04ace
--- /dev/null
+++ b/lib/locking-selftest-softirq.h
@@ -0,0 +1,9 @@
1 | #undef IRQ_DISABLE | ||
2 | #undef IRQ_ENABLE | ||
3 | #undef IRQ_ENTER | ||
4 | #undef IRQ_EXIT | ||
5 | |||
6 | #define IRQ_DISABLE SOFTIRQ_DISABLE | ||
7 | #define IRQ_ENABLE SOFTIRQ_ENABLE | ||
8 | #define IRQ_ENTER SOFTIRQ_ENTER | ||
9 | #define IRQ_EXIT SOFTIRQ_EXIT | ||
diff --git a/lib/locking-selftest-spin-hardirq.h b/lib/locking-selftest-spin-hardirq.h
new file mode 100644
index 000000000000..693198dce30a
--- /dev/null
+++ b/lib/locking-selftest-spin-hardirq.h
@@ -0,0 +1,2 @@
1 | #include "locking-selftest-spin.h" | ||
2 | #include "locking-selftest-hardirq.h" | ||
diff --git a/lib/locking-selftest-spin-softirq.h b/lib/locking-selftest-spin-softirq.h
new file mode 100644
index 000000000000..c472e2a87ffc
--- /dev/null
+++ b/lib/locking-selftest-spin-softirq.h
@@ -0,0 +1,2 @@
1 | #include "locking-selftest-spin.h" | ||
2 | #include "locking-selftest-softirq.h" | ||
diff --git a/lib/locking-selftest-spin.h b/lib/locking-selftest-spin.h
new file mode 100644
index 000000000000..ccd1b4b09757
--- /dev/null
+++ b/lib/locking-selftest-spin.h
@@ -0,0 +1,11 @@
1 | #undef LOCK | ||
2 | #define LOCK L | ||
3 | |||
4 | #undef UNLOCK | ||
5 | #define UNLOCK U | ||
6 | |||
7 | #undef RLOCK | ||
8 | #undef WLOCK | ||
9 | |||
10 | #undef INIT | ||
11 | #define INIT SI | ||
diff --git a/lib/locking-selftest-wlock-hardirq.h b/lib/locking-selftest-wlock-hardirq.h
new file mode 100644
index 000000000000..2dd2e5122caa
--- /dev/null
+++ b/lib/locking-selftest-wlock-hardirq.h
@@ -0,0 +1,2 @@
1 | #include "locking-selftest-wlock.h" | ||
2 | #include "locking-selftest-hardirq.h" | ||
diff --git a/lib/locking-selftest-wlock-softirq.h b/lib/locking-selftest-wlock-softirq.h
new file mode 100644
index 000000000000..cb80d1cb944e
--- /dev/null
+++ b/lib/locking-selftest-wlock-softirq.h
@@ -0,0 +1,2 @@
1 | #include "locking-selftest-wlock.h" | ||
2 | #include "locking-selftest-softirq.h" | ||
diff --git a/lib/locking-selftest-wlock.h b/lib/locking-selftest-wlock.h
new file mode 100644
index 000000000000..0815322d99ed
--- /dev/null
+++ b/lib/locking-selftest-wlock.h
@@ -0,0 +1,14 @@
1 | #undef LOCK | ||
2 | #define LOCK WL | ||
3 | |||
4 | #undef UNLOCK | ||
5 | #define UNLOCK WU | ||
6 | |||
7 | #undef RLOCK | ||
8 | #define RLOCK RL | ||
9 | |||
10 | #undef WLOCK | ||
11 | #define WLOCK WL | ||
12 | |||
13 | #undef INIT | ||
14 | #define INIT RWI | ||
diff --git a/lib/locking-selftest-wsem.h b/lib/locking-selftest-wsem.h
new file mode 100644
index 000000000000..b88c5f2dc5f0
--- /dev/null
+++ b/lib/locking-selftest-wsem.h
@@ -0,0 +1,14 @@
1 | #undef LOCK | ||
2 | #define LOCK WSL | ||
3 | |||
4 | #undef UNLOCK | ||
5 | #define UNLOCK WSU | ||
6 | |||
7 | #undef RLOCK | ||
8 | #define RLOCK RSL | ||
9 | |||
10 | #undef WLOCK | ||
11 | #define WLOCK WSL | ||
12 | |||
13 | #undef INIT | ||
14 | #define INIT RWSI | ||
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
new file mode 100644
index 000000000000..7945787f439a
--- /dev/null
+++ b/lib/locking-selftest.c
@@ -0,0 +1,1216 @@
1 | /* | ||
2 | * lib/locking-selftest.c | ||
3 | * | ||
4 | * Testsuite for various locking APIs: spinlocks, rwlocks, | ||
5 | * mutexes and rw-semaphores. | ||
6 | * | ||
7 | * It is checking both false positives and false negatives. | ||
8 | * | ||
9 | * Started by Ingo Molnar: | ||
10 | * | ||
11 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
12 | */ | ||
13 | #include <linux/rwsem.h> | ||
14 | #include <linux/mutex.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/lockdep.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/kallsyms.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/debug_locks.h> | ||
23 | #include <linux/irqflags.h> | ||
24 | |||
25 | /* | ||
26 | * Change this to 1 if you want to see the failure printouts: | ||
27 | */ | ||
28 | static unsigned int debug_locks_verbose; | ||
29 | |||
30 | static int __init setup_debug_locks_verbose(char *str) | ||
31 | { | ||
32 | get_option(&str, &debug_locks_verbose); | ||
33 | |||
34 | return 1; | ||
35 | } | ||
36 | |||
37 | __setup("debug_locks_verbose=", setup_debug_locks_verbose); | ||
38 | |||
39 | #define FAILURE 0 | ||
40 | #define SUCCESS 1 | ||
41 | |||
42 | #define LOCKTYPE_SPIN 0x1 | ||
43 | #define LOCKTYPE_RWLOCK 0x2 | ||
44 | #define LOCKTYPE_MUTEX 0x4 | ||
45 | #define LOCKTYPE_RWSEM 0x8 | ||
46 | |||
47 | /* | ||
48 | * Normal standalone locks, for the circular and irq-context | ||
49 | * dependency tests: | ||
50 | */ | ||
51 | static DEFINE_SPINLOCK(lock_A); | ||
52 | static DEFINE_SPINLOCK(lock_B); | ||
53 | static DEFINE_SPINLOCK(lock_C); | ||
54 | static DEFINE_SPINLOCK(lock_D); | ||
55 | |||
56 | static DEFINE_RWLOCK(rwlock_A); | ||
57 | static DEFINE_RWLOCK(rwlock_B); | ||
58 | static DEFINE_RWLOCK(rwlock_C); | ||
59 | static DEFINE_RWLOCK(rwlock_D); | ||
60 | |||
61 | static DEFINE_MUTEX(mutex_A); | ||
62 | static DEFINE_MUTEX(mutex_B); | ||
63 | static DEFINE_MUTEX(mutex_C); | ||
64 | static DEFINE_MUTEX(mutex_D); | ||
65 | |||
66 | static DECLARE_RWSEM(rwsem_A); | ||
67 | static DECLARE_RWSEM(rwsem_B); | ||
68 | static DECLARE_RWSEM(rwsem_C); | ||
69 | static DECLARE_RWSEM(rwsem_D); | ||
70 | |||
71 | /* | ||
72 | * Locks that we initialize dynamically as well so that | ||
73 | * e.g. X1 and X2 becomes two instances of the same class, | ||
74 | * but X* and Y* are different classes. We do this so that | ||
75 | * we do not trigger a real lockup: | ||
76 | */ | ||
77 | static DEFINE_SPINLOCK(lock_X1); | ||
78 | static DEFINE_SPINLOCK(lock_X2); | ||
79 | static DEFINE_SPINLOCK(lock_Y1); | ||
80 | static DEFINE_SPINLOCK(lock_Y2); | ||
81 | static DEFINE_SPINLOCK(lock_Z1); | ||
82 | static DEFINE_SPINLOCK(lock_Z2); | ||
83 | |||
84 | static DEFINE_RWLOCK(rwlock_X1); | ||
85 | static DEFINE_RWLOCK(rwlock_X2); | ||
86 | static DEFINE_RWLOCK(rwlock_Y1); | ||
87 | static DEFINE_RWLOCK(rwlock_Y2); | ||
88 | static DEFINE_RWLOCK(rwlock_Z1); | ||
89 | static DEFINE_RWLOCK(rwlock_Z2); | ||
90 | |||
91 | static DEFINE_MUTEX(mutex_X1); | ||
92 | static DEFINE_MUTEX(mutex_X2); | ||
93 | static DEFINE_MUTEX(mutex_Y1); | ||
94 | static DEFINE_MUTEX(mutex_Y2); | ||
95 | static DEFINE_MUTEX(mutex_Z1); | ||
96 | static DEFINE_MUTEX(mutex_Z2); | ||
97 | |||
98 | static DECLARE_RWSEM(rwsem_X1); | ||
99 | static DECLARE_RWSEM(rwsem_X2); | ||
100 | static DECLARE_RWSEM(rwsem_Y1); | ||
101 | static DECLARE_RWSEM(rwsem_Y2); | ||
102 | static DECLARE_RWSEM(rwsem_Z1); | ||
103 | static DECLARE_RWSEM(rwsem_Z2); | ||
104 | |||
105 | /* | ||
106 | * non-inlined runtime initializers, to let separate locks share | ||
107 | * the same lock-class: | ||
108 | */ | ||
109 | #define INIT_CLASS_FUNC(class) \ | ||
110 | static noinline void \ | ||
111 | init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \ | ||
112 | struct rw_semaphore *rwsem) \ | ||
113 | { \ | ||
114 | spin_lock_init(lock); \ | ||
115 | rwlock_init(rwlock); \ | ||
116 | mutex_init(mutex); \ | ||
117 | init_rwsem(rwsem); \ | ||
118 | } | ||
119 | |||
120 | INIT_CLASS_FUNC(X) | ||
121 | INIT_CLASS_FUNC(Y) | ||
122 | INIT_CLASS_FUNC(Z) | ||
123 | |||
124 | static void init_shared_classes(void) | ||
125 | { | ||
126 | init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1); | ||
127 | init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2); | ||
128 | |||
129 | init_class_Y(&lock_Y1, &rwlock_Y1, &mutex_Y1, &rwsem_Y1); | ||
130 | init_class_Y(&lock_Y2, &rwlock_Y2, &mutex_Y2, &rwsem_Y2); | ||
131 | |||
132 | init_class_Z(&lock_Z1, &rwlock_Z1, &mutex_Z1, &rwsem_Z1); | ||
133 | init_class_Z(&lock_Z2, &rwlock_Z2, &mutex_Z2, &rwsem_Z2); | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * For spinlocks and rwlocks we also do hardirq-safe / softirq-safe tests. | ||
138 | * The following functions use a lock from a simulated hardirq/softirq | ||
139 | * context, causing the locks to be marked as hardirq-safe/softirq-safe: | ||
140 | */ | ||
141 | |||
142 | #define HARDIRQ_DISABLE local_irq_disable | ||
143 | #define HARDIRQ_ENABLE local_irq_enable | ||
144 | |||
145 | #define HARDIRQ_ENTER() \ | ||
146 | local_irq_disable(); \ | ||
147 | irq_enter(); \ | ||
148 | WARN_ON(!in_irq()); | ||
149 | |||
150 | #define HARDIRQ_EXIT() \ | ||
151 | __irq_exit(); \ | ||
152 | local_irq_enable(); | ||
153 | |||
154 | #define SOFTIRQ_DISABLE local_bh_disable | ||
155 | #define SOFTIRQ_ENABLE local_bh_enable | ||
156 | |||
157 | #define SOFTIRQ_ENTER() \ | ||
158 | local_bh_disable(); \ | ||
159 | local_irq_disable(); \ | ||
160 | trace_softirq_enter(); \ | ||
161 | WARN_ON(!in_softirq()); | ||
162 | |||
163 | #define SOFTIRQ_EXIT() \ | ||
164 | trace_softirq_exit(); \ | ||
165 | local_irq_enable(); \ | ||
166 | local_bh_enable(); | ||
167 | |||
168 | /* | ||
169 | * Shortcuts for lock/unlock API variants, to keep | ||
170 | * the testcases compact: | ||
171 | */ | ||
172 | #define L(x) spin_lock(&lock_##x) | ||
173 | #define U(x) spin_unlock(&lock_##x) | ||
174 | #define LU(x) L(x); U(x) | ||
175 | #define SI(x) spin_lock_init(&lock_##x) | ||
176 | |||
177 | #define WL(x) write_lock(&rwlock_##x) | ||
178 | #define WU(x) write_unlock(&rwlock_##x) | ||
179 | #define WLU(x) WL(x); WU(x) | ||
180 | |||
181 | #define RL(x) read_lock(&rwlock_##x) | ||
182 | #define RU(x) read_unlock(&rwlock_##x) | ||
183 | #define RLU(x) RL(x); RU(x) | ||
184 | #define RWI(x) rwlock_init(&rwlock_##x) | ||
185 | |||
186 | #define ML(x) mutex_lock(&mutex_##x) | ||
187 | #define MU(x) mutex_unlock(&mutex_##x) | ||
188 | #define MI(x) mutex_init(&mutex_##x) | ||
189 | |||
190 | #define WSL(x) down_write(&rwsem_##x) | ||
191 | #define WSU(x) up_write(&rwsem_##x) | ||
192 | |||
193 | #define RSL(x) down_read(&rwsem_##x) | ||
194 | #define RSU(x) up_read(&rwsem_##x) | ||
195 | #define RWSI(x) init_rwsem(&rwsem_##x) | ||
196 | |||
197 | #define LOCK_UNLOCK_2(x,y) LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x) | ||
198 | |||
199 | /* | ||
200 | * Generate different permutations of the same testcase, using | ||
201 | * the same basic lock-dependency/state events: | ||
202 | */ | ||
203 | |||
204 | #define GENERATE_TESTCASE(name) \ | ||
205 | \ | ||
206 | static void name(void) { E(); } | ||
207 | |||
208 | #define GENERATE_PERMUTATIONS_2_EVENTS(name) \ | ||
209 | \ | ||
210 | static void name##_12(void) { E1(); E2(); } \ | ||
211 | static void name##_21(void) { E2(); E1(); } | ||
212 | |||
213 | #define GENERATE_PERMUTATIONS_3_EVENTS(name) \ | ||
214 | \ | ||
215 | static void name##_123(void) { E1(); E2(); E3(); } \ | ||
216 | static void name##_132(void) { E1(); E3(); E2(); } \ | ||
217 | static void name##_213(void) { E2(); E1(); E3(); } \ | ||
218 | static void name##_231(void) { E2(); E3(); E1(); } \ | ||
219 | static void name##_312(void) { E3(); E1(); E2(); } \ | ||
220 | static void name##_321(void) { E3(); E2(); E1(); } | ||
221 | |||
222 | /* | ||
223 | * AA deadlock: | ||
224 | */ | ||
225 | |||
226 | #define E() \ | ||
227 | \ | ||
228 | LOCK(X1); \ | ||
229 | LOCK(X2); /* this one should fail */ | ||
230 | |||
231 | /* | ||
232 | * 6 testcases: | ||
233 | */ | ||
234 | #include "locking-selftest-spin.h" | ||
235 | GENERATE_TESTCASE(AA_spin) | ||
236 | #include "locking-selftest-wlock.h" | ||
237 | GENERATE_TESTCASE(AA_wlock) | ||
238 | #include "locking-selftest-rlock.h" | ||
239 | GENERATE_TESTCASE(AA_rlock) | ||
240 | #include "locking-selftest-mutex.h" | ||
241 | GENERATE_TESTCASE(AA_mutex) | ||
242 | #include "locking-selftest-wsem.h" | ||
243 | GENERATE_TESTCASE(AA_wsem) | ||
244 | #include "locking-selftest-rsem.h" | ||
245 | GENERATE_TESTCASE(AA_rsem) | ||
246 | |||
247 | #undef E | ||
248 | |||
249 | /* | ||
250 | * Special-case for read-locking, they are | ||
251 | * allowed to recurse on the same lock class: | ||
252 | */ | ||
253 | static void rlock_AA1(void) | ||
254 | { | ||
255 | RL(X1); | ||
256 | RL(X1); // this one should NOT fail | ||
257 | } | ||
258 | |||
259 | static void rlock_AA1B(void) | ||
260 | { | ||
261 | RL(X1); | ||
262 | RL(X2); // this one should NOT fail | ||
263 | } | ||
264 | |||
265 | static void rsem_AA1(void) | ||
266 | { | ||
267 | RSL(X1); | ||
268 | RSL(X1); // this one should fail | ||
269 | } | ||
270 | |||
271 | static void rsem_AA1B(void) | ||
272 | { | ||
273 | RSL(X1); | ||
274 | RSL(X2); // this one should fail | ||
275 | } | ||
276 | /* | ||
277 | * The mixing of read and write locks is not allowed: | ||
278 | */ | ||
279 | static void rlock_AA2(void) | ||
280 | { | ||
281 | RL(X1); | ||
282 | WL(X2); // this one should fail | ||
283 | } | ||
284 | |||
285 | static void rsem_AA2(void) | ||
286 | { | ||
287 | RSL(X1); | ||
288 | WSL(X2); // this one should fail | ||
289 | } | ||
290 | |||
291 | static void rlock_AA3(void) | ||
292 | { | ||
293 | WL(X1); | ||
294 | RL(X2); // this one should fail | ||
295 | } | ||
296 | |||
297 | static void rsem_AA3(void) | ||
298 | { | ||
299 | WSL(X1); | ||
300 | RSL(X2); // this one should fail | ||
301 | } | ||
302 | |||
303 | /* | ||
304 | * ABBA deadlock: | ||
305 | */ | ||
306 | |||
307 | #define E() \ | ||
308 | \ | ||
309 | LOCK_UNLOCK_2(A, B); \ | ||
310 | LOCK_UNLOCK_2(B, A); /* fail */ | ||
311 | |||
312 | /* | ||
313 | * 6 testcases: | ||
314 | */ | ||
315 | #include "locking-selftest-spin.h" | ||
316 | GENERATE_TESTCASE(ABBA_spin) | ||
317 | #include "locking-selftest-wlock.h" | ||
318 | GENERATE_TESTCASE(ABBA_wlock) | ||
319 | #include "locking-selftest-rlock.h" | ||
320 | GENERATE_TESTCASE(ABBA_rlock) | ||
321 | #include "locking-selftest-mutex.h" | ||
322 | GENERATE_TESTCASE(ABBA_mutex) | ||
323 | #include "locking-selftest-wsem.h" | ||
324 | GENERATE_TESTCASE(ABBA_wsem) | ||
325 | #include "locking-selftest-rsem.h" | ||
326 | GENERATE_TESTCASE(ABBA_rsem) | ||
327 | |||
328 | #undef E | ||
329 | |||
330 | /* | ||
331 | * AB BC CA deadlock: | ||
332 | */ | ||
333 | |||
334 | #define E() \ | ||
335 | \ | ||
336 | LOCK_UNLOCK_2(A, B); \ | ||
337 | LOCK_UNLOCK_2(B, C); \ | ||
338 | LOCK_UNLOCK_2(C, A); /* fail */ | ||
339 | |||
340 | /* | ||
341 | * 6 testcases: | ||
342 | */ | ||
343 | #include "locking-selftest-spin.h" | ||
344 | GENERATE_TESTCASE(ABBCCA_spin) | ||
345 | #include "locking-selftest-wlock.h" | ||
346 | GENERATE_TESTCASE(ABBCCA_wlock) | ||
347 | #include "locking-selftest-rlock.h" | ||
348 | GENERATE_TESTCASE(ABBCCA_rlock) | ||
349 | #include "locking-selftest-mutex.h" | ||
350 | GENERATE_TESTCASE(ABBCCA_mutex) | ||
351 | #include "locking-selftest-wsem.h" | ||
352 | GENERATE_TESTCASE(ABBCCA_wsem) | ||
353 | #include "locking-selftest-rsem.h" | ||
354 | GENERATE_TESTCASE(ABBCCA_rsem) | ||
355 | |||
356 | #undef E | ||
357 | |||
358 | /* | ||
359 | * AB CA BC deadlock: | ||
360 | */ | ||
361 | |||
362 | #define E() \ | ||
363 | \ | ||
364 | LOCK_UNLOCK_2(A, B); \ | ||
365 | LOCK_UNLOCK_2(C, A); \ | ||
366 | LOCK_UNLOCK_2(B, C); /* fail */ | ||
367 | |||
368 | /* | ||
369 | * 6 testcases: | ||
370 | */ | ||
371 | #include "locking-selftest-spin.h" | ||
372 | GENERATE_TESTCASE(ABCABC_spin) | ||
373 | #include "locking-selftest-wlock.h" | ||
374 | GENERATE_TESTCASE(ABCABC_wlock) | ||
375 | #include "locking-selftest-rlock.h" | ||
376 | GENERATE_TESTCASE(ABCABC_rlock) | ||
377 | #include "locking-selftest-mutex.h" | ||
378 | GENERATE_TESTCASE(ABCABC_mutex) | ||
379 | #include "locking-selftest-wsem.h" | ||
380 | GENERATE_TESTCASE(ABCABC_wsem) | ||
381 | #include "locking-selftest-rsem.h" | ||
382 | GENERATE_TESTCASE(ABCABC_rsem) | ||
383 | |||
384 | #undef E | ||
385 | |||
386 | /* | ||
387 | * AB BC CD DA deadlock: | ||
388 | */ | ||
389 | |||
390 | #define E() \ | ||
391 | \ | ||
392 | LOCK_UNLOCK_2(A, B); \ | ||
393 | LOCK_UNLOCK_2(B, C); \ | ||
394 | LOCK_UNLOCK_2(C, D); \ | ||
395 | LOCK_UNLOCK_2(D, A); /* fail */ | ||
396 | |||
397 | /* | ||
398 | * 6 testcases: | ||
399 | */ | ||
400 | #include "locking-selftest-spin.h" | ||
401 | GENERATE_TESTCASE(ABBCCDDA_spin) | ||
402 | #include "locking-selftest-wlock.h" | ||
403 | GENERATE_TESTCASE(ABBCCDDA_wlock) | ||
404 | #include "locking-selftest-rlock.h" | ||
405 | GENERATE_TESTCASE(ABBCCDDA_rlock) | ||
406 | #include "locking-selftest-mutex.h" | ||
407 | GENERATE_TESTCASE(ABBCCDDA_mutex) | ||
408 | #include "locking-selftest-wsem.h" | ||
409 | GENERATE_TESTCASE(ABBCCDDA_wsem) | ||
410 | #include "locking-selftest-rsem.h" | ||
411 | GENERATE_TESTCASE(ABBCCDDA_rsem) | ||
412 | |||
413 | #undef E | ||
414 | |||
415 | /* | ||
416 | * AB CD BD DA deadlock: | ||
417 | */ | ||
418 | #define E() \ | ||
419 | \ | ||
420 | LOCK_UNLOCK_2(A, B); \ | ||
421 | LOCK_UNLOCK_2(C, D); \ | ||
422 | LOCK_UNLOCK_2(B, D); \ | ||
423 | LOCK_UNLOCK_2(D, A); /* fail */ | ||
424 | |||
425 | /* | ||
426 | * 6 testcases: | ||
427 | */ | ||
428 | #include "locking-selftest-spin.h" | ||
429 | GENERATE_TESTCASE(ABCDBDDA_spin) | ||
430 | #include "locking-selftest-wlock.h" | ||
431 | GENERATE_TESTCASE(ABCDBDDA_wlock) | ||
432 | #include "locking-selftest-rlock.h" | ||
433 | GENERATE_TESTCASE(ABCDBDDA_rlock) | ||
434 | #include "locking-selftest-mutex.h" | ||
435 | GENERATE_TESTCASE(ABCDBDDA_mutex) | ||
436 | #include "locking-selftest-wsem.h" | ||
437 | GENERATE_TESTCASE(ABCDBDDA_wsem) | ||
438 | #include "locking-selftest-rsem.h" | ||
439 | GENERATE_TESTCASE(ABCDBDDA_rsem) | ||
440 | |||
441 | #undef E | ||
442 | |||
443 | /* | ||
444 | * AB CD BC DA deadlock: | ||
445 | */ | ||
446 | #define E() \ | ||
447 | \ | ||
448 | LOCK_UNLOCK_2(A, B); \ | ||
449 | LOCK_UNLOCK_2(C, D); \ | ||
450 | LOCK_UNLOCK_2(B, C); \ | ||
451 | LOCK_UNLOCK_2(D, A); /* fail */ | ||
452 | |||
453 | /* | ||
454 | * 6 testcases: | ||
455 | */ | ||
456 | #include "locking-selftest-spin.h" | ||
457 | GENERATE_TESTCASE(ABCDBCDA_spin) | ||
458 | #include "locking-selftest-wlock.h" | ||
459 | GENERATE_TESTCASE(ABCDBCDA_wlock) | ||
460 | #include "locking-selftest-rlock.h" | ||
461 | GENERATE_TESTCASE(ABCDBCDA_rlock) | ||
462 | #include "locking-selftest-mutex.h" | ||
463 | GENERATE_TESTCASE(ABCDBCDA_mutex) | ||
464 | #include "locking-selftest-wsem.h" | ||
465 | GENERATE_TESTCASE(ABCDBCDA_wsem) | ||
466 | #include "locking-selftest-rsem.h" | ||
467 | GENERATE_TESTCASE(ABCDBCDA_rsem) | ||
468 | |||
469 | #undef E | ||
470 | |||
471 | /* | ||
472 | * Double unlock: | ||
473 | */ | ||
474 | #define E() \ | ||
475 | \ | ||
476 | LOCK(A); \ | ||
477 | UNLOCK(A); \ | ||
478 | UNLOCK(A); /* fail */ | ||
479 | |||
480 | /* | ||
481 | * 6 testcases: | ||
482 | */ | ||
483 | #include "locking-selftest-spin.h" | ||
484 | GENERATE_TESTCASE(double_unlock_spin) | ||
485 | #include "locking-selftest-wlock.h" | ||
486 | GENERATE_TESTCASE(double_unlock_wlock) | ||
487 | #include "locking-selftest-rlock.h" | ||
488 | GENERATE_TESTCASE(double_unlock_rlock) | ||
489 | #include "locking-selftest-mutex.h" | ||
490 | GENERATE_TESTCASE(double_unlock_mutex) | ||
491 | #include "locking-selftest-wsem.h" | ||
492 | GENERATE_TESTCASE(double_unlock_wsem) | ||
493 | #include "locking-selftest-rsem.h" | ||
494 | GENERATE_TESTCASE(double_unlock_rsem) | ||
495 | |||
496 | #undef E | ||
497 | |||
498 | /* | ||
499 | * Bad unlock ordering: | ||
500 | */ | ||
501 | #define E() \ | ||
502 | \ | ||
503 | LOCK(A); \ | ||
504 | LOCK(B); \ | ||
505 | UNLOCK(A); /* fail */ \ | ||
506 | UNLOCK(B); | ||
507 | |||
508 | /* | ||
509 | * 6 testcases: | ||
510 | */ | ||
511 | #include "locking-selftest-spin.h" | ||
512 | GENERATE_TESTCASE(bad_unlock_order_spin) | ||
513 | #include "locking-selftest-wlock.h" | ||
514 | GENERATE_TESTCASE(bad_unlock_order_wlock) | ||
515 | #include "locking-selftest-rlock.h" | ||
516 | GENERATE_TESTCASE(bad_unlock_order_rlock) | ||
517 | #include "locking-selftest-mutex.h" | ||
518 | GENERATE_TESTCASE(bad_unlock_order_mutex) | ||
519 | #include "locking-selftest-wsem.h" | ||
520 | GENERATE_TESTCASE(bad_unlock_order_wsem) | ||
521 | #include "locking-selftest-rsem.h" | ||
522 | GENERATE_TESTCASE(bad_unlock_order_rsem) | ||
523 | |||
524 | #undef E | ||
525 | |||
526 | /* | ||
527 | * initializing a held lock: | ||
528 | */ | ||
529 | #define E() \ | ||
530 | \ | ||
531 | LOCK(A); \ | ||
532 | INIT(A); /* fail */ | ||
533 | |||
534 | /* | ||
535 | * 6 testcases: | ||
536 | */ | ||
537 | #include "locking-selftest-spin.h" | ||
538 | GENERATE_TESTCASE(init_held_spin) | ||
539 | #include "locking-selftest-wlock.h" | ||
540 | GENERATE_TESTCASE(init_held_wlock) | ||
541 | #include "locking-selftest-rlock.h" | ||
542 | GENERATE_TESTCASE(init_held_rlock) | ||
543 | #include "locking-selftest-mutex.h" | ||
544 | GENERATE_TESTCASE(init_held_mutex) | ||
545 | #include "locking-selftest-wsem.h" | ||
546 | GENERATE_TESTCASE(init_held_wsem) | ||
547 | #include "locking-selftest-rsem.h" | ||
548 | GENERATE_TESTCASE(init_held_rsem) | ||
549 | |||
550 | #undef E | ||
551 | |||
552 | /* | ||
553 | * locking an irq-safe lock with irqs enabled: | ||
554 | */ | ||
555 | #define E1() \ | ||
556 | \ | ||
557 | IRQ_ENTER(); \ | ||
558 | LOCK(A); \ | ||
559 | UNLOCK(A); \ | ||
560 | IRQ_EXIT(); | ||
561 | |||
562 | #define E2() \ | ||
563 | \ | ||
564 | LOCK(A); \ | ||
565 | UNLOCK(A); | ||
566 | |||
567 | /* | ||
568 | * Generate 24 testcases: | ||
569 | */ | ||
570 | #include "locking-selftest-spin-hardirq.h" | ||
571 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin) | ||
572 | |||
573 | #include "locking-selftest-rlock-hardirq.h" | ||
574 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock) | ||
575 | |||
576 | #include "locking-selftest-wlock-hardirq.h" | ||
577 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock) | ||
578 | |||
579 | #include "locking-selftest-spin-softirq.h" | ||
580 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin) | ||
581 | |||
582 | #include "locking-selftest-rlock-softirq.h" | ||
583 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock) | ||
584 | |||
585 | #include "locking-selftest-wlock-softirq.h" | ||
586 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock) | ||
587 | |||
588 | #undef E1 | ||
589 | #undef E2 | ||
590 | |||
591 | /* | ||
592 | * Enabling hardirqs with a softirq-safe lock held: | ||
593 | */ | ||
594 | #define E1() \ | ||
595 | \ | ||
596 | SOFTIRQ_ENTER(); \ | ||
597 | LOCK(A); \ | ||
598 | UNLOCK(A); \ | ||
599 | SOFTIRQ_EXIT(); | ||
600 | |||
601 | #define E2() \ | ||
602 | \ | ||
603 | HARDIRQ_DISABLE(); \ | ||
604 | LOCK(A); \ | ||
605 | HARDIRQ_ENABLE(); \ | ||
606 | UNLOCK(A); | ||
607 | |||
608 | /* | ||
609 | * Generate 12 testcases: | ||
610 | */ | ||
611 | #include "locking-selftest-spin.h" | ||
612 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_spin) | ||
613 | |||
614 | #include "locking-selftest-wlock.h" | ||
615 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_wlock) | ||
616 | |||
617 | #include "locking-selftest-rlock.h" | ||
618 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock) | ||
619 | |||
620 | #undef E1 | ||
621 | #undef E2 | ||
622 | |||
623 | /* | ||
624 | * Enabling irqs with an irq-safe lock held: | ||
625 | */ | ||
626 | #define E1() \ | ||
627 | \ | ||
628 | IRQ_ENTER(); \ | ||
629 | LOCK(A); \ | ||
630 | UNLOCK(A); \ | ||
631 | IRQ_EXIT(); | ||
632 | |||
633 | #define E2() \ | ||
634 | \ | ||
635 | IRQ_DISABLE(); \ | ||
636 | LOCK(A); \ | ||
637 | IRQ_ENABLE(); \ | ||
638 | UNLOCK(A); | ||
639 | |||
640 | /* | ||
641 | * Generate 24 testcases: | ||
642 | */ | ||
643 | #include "locking-selftest-spin-hardirq.h" | ||
644 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin) | ||
645 | |||
646 | #include "locking-selftest-rlock-hardirq.h" | ||
647 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock) | ||
648 | |||
649 | #include "locking-selftest-wlock-hardirq.h" | ||
650 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock) | ||
651 | |||
652 | #include "locking-selftest-spin-softirq.h" | ||
653 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin) | ||
654 | |||
655 | #include "locking-selftest-rlock-softirq.h" | ||
656 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock) | ||
657 | |||
658 | #include "locking-selftest-wlock-softirq.h" | ||
659 | GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) | ||
660 | |||
661 | #undef E1 | ||
662 | #undef E2 | ||
663 | |||
664 | /* | ||
665 | * Acquiring a irq-unsafe lock while holding an irq-safe-lock: | ||
666 | */ | ||
667 | #define E1() \ | ||
668 | \ | ||
669 | LOCK(A); \ | ||
670 | LOCK(B); \ | ||
671 | UNLOCK(B); \ | ||
672 | UNLOCK(A); \ | ||
673 | |||
674 | #define E2() \ | ||
675 | \ | ||
676 | LOCK(B); \ | ||
677 | UNLOCK(B); | ||
678 | |||
679 | #define E3() \ | ||
680 | \ | ||
681 | IRQ_ENTER(); \ | ||
682 | LOCK(A); \ | ||
683 | UNLOCK(A); \ | ||
684 | IRQ_EXIT(); | ||
685 | |||
686 | /* | ||
687 | * Generate 36 testcases: | ||
688 | */ | ||
689 | #include "locking-selftest-spin-hardirq.h" | ||
690 | GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin) | ||
691 | |||
692 | #include "locking-selftest-rlock-hardirq.h" | ||
693 | GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock) | ||
694 | |||
695 | #include "locking-selftest-wlock-hardirq.h" | ||
696 | GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock) | ||
697 | |||
698 | #include "locking-selftest-spin-softirq.h" | ||
699 | GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin) | ||
700 | |||
701 | #include "locking-selftest-rlock-softirq.h" | ||
702 | GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock) | ||
703 | |||
704 | #include "locking-selftest-wlock-softirq.h" | ||
705 | GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) | ||
706 | |||
707 | #undef E1 | ||
708 | #undef E2 | ||
709 | #undef E3 | ||
710 | |||
711 | /* | ||
712 | * If a lock turns into softirq-safe, but earlier it took | ||
713 | * a softirq-unsafe lock: | ||
714 | */ | ||
715 | |||
716 | #define E1() \ | ||
717 | IRQ_DISABLE(); \ | ||
718 | LOCK(A); \ | ||
719 | LOCK(B); \ | ||
720 | UNLOCK(B); \ | ||
721 | UNLOCK(A); \ | ||
722 | IRQ_ENABLE(); | ||
723 | |||
724 | #define E2() \ | ||
725 | LOCK(B); \ | ||
726 | UNLOCK(B); | ||
727 | |||
728 | #define E3() \ | ||
729 | IRQ_ENTER(); \ | ||
730 | LOCK(A); \ | ||
731 | UNLOCK(A); \ | ||
732 | IRQ_EXIT(); | ||
733 | |||
734 | /* | ||
735 | * Generate 36 testcases: | ||
736 | */ | ||
737 | #include "locking-selftest-spin-hardirq.h" | ||
738 | GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin) | ||
739 | |||
740 | #include "locking-selftest-rlock-hardirq.h" | ||
741 | GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock) | ||
742 | |||
743 | #include "locking-selftest-wlock-hardirq.h" | ||
744 | GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock) | ||
745 | |||
746 | #include "locking-selftest-spin-softirq.h" | ||
747 | GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin) | ||
748 | |||
749 | #include "locking-selftest-rlock-softirq.h" | ||
750 | GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock) | ||
751 | |||
752 | #include "locking-selftest-wlock-softirq.h" | ||
753 | GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock) | ||
754 | |||
755 | #undef E1 | ||
756 | #undef E2 | ||
757 | #undef E3 | ||
758 | |||
759 | /* | ||
760 | * read-lock / write-lock irq inversion. | ||
761 | * | ||
762 | * Deadlock scenario: | ||
763 | * | ||
764 | * CPU#1 is at #1, i.e. it has write-locked A, but has not | ||
765 | * taken B yet. | ||
766 | * | ||
767 | * CPU#2 is at #2, i.e. it has locked B. | ||
768 | * | ||
769 | * Hardirq hits CPU#2 at point #2 and is trying to read-lock A. | ||
770 | * | ||
771 | * The deadlock occurs because CPU#1 will spin on B, and CPU#2 | ||
772 | * will spin on A. | ||
773 | */ | ||
774 | |||
775 | #define E1() \ | ||
776 | \ | ||
777 | IRQ_DISABLE(); \ | ||
778 | WL(A); \ | ||
779 | LOCK(B); \ | ||
780 | UNLOCK(B); \ | ||
781 | WU(A); \ | ||
782 | IRQ_ENABLE(); | ||
783 | |||
784 | #define E2() \ | ||
785 | \ | ||
786 | LOCK(B); \ | ||
787 | UNLOCK(B); | ||
788 | |||
789 | #define E3() \ | ||
790 | \ | ||
791 | IRQ_ENTER(); \ | ||
792 | RL(A); \ | ||
793 | RU(A); \ | ||
794 | IRQ_EXIT(); | ||
795 | |||
796 | /* | ||
797 | * Generate 36 testcases: | ||
798 | */ | ||
799 | #include "locking-selftest-spin-hardirq.h" | ||
800 | GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_spin) | ||
801 | |||
802 | #include "locking-selftest-rlock-hardirq.h" | ||
803 | GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock) | ||
804 | |||
805 | #include "locking-selftest-wlock-hardirq.h" | ||
806 | GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock) | ||
807 | |||
808 | #include "locking-selftest-spin-softirq.h" | ||
809 | GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin) | ||
810 | |||
811 | #include "locking-selftest-rlock-softirq.h" | ||
812 | GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock) | ||
813 | |||
814 | #include "locking-selftest-wlock-softirq.h" | ||
815 | GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock) | ||
816 | |||
817 | #undef E1 | ||
818 | #undef E2 | ||
819 | #undef E3 | ||
820 | |||
821 | /* | ||
822 | * read-lock / write-lock recursion that is actually safe. | ||
823 | */ | ||
824 | |||
825 | #define E1() \ | ||
826 | \ | ||
827 | IRQ_DISABLE(); \ | ||
828 | WL(A); \ | ||
829 | WU(A); \ | ||
830 | IRQ_ENABLE(); | ||
831 | |||
832 | #define E2() \ | ||
833 | \ | ||
834 | RL(A); \ | ||
835 | RU(A); \ | ||
836 | |||
837 | #define E3() \ | ||
838 | \ | ||
839 | IRQ_ENTER(); \ | ||
840 | RL(A); \ | ||
841 | L(B); \ | ||
842 | U(B); \ | ||
843 | RU(A); \ | ||
844 | IRQ_EXIT(); | ||
845 | |||
846 | /* | ||
847 | * Generate 12 testcases: | ||
848 | */ | ||
849 | #include "locking-selftest-hardirq.h" | ||
850 | GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard) | ||
851 | |||
852 | #include "locking-selftest-softirq.h" | ||
853 | GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) | ||
854 | |||
855 | #undef E1 | ||
856 | #undef E2 | ||
857 | #undef E3 | ||
858 | |||
859 | /* | ||
860 | * read-lock / write-lock recursion that is unsafe. | ||
861 | */ | ||
862 | |||
863 | #define E1() \ | ||
864 | \ | ||
865 | IRQ_DISABLE(); \ | ||
866 | L(B); \ | ||
867 | WL(A); \ | ||
868 | WU(A); \ | ||
869 | U(B); \ | ||
870 | IRQ_ENABLE(); | ||
871 | |||
872 | #define E2() \ | ||
873 | \ | ||
874 | RL(A); \ | ||
875 | RU(A); \ | ||
876 | |||
877 | #define E3() \ | ||
878 | \ | ||
879 | IRQ_ENTER(); \ | ||
880 | L(B); \ | ||
881 | U(B); \ | ||
882 | IRQ_EXIT(); | ||
883 | |||
884 | /* | ||
885 | * Generate 12 testcases: | ||
886 | */ | ||
887 | #include "locking-selftest-hardirq.h" | ||
888 | // GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard) | ||
889 | |||
890 | #include "locking-selftest-softirq.h" | ||
891 | // GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft) | ||
892 | |||
893 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
894 | # define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map) | ||
895 | # define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map) | ||
896 | # define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map) | ||
897 | # define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map) | ||
898 | #else | ||
899 | # define I_SPINLOCK(x) | ||
900 | # define I_RWLOCK(x) | ||
901 | # define I_MUTEX(x) | ||
902 | # define I_RWSEM(x) | ||
903 | #endif | ||
904 | |||
905 | #define I1(x) \ | ||
906 | do { \ | ||
907 | I_SPINLOCK(x); \ | ||
908 | I_RWLOCK(x); \ | ||
909 | I_MUTEX(x); \ | ||
910 | I_RWSEM(x); \ | ||
911 | } while (0) | ||
912 | |||
913 | #define I2(x) \ | ||
914 | do { \ | ||
915 | spin_lock_init(&lock_##x); \ | ||
916 | rwlock_init(&rwlock_##x); \ | ||
917 | mutex_init(&mutex_##x); \ | ||
918 | init_rwsem(&rwsem_##x); \ | ||
919 | } while (0) | ||
920 | |||
921 | static void reset_locks(void) | ||
922 | { | ||
923 | local_irq_disable(); | ||
924 | I1(A); I1(B); I1(C); I1(D); | ||
925 | I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2); | ||
926 | lockdep_reset(); | ||
927 | I2(A); I2(B); I2(C); I2(D); | ||
928 | init_shared_classes(); | ||
929 | local_irq_enable(); | ||
930 | } | ||
931 | |||
932 | #undef I | ||
933 | |||
934 | static int testcase_total; | ||
935 | static int testcase_successes; | ||
936 | static int expected_testcase_failures; | ||
937 | static int unexpected_testcase_failures; | ||
938 | |||
939 | static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) | ||
940 | { | ||
941 | unsigned long saved_preempt_count = preempt_count(); | ||
942 | int expected_failure = 0; | ||
943 | |||
944 | WARN_ON(irqs_disabled()); | ||
945 | |||
946 | testcase_fn(); | ||
947 | /* | ||
948 | * Filter out expected failures: | ||
949 | */ | ||
950 | #ifndef CONFIG_PROVE_LOCKING | ||
951 | if ((lockclass_mask & LOCKTYPE_SPIN) && debug_locks != expected) | ||
952 | expected_failure = 1; | ||
953 | if ((lockclass_mask & LOCKTYPE_RWLOCK) && debug_locks != expected) | ||
954 | expected_failure = 1; | ||
955 | if ((lockclass_mask & LOCKTYPE_MUTEX) && debug_locks != expected) | ||
956 | expected_failure = 1; | ||
957 | if ((lockclass_mask & LOCKTYPE_RWSEM) && debug_locks != expected) | ||
958 | expected_failure = 1; | ||
959 | #endif | ||
960 | if (debug_locks != expected) { | ||
961 | if (expected_failure) { | ||
962 | expected_testcase_failures++; | ||
963 | printk("failed|"); | ||
964 | } else { | ||
965 | unexpected_testcase_failures++; | ||
966 | printk("FAILED|"); | ||
967 | } | ||
968 | } else { | ||
969 | testcase_successes++; | ||
970 | printk(" ok |"); | ||
971 | } | ||
972 | testcase_total++; | ||
973 | |||
974 | if (debug_locks_verbose) | ||
975 | printk(" lockclass mask: %x, debug_locks: %d, expected: %d\n", | ||
976 | lockclass_mask, debug_locks, expected); | ||
977 | /* | ||
978 | * Some tests (e.g. double-unlock) might corrupt the preemption | ||
979 | * count, so restore it: | ||
980 | */ | ||
981 | preempt_count() = saved_preempt_count; | ||
982 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
983 | if (softirq_count()) | ||
984 | current->softirqs_enabled = 0; | ||
985 | else | ||
986 | current->softirqs_enabled = 1; | ||
987 | #endif | ||
988 | |||
989 | reset_locks(); | ||
990 | } | ||
991 | |||
992 | static inline void print_testname(const char *testname) | ||
993 | { | ||
994 | printk("%33s:", testname); | ||
995 | } | ||
996 | |||
997 | #define DO_TESTCASE_1(desc, name, nr) \ | ||
998 | print_testname(desc"/"#nr); \ | ||
999 | dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ | ||
1000 | printk("\n"); | ||
1001 | |||
1002 | #define DO_TESTCASE_1B(desc, name, nr) \ | ||
1003 | print_testname(desc"/"#nr); \ | ||
1004 | dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \ | ||
1005 | printk("\n"); | ||
1006 | |||
1007 | #define DO_TESTCASE_3(desc, name, nr) \ | ||
1008 | print_testname(desc"/"#nr); \ | ||
1009 | dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \ | ||
1010 | dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ | ||
1011 | dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ | ||
1012 | printk("\n"); | ||
1013 | |||
1014 | #define DO_TESTCASE_3RW(desc, name, nr) \ | ||
1015 | print_testname(desc"/"#nr); \ | ||
1016 | dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\ | ||
1017 | dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ | ||
1018 | dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ | ||
1019 | printk("\n"); | ||
1020 | |||
1021 | #define DO_TESTCASE_6(desc, name) \ | ||
1022 | print_testname(desc); \ | ||
1023 | dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \ | ||
1024 | dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \ | ||
1025 | dotest(name##_rlock, FAILURE, LOCKTYPE_RWLOCK); \ | ||
1026 | dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ | ||
1027 | dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ | ||
1028 | dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ | ||
1029 | printk("\n"); | ||
1030 | |||
1031 | #define DO_TESTCASE_6_SUCCESS(desc, name) \ | ||
1032 | print_testname(desc); \ | ||
1033 | dotest(name##_spin, SUCCESS, LOCKTYPE_SPIN); \ | ||
1034 | dotest(name##_wlock, SUCCESS, LOCKTYPE_RWLOCK); \ | ||
1035 | dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \ | ||
1036 | dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \ | ||
1037 | dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \ | ||
1038 | dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \ | ||
1039 | printk("\n"); | ||
1040 | |||
1041 | /* | ||
1042 | * 'read' variant: rlocks must not trigger. | ||
1043 | */ | ||
1044 | #define DO_TESTCASE_6R(desc, name) \ | ||
1045 | print_testname(desc); \ | ||
1046 | dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \ | ||
1047 | dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \ | ||
1048 | dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \ | ||
1049 | dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ | ||
1050 | dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ | ||
1051 | dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ | ||
1052 | printk("\n"); | ||
1053 | |||
1054 | #define DO_TESTCASE_2I(desc, name, nr) \ | ||
1055 | DO_TESTCASE_1("hard-"desc, name##_hard, nr); \ | ||
1056 | DO_TESTCASE_1("soft-"desc, name##_soft, nr); | ||
1057 | |||
1058 | #define DO_TESTCASE_2IB(desc, name, nr) \ | ||
1059 | DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \ | ||
1060 | DO_TESTCASE_1B("soft-"desc, name##_soft, nr); | ||
1061 | |||
1062 | #define DO_TESTCASE_6I(desc, name, nr) \ | ||
1063 | DO_TESTCASE_3("hard-"desc, name##_hard, nr); \ | ||
1064 | DO_TESTCASE_3("soft-"desc, name##_soft, nr); | ||
1065 | |||
1066 | #define DO_TESTCASE_6IRW(desc, name, nr) \ | ||
1067 | DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \ | ||
1068 | DO_TESTCASE_3RW("soft-"desc, name##_soft, nr); | ||
1069 | |||
1070 | #define DO_TESTCASE_2x3(desc, name) \ | ||
1071 | DO_TESTCASE_3(desc, name, 12); \ | ||
1072 | DO_TESTCASE_3(desc, name, 21); | ||
1073 | |||
1074 | #define DO_TESTCASE_2x6(desc, name) \ | ||
1075 | DO_TESTCASE_6I(desc, name, 12); \ | ||
1076 | DO_TESTCASE_6I(desc, name, 21); | ||
1077 | |||
1078 | #define DO_TESTCASE_6x2(desc, name) \ | ||
1079 | DO_TESTCASE_2I(desc, name, 123); \ | ||
1080 | DO_TESTCASE_2I(desc, name, 132); \ | ||
1081 | DO_TESTCASE_2I(desc, name, 213); \ | ||
1082 | DO_TESTCASE_2I(desc, name, 231); \ | ||
1083 | DO_TESTCASE_2I(desc, name, 312); \ | ||
1084 | DO_TESTCASE_2I(desc, name, 321); | ||
1085 | |||
1086 | #define DO_TESTCASE_6x2B(desc, name) \ | ||
1087 | DO_TESTCASE_2IB(desc, name, 123); \ | ||
1088 | DO_TESTCASE_2IB(desc, name, 132); \ | ||
1089 | DO_TESTCASE_2IB(desc, name, 213); \ | ||
1090 | DO_TESTCASE_2IB(desc, name, 231); \ | ||
1091 | DO_TESTCASE_2IB(desc, name, 312); \ | ||
1092 | DO_TESTCASE_2IB(desc, name, 321); | ||
1093 | |||
1094 | #define DO_TESTCASE_6x6(desc, name) \ | ||
1095 | DO_TESTCASE_6I(desc, name, 123); \ | ||
1096 | DO_TESTCASE_6I(desc, name, 132); \ | ||
1097 | DO_TESTCASE_6I(desc, name, 213); \ | ||
1098 | DO_TESTCASE_6I(desc, name, 231); \ | ||
1099 | DO_TESTCASE_6I(desc, name, 312); \ | ||
1100 | DO_TESTCASE_6I(desc, name, 321); | ||
1101 | |||
1102 | #define DO_TESTCASE_6x6RW(desc, name) \ | ||
1103 | DO_TESTCASE_6IRW(desc, name, 123); \ | ||
1104 | DO_TESTCASE_6IRW(desc, name, 132); \ | ||
1105 | DO_TESTCASE_6IRW(desc, name, 213); \ | ||
1106 | DO_TESTCASE_6IRW(desc, name, 231); \ | ||
1107 | DO_TESTCASE_6IRW(desc, name, 312); \ | ||
1108 | DO_TESTCASE_6IRW(desc, name, 321); | ||
1109 | |||
1110 | |||
1111 | void locking_selftest(void) | ||
1112 | { | ||
1113 | /* | ||
1114 | * Got a locking failure before the selftest ran? | ||
1115 | */ | ||
1116 | if (!debug_locks) { | ||
1117 | printk("----------------------------------\n"); | ||
1118 | printk("| Locking API testsuite disabled |\n"); | ||
1119 | printk("----------------------------------\n"); | ||
1120 | return; | ||
1121 | } | ||
1122 | |||
1123 | /* | ||
1124 | * Run the testsuite: | ||
1125 | */ | ||
1126 | printk("------------------------\n"); | ||
1127 | printk("| Locking API testsuite:\n"); | ||
1128 | printk("----------------------------------------------------------------------------\n"); | ||
1129 | printk(" | spin |wlock |rlock |mutex | wsem | rsem |\n"); | ||
1130 | printk(" --------------------------------------------------------------------------\n"); | ||
1131 | |||
1132 | init_shared_classes(); | ||
1133 | debug_locks_silent = !debug_locks_verbose; | ||
1134 | |||
1135 | DO_TESTCASE_6R("A-A deadlock", AA); | ||
1136 | DO_TESTCASE_6R("A-B-B-A deadlock", ABBA); | ||
1137 | DO_TESTCASE_6R("A-B-B-C-C-A deadlock", ABBCCA); | ||
1138 | DO_TESTCASE_6R("A-B-C-A-B-C deadlock", ABCABC); | ||
1139 | DO_TESTCASE_6R("A-B-B-C-C-D-D-A deadlock", ABBCCDDA); | ||
1140 | DO_TESTCASE_6R("A-B-C-D-B-D-D-A deadlock", ABCDBDDA); | ||
1141 | DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA); | ||
1142 | DO_TESTCASE_6("double unlock", double_unlock); | ||
1143 | DO_TESTCASE_6("initialize held", init_held); | ||
1144 | DO_TESTCASE_6_SUCCESS("bad unlock order", bad_unlock_order); | ||
1145 | |||
1146 | printk(" --------------------------------------------------------------------------\n"); | ||
1147 | print_testname("recursive read-lock"); | ||
1148 | printk(" |"); | ||
1149 | dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK); | ||
1150 | printk(" |"); | ||
1151 | dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM); | ||
1152 | printk("\n"); | ||
1153 | |||
1154 | print_testname("recursive read-lock #2"); | ||
1155 | printk(" |"); | ||
1156 | dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK); | ||
1157 | printk(" |"); | ||
1158 | dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM); | ||
1159 | printk("\n"); | ||
1160 | |||
1161 | print_testname("mixed read-write-lock"); | ||
1162 | printk(" |"); | ||
1163 | dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK); | ||
1164 | printk(" |"); | ||
1165 | dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM); | ||
1166 | printk("\n"); | ||
1167 | |||
1168 | print_testname("mixed write-read-lock"); | ||
1169 | printk(" |"); | ||
1170 | dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK); | ||
1171 | printk(" |"); | ||
1172 | dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM); | ||
1173 | printk("\n"); | ||
1174 | |||
1175 | printk(" --------------------------------------------------------------------------\n"); | ||
1176 | |||
1177 | /* | ||
1178 | * irq-context testcases: | ||
1179 | */ | ||
1180 | DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1); | ||
1181 | DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A); | ||
1182 | DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B); | ||
1183 | DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3); | ||
1184 | DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4); | ||
1185 | DO_TESTCASE_6x6RW("irq lock-inversion", irq_inversion); | ||
1186 | |||
1187 | DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); | ||
1188 | // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); | ||
1189 | |||
1190 | if (unexpected_testcase_failures) { | ||
1191 | printk("-----------------------------------------------------------------\n"); | ||
1192 | debug_locks = 0; | ||
1193 | printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n", | ||
1194 | unexpected_testcase_failures, testcase_total); | ||
1195 | printk("-----------------------------------------------------------------\n"); | ||
1196 | } else if (expected_testcase_failures && testcase_successes) { | ||
1197 | printk("--------------------------------------------------------\n"); | ||
1198 | printk("%3d out of %3d testcases failed, as expected. |\n", | ||
1199 | expected_testcase_failures, testcase_total); | ||
1200 | printk("----------------------------------------------------\n"); | ||
1201 | debug_locks = 1; | ||
1202 | } else if (expected_testcase_failures && !testcase_successes) { | ||
1203 | printk("--------------------------------------------------------\n"); | ||
1204 | printk("All %3d testcases failed, as expected. |\n", | ||
1205 | expected_testcase_failures); | ||
1206 | printk("----------------------------------------\n"); | ||
1207 | debug_locks = 1; | ||
1208 | } else { | ||
1209 | printk("-------------------------------------------------------\n"); | ||
1210 | printk("Good, all %3d testcases passed! |\n", | ||
1211 | testcase_successes); | ||
1212 | printk("---------------------------------\n"); | ||
1213 | debug_locks = 1; | ||
1214 | } | ||
1215 | debug_locks_silent = 0; | ||
1216 | } | ||
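The "recursive read-lock" rows above expect SUCCESS for rwlocks but FAILURE for rwsems: read_lock() may nest on a lock the caller already holds for reading, whereas a second down_read() on a held rwsem can deadlock once a writer has queued behind the first reader. A hedged kernel-style fragment of the two patterns (the demo_* names are hypothetical; the primitives are the stock rwlock/rwsem API):

```c
static DEFINE_RWLOCK(demo_rwlock);
static DECLARE_RWSEM(demo_rwsem);

static void demo_recursive_read(void)
{
	read_lock(&demo_rwlock);
	read_lock(&demo_rwlock);	/* ok: rwlock readers may recurse (rlock_AA1) */
	read_unlock(&demo_rwlock);
	read_unlock(&demo_rwlock);

	down_read(&demo_rwsem);
	down_read(&demo_rwsem);		/* bug: rwsems are not read-recursive; this is
					 * the pattern the rsem_AA1 case expects lockdep
					 * to flag */
	up_read(&demo_rwsem);
	up_read(&demo_rwsem);
}
```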
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index 40ffde940a86..db4fed74b940 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c | |||
@@ -17,27 +17,22 @@ struct rwsem_waiter { | |||
17 | #define RWSEM_WAITING_FOR_WRITE 0x00000002 | 17 | #define RWSEM_WAITING_FOR_WRITE 0x00000002 |
18 | }; | 18 | }; |
19 | 19 | ||
20 | #if RWSEM_DEBUG | ||
21 | void rwsemtrace(struct rw_semaphore *sem, const char *str) | ||
22 | { | ||
23 | if (sem->debug) | ||
24 | printk("[%d] %s({%d,%d})\n", | ||
25 | current->pid, str, sem->activity, | ||
26 | list_empty(&sem->wait_list) ? 0 : 1); | ||
27 | } | ||
28 | #endif | ||
29 | |||
30 | /* | 20 | /* |
31 | * initialise the semaphore | 21 | * initialise the semaphore |
32 | */ | 22 | */ |
33 | void fastcall init_rwsem(struct rw_semaphore *sem) | 23 | void __init_rwsem(struct rw_semaphore *sem, const char *name, |
24 | struct lock_class_key *key) | ||
34 | { | 25 | { |
26 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
27 | /* | ||
28 | * Make sure we are not reinitializing a held semaphore: | ||
29 | */ | ||
30 | debug_check_no_locks_freed((void *)sem, sizeof(*sem)); | ||
31 | lockdep_init_map(&sem->dep_map, name, key); | ||
32 | #endif | ||
35 | sem->activity = 0; | 33 | sem->activity = 0; |
36 | spin_lock_init(&sem->wait_lock); | 34 | spin_lock_init(&sem->wait_lock); |
37 | INIT_LIST_HEAD(&sem->wait_list); | 35 | INIT_LIST_HEAD(&sem->wait_list); |
38 | #if RWSEM_DEBUG | ||
39 | sem->debug = 0; | ||
40 | #endif | ||
41 | } | 36 | } |
42 | 37 | ||
43 | /* | 38 | /* |
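The hunk above replaces init_rwsem() with __init_rwsem(), which takes a textual name and a lock_class_key so lockdep can classify each semaphore by the site that initialized it. A plausible sketch of the header-side wrapper that supplies those arguments, assumed from the matching lockdep changes rather than shown in this patch:

```c
/*
 * Sketch of the assumed init_rwsem() wrapper: each call site gets its
 * own static key, so every rwsem initialized there shares one lockdep
 * class, named after the variable being initialized.
 */
#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)
```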
@@ -56,8 +51,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite) | |||
56 | struct task_struct *tsk; | 51 | struct task_struct *tsk; |
57 | int woken; | 52 | int woken; |
58 | 53 | ||
59 | rwsemtrace(sem, "Entering __rwsem_do_wake"); | ||
60 | |||
61 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); | 54 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); |
62 | 55 | ||
63 | if (!wakewrite) { | 56 | if (!wakewrite) { |
@@ -104,7 +97,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite) | |||
104 | sem->activity += woken; | 97 | sem->activity += woken; |
105 | 98 | ||
106 | out: | 99 | out: |
107 | rwsemtrace(sem, "Leaving __rwsem_do_wake"); | ||
108 | return sem; | 100 | return sem; |
109 | } | 101 | } |
110 | 102 | ||
@@ -138,8 +130,6 @@ void fastcall __sched __down_read(struct rw_semaphore *sem) | |||
138 | struct rwsem_waiter waiter; | 130 | struct rwsem_waiter waiter; |
139 | struct task_struct *tsk; | 131 | struct task_struct *tsk; |
140 | 132 | ||
141 | rwsemtrace(sem, "Entering __down_read"); | ||
142 | |||
143 | spin_lock_irq(&sem->wait_lock); | 133 | spin_lock_irq(&sem->wait_lock); |
144 | 134 | ||
145 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { | 135 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { |
@@ -171,9 +161,8 @@ void fastcall __sched __down_read(struct rw_semaphore *sem) | |||
171 | } | 161 | } |
172 | 162 | ||
173 | tsk->state = TASK_RUNNING; | 163 | tsk->state = TASK_RUNNING; |
174 | |||
175 | out: | 164 | out: |
176 | rwsemtrace(sem, "Leaving __down_read"); | 165 | ; |
177 | } | 166 | } |
178 | 167 | ||
179 | /* | 168 | /* |
@@ -184,7 +173,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem) | |||
184 | unsigned long flags; | 173 | unsigned long flags; |
185 | int ret = 0; | 174 | int ret = 0; |
186 | 175 | ||
187 | rwsemtrace(sem, "Entering __down_read_trylock"); | ||
188 | 176 | ||
189 | spin_lock_irqsave(&sem->wait_lock, flags); | 177 | spin_lock_irqsave(&sem->wait_lock, flags); |
190 | 178 | ||
@@ -196,7 +184,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem) | |||
196 | 184 | ||
197 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 185 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
198 | 186 | ||
199 | rwsemtrace(sem, "Leaving __down_read_trylock"); | ||
200 | return ret; | 187 | return ret; |
201 | } | 188 | } |
202 | 189 | ||
@@ -204,13 +191,11 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem) | |||
204 | * get a write lock on the semaphore | 191 | * get a write lock on the semaphore |
205 | * - we increment the waiting count anyway to indicate an exclusive lock | 192 | * - we increment the waiting count anyway to indicate an exclusive lock |
206 | */ | 193 | */ |
207 | void fastcall __sched __down_write(struct rw_semaphore *sem) | 194 | void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass) |
208 | { | 195 | { |
209 | struct rwsem_waiter waiter; | 196 | struct rwsem_waiter waiter; |
210 | struct task_struct *tsk; | 197 | struct task_struct *tsk; |
211 | 198 | ||
212 | rwsemtrace(sem, "Entering __down_write"); | ||
213 | |||
214 | spin_lock_irq(&sem->wait_lock); | 199 | spin_lock_irq(&sem->wait_lock); |
215 | 200 | ||
216 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | 201 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { |
@@ -242,9 +227,13 @@ void fastcall __sched __down_write(struct rw_semaphore *sem) | |||
242 | } | 227 | } |
243 | 228 | ||
244 | tsk->state = TASK_RUNNING; | 229 | tsk->state = TASK_RUNNING; |
245 | |||
246 | out: | 230 | out: |
247 | rwsemtrace(sem, "Leaving __down_write"); | 231 | ; |
232 | } | ||
233 | |||
234 | void fastcall __sched __down_write(struct rw_semaphore *sem) | ||
235 | { | ||
236 | __down_write_nested(sem, 0); | ||
248 | } | 237 | } |
249 | 238 | ||
250 | /* | 239 | /* |
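__down_write_nested() threads a lockdep subclass through, so a caller may hold two write locks of the same class when it enforces an ordering rule of its own (for example, always locking the lower-addressed object first). A hedged usage sketch; down_write_nested() and SINGLE_DEPTH_NESTING are assumed from the wider lockdep series, and lock_two_sems() is a hypothetical helper:

```c
/* lock two rwsems of the same class, in address order */
static void lock_two_sems(struct rw_semaphore *a, struct rw_semaphore *b)
{
	struct rw_semaphore *tmp;

	if (a > b) {			/* enforce a stable locking order */
		tmp = a;
		a = b;
		b = tmp;
	}
	down_write(a);
	/* tell lockdep the second acquisition is deliberately nested */
	down_write_nested(b, SINGLE_DEPTH_NESTING);
}
```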
@@ -255,8 +244,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem) | |||
255 | unsigned long flags; | 244 | unsigned long flags; |
256 | int ret = 0; | 245 | int ret = 0; |
257 | 246 | ||
258 | rwsemtrace(sem, "Entering __down_write_trylock"); | ||
259 | |||
260 | spin_lock_irqsave(&sem->wait_lock, flags); | 247 | spin_lock_irqsave(&sem->wait_lock, flags); |
261 | 248 | ||
262 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | 249 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { |
@@ -267,7 +254,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem) | |||
267 | 254 | ||
268 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 255 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
269 | 256 | ||
270 | rwsemtrace(sem, "Leaving __down_write_trylock"); | ||
271 | return ret; | 257 | return ret; |
272 | } | 258 | } |
273 | 259 | ||
@@ -278,16 +264,12 @@ void fastcall __up_read(struct rw_semaphore *sem) | |||
278 | { | 264 | { |
279 | unsigned long flags; | 265 | unsigned long flags; |
280 | 266 | ||
281 | rwsemtrace(sem, "Entering __up_read"); | ||
282 | |||
283 | spin_lock_irqsave(&sem->wait_lock, flags); | 267 | spin_lock_irqsave(&sem->wait_lock, flags); |
284 | 268 | ||
285 | if (--sem->activity == 0 && !list_empty(&sem->wait_list)) | 269 | if (--sem->activity == 0 && !list_empty(&sem->wait_list)) |
286 | sem = __rwsem_wake_one_writer(sem); | 270 | sem = __rwsem_wake_one_writer(sem); |
287 | 271 | ||
288 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 272 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
289 | |||
290 | rwsemtrace(sem, "Leaving __up_read"); | ||
291 | } | 273 | } |
292 | 274 | ||
293 | /* | 275 | /* |
@@ -297,8 +279,6 @@ void fastcall __up_write(struct rw_semaphore *sem) | |||
297 | { | 279 | { |
298 | unsigned long flags; | 280 | unsigned long flags; |
299 | 281 | ||
300 | rwsemtrace(sem, "Entering __up_write"); | ||
301 | |||
302 | spin_lock_irqsave(&sem->wait_lock, flags); | 282 | spin_lock_irqsave(&sem->wait_lock, flags); |
303 | 283 | ||
304 | sem->activity = 0; | 284 | sem->activity = 0; |
@@ -306,8 +286,6 @@ void fastcall __up_write(struct rw_semaphore *sem) | |||
306 | sem = __rwsem_do_wake(sem, 1); | 286 | sem = __rwsem_do_wake(sem, 1); |
307 | 287 | ||
308 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 288 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
309 | |||
310 | rwsemtrace(sem, "Leaving __up_write"); | ||
311 | } | 289 | } |
312 | 290 | ||
313 | /* | 291 | /* |
@@ -318,8 +296,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem) | |||
318 | { | 296 | { |
319 | unsigned long flags; | 297 | unsigned long flags; |
320 | 298 | ||
321 | rwsemtrace(sem, "Entering __downgrade_write"); | ||
322 | |||
323 | spin_lock_irqsave(&sem->wait_lock, flags); | 299 | spin_lock_irqsave(&sem->wait_lock, flags); |
324 | 300 | ||
325 | sem->activity = 1; | 301 | sem->activity = 1; |
@@ -327,18 +303,14 @@ void fastcall __downgrade_write(struct rw_semaphore *sem) | |||
327 | sem = __rwsem_do_wake(sem, 0); | 303 | sem = __rwsem_do_wake(sem, 0); |
328 | 304 | ||
329 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 305 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
330 | |||
331 | rwsemtrace(sem, "Leaving __downgrade_write"); | ||
332 | } | 306 | } |
333 | 307 | ||
334 | EXPORT_SYMBOL(init_rwsem); | 308 | EXPORT_SYMBOL(__init_rwsem); |
335 | EXPORT_SYMBOL(__down_read); | 309 | EXPORT_SYMBOL(__down_read); |
336 | EXPORT_SYMBOL(__down_read_trylock); | 310 | EXPORT_SYMBOL(__down_read_trylock); |
311 | EXPORT_SYMBOL(__down_write_nested); | ||
337 | EXPORT_SYMBOL(__down_write); | 312 | EXPORT_SYMBOL(__down_write); |
338 | EXPORT_SYMBOL(__down_write_trylock); | 313 | EXPORT_SYMBOL(__down_write_trylock); |
339 | EXPORT_SYMBOL(__up_read); | 314 | EXPORT_SYMBOL(__up_read); |
340 | EXPORT_SYMBOL(__up_write); | 315 | EXPORT_SYMBOL(__up_write); |
341 | EXPORT_SYMBOL(__downgrade_write); | 316 | EXPORT_SYMBOL(__downgrade_write); |
342 | #if RWSEM_DEBUG | ||
343 | EXPORT_SYMBOL(rwsemtrace); | ||
344 | #endif | ||
diff --git a/lib/rwsem.c b/lib/rwsem.c index 62fa4eba9ffe..b322421c2969 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c | |||
@@ -8,6 +8,26 @@ | |||
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | 10 | ||
11 | /* | ||
12 | * Initialize an rwsem: | ||
13 | */ | ||
14 | void __init_rwsem(struct rw_semaphore *sem, const char *name, | ||
15 | struct lock_class_key *key) | ||
16 | { | ||
17 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
18 | /* | ||
19 | * Make sure we are not reinitializing a held semaphore: | ||
20 | */ | ||
21 | debug_check_no_locks_freed((void *)sem, sizeof(*sem)); | ||
22 | lockdep_init_map(&sem->dep_map, name, key); | ||
23 | #endif | ||
24 | sem->count = RWSEM_UNLOCKED_VALUE; | ||
25 | spin_lock_init(&sem->wait_lock); | ||
26 | INIT_LIST_HEAD(&sem->wait_list); | ||
27 | } | ||
28 | |||
29 | EXPORT_SYMBOL(__init_rwsem); | ||
30 | |||
11 | struct rwsem_waiter { | 31 | struct rwsem_waiter { |
12 | struct list_head list; | 32 | struct list_head list; |
13 | struct task_struct *task; | 33 | struct task_struct *task; |
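As in the spinlock-based rwsem earlier in this series, lockdep_init_map() ties the semaphore to a class identified by the address of the caller-supplied key, so every rwsem initialized through the same key (normally, the same init site) shares one class. A small kernel-style sketch of that idea, built only on the __init_rwsem() signature added above; the demo names are hypothetical:

```c
static struct lock_class_key demo_sem_key;	/* one class for both sems below */

static void demo_init_pair(struct rw_semaphore *a, struct rw_semaphore *b)
{
	/* a and b land in the same lockdep class, reported as "demo_sem" */
	__init_rwsem(a, "demo_sem", &demo_sem_key);
	__init_rwsem(b, "demo_sem", &demo_sem_key);
}
```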
@@ -16,17 +36,6 @@ struct rwsem_waiter { | |||
16 | #define RWSEM_WAITING_FOR_WRITE 0x00000002 | 36 | #define RWSEM_WAITING_FOR_WRITE 0x00000002 |
17 | }; | 37 | }; |
18 | 38 | ||
19 | #if RWSEM_DEBUG | ||
20 | #undef rwsemtrace | ||
21 | void rwsemtrace(struct rw_semaphore *sem, const char *str) | ||
22 | { | ||
23 | printk("sem=%p\n", sem); | ||
24 | printk("(sem)=%08lx\n", sem->count); | ||
25 | if (sem->debug) | ||
26 | printk("[%d] %s({%08lx})\n", current->pid, str, sem->count); | ||
27 | } | ||
28 | #endif | ||
29 | |||
30 | /* | 39 | /* |
31 | * handle the lock release when processes blocked on it that can now run | 40 | * handle the lock release when processes blocked on it that can now run |
32 | * - if we come here from up_xxxx(), then: | 41 | * - if we come here from up_xxxx(), then: |
@@ -45,8 +54,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) | |||
45 | struct list_head *next; | 54 | struct list_head *next; |
46 | signed long oldcount, woken, loop; | 55 | signed long oldcount, woken, loop; |
47 | 56 | ||
48 | rwsemtrace(sem, "Entering __rwsem_do_wake"); | ||
49 | |||
50 | if (downgrading) | 57 | if (downgrading) |
51 | goto dont_wake_writers; | 58 | goto dont_wake_writers; |
52 | 59 | ||
@@ -127,7 +134,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) | |||
127 | next->prev = &sem->wait_list; | 134 | next->prev = &sem->wait_list; |
128 | 135 | ||
129 | out: | 136 | out: |
130 | rwsemtrace(sem, "Leaving __rwsem_do_wake"); | ||
131 | return sem; | 137 | return sem; |
132 | 138 | ||
133 | /* undo the change to count, but check for a transition 1->0 */ | 139 | /* undo the change to count, but check for a transition 1->0 */ |
@@ -186,13 +192,9 @@ rwsem_down_read_failed(struct rw_semaphore *sem) | |||
186 | { | 192 | { |
187 | struct rwsem_waiter waiter; | 193 | struct rwsem_waiter waiter; |
188 | 194 | ||
189 | rwsemtrace(sem, "Entering rwsem_down_read_failed"); | ||
190 | |||
191 | waiter.flags = RWSEM_WAITING_FOR_READ; | 195 | waiter.flags = RWSEM_WAITING_FOR_READ; |
192 | rwsem_down_failed_common(sem, &waiter, | 196 | rwsem_down_failed_common(sem, &waiter, |
193 | RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS); | 197 | RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS); |
194 | |||
195 | rwsemtrace(sem, "Leaving rwsem_down_read_failed"); | ||
196 | return sem; | 198 | return sem; |
197 | } | 199 | } |
198 | 200 | ||
@@ -204,12 +206,9 @@ rwsem_down_write_failed(struct rw_semaphore *sem) | |||
204 | { | 206 | { |
205 | struct rwsem_waiter waiter; | 207 | struct rwsem_waiter waiter; |
206 | 208 | ||
207 | rwsemtrace(sem, "Entering rwsem_down_write_failed"); | ||
208 | |||
209 | waiter.flags = RWSEM_WAITING_FOR_WRITE; | 209 | waiter.flags = RWSEM_WAITING_FOR_WRITE; |
210 | rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS); | 210 | rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS); |
211 | 211 | ||
212 | rwsemtrace(sem, "Leaving rwsem_down_write_failed"); | ||
213 | return sem; | 212 | return sem; |
214 | } | 213 | } |
215 | 214 | ||
@@ -221,8 +220,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem) | |||
221 | { | 220 | { |
222 | unsigned long flags; | 221 | unsigned long flags; |
223 | 222 | ||
224 | rwsemtrace(sem, "Entering rwsem_wake"); | ||
225 | |||
226 | spin_lock_irqsave(&sem->wait_lock, flags); | 223 | spin_lock_irqsave(&sem->wait_lock, flags); |
227 | 224 | ||
228 | /* do nothing if list empty */ | 225 | /* do nothing if list empty */ |
@@ -231,8 +228,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem) | |||
231 | 228 | ||
232 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 229 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
233 | 230 | ||
234 | rwsemtrace(sem, "Leaving rwsem_wake"); | ||
235 | |||
236 | return sem; | 231 | return sem; |
237 | } | 232 | } |
238 | 233 | ||
@@ -245,8 +240,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem) | |||
245 | { | 240 | { |
246 | unsigned long flags; | 241 | unsigned long flags; |
247 | 242 | ||
248 | rwsemtrace(sem, "Entering rwsem_downgrade_wake"); | ||
249 | |||
250 | spin_lock_irqsave(&sem->wait_lock, flags); | 243 | spin_lock_irqsave(&sem->wait_lock, flags); |
251 | 244 | ||
252 | /* do nothing if list empty */ | 245 | /* do nothing if list empty */ |
@@ -255,7 +248,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem) | |||
255 | 248 | ||
256 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 249 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
257 | 250 | ||
258 | rwsemtrace(sem, "Leaving rwsem_downgrade_wake"); | ||
259 | return sem; | 251 | return sem; |
260 | } | 252 | } |
261 | 253 | ||
@@ -263,6 +255,3 @@ EXPORT_SYMBOL(rwsem_down_read_failed); | |||
263 | EXPORT_SYMBOL(rwsem_down_write_failed); | 255 | EXPORT_SYMBOL(rwsem_down_write_failed); |
264 | EXPORT_SYMBOL(rwsem_wake); | 256 | EXPORT_SYMBOL(rwsem_wake); |
265 | EXPORT_SYMBOL(rwsem_downgrade_wake); | 257 | EXPORT_SYMBOL(rwsem_downgrade_wake); |
266 | #if RWSEM_DEBUG | ||
267 | EXPORT_SYMBOL(rwsemtrace); | ||
268 | #endif | ||
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 93c15ee3f8ea..3d9c4dc965ed 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c | |||
@@ -8,38 +8,71 @@ | |||
8 | 8 | ||
9 | #include <linux/spinlock.h> | 9 | #include <linux/spinlock.h> |
10 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
11 | #include <linux/debug_locks.h> | ||
11 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
13 | #include <linux/module.h> | ||
14 | |||
15 | void __spin_lock_init(spinlock_t *lock, const char *name, | ||
16 | struct lock_class_key *key) | ||
17 | { | ||
18 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
19 | /* | ||
20 | * Make sure we are not reinitializing a held lock: | ||
21 | */ | ||
22 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); | ||
23 | lockdep_init_map(&lock->dep_map, name, key); | ||
24 | #endif | ||
25 | lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
26 | lock->magic = SPINLOCK_MAGIC; | ||
27 | lock->owner = SPINLOCK_OWNER_INIT; | ||
28 | lock->owner_cpu = -1; | ||
29 | } | ||
30 | |||
31 | EXPORT_SYMBOL(__spin_lock_init); | ||
32 | |||
33 | void __rwlock_init(rwlock_t *lock, const char *name, | ||
34 | struct lock_class_key *key) | ||
35 | { | ||
36 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
37 | /* | ||
38 | * Make sure we are not reinitializing a held lock: | ||
39 | */ | ||
40 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); | ||
41 | lockdep_init_map(&lock->dep_map, name, key); | ||
42 | #endif | ||
43 | lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; | ||
44 | lock->magic = RWLOCK_MAGIC; | ||
45 | lock->owner = SPINLOCK_OWNER_INIT; | ||
46 | lock->owner_cpu = -1; | ||
47 | } | ||
48 | |||
49 | EXPORT_SYMBOL(__rwlock_init); | ||
12 | 50 | ||
13 | static void spin_bug(spinlock_t *lock, const char *msg) | 51 | static void spin_bug(spinlock_t *lock, const char *msg) |
14 | { | 52 | { |
15 | static long print_once = 1; | ||
16 | struct task_struct *owner = NULL; | 53 | struct task_struct *owner = NULL; |
17 | 54 | ||
18 | if (xchg(&print_once, 0)) { | 55 | if (!debug_locks_off()) |
19 | if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) | 56 | return; |
20 | owner = lock->owner; | 57 | |
21 | printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", | 58 | if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) |
22 | msg, raw_smp_processor_id(), | 59 | owner = lock->owner; |
23 | current->comm, current->pid); | 60 | printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", |
24 | printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, " | 61 | msg, raw_smp_processor_id(), |
25 | ".owner_cpu: %d\n", | 62 | current->comm, current->pid); |
26 | lock, lock->magic, | 63 | printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, " |
27 | owner ? owner->comm : "<none>", | 64 | ".owner_cpu: %d\n", |
28 | owner ? owner->pid : -1, | 65 | lock, lock->magic, |
29 | lock->owner_cpu); | 66 | owner ? owner->comm : "<none>", |
30 | dump_stack(); | 67 | owner ? owner->pid : -1, |
31 | #ifdef CONFIG_SMP | 68 | lock->owner_cpu); |
32 | /* | 69 | dump_stack(); |
33 | * We cannot continue on SMP: | ||
34 | */ | ||
35 | // panic("bad locking"); | ||
36 | #endif | ||
37 | } | ||
38 | } | 70 | } |
39 | 71 | ||
40 | #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) | 72 | #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) |
41 | 73 | ||
42 | static inline void debug_spin_lock_before(spinlock_t *lock) | 74 | static inline void |
75 | debug_spin_lock_before(spinlock_t *lock) | ||
43 | { | 76 | { |
44 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); | 77 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); |
45 | SPIN_BUG_ON(lock->owner == current, lock, "recursion"); | 78 | SPIN_BUG_ON(lock->owner == current, lock, "recursion"); |
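__spin_lock_init() and __rwlock_init() at the top of this hunk play the same role for spinlocks and rwlocks that __init_rwsem() plays for semaphores. A hedged sketch of how they are reached: statically defined locks are set up by their initializer, while dynamically allocated ones are assumed to go through a spin_lock_init()/rwlock_init() wrapper that passes a per-call-site key (the wrapper expansion and the demo_dev structure are not part of this patch):

```c
/* statically allocated lock: set up by its initializer */
static DEFINE_SPINLOCK(demo_static_lock);

/* dynamically allocated lock: classified by its spin_lock_init() call site */
struct demo_dev {
	spinlock_t lock;
};

static void demo_dev_setup(struct demo_dev *dev)
{
	/* assumed to expand to __spin_lock_init(&dev->lock, "&dev->lock", &key)
	 * with a static lock_class_key private to this call site */
	spin_lock_init(&dev->lock);
}
```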
@@ -118,20 +151,13 @@ void _raw_spin_unlock(spinlock_t *lock) | |||
118 | 151 | ||
119 | static void rwlock_bug(rwlock_t *lock, const char *msg) | 152 | static void rwlock_bug(rwlock_t *lock, const char *msg) |
120 | { | 153 | { |
121 | static long print_once = 1; | 154 | if (!debug_locks_off()) |
122 | 155 | return; | |
123 | if (xchg(&print_once, 0)) { | 156 | |
124 | printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n", | 157 | printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n", |
125 | msg, raw_smp_processor_id(), current->comm, | 158 | msg, raw_smp_processor_id(), current->comm, |
126 | current->pid, lock); | 159 | current->pid, lock); |
127 | dump_stack(); | 160 | dump_stack(); |
128 | #ifdef CONFIG_SMP | ||
129 | /* | ||
130 | * We cannot continue on SMP: | ||
131 | */ | ||
132 | panic("bad locking"); | ||
133 | #endif | ||
134 | } | ||
135 | } | 161 | } |
136 | 162 | ||
137 | #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) | 163 | #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) |
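Both spin_bug() and rwlock_bug() now funnel through debug_locks_off() instead of a per-function print_once latch, so the first bug anywhere switches off all further lock debugging and reporting. A runnable userspace sketch of that one-shot pattern (the demo_* names and C11 atomics are stand-ins, not the kernel implementation):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int demo_debug_locks = 1;

/* returns nonzero only for the caller that actually switched debugging off */
static int demo_debug_locks_off(void)
{
	return atomic_exchange(&demo_debug_locks, 0);
}

static void demo_bug(const char *msg)
{
	if (!demo_debug_locks_off())
		return;		/* a bug was already reported; stay quiet */
	fprintf(stderr, "BUG: %s\n", msg);
}

int main(void)
{
	demo_bug("first report prints");
	demo_bug("second report is suppressed");
	return 0;
}
```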