Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c	2702
1 files changed, 2702 insertions, 0 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
new file mode 100644
index 000000000000..f32ca78c198d
--- /dev/null
+++ b/kernel/lockdep.c
@@ -0,0 +1,2702 @@
1 | /* | ||
2 | * kernel/lockdep.c | ||
3 | * | ||
4 | * Runtime locking correctness validator | ||
5 | * | ||
6 | * Started by Ingo Molnar: | ||
7 | * | ||
8 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
9 | * | ||
10 | * this code maps all the lock dependencies as they occur in a live kernel | ||
11 | * and will warn about the following classes of locking bugs: | ||
12 | * | ||
13 | * - lock inversion scenarios | ||
14 | * - circular lock dependencies | ||
15 | * - hardirq/softirq safe/unsafe locking bugs | ||
16 | * | ||
17 | * Bugs are reported even if the current locking scenario does not cause | ||
18 | * any deadlock at this point. | ||
19 | * | ||
20 | * I.e. if at any time in the past two locks were taken in a different order, | ||
21 | * even if it happened for another task, even if those were different | ||
22 | * locks (but of the same class as this lock), this code will detect it. | ||
23 | * | ||
24 | * Thanks to Arjan van de Ven for coming up with the initial idea of | ||
25 | * mapping lock dependencies at runtime. | ||
26 | */ | ||
27 | #include <linux/mutex.h> | ||
28 | #include <linux/sched.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/proc_fs.h> | ||
32 | #include <linux/seq_file.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/kallsyms.h> | ||
35 | #include <linux/interrupt.h> | ||
36 | #include <linux/stacktrace.h> | ||
37 | #include <linux/debug_locks.h> | ||
38 | #include <linux/irqflags.h> | ||
39 | |||
40 | #include <asm/sections.h> | ||
41 | |||
42 | #include "lockdep_internals.h" | ||
43 | |||
44 | /* | ||
45 | * hash_lock: protects the lockdep hashes and class/list/hash allocators. | ||
46 | * | ||
47 | * This is one of the rare exceptions where it's justified | ||
48 | * to use a raw spinlock - we really don't want the spinlock | ||
49 | * code to recurse back into the lockdep code. | ||
50 | */ | ||
51 | static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
52 | |||
53 | static int lockdep_initialized; | ||
54 | |||
55 | unsigned long nr_list_entries; | ||
56 | static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; | ||
57 | |||
58 | /* | ||
59 | * Allocate a lockdep entry. (assumes hash_lock is held; returns | ||
60 | * NULL on failure) | ||
61 | */ | ||
62 | static struct lock_list *alloc_list_entry(void) | ||
63 | { | ||
64 | if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) { | ||
65 | __raw_spin_unlock(&hash_lock); | ||
66 | debug_locks_off(); | ||
67 | printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n"); | ||
68 | printk("turning off the locking correctness validator.\n"); | ||
69 | return NULL; | ||
70 | } | ||
71 | return list_entries + nr_list_entries++; | ||
72 | } | ||
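alloc_list_entry() never frees anything: dependency structs are handed out in order from a static array while hash_lock is held, and exhausting the pool simply turns the validator off. A minimal userspace sketch of the same fixed-pool pattern (names and sizes below are illustrative, not the kernel's):

#include <stdio.h>

#define MAX_ENTRIES	8

struct entry { int payload; };

static struct entry pool[MAX_ENTRIES];
static unsigned long nr_entries;

/* the caller is assumed to hold whatever lock protects nr_entries */
static struct entry *alloc_entry(void)
{
	if (nr_entries >= MAX_ENTRIES)
		return NULL;		/* pool exhausted - give up */
	return pool + nr_entries++;
}

int main(void)
{
	struct entry *e = alloc_entry();

	if (e) {
		e->payload = 1;
		printf("got entry #%lu\n", nr_entries - 1);
	}
	return 0;
}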
73 | |||
74 | /* | ||
75 | * All data structures here are protected by the global debug_lock. | ||
76 | * | ||
77 | * Mutex key structs only get allocated once, during bootup, and never | ||
78 | * get freed - this significantly simplifies the debugging code. | ||
79 | */ | ||
80 | unsigned long nr_lock_classes; | ||
81 | static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; | ||
82 | |||
83 | /* | ||
84 | * We keep a global list of all lock classes. The list only grows, | ||
85 | * never shrinks. The list is only accessed with the lockdep | ||
86 | * spinlock held. | ||
87 | */ | ||
88 | LIST_HEAD(all_lock_classes); | ||
89 | |||
90 | /* | ||
91 | * The lockdep classes are in a hash-table as well, for fast lookup: | ||
92 | */ | ||
93 | #define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1) | ||
94 | #define CLASSHASH_SIZE (1UL << CLASSHASH_BITS) | ||
95 | #define CLASSHASH_MASK (CLASSHASH_SIZE - 1) | ||
96 | #define __classhashfn(key) ((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK) | ||
97 | #define classhashentry(key) (classhash_table + __classhashfn((key))) | ||
98 | |||
99 | static struct list_head classhash_table[CLASSHASH_SIZE]; | ||
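__classhashfn() folds the upper bits of the class-key pointer onto its lower bits so that nearby static addresses still spread across the buckets. A standalone sketch of that computation; CLASSHASH_BITS is assumed to be 10 here purely for illustration (the real value is derived from MAX_LOCKDEP_KEYS_BITS in lockdep_internals.h):

#include <stdio.h>

#define CLASSHASH_BITS	10
#define CLASSHASH_SIZE	(1UL << CLASSHASH_BITS)
#define CLASSHASH_MASK	(CLASSHASH_SIZE - 1)

static unsigned long classhashfn(const void *key)
{
	unsigned long k = (unsigned long)key;

	/* fold the high bits of the pointer onto the low bits */
	return ((k >> CLASSHASH_BITS) + k) & CLASSHASH_MASK;
}

int main(void)
{
	static int some_static_key;

	printf("bucket %lu of %lu\n",
	       classhashfn(&some_static_key), CLASSHASH_SIZE);
	return 0;
}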
100 | |||
101 | unsigned long nr_lock_chains; | ||
102 | static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; | ||
103 | |||
104 | /* | ||
105 | * We put the lock dependency chains into a hash-table as well, to cache | ||
106 | * their existence: | ||
107 | */ | ||
108 | #define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1) | ||
109 | #define CHAINHASH_SIZE (1UL << CHAINHASH_BITS) | ||
110 | #define CHAINHASH_MASK (CHAINHASH_SIZE - 1) | ||
111 | #define __chainhashfn(chain) \ | ||
112 | (((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK) | ||
113 | #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain))) | ||
114 | |||
115 | static struct list_head chainhash_table[CHAINHASH_SIZE]; | ||
116 | |||
117 | /* | ||
118 | * The hash key of the lock dependency chains is a hash itself too: | ||
119 | * it's a hash of all locks taken up to that lock, including that lock. | ||
120 | * It's a 64-bit hash, because it's important for the keys to be | ||
121 | * unique. | ||
122 | */ | ||
123 | #define iterate_chain_key(key1, key2) \ | ||
124 | (((key1) << MAX_LOCKDEP_KEYS_BITS/2) ^ \ | ||
125 | ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS/2)) ^ \ | ||
126 | (key2)) | ||
127 | |||
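The chain key is built incrementally: every newly acquired lock folds its class index into the running 64-bit hash via iterate_chain_key(). A userspace sketch of that folding; MAX_LOCKDEP_KEYS_BITS is assumed to be 11 here only for illustration (the real value comes from lockdep_internals.h):

#include <stdio.h>
#include <stdint.h>

#define KEYS_BITS	11	/* stands in for MAX_LOCKDEP_KEYS_BITS */

static uint64_t iterate_chain_key(uint64_t key1, uint64_t key2)
{
	return (key1 << (KEYS_BITS / 2)) ^
	       (key1 >> (64 - KEYS_BITS / 2)) ^
	       key2;
}

int main(void)
{
	/* pretend this context acquired lock classes #3, #17 and #42, in order */
	unsigned int class_ids[] = { 3, 17, 42 };
	uint64_t chain_key = 0;
	unsigned int i;

	for (i = 0; i < 3; i++)
		chain_key = iterate_chain_key(chain_key, class_ids[i]);
	printf("chain key: %016llx\n", (unsigned long long)chain_key);
	return 0;
}
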
128 | void lockdep_off(void) | ||
129 | { | ||
130 | current->lockdep_recursion++; | ||
131 | } | ||
132 | |||
133 | EXPORT_SYMBOL(lockdep_off); | ||
134 | |||
135 | void lockdep_on(void) | ||
136 | { | ||
137 | current->lockdep_recursion--; | ||
138 | } | ||
139 | |||
140 | EXPORT_SYMBOL(lockdep_on); | ||
141 | |||
142 | int lockdep_internal(void) | ||
143 | { | ||
144 | return current->lockdep_recursion != 0; | ||
145 | } | ||
146 | |||
147 | EXPORT_SYMBOL(lockdep_internal); | ||
148 | |||
149 | /* | ||
150 | * Debugging switches: | ||
151 | */ | ||
152 | |||
153 | #define VERBOSE 0 | ||
154 | #ifdef VERBOSE | ||
155 | # define VERY_VERBOSE 0 | ||
156 | #endif | ||
157 | |||
158 | #if VERBOSE | ||
159 | # define HARDIRQ_VERBOSE 1 | ||
160 | # define SOFTIRQ_VERBOSE 1 | ||
161 | #else | ||
162 | # define HARDIRQ_VERBOSE 0 | ||
163 | # define SOFTIRQ_VERBOSE 0 | ||
164 | #endif | ||
165 | |||
166 | #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE | ||
167 | /* | ||
168 | * Quick filtering for interesting events: | ||
169 | */ | ||
170 | static int class_filter(struct lock_class *class) | ||
171 | { | ||
172 | if (class->name_version == 1 && | ||
173 | !strcmp(class->name, "&rl->lock")) | ||
174 | return 1; | ||
175 | if (class->name_version == 1 && | ||
176 | !strcmp(class->name, "&ni->mrec_lock")) | ||
177 | return 1; | ||
178 | if (class->name_version == 1 && | ||
179 | !strcmp(class->name, "mft_ni_runlist_lock")) | ||
180 | return 1; | ||
181 | if (class->name_version == 1 && | ||
182 | !strcmp(class->name, "mft_ni_mrec_lock")) | ||
183 | return 1; | ||
184 | if (class->name_version == 1 && | ||
185 | !strcmp(class->name, "&vol->lcnbmp_lock")) | ||
186 | return 1; | ||
187 | return 0; | ||
188 | } | ||
189 | #endif | ||
190 | |||
191 | static int verbose(struct lock_class *class) | ||
192 | { | ||
193 | #if VERBOSE | ||
194 | return class_filter(class); | ||
195 | #endif | ||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
200 | |||
201 | static int hardirq_verbose(struct lock_class *class) | ||
202 | { | ||
203 | #if HARDIRQ_VERBOSE | ||
204 | return class_filter(class); | ||
205 | #endif | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static int softirq_verbose(struct lock_class *class) | ||
210 | { | ||
211 | #if SOFTIRQ_VERBOSE | ||
212 | return class_filter(class); | ||
213 | #endif | ||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | #endif | ||
218 | |||
219 | /* | ||
220 | * Stack-trace: tightly packed array of stack backtrace | ||
221 | * addresses. Protected by the hash_lock. | ||
222 | */ | ||
223 | unsigned long nr_stack_trace_entries; | ||
224 | static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES]; | ||
225 | |||
226 | static int save_trace(struct stack_trace *trace) | ||
227 | { | ||
228 | trace->nr_entries = 0; | ||
229 | trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; | ||
230 | trace->entries = stack_trace + nr_stack_trace_entries; | ||
231 | |||
232 | save_stack_trace(trace, NULL, 0, 3); | ||
233 | |||
234 | trace->max_entries = trace->nr_entries; | ||
235 | |||
236 | nr_stack_trace_entries += trace->nr_entries; | ||
237 | if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) | ||
238 | return 0; | ||
239 | |||
240 | if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) { | ||
241 | __raw_spin_unlock(&hash_lock); | ||
242 | if (debug_locks_off()) { | ||
243 | printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n"); | ||
244 | printk("turning off the locking correctness validator.\n"); | ||
245 | dump_stack(); | ||
246 | } | ||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | return 1; | ||
251 | } | ||
252 | |||
253 | unsigned int nr_hardirq_chains; | ||
254 | unsigned int nr_softirq_chains; | ||
255 | unsigned int nr_process_chains; | ||
256 | unsigned int max_lockdep_depth; | ||
257 | unsigned int max_recursion_depth; | ||
258 | |||
259 | #ifdef CONFIG_DEBUG_LOCKDEP | ||
260 | /* | ||
261 | * We cannot printk in early bootup code. Even early_printk() | ||
262 | * might not work. So we mark any initialization errors and printk | ||
263 | * about them later on, in lockdep_info(). | ||
264 | */ | ||
265 | static int lockdep_init_error; | ||
266 | |||
267 | /* | ||
268 | * Various lockdep statistics: | ||
269 | */ | ||
270 | atomic_t chain_lookup_hits; | ||
271 | atomic_t chain_lookup_misses; | ||
272 | atomic_t hardirqs_on_events; | ||
273 | atomic_t hardirqs_off_events; | ||
274 | atomic_t redundant_hardirqs_on; | ||
275 | atomic_t redundant_hardirqs_off; | ||
276 | atomic_t softirqs_on_events; | ||
277 | atomic_t softirqs_off_events; | ||
278 | atomic_t redundant_softirqs_on; | ||
279 | atomic_t redundant_softirqs_off; | ||
280 | atomic_t nr_unused_locks; | ||
281 | atomic_t nr_cyclic_checks; | ||
282 | atomic_t nr_cyclic_check_recursions; | ||
283 | atomic_t nr_find_usage_forwards_checks; | ||
284 | atomic_t nr_find_usage_forwards_recursions; | ||
285 | atomic_t nr_find_usage_backwards_checks; | ||
286 | atomic_t nr_find_usage_backwards_recursions; | ||
287 | # define debug_atomic_inc(ptr) atomic_inc(ptr) | ||
288 | # define debug_atomic_dec(ptr) atomic_dec(ptr) | ||
289 | # define debug_atomic_read(ptr) atomic_read(ptr) | ||
290 | #else | ||
291 | # define debug_atomic_inc(ptr) do { } while (0) | ||
292 | # define debug_atomic_dec(ptr) do { } while (0) | ||
293 | # define debug_atomic_read(ptr) 0 | ||
294 | #endif | ||
295 | |||
296 | /* | ||
297 | * Locking printouts: | ||
298 | */ | ||
299 | |||
300 | static const char *usage_str[] = | ||
301 | { | ||
302 | [LOCK_USED] = "initial-use ", | ||
303 | [LOCK_USED_IN_HARDIRQ] = "in-hardirq-W", | ||
304 | [LOCK_USED_IN_SOFTIRQ] = "in-softirq-W", | ||
305 | [LOCK_ENABLED_SOFTIRQS] = "softirq-on-W", | ||
306 | [LOCK_ENABLED_HARDIRQS] = "hardirq-on-W", | ||
307 | [LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R", | ||
308 | [LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R", | ||
309 | [LOCK_ENABLED_SOFTIRQS_READ] = "softirq-on-R", | ||
310 | [LOCK_ENABLED_HARDIRQS_READ] = "hardirq-on-R", | ||
311 | }; | ||
312 | |||
313 | const char * __get_key_name(struct lockdep_subclass_key *key, char *str) | ||
314 | { | ||
315 | unsigned long offs, size; | ||
316 | char *modname; | ||
317 | |||
318 | return kallsyms_lookup((unsigned long)key, &size, &offs, &modname, str); | ||
319 | } | ||
320 | |||
321 | void | ||
322 | get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4) | ||
323 | { | ||
324 | *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.'; | ||
325 | |||
326 | if (class->usage_mask & LOCKF_USED_IN_HARDIRQ) | ||
327 | *c1 = '+'; | ||
328 | else | ||
329 | if (class->usage_mask & LOCKF_ENABLED_HARDIRQS) | ||
330 | *c1 = '-'; | ||
331 | |||
332 | if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ) | ||
333 | *c2 = '+'; | ||
334 | else | ||
335 | if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS) | ||
336 | *c2 = '-'; | ||
337 | |||
338 | if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) | ||
339 | *c3 = '-'; | ||
340 | if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) { | ||
341 | *c3 = '+'; | ||
342 | if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) | ||
343 | *c3 = '?'; | ||
344 | } | ||
345 | |||
346 | if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ) | ||
347 | *c4 = '-'; | ||
348 | if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) { | ||
349 | *c4 = '+'; | ||
350 | if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ) | ||
351 | *c4 = '?'; | ||
352 | } | ||
353 | } | ||
354 | |||
355 | static void print_lock_name(struct lock_class *class) | ||
356 | { | ||
357 | char str[128], c1, c2, c3, c4; | ||
358 | const char *name; | ||
359 | |||
360 | get_usage_chars(class, &c1, &c2, &c3, &c4); | ||
361 | |||
362 | name = class->name; | ||
363 | if (!name) { | ||
364 | name = __get_key_name(class->key, str); | ||
365 | printk(" (%s", name); | ||
366 | } else { | ||
367 | printk(" (%s", name); | ||
368 | if (class->name_version > 1) | ||
369 | printk("#%d", class->name_version); | ||
370 | if (class->subclass) | ||
371 | printk("/%d", class->subclass); | ||
372 | } | ||
373 | printk("){%c%c%c%c}", c1, c2, c3, c4); | ||
374 | } | ||
375 | |||
376 | static void print_lockdep_cache(struct lockdep_map *lock) | ||
377 | { | ||
378 | const char *name; | ||
379 | char str[128]; | ||
380 | |||
381 | name = lock->name; | ||
382 | if (!name) | ||
383 | name = __get_key_name(lock->key->subkeys, str); | ||
384 | |||
385 | printk("%s", name); | ||
386 | } | ||
387 | |||
388 | static void print_lock(struct held_lock *hlock) | ||
389 | { | ||
390 | print_lock_name(hlock->class); | ||
391 | printk(", at: "); | ||
392 | print_ip_sym(hlock->acquire_ip); | ||
393 | } | ||
394 | |||
395 | static void lockdep_print_held_locks(struct task_struct *curr) | ||
396 | { | ||
397 | int i, depth = curr->lockdep_depth; | ||
398 | |||
399 | if (!depth) { | ||
400 | printk("no locks held by %s/%d.\n", curr->comm, curr->pid); | ||
401 | return; | ||
402 | } | ||
403 | printk("%d lock%s held by %s/%d:\n", | ||
404 | depth, depth > 1 ? "s" : "", curr->comm, curr->pid); | ||
405 | |||
406 | for (i = 0; i < depth; i++) { | ||
407 | printk(" #%d: ", i); | ||
408 | print_lock(curr->held_locks + i); | ||
409 | } | ||
410 | } | ||
411 | /* | ||
412 | * Helper to print a nice hierarchy of lock dependencies: | ||
413 | */ | ||
414 | static void print_spaces(int nr) | ||
415 | { | ||
416 | int i; | ||
417 | |||
418 | for (i = 0; i < nr; i++) | ||
419 | printk(" "); | ||
420 | } | ||
421 | |||
422 | static void print_lock_class_header(struct lock_class *class, int depth) | ||
423 | { | ||
424 | int bit; | ||
425 | |||
426 | print_spaces(depth); | ||
427 | printk("->"); | ||
428 | print_lock_name(class); | ||
429 | printk(" ops: %lu", class->ops); | ||
430 | printk(" {\n"); | ||
431 | |||
432 | for (bit = 0; bit < LOCK_USAGE_STATES; bit++) { | ||
433 | if (class->usage_mask & (1 << bit)) { | ||
434 | int len = depth; | ||
435 | |||
436 | print_spaces(depth); | ||
437 | len += printk(" %s", usage_str[bit]); | ||
438 | len += printk(" at:\n"); | ||
439 | print_stack_trace(class->usage_traces + bit, len); | ||
440 | } | ||
441 | } | ||
442 | print_spaces(depth); | ||
443 | printk(" }\n"); | ||
444 | |||
445 | print_spaces(depth); | ||
446 | printk(" ... key at: "); | ||
447 | print_ip_sym((unsigned long)class->key); | ||
448 | } | ||
449 | |||
450 | /* | ||
451 | * printk all lock dependencies starting at <entry>: | ||
452 | */ | ||
453 | static void print_lock_dependencies(struct lock_class *class, int depth) | ||
454 | { | ||
455 | struct lock_list *entry; | ||
456 | |||
457 | if (DEBUG_LOCKS_WARN_ON(depth >= 20)) | ||
458 | return; | ||
459 | |||
460 | print_lock_class_header(class, depth); | ||
461 | |||
462 | list_for_each_entry(entry, &class->locks_after, entry) { | ||
463 | DEBUG_LOCKS_WARN_ON(!entry->class); | ||
464 | print_lock_dependencies(entry->class, depth + 1); | ||
465 | |||
466 | print_spaces(depth); | ||
467 | printk(" ... acquired at:\n"); | ||
468 | print_stack_trace(&entry->trace, 2); | ||
469 | printk("\n"); | ||
470 | } | ||
471 | } | ||
472 | |||
473 | /* | ||
474 | * Add a new dependency to the head of the list: | ||
475 | */ | ||
476 | static int add_lock_to_list(struct lock_class *class, struct lock_class *this, | ||
477 | struct list_head *head, unsigned long ip) | ||
478 | { | ||
479 | struct lock_list *entry; | ||
480 | /* | ||
481 | * Lock not present yet - get a new dependency struct and | ||
482 | * add it to the list: | ||
483 | */ | ||
484 | entry = alloc_list_entry(); | ||
485 | if (!entry) | ||
486 | return 0; | ||
487 | |||
488 | entry->class = this; | ||
489 | save_trace(&entry->trace); | ||
490 | |||
491 | /* | ||
492 | * Since we never remove from the dependency list, the list can | ||
493 | * be walked locklessly by other CPUs; it's only the allocation | ||
494 | * that must be protected by the spinlock. But this also means | ||
495 | * we must make new entries visible only once writes to the | ||
496 | * entry become visible - hence the RCU op: | ||
497 | */ | ||
498 | list_add_tail_rcu(&entry->entry, head); | ||
499 | |||
500 | return 1; | ||
501 | } | ||
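The comment above depends on list_add_tail_rcu() acting as a publish operation: the entry is fully written first and only then linked, so lock-free readers never see a half-built element. A minimal userspace analogue of that publish ordering using a C11 release store (illustrative only, not the kernel's RCU API):

#include <stdatomic.h>
#include <stdio.h>

struct node { int data; struct node *next; };

static _Atomic(struct node *) head;

static void publish(struct node *n, int data)
{
	n->data = data;
	n->next = NULL;
	/* the writes above become visible before the pointer itself does */
	atomic_store_explicit(&head, n, memory_order_release);
}

int main(void)
{
	static struct node n;
	struct node *r;

	publish(&n, 42);
	r = atomic_load_explicit(&head, memory_order_acquire);
	if (r)
		printf("reader sees %d\n", r->data);
	return 0;
}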
502 | |||
503 | /* | ||
504 | * Recursive, forwards-direction lock-dependency checking, used for | ||
505 | * both noncyclic checking and for hardirq-unsafe/softirq-unsafe | ||
506 | * checking. | ||
507 | * | ||
508 | * (to keep the stackframe of the recursive functions small we | ||
509 | * use these global variables, and we also mark various helper | ||
510 | * functions as noinline.) | ||
511 | */ | ||
512 | static struct held_lock *check_source, *check_target; | ||
513 | |||
514 | /* | ||
515 | * Print a dependency chain entry (this is only done when a deadlock | ||
516 | * has been detected): | ||
517 | */ | ||
518 | static noinline int | ||
519 | print_circular_bug_entry(struct lock_list *target, unsigned int depth) | ||
520 | { | ||
521 | if (debug_locks_silent) | ||
522 | return 0; | ||
523 | printk("\n-> #%u", depth); | ||
524 | print_lock_name(target->class); | ||
525 | printk(":\n"); | ||
526 | print_stack_trace(&target->trace, 6); | ||
527 | |||
528 | return 0; | ||
529 | } | ||
530 | |||
531 | /* | ||
532 | * When a circular dependency is detected, print the | ||
533 | * header first: | ||
534 | */ | ||
535 | static noinline int | ||
536 | print_circular_bug_header(struct lock_list *entry, unsigned int depth) | ||
537 | { | ||
538 | struct task_struct *curr = current; | ||
539 | |||
540 | __raw_spin_unlock(&hash_lock); | ||
541 | debug_locks_off(); | ||
542 | if (debug_locks_silent) | ||
543 | return 0; | ||
544 | |||
545 | printk("\n=======================================================\n"); | ||
546 | printk( "[ INFO: possible circular locking dependency detected ]\n"); | ||
547 | printk( "-------------------------------------------------------\n"); | ||
548 | printk("%s/%d is trying to acquire lock:\n", | ||
549 | curr->comm, curr->pid); | ||
550 | print_lock(check_source); | ||
551 | printk("\nbut task is already holding lock:\n"); | ||
552 | print_lock(check_target); | ||
553 | printk("\nwhich lock already depends on the new lock.\n\n"); | ||
554 | printk("\nthe existing dependency chain (in reverse order) is:\n"); | ||
555 | |||
556 | print_circular_bug_entry(entry, depth); | ||
557 | |||
558 | return 0; | ||
559 | } | ||
560 | |||
561 | static noinline int print_circular_bug_tail(void) | ||
562 | { | ||
563 | struct task_struct *curr = current; | ||
564 | struct lock_list this; | ||
565 | |||
566 | if (debug_locks_silent) | ||
567 | return 0; | ||
568 | |||
569 | this.class = check_source->class; | ||
570 | save_trace(&this.trace); | ||
571 | print_circular_bug_entry(&this, 0); | ||
572 | |||
573 | printk("\nother info that might help us debug this:\n\n"); | ||
574 | lockdep_print_held_locks(curr); | ||
575 | |||
576 | printk("\nstack backtrace:\n"); | ||
577 | dump_stack(); | ||
578 | |||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | static int noinline print_infinite_recursion_bug(void) | ||
583 | { | ||
584 | __raw_spin_unlock(&hash_lock); | ||
585 | DEBUG_LOCKS_WARN_ON(1); | ||
586 | |||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | /* | ||
591 | * Prove that the dependency graph starting at <entry> can not | ||
592 | * lead to <target>. Print an error and return 0 if it does. | ||
593 | */ | ||
594 | static noinline int | ||
595 | check_noncircular(struct lock_class *source, unsigned int depth) | ||
596 | { | ||
597 | struct lock_list *entry; | ||
598 | |||
599 | debug_atomic_inc(&nr_cyclic_check_recursions); | ||
600 | if (depth > max_recursion_depth) | ||
601 | max_recursion_depth = depth; | ||
602 | if (depth >= 20) | ||
603 | return print_infinite_recursion_bug(); | ||
604 | /* | ||
605 | * Check this lock's dependency list: | ||
606 | */ | ||
607 | list_for_each_entry(entry, &source->locks_after, entry) { | ||
608 | if (entry->class == check_target->class) | ||
609 | return print_circular_bug_header(entry, depth+1); | ||
610 | debug_atomic_inc(&nr_cyclic_checks); | ||
611 | if (!check_noncircular(entry->class, depth+1)) | ||
612 | return print_circular_bug_entry(entry, depth+1); | ||
613 | } | ||
614 | return 1; | ||
615 | } | ||
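check_noncircular() is a depth-limited depth-first search over the locks_after lists, looking for the class we are about to depend on. A tiny standalone sketch of the same reachability test on a toy graph (the graph and the depth limit are illustrative):

#include <stdio.h>

#define NCLASSES	4
#define MAX_DEPTH	20

static int after[NCLASSES][NCLASSES];	/* after[a][b]: b was taken after a */

static int reachable(int source, int target, int depth)
{
	int next;

	if (depth >= MAX_DEPTH)
		return 0;	/* give up at excessive depth (the kernel reports a bug here) */
	if (source == target)
		return 1;
	for (next = 0; next < NCLASSES; next++)
		if (after[source][next] && reachable(next, target, depth + 1))
			return 1;
	return 0;
}

int main(void)
{
	after[0][1] = after[1][2] = 1;	/* existing dependencies: 0 -> 1 -> 2 */

	/* the new dependency 2 -> 0 closes a cycle iff 2 is reachable from 0 */
	printf("would 2 -> 0 create a cycle? %s\n",
	       reachable(0, 2, 0) ? "yes" : "no");
	return 0;
}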
616 | |||
617 | static int very_verbose(struct lock_class *class) | ||
618 | { | ||
619 | #if VERY_VERBOSE | ||
620 | return class_filter(class); | ||
621 | #endif | ||
622 | return 0; | ||
623 | } | ||
624 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
625 | |||
626 | /* | ||
627 | * Forwards and backwards subgraph searching, for the purposes of | ||
628 | * proving that two subgraphs can be connected by a new dependency | ||
629 | * without creating any illegal irq-safe -> irq-unsafe lock dependency. | ||
630 | */ | ||
631 | static enum lock_usage_bit find_usage_bit; | ||
632 | static struct lock_class *forwards_match, *backwards_match; | ||
633 | |||
634 | /* | ||
635 | * Find a node in the forwards-direction dependency sub-graph starting | ||
636 | * at <source> that matches <find_usage_bit>. | ||
637 | * | ||
638 | * Return 2 if such a node exists in the subgraph, and put that node | ||
639 | * into <forwards_match>. | ||
640 | * | ||
641 | * Return 1 otherwise and keep <forwards_match> unchanged. | ||
642 | * Return 0 on error. | ||
643 | */ | ||
644 | static noinline int | ||
645 | find_usage_forwards(struct lock_class *source, unsigned int depth) | ||
646 | { | ||
647 | struct lock_list *entry; | ||
648 | int ret; | ||
649 | |||
650 | if (depth > max_recursion_depth) | ||
651 | max_recursion_depth = depth; | ||
652 | if (depth >= 20) | ||
653 | return print_infinite_recursion_bug(); | ||
654 | |||
655 | debug_atomic_inc(&nr_find_usage_forwards_checks); | ||
656 | if (source->usage_mask & (1 << find_usage_bit)) { | ||
657 | forwards_match = source; | ||
658 | return 2; | ||
659 | } | ||
660 | |||
661 | /* | ||
662 | * Check this lock's dependency list: | ||
663 | */ | ||
664 | list_for_each_entry(entry, &source->locks_after, entry) { | ||
665 | debug_atomic_inc(&nr_find_usage_forwards_recursions); | ||
666 | ret = find_usage_forwards(entry->class, depth+1); | ||
667 | if (ret == 2 || ret == 0) | ||
668 | return ret; | ||
669 | } | ||
670 | return 1; | ||
671 | } | ||
672 | |||
673 | /* | ||
674 | * Find a node in the backwards-direction dependency sub-graph starting | ||
675 | * at <source> that matches <find_usage_bit>. | ||
676 | * | ||
677 | * Return 2 if such a node exists in the subgraph, and put that node | ||
678 | * into <backwards_match>. | ||
679 | * | ||
680 | * Return 1 otherwise and keep <backwards_match> unchanged. | ||
681 | * Return 0 on error. | ||
682 | */ | ||
683 | static noinline int | ||
684 | find_usage_backwards(struct lock_class *source, unsigned int depth) | ||
685 | { | ||
686 | struct lock_list *entry; | ||
687 | int ret; | ||
688 | |||
689 | if (depth > max_recursion_depth) | ||
690 | max_recursion_depth = depth; | ||
691 | if (depth >= 20) | ||
692 | return print_infinite_recursion_bug(); | ||
693 | |||
694 | debug_atomic_inc(&nr_find_usage_backwards_checks); | ||
695 | if (source->usage_mask & (1 << find_usage_bit)) { | ||
696 | backwards_match = source; | ||
697 | return 2; | ||
698 | } | ||
699 | |||
700 | /* | ||
701 | * Check this lock's dependency list: | ||
702 | */ | ||
703 | list_for_each_entry(entry, &source->locks_before, entry) { | ||
704 | debug_atomic_inc(&nr_find_usage_backwards_recursions); | ||
705 | ret = find_usage_backwards(entry->class, depth+1); | ||
706 | if (ret == 2 || ret == 0) | ||
707 | return ret; | ||
708 | } | ||
709 | return 1; | ||
710 | } | ||
711 | |||
712 | static int | ||
713 | print_bad_irq_dependency(struct task_struct *curr, | ||
714 | struct held_lock *prev, | ||
715 | struct held_lock *next, | ||
716 | enum lock_usage_bit bit1, | ||
717 | enum lock_usage_bit bit2, | ||
718 | const char *irqclass) | ||
719 | { | ||
720 | __raw_spin_unlock(&hash_lock); | ||
721 | debug_locks_off(); | ||
722 | if (debug_locks_silent) | ||
723 | return 0; | ||
724 | |||
725 | printk("\n======================================================\n"); | ||
726 | printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n", | ||
727 | irqclass, irqclass); | ||
728 | printk( "------------------------------------------------------\n"); | ||
729 | printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", | ||
730 | curr->comm, curr->pid, | ||
731 | curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, | ||
732 | curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, | ||
733 | curr->hardirqs_enabled, | ||
734 | curr->softirqs_enabled); | ||
735 | print_lock(next); | ||
736 | |||
737 | printk("\nand this task is already holding:\n"); | ||
738 | print_lock(prev); | ||
739 | printk("which would create a new lock dependency:\n"); | ||
740 | print_lock_name(prev->class); | ||
741 | printk(" ->"); | ||
742 | print_lock_name(next->class); | ||
743 | printk("\n"); | ||
744 | |||
745 | printk("\nbut this new dependency connects a %s-irq-safe lock:\n", | ||
746 | irqclass); | ||
747 | print_lock_name(backwards_match); | ||
748 | printk("\n... which became %s-irq-safe at:\n", irqclass); | ||
749 | |||
750 | print_stack_trace(backwards_match->usage_traces + bit1, 1); | ||
751 | |||
752 | printk("\nto a %s-irq-unsafe lock:\n", irqclass); | ||
753 | print_lock_name(forwards_match); | ||
754 | printk("\n... which became %s-irq-unsafe at:\n", irqclass); | ||
755 | printk("..."); | ||
756 | |||
757 | print_stack_trace(forwards_match->usage_traces + bit2, 1); | ||
758 | |||
759 | printk("\nother info that might help us debug this:\n\n"); | ||
760 | lockdep_print_held_locks(curr); | ||
761 | |||
762 | printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass); | ||
763 | print_lock_dependencies(backwards_match, 0); | ||
764 | |||
765 | printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass); | ||
766 | print_lock_dependencies(forwards_match, 0); | ||
767 | |||
768 | printk("\nstack backtrace:\n"); | ||
769 | dump_stack(); | ||
770 | |||
771 | return 0; | ||
772 | } | ||
773 | |||
774 | static int | ||
775 | check_usage(struct task_struct *curr, struct held_lock *prev, | ||
776 | struct held_lock *next, enum lock_usage_bit bit_backwards, | ||
777 | enum lock_usage_bit bit_forwards, const char *irqclass) | ||
778 | { | ||
779 | int ret; | ||
780 | |||
781 | find_usage_bit = bit_backwards; | ||
782 | /* fills in <backwards_match> */ | ||
783 | ret = find_usage_backwards(prev->class, 0); | ||
784 | if (!ret || ret == 1) | ||
785 | return ret; | ||
786 | |||
787 | find_usage_bit = bit_forwards; | ||
788 | ret = find_usage_forwards(next->class, 0); | ||
789 | if (!ret || ret == 1) | ||
790 | return ret; | ||
791 | /* ret == 2 */ | ||
792 | return print_bad_irq_dependency(curr, prev, next, | ||
793 | bit_backwards, bit_forwards, irqclass); | ||
794 | } | ||
795 | |||
796 | #endif | ||
797 | |||
798 | static int | ||
799 | print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, | ||
800 | struct held_lock *next) | ||
801 | { | ||
802 | debug_locks_off(); | ||
803 | __raw_spin_unlock(&hash_lock); | ||
804 | if (debug_locks_silent) | ||
805 | return 0; | ||
806 | |||
807 | printk("\n=============================================\n"); | ||
808 | printk( "[ INFO: possible recursive locking detected ]\n"); | ||
809 | printk( "---------------------------------------------\n"); | ||
810 | printk("%s/%d is trying to acquire lock:\n", | ||
811 | curr->comm, curr->pid); | ||
812 | print_lock(next); | ||
813 | printk("\nbut task is already holding lock:\n"); | ||
814 | print_lock(prev); | ||
815 | |||
816 | printk("\nother info that might help us debug this:\n"); | ||
817 | lockdep_print_held_locks(curr); | ||
818 | |||
819 | printk("\nstack backtrace:\n"); | ||
820 | dump_stack(); | ||
821 | |||
822 | return 0; | ||
823 | } | ||
824 | |||
825 | /* | ||
826 | * Check whether we are holding such a class already. | ||
827 | * | ||
828 | * (Note that this has to be done separately, because the graph cannot | ||
829 | * detect such classes of deadlocks.) | ||
830 | * | ||
831 | * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read | ||
832 | */ | ||
833 | static int | ||
834 | check_deadlock(struct task_struct *curr, struct held_lock *next, | ||
835 | struct lockdep_map *next_instance, int read) | ||
836 | { | ||
837 | struct held_lock *prev; | ||
838 | int i; | ||
839 | |||
840 | for (i = 0; i < curr->lockdep_depth; i++) { | ||
841 | prev = curr->held_locks + i; | ||
842 | if (prev->class != next->class) | ||
843 | continue; | ||
844 | /* | ||
845 | * Allow read-after-read recursion of the same | ||
846 | * lock class (i.e. read_lock(lock)+read_lock(lock)): | ||
847 | */ | ||
848 | if ((read == 2) && prev->read) | ||
849 | return 2; | ||
850 | return print_deadlock_bug(curr, prev, next); | ||
851 | } | ||
852 | return 1; | ||
853 | } | ||
854 | |||
855 | /* | ||
856 | * There was a chain-cache miss, and we are about to add a new dependency | ||
857 | * to a previous lock. We recursively validate the following rules: | ||
858 | * | ||
859 | * - would the adding of the <prev> -> <next> dependency create a | ||
860 | * circular dependency in the graph? [== circular deadlock] | ||
861 | * | ||
862 | * - does the new prev->next dependency connect any hardirq-safe lock | ||
863 | * (in the full backwards-subgraph starting at <prev>) with any | ||
864 | * hardirq-unsafe lock (in the full forwards-subgraph starting at | ||
865 | * <next>)? [== illegal lock inversion with hardirq contexts] | ||
866 | * | ||
867 | * - does the new prev->next dependency connect any softirq-safe lock | ||
868 | * (in the full backwards-subgraph starting at <prev>) with any | ||
869 | * softirq-unsafe lock (in the full forwards-subgraph starting at | ||
870 | * <next>)? [== illegal lock inversion with softirq contexts] | ||
871 | * | ||
872 | * any of these scenarios could lead to a deadlock. | ||
873 | * | ||
874 | * Then if all the validations pass, we add the forwards and backwards | ||
875 | * dependency. | ||
876 | */ | ||
877 | static int | ||
878 | check_prev_add(struct task_struct *curr, struct held_lock *prev, | ||
879 | struct held_lock *next) | ||
880 | { | ||
881 | struct lock_list *entry; | ||
882 | int ret; | ||
883 | |||
884 | /* | ||
885 | * Prove that the new <prev> -> <next> dependency would not | ||
886 | * create a circular dependency in the graph. (We do this by | ||
887 | * forward-recursing into the graph starting at <next>, and | ||
888 | * checking whether we can reach <prev>.) | ||
889 | * | ||
890 | * We are using global variables to control the recursion, to | ||
891 | * keep the stackframe size of the recursive functions low: | ||
892 | */ | ||
893 | check_source = next; | ||
894 | check_target = prev; | ||
895 | if (!(check_noncircular(next->class, 0))) | ||
896 | return print_circular_bug_tail(); | ||
897 | |||
898 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
899 | /* | ||
900 | * Prove that the new dependency does not connect a hardirq-safe | ||
901 | * lock with a hardirq-unsafe lock - to achieve this we search | ||
902 | * the backwards-subgraph starting at <prev>, and the | ||
903 | * forwards-subgraph starting at <next>: | ||
904 | */ | ||
905 | if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ, | ||
906 | LOCK_ENABLED_HARDIRQS, "hard")) | ||
907 | return 0; | ||
908 | |||
909 | /* | ||
910 | * Prove that the new dependency does not connect a hardirq-safe-read | ||
911 | * lock with a hardirq-unsafe lock - to achieve this we search | ||
912 | * the backwards-subgraph starting at <prev>, and the | ||
913 | * forwards-subgraph starting at <next>: | ||
914 | */ | ||
915 | if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ, | ||
916 | LOCK_ENABLED_HARDIRQS, "hard-read")) | ||
917 | return 0; | ||
918 | |||
919 | /* | ||
920 | * Prove that the new dependency does not connect a softirq-safe | ||
921 | * lock with a softirq-unsafe lock - to achieve this we search | ||
922 | * the backwards-subgraph starting at <prev>, and the | ||
923 | * forwards-subgraph starting at <next>: | ||
924 | */ | ||
925 | if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ, | ||
926 | LOCK_ENABLED_SOFTIRQS, "soft")) | ||
927 | return 0; | ||
928 | /* | ||
929 | * Prove that the new dependency does not connect a softirq-safe-read | ||
930 | * lock with a softirq-unsafe lock - to achieve this we search | ||
931 | * the backwards-subgraph starting at <prev>, and the | ||
932 | * forwards-subgraph starting at <next>: | ||
933 | */ | ||
934 | if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ, | ||
935 | LOCK_ENABLED_SOFTIRQS, "soft-read")) | ||
936 | return 0; | ||
937 | #endif | ||
938 | /* | ||
939 | * For recursive read-locks we do all the dependency checks, | ||
940 | * but we don't store read-triggered dependencies (only | ||
941 | * write-triggered dependencies). This ensures that only the | ||
942 | * write-side dependencies matter, and that if for example a | ||
943 | * write-lock never takes any other locks, then the reads are | ||
944 | * equivalent to a NOP. | ||
945 | */ | ||
946 | if (next->read == 2 || prev->read == 2) | ||
947 | return 1; | ||
948 | /* | ||
949 | * Is the <prev> -> <next> dependency already present? | ||
950 | * | ||
951 | * (this may occur even though this is a new chain: consider | ||
952 | * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3 | ||
953 | * chains - the second one will be new, but L1 already has | ||
954 | * L2 added to its dependency list, due to the first chain.) | ||
955 | */ | ||
956 | list_for_each_entry(entry, &prev->class->locks_after, entry) { | ||
957 | if (entry->class == next->class) | ||
958 | return 2; | ||
959 | } | ||
960 | |||
961 | /* | ||
962 | * Ok, all validations passed, add the new lock | ||
963 | * to the previous lock's dependency list: | ||
964 | */ | ||
965 | ret = add_lock_to_list(prev->class, next->class, | ||
966 | &prev->class->locks_after, next->acquire_ip); | ||
967 | if (!ret) | ||
968 | return 0; | ||
969 | /* | ||
970 | * Return value of 2 signals 'dependency already added', | ||
971 | * in that case we don't have to add the backlink either. | ||
972 | */ | ||
973 | if (ret == 2) | ||
974 | return 2; | ||
975 | ret = add_lock_to_list(next->class, prev->class, | ||
976 | &next->class->locks_before, next->acquire_ip); | ||
977 | |||
978 | /* | ||
979 | * Debugging printouts: | ||
980 | */ | ||
981 | if (verbose(prev->class) || verbose(next->class)) { | ||
982 | __raw_spin_unlock(&hash_lock); | ||
983 | printk("\n new dependency: "); | ||
984 | print_lock_name(prev->class); | ||
985 | printk(" => "); | ||
986 | print_lock_name(next->class); | ||
987 | printk("\n"); | ||
988 | dump_stack(); | ||
989 | __raw_spin_lock(&hash_lock); | ||
990 | } | ||
991 | return 1; | ||
992 | } | ||
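For reference, the pattern these validations exist to catch is the classic AB-BA inversion: one code path records the dependency A -> B, another records B -> A, and the cycle check flags the second path even though no deadlock happens at run time. An illustrative userspace version with pthread mutexes (running both paths in a single thread cannot deadlock, which is exactly why tracking the dependencies, rather than waiting for a real deadlock, pays off):

#include <pthread.h>

static pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t B = PTHREAD_MUTEX_INITIALIZER;

static void path_one(void)		/* records the dependency A -> B */
{
	pthread_mutex_lock(&A);
	pthread_mutex_lock(&B);
	pthread_mutex_unlock(&B);
	pthread_mutex_unlock(&A);
}

static void path_two(void)		/* records B -> A: closes the cycle */
{
	pthread_mutex_lock(&B);
	pthread_mutex_lock(&A);
	pthread_mutex_unlock(&A);
	pthread_mutex_unlock(&B);
}

int main(void)
{
	path_one();
	path_two();
	return 0;
}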
993 | |||
994 | /* | ||
995 | * Add the dependency to all directly-previous locks that are 'relevant'. | ||
996 | * The ones that are relevant are (in increasing distance from curr): | ||
997 | * all consecutive trylock entries and the final non-trylock entry - or | ||
998 | * the end of this context's lock-chain - whichever comes first. | ||
999 | */ | ||
1000 | static int | ||
1001 | check_prevs_add(struct task_struct *curr, struct held_lock *next) | ||
1002 | { | ||
1003 | int depth = curr->lockdep_depth; | ||
1004 | struct held_lock *hlock; | ||
1005 | |||
1006 | /* | ||
1007 | * Debugging checks. | ||
1008 | * | ||
1009 | * Depth must not be zero for a non-head lock: | ||
1010 | */ | ||
1011 | if (!depth) | ||
1012 | goto out_bug; | ||
1013 | /* | ||
1014 | * At least two relevant locks must exist for this | ||
1015 | * to be a head: | ||
1016 | */ | ||
1017 | if (curr->held_locks[depth].irq_context != | ||
1018 | curr->held_locks[depth-1].irq_context) | ||
1019 | goto out_bug; | ||
1020 | |||
1021 | for (;;) { | ||
1022 | hlock = curr->held_locks + depth-1; | ||
1023 | /* | ||
1024 | * Only non-recursive-read entries get new dependencies | ||
1025 | * added: | ||
1026 | */ | ||
1027 | if (hlock->read != 2) { | ||
1028 | check_prev_add(curr, hlock, next); | ||
1029 | /* | ||
1030 | * Stop after the first non-trylock entry, | ||
1031 | * as non-trylock entries have added their | ||
1032 | * own direct dependencies already, so this | ||
1033 | * lock is connected to them indirectly: | ||
1034 | */ | ||
1035 | if (!hlock->trylock) | ||
1036 | break; | ||
1037 | } | ||
1038 | depth--; | ||
1039 | /* | ||
1040 | * End of lock-stack? | ||
1041 | */ | ||
1042 | if (!depth) | ||
1043 | break; | ||
1044 | /* | ||
1045 | * Stop the search if we cross into another context: | ||
1046 | */ | ||
1047 | if (curr->held_locks[depth].irq_context != | ||
1048 | curr->held_locks[depth-1].irq_context) | ||
1049 | break; | ||
1050 | } | ||
1051 | return 1; | ||
1052 | out_bug: | ||
1053 | __raw_spin_unlock(&hash_lock); | ||
1054 | DEBUG_LOCKS_WARN_ON(1); | ||
1055 | |||
1056 | return 0; | ||
1057 | } | ||
1058 | |||
1059 | |||
1060 | /* | ||
1061 | * Is this the address of a static object: | ||
1062 | */ | ||
1063 | static int static_obj(void *obj) | ||
1064 | { | ||
1065 | unsigned long start = (unsigned long) &_stext, | ||
1066 | end = (unsigned long) &_end, | ||
1067 | addr = (unsigned long) obj; | ||
1068 | #ifdef CONFIG_SMP | ||
1069 | int i; | ||
1070 | #endif | ||
1071 | |||
1072 | /* | ||
1073 | * static variable? | ||
1074 | */ | ||
1075 | if ((addr >= start) && (addr < end)) | ||
1076 | return 1; | ||
1077 | |||
1078 | #ifdef CONFIG_SMP | ||
1079 | /* | ||
1080 | * percpu var? | ||
1081 | */ | ||
1082 | for_each_possible_cpu(i) { | ||
1083 | start = (unsigned long) &__per_cpu_start + per_cpu_offset(i); | ||
1084 | end = (unsigned long) &__per_cpu_end + per_cpu_offset(i); | ||
1085 | |||
1086 | if ((addr >= start) && (addr < end)) | ||
1087 | return 1; | ||
1088 | } | ||
1089 | #endif | ||
1090 | |||
1091 | /* | ||
1092 | * module var? | ||
1093 | */ | ||
1094 | return is_module_address(addr); | ||
1095 | } | ||
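static_obj() is a set of address-range tests: inside the kernel image between _stext and _end, inside any CPU's percpu area, or inside a module. A userspace analogue of the range test itself (the array below merely stands in for a section):

#include <stdio.h>
#include <stdint.h>

static int image[16];	/* stands in for the [_stext, _end) range */

static int in_range(const void *obj, const void *start, const void *end)
{
	uintptr_t addr = (uintptr_t)obj;

	return addr >= (uintptr_t)start && addr < (uintptr_t)end;
}

int main(void)
{
	int on_stack;

	printf("&image[3] inside? %d\n", in_range(&image[3], image, image + 16));
	printf("&on_stack inside? %d\n", in_range(&on_stack, image, image + 16));
	return 0;
}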
1096 | |||
1097 | /* | ||
1098 | * To make lock name printouts unique, we calculate a unique | ||
1099 | * class->name_version generation counter: | ||
1100 | */ | ||
1101 | static int count_matching_names(struct lock_class *new_class) | ||
1102 | { | ||
1103 | struct lock_class *class; | ||
1104 | int count = 0; | ||
1105 | |||
1106 | if (!new_class->name) | ||
1107 | return 0; | ||
1108 | |||
1109 | list_for_each_entry(class, &all_lock_classes, lock_entry) { | ||
1110 | if (new_class->key - new_class->subclass == class->key) | ||
1111 | return class->name_version; | ||
1112 | if (class->name && !strcmp(class->name, new_class->name)) | ||
1113 | count = max(count, class->name_version); | ||
1114 | } | ||
1115 | |||
1116 | return count + 1; | ||
1117 | } | ||
1118 | |||
1119 | extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void); | ||
1120 | |||
1121 | /* | ||
1122 | * Register a lock's class in the hash-table, if the class is not present | ||
1123 | * yet. Otherwise we look it up. We cache the result in the lock object | ||
1124 | * itself, so the actual hash lookup should happen only once per lock object. | ||
1125 | */ | ||
1126 | static inline struct lock_class * | ||
1127 | register_lock_class(struct lockdep_map *lock, unsigned int subclass) | ||
1128 | { | ||
1129 | struct lockdep_subclass_key *key; | ||
1130 | struct list_head *hash_head; | ||
1131 | struct lock_class *class; | ||
1132 | |||
1133 | #ifdef CONFIG_DEBUG_LOCKDEP | ||
1134 | /* | ||
1135 | * If the architecture calls into lockdep before initializing | ||
1136 | * the hashes then we'll warn about it later. (we cannot printk | ||
1137 | * right now) | ||
1138 | */ | ||
1139 | if (unlikely(!lockdep_initialized)) { | ||
1140 | lockdep_init(); | ||
1141 | lockdep_init_error = 1; | ||
1142 | } | ||
1143 | #endif | ||
1144 | |||
1145 | /* | ||
1146 | * Static locks do not have their class-keys yet - for them the key | ||
1147 | * is the lock object itself: | ||
1148 | */ | ||
1149 | if (unlikely(!lock->key)) | ||
1150 | lock->key = (void *)lock; | ||
1151 | |||
1152 | /* | ||
1153 | * NOTE: the class-key must be unique. For dynamic locks, a static | ||
1154 | * lock_class_key variable is passed in through the mutex_init() | ||
1155 | * (or spin_lock_init()) call - which acts as the key. For static | ||
1156 | * locks we use the lock object itself as the key. | ||
1157 | */ | ||
1158 | if (sizeof(struct lock_class_key) > sizeof(struct lock_class)) | ||
1159 | __error_too_big_MAX_LOCKDEP_SUBCLASSES(); | ||
1160 | |||
1161 | key = lock->key->subkeys + subclass; | ||
1162 | |||
1163 | hash_head = classhashentry(key); | ||
1164 | |||
1165 | /* | ||
1166 | * We can walk the hash lock-free, because the hash only | ||
1167 | * grows, and we are careful when adding entries to the end: | ||
1168 | */ | ||
1169 | list_for_each_entry(class, hash_head, hash_entry) | ||
1170 | if (class->key == key) | ||
1171 | goto out_set; | ||
1172 | |||
1173 | /* | ||
1174 | * Debug-check: all keys must be persistent! | ||
1175 | */ | ||
1176 | if (!static_obj(lock->key)) { | ||
1177 | debug_locks_off(); | ||
1178 | printk("INFO: trying to register non-static key.\n"); | ||
1179 | printk("the code is fine but needs lockdep annotation.\n"); | ||
1180 | printk("turning off the locking correctness validator.\n"); | ||
1181 | dump_stack(); | ||
1182 | |||
1183 | return NULL; | ||
1184 | } | ||
1185 | |||
1186 | __raw_spin_lock(&hash_lock); | ||
1187 | /* | ||
1188 | * We have to do the hash-walk again, to avoid races | ||
1189 | * with another CPU: | ||
1190 | */ | ||
1191 | list_for_each_entry(class, hash_head, hash_entry) | ||
1192 | if (class->key == key) | ||
1193 | goto out_unlock_set; | ||
1194 | /* | ||
1195 | * Allocate a new key from the static array, and add it to | ||
1196 | * the hash: | ||
1197 | */ | ||
1198 | if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { | ||
1199 | __raw_spin_unlock(&hash_lock); | ||
1200 | debug_locks_off(); | ||
1201 | printk("BUG: MAX_LOCKDEP_KEYS too low!\n"); | ||
1202 | printk("turning off the locking correctness validator.\n"); | ||
1203 | return NULL; | ||
1204 | } | ||
1205 | class = lock_classes + nr_lock_classes++; | ||
1206 | debug_atomic_inc(&nr_unused_locks); | ||
1207 | class->key = key; | ||
1208 | class->name = lock->name; | ||
1209 | class->subclass = subclass; | ||
1210 | INIT_LIST_HEAD(&class->lock_entry); | ||
1211 | INIT_LIST_HEAD(&class->locks_before); | ||
1212 | INIT_LIST_HEAD(&class->locks_after); | ||
1213 | class->name_version = count_matching_names(class); | ||
1214 | /* | ||
1215 | * We use RCU's safe list-add method to make | ||
1216 | * parallel walking of the hash-list safe: | ||
1217 | */ | ||
1218 | list_add_tail_rcu(&class->hash_entry, hash_head); | ||
1219 | |||
1220 | if (verbose(class)) { | ||
1221 | __raw_spin_unlock(&hash_lock); | ||
1222 | printk("\nnew class %p: %s", class->key, class->name); | ||
1223 | if (class->name_version > 1) | ||
1224 | printk("#%d", class->name_version); | ||
1225 | printk("\n"); | ||
1226 | dump_stack(); | ||
1227 | __raw_spin_lock(&hash_lock); | ||
1228 | } | ||
1229 | out_unlock_set: | ||
1230 | __raw_spin_unlock(&hash_lock); | ||
1231 | |||
1232 | out_set: | ||
1233 | lock->class[subclass] = class; | ||
1234 | |||
1235 | DEBUG_LOCKS_WARN_ON(class->subclass != subclass); | ||
1236 | |||
1237 | return class; | ||
1238 | } | ||
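register_lock_class() uses the classic check-lock-recheck scheme: a lock-free hash walk on the fast path, then hash_lock is taken and the walk repeated before a new class is allocated, so two CPUs racing on the same key cannot both register it. A minimal userspace analogue of that scheme with a single lazily-registered object (all names below are hypothetical):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct obj { int id; };

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic(struct obj *) registered;

static struct obj *register_obj(void)
{
	static struct obj the_obj = { 42 };
	struct obj *o;

	/* fast path: lock-free lookup */
	o = atomic_load_explicit(&registered, memory_order_acquire);
	if (o)
		return o;

	/* slow path: take the lock and look again before "allocating" */
	pthread_mutex_lock(&reg_lock);
	o = atomic_load_explicit(&registered, memory_order_relaxed);
	if (!o) {
		o = &the_obj;
		atomic_store_explicit(&registered, o, memory_order_release);
	}
	pthread_mutex_unlock(&reg_lock);
	return o;
}

int main(void)
{
	printf("registered id %d\n", register_obj()->id);
	return 0;
}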
1239 | |||
1240 | /* | ||
1241 | * Look up a dependency chain. If the key is not present yet then | ||
1242 | * add it and return 1 - in this case the new dependency chain is | ||
1243 | * validated. If the key is already hashed, return 0. | ||
1244 | */ | ||
1245 | static inline int lookup_chain_cache(u64 chain_key) | ||
1246 | { | ||
1247 | struct list_head *hash_head = chainhashentry(chain_key); | ||
1248 | struct lock_chain *chain; | ||
1249 | |||
1250 | DEBUG_LOCKS_WARN_ON(!irqs_disabled()); | ||
1251 | /* | ||
1252 | * We can walk it lock-free, because entries only get added | ||
1253 | * to the hash: | ||
1254 | */ | ||
1255 | list_for_each_entry(chain, hash_head, entry) { | ||
1256 | if (chain->chain_key == chain_key) { | ||
1257 | cache_hit: | ||
1258 | debug_atomic_inc(&chain_lookup_hits); | ||
1259 | /* | ||
1260 | * In the debugging case, force redundant checking | ||
1261 | * by returning 1: | ||
1262 | */ | ||
1263 | #ifdef CONFIG_DEBUG_LOCKDEP | ||
1264 | __raw_spin_lock(&hash_lock); | ||
1265 | return 1; | ||
1266 | #endif | ||
1267 | return 0; | ||
1268 | } | ||
1269 | } | ||
1270 | /* | ||
1271 | * Allocate a new chain entry from the static array, and add | ||
1272 | * it to the hash: | ||
1273 | */ | ||
1274 | __raw_spin_lock(&hash_lock); | ||
1275 | /* | ||
1276 | * We have to walk the chain again locked - to avoid duplicates: | ||
1277 | */ | ||
1278 | list_for_each_entry(chain, hash_head, entry) { | ||
1279 | if (chain->chain_key == chain_key) { | ||
1280 | __raw_spin_unlock(&hash_lock); | ||
1281 | goto cache_hit; | ||
1282 | } | ||
1283 | } | ||
1284 | if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { | ||
1285 | __raw_spin_unlock(&hash_lock); | ||
1286 | debug_locks_off(); | ||
1287 | printk("BUG: MAX_LOCKDEP_CHAINS too low!\n"); | ||
1288 | printk("turning off the locking correctness validator.\n"); | ||
1289 | return 0; | ||
1290 | } | ||
1291 | chain = lock_chains + nr_lock_chains++; | ||
1292 | chain->chain_key = chain_key; | ||
1293 | list_add_tail_rcu(&chain->entry, hash_head); | ||
1294 | debug_atomic_inc(&chain_lookup_misses); | ||
1295 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
1296 | if (current->hardirq_context) | ||
1297 | nr_hardirq_chains++; | ||
1298 | else { | ||
1299 | if (current->softirq_context) | ||
1300 | nr_softirq_chains++; | ||
1301 | else | ||
1302 | nr_process_chains++; | ||
1303 | } | ||
1304 | #else | ||
1305 | nr_process_chains++; | ||
1306 | #endif | ||
1307 | |||
1308 | return 1; | ||
1309 | } | ||
1310 | |||
1311 | /* | ||
1312 | * We are building curr_chain_key incrementally, so double-check | ||
1313 | * it from scratch, to make sure that it's done correctly: | ||
1314 | */ | ||
1315 | static void check_chain_key(struct task_struct *curr) | ||
1316 | { | ||
1317 | #ifdef CONFIG_DEBUG_LOCKDEP | ||
1318 | struct held_lock *hlock, *prev_hlock = NULL; | ||
1319 | unsigned int i, id; | ||
1320 | u64 chain_key = 0; | ||
1321 | |||
1322 | for (i = 0; i < curr->lockdep_depth; i++) { | ||
1323 | hlock = curr->held_locks + i; | ||
1324 | if (chain_key != hlock->prev_chain_key) { | ||
1325 | debug_locks_off(); | ||
1326 | printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n", | ||
1327 | curr->lockdep_depth, i, | ||
1328 | (unsigned long long)chain_key, | ||
1329 | (unsigned long long)hlock->prev_chain_key); | ||
1330 | WARN_ON(1); | ||
1331 | return; | ||
1332 | } | ||
1333 | id = hlock->class - lock_classes; | ||
1334 | DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS); | ||
1335 | if (prev_hlock && (prev_hlock->irq_context != | ||
1336 | hlock->irq_context)) | ||
1337 | chain_key = 0; | ||
1338 | chain_key = iterate_chain_key(chain_key, id); | ||
1339 | prev_hlock = hlock; | ||
1340 | } | ||
1341 | if (chain_key != curr->curr_chain_key) { | ||
1342 | debug_locks_off(); | ||
1343 | printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n", | ||
1344 | curr->lockdep_depth, i, | ||
1345 | (unsigned long long)chain_key, | ||
1346 | (unsigned long long)curr->curr_chain_key); | ||
1347 | WARN_ON(1); | ||
1348 | } | ||
1349 | #endif | ||
1350 | } | ||
1351 | |||
1352 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
1353 | |||
1354 | /* | ||
1355 | * print irq inversion bug: | ||
1356 | */ | ||
1357 | static int | ||
1358 | print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, | ||
1359 | struct held_lock *this, int forwards, | ||
1360 | const char *irqclass) | ||
1361 | { | ||
1362 | __raw_spin_unlock(&hash_lock); | ||
1363 | debug_locks_off(); | ||
1364 | if (debug_locks_silent) | ||
1365 | return 0; | ||
1366 | |||
1367 | printk("\n=========================================================\n"); | ||
1368 | printk( "[ INFO: possible irq lock inversion dependency detected ]\n"); | ||
1369 | printk( "---------------------------------------------------------\n"); | ||
1370 | printk("%s/%d just changed the state of lock:\n", | ||
1371 | curr->comm, curr->pid); | ||
1372 | print_lock(this); | ||
1373 | if (forwards) | ||
1374 | printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass); | ||
1375 | else | ||
1376 | printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass); | ||
1377 | print_lock_name(other); | ||
1378 | printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); | ||
1379 | |||
1380 | printk("\nother info that might help us debug this:\n"); | ||
1381 | lockdep_print_held_locks(curr); | ||
1382 | |||
1383 | printk("\nthe first lock's dependencies:\n"); | ||
1384 | print_lock_dependencies(this->class, 0); | ||
1385 | |||
1386 | printk("\nthe second lock's dependencies:\n"); | ||
1387 | print_lock_dependencies(other, 0); | ||
1388 | |||
1389 | printk("\nstack backtrace:\n"); | ||
1390 | dump_stack(); | ||
1391 | |||
1392 | return 0; | ||
1393 | } | ||
1394 | |||
1395 | /* | ||
1396 | * Prove that in the forwards-direction subgraph starting at <this> | ||
1397 | * there is no lock matching <mask>: | ||
1398 | */ | ||
1399 | static int | ||
1400 | check_usage_forwards(struct task_struct *curr, struct held_lock *this, | ||
1401 | enum lock_usage_bit bit, const char *irqclass) | ||
1402 | { | ||
1403 | int ret; | ||
1404 | |||
1405 | find_usage_bit = bit; | ||
1406 | /* fills in <forwards_match> */ | ||
1407 | ret = find_usage_forwards(this->class, 0); | ||
1408 | if (!ret || ret == 1) | ||
1409 | return ret; | ||
1410 | |||
1411 | return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass); | ||
1412 | } | ||
1413 | |||
1414 | /* | ||
1415 | * Prove that in the backwards-direction subgraph starting at <this> | ||
1416 | * there is no lock matching <mask>: | ||
1417 | */ | ||
1418 | static int | ||
1419 | check_usage_backwards(struct task_struct *curr, struct held_lock *this, | ||
1420 | enum lock_usage_bit bit, const char *irqclass) | ||
1421 | { | ||
1422 | int ret; | ||
1423 | |||
1424 | find_usage_bit = bit; | ||
1425 | /* fills in <backwards_match> */ | ||
1426 | ret = find_usage_backwards(this->class, 0); | ||
1427 | if (!ret || ret == 1) | ||
1428 | return ret; | ||
1429 | |||
1430 | return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass); | ||
1431 | } | ||
1432 | |||
1433 | static inline void print_irqtrace_events(struct task_struct *curr) | ||
1434 | { | ||
1435 | printk("irq event stamp: %u\n", curr->irq_events); | ||
1436 | printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event); | ||
1437 | print_ip_sym(curr->hardirq_enable_ip); | ||
1438 | printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event); | ||
1439 | print_ip_sym(curr->hardirq_disable_ip); | ||
1440 | printk("softirqs last enabled at (%u): ", curr->softirq_enable_event); | ||
1441 | print_ip_sym(curr->softirq_enable_ip); | ||
1442 | printk("softirqs last disabled at (%u): ", curr->softirq_disable_event); | ||
1443 | print_ip_sym(curr->softirq_disable_ip); | ||
1444 | } | ||
1445 | |||
1446 | #else | ||
1447 | static inline void print_irqtrace_events(struct task_struct *curr) | ||
1448 | { | ||
1449 | } | ||
1450 | #endif | ||
1451 | |||
1452 | static int | ||
1453 | print_usage_bug(struct task_struct *curr, struct held_lock *this, | ||
1454 | enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) | ||
1455 | { | ||
1456 | __raw_spin_unlock(&hash_lock); | ||
1457 | debug_locks_off(); | ||
1458 | if (debug_locks_silent) | ||
1459 | return 0; | ||
1460 | |||
1461 | printk("\n=================================\n"); | ||
1462 | printk( "[ INFO: inconsistent lock state ]\n"); | ||
1463 | printk( "---------------------------------\n"); | ||
1464 | |||
1465 | printk("inconsistent {%s} -> {%s} usage.\n", | ||
1466 | usage_str[prev_bit], usage_str[new_bit]); | ||
1467 | |||
1468 | printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", | ||
1469 | curr->comm, curr->pid, | ||
1470 | trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, | ||
1471 | trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, | ||
1472 | trace_hardirqs_enabled(curr), | ||
1473 | trace_softirqs_enabled(curr)); | ||
1474 | print_lock(this); | ||
1475 | |||
1476 | printk("{%s} state was registered at:\n", usage_str[prev_bit]); | ||
1477 | print_stack_trace(this->class->usage_traces + prev_bit, 1); | ||
1478 | |||
1479 | print_irqtrace_events(curr); | ||
1480 | printk("\nother info that might help us debug this:\n"); | ||
1481 | lockdep_print_held_locks(curr); | ||
1482 | |||
1483 | printk("\nstack backtrace:\n"); | ||
1484 | dump_stack(); | ||
1485 | |||
1486 | return 0; | ||
1487 | } | ||
1488 | |||
1489 | /* | ||
1490 | * Print out an error if an invalid bit is set: | ||
1491 | */ | ||
1492 | static inline int | ||
1493 | valid_state(struct task_struct *curr, struct held_lock *this, | ||
1494 | enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) | ||
1495 | { | ||
1496 | if (unlikely(this->class->usage_mask & (1 << bad_bit))) | ||
1497 | return print_usage_bug(curr, this, bad_bit, new_bit); | ||
1498 | return 1; | ||
1499 | } | ||
1500 | |||
1501 | #define STRICT_READ_CHECKS 1 | ||
1502 | |||
1503 | /* | ||
1504 | * Mark a lock with a usage bit, and validate the state transition: | ||
1505 | */ | ||
1506 | static int mark_lock(struct task_struct *curr, struct held_lock *this, | ||
1507 | enum lock_usage_bit new_bit, unsigned long ip) | ||
1508 | { | ||
1509 | unsigned int new_mask = 1 << new_bit, ret = 1; | ||
1510 | |||
1511 | /* | ||
1512 | * If already set then do not dirty the cacheline, | ||
1513 | * nor do any checks: | ||
1514 | */ | ||
1515 | if (likely(this->class->usage_mask & new_mask)) | ||
1516 | return 1; | ||
1517 | |||
1518 | __raw_spin_lock(&hash_lock); | ||
1519 | /* | ||
1520 | * Make sure we didn't race: | ||
1521 | */ | ||
1522 | if (unlikely(this->class->usage_mask & new_mask)) { | ||
1523 | __raw_spin_unlock(&hash_lock); | ||
1524 | return 1; | ||
1525 | } | ||
1526 | |||
1527 | this->class->usage_mask |= new_mask; | ||
1528 | |||
1529 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
1530 | if (new_bit == LOCK_ENABLED_HARDIRQS || | ||
1531 | new_bit == LOCK_ENABLED_HARDIRQS_READ) | ||
1532 | ip = curr->hardirq_enable_ip; | ||
1533 | else if (new_bit == LOCK_ENABLED_SOFTIRQS || | ||
1534 | new_bit == LOCK_ENABLED_SOFTIRQS_READ) | ||
1535 | ip = curr->softirq_enable_ip; | ||
1536 | #endif | ||
1537 | if (!save_trace(this->class->usage_traces + new_bit)) | ||
1538 | return 0; | ||
1539 | |||
1540 | switch (new_bit) { | ||
1541 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
1542 | case LOCK_USED_IN_HARDIRQ: | ||
1543 | if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS)) | ||
1544 | return 0; | ||
1545 | if (!valid_state(curr, this, new_bit, | ||
1546 | LOCK_ENABLED_HARDIRQS_READ)) | ||
1547 | return 0; | ||
1548 | /* | ||
1549 | * just marked it hardirq-safe, check that this lock | ||
1550 | * took no hardirq-unsafe lock in the past: | ||
1551 | */ | ||
1552 | if (!check_usage_forwards(curr, this, | ||
1553 | LOCK_ENABLED_HARDIRQS, "hard")) | ||
1554 | return 0; | ||
1555 | #if STRICT_READ_CHECKS | ||
1556 | /* | ||
1557 | * just marked it hardirq-safe, check that this lock | ||
1558 | * took no hardirq-unsafe-read lock in the past: | ||
1559 | */ | ||
1560 | if (!check_usage_forwards(curr, this, | ||
1561 | LOCK_ENABLED_HARDIRQS_READ, "hard-read")) | ||
1562 | return 0; | ||
1563 | #endif | ||
1564 | if (hardirq_verbose(this->class)) | ||
1565 | ret = 2; | ||
1566 | break; | ||
1567 | case LOCK_USED_IN_SOFTIRQ: | ||
1568 | if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS)) | ||
1569 | return 0; | ||
1570 | if (!valid_state(curr, this, new_bit, | ||
1571 | LOCK_ENABLED_SOFTIRQS_READ)) | ||
1572 | return 0; | ||
1573 | /* | ||
1574 | * just marked it softirq-safe, check that this lock | ||
1575 | * took no softirq-unsafe lock in the past: | ||
1576 | */ | ||
1577 | if (!check_usage_forwards(curr, this, | ||
1578 | LOCK_ENABLED_SOFTIRQS, "soft")) | ||
1579 | return 0; | ||
1580 | #if STRICT_READ_CHECKS | ||
1581 | /* | ||
1582 | * just marked it softirq-safe, check that this lock | ||
1583 | * took no softirq-unsafe-read lock in the past: | ||
1584 | */ | ||
1585 | if (!check_usage_forwards(curr, this, | ||
1586 | LOCK_ENABLED_SOFTIRQS_READ, "soft-read")) | ||
1587 | return 0; | ||
1588 | #endif | ||
1589 | if (softirq_verbose(this->class)) | ||
1590 | ret = 2; | ||
1591 | break; | ||
1592 | case LOCK_USED_IN_HARDIRQ_READ: | ||
1593 | if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS)) | ||
1594 | return 0; | ||
1595 | /* | ||
1596 | * just marked it hardirq-read-safe, check that this lock | ||
1597 | * took no hardirq-unsafe lock in the past: | ||
1598 | */ | ||
1599 | if (!check_usage_forwards(curr, this, | ||
1600 | LOCK_ENABLED_HARDIRQS, "hard")) | ||
1601 | return 0; | ||
1602 | if (hardirq_verbose(this->class)) | ||
1603 | ret = 2; | ||
1604 | break; | ||
1605 | case LOCK_USED_IN_SOFTIRQ_READ: | ||
1606 | if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS)) | ||
1607 | return 0; | ||
1608 | /* | ||
1609 | * just marked it softirq-read-safe, check that this lock | ||
1610 | * took no softirq-unsafe lock in the past: | ||
1611 | */ | ||
1612 | if (!check_usage_forwards(curr, this, | ||
1613 | LOCK_ENABLED_SOFTIRQS, "soft")) | ||
1614 | return 0; | ||
1615 | if (softirq_verbose(this->class)) | ||
1616 | ret = 2; | ||
1617 | break; | ||
1618 | case LOCK_ENABLED_HARDIRQS: | ||
1619 | if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ)) | ||
1620 | return 0; | ||
1621 | if (!valid_state(curr, this, new_bit, | ||
1622 | LOCK_USED_IN_HARDIRQ_READ)) | ||
1623 | return 0; | ||
1624 | /* | ||
1625 | * just marked it hardirq-unsafe, check that no hardirq-safe | ||
1626 | * lock in the system ever took it in the past: | ||
1627 | */ | ||
1628 | if (!check_usage_backwards(curr, this, | ||
1629 | LOCK_USED_IN_HARDIRQ, "hard")) | ||
1630 | return 0; | ||
1631 | #if STRICT_READ_CHECKS | ||
1632 | /* | ||
1633 | * just marked it hardirq-unsafe, check that no | ||
1634 | * hardirq-safe-read lock in the system ever took | ||
1635 | * it in the past: | ||
1636 | */ | ||
1637 | if (!check_usage_backwards(curr, this, | ||
1638 | LOCK_USED_IN_HARDIRQ_READ, "hard-read")) | ||
1639 | return 0; | ||
1640 | #endif | ||
1641 | if (hardirq_verbose(this->class)) | ||
1642 | ret = 2; | ||
1643 | break; | ||
1644 | case LOCK_ENABLED_SOFTIRQS: | ||
1645 | if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ)) | ||
1646 | return 0; | ||
1647 | if (!valid_state(curr, this, new_bit, | ||
1648 | LOCK_USED_IN_SOFTIRQ_READ)) | ||
1649 | return 0; | ||
1650 | /* | ||
1651 | * just marked it softirq-unsafe, check that no softirq-safe | ||
1652 | * lock in the system ever took it in the past: | ||
1653 | */ | ||
1654 | if (!check_usage_backwards(curr, this, | ||
1655 | LOCK_USED_IN_SOFTIRQ, "soft")) | ||
1656 | return 0; | ||
1657 | #if STRICT_READ_CHECKS | ||
1658 | /* | ||
1659 | * just marked it softirq-unsafe, check that no | ||
1660 | * softirq-safe-read lock in the system ever took | ||
1661 | * it in the past: | ||
1662 | */ | ||
1663 | if (!check_usage_backwards(curr, this, | ||
1664 | LOCK_USED_IN_SOFTIRQ_READ, "soft-read")) | ||
1665 | return 0; | ||
1666 | #endif | ||
1667 | if (softirq_verbose(this->class)) | ||
1668 | ret = 2; | ||
1669 | break; | ||
1670 | case LOCK_ENABLED_HARDIRQS_READ: | ||
1671 | if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ)) | ||
1672 | return 0; | ||
1673 | #if STRICT_READ_CHECKS | ||
1674 | /* | ||
1675 | * just marked it hardirq-read-unsafe, check that no | ||
1676 | * hardirq-safe lock in the system ever took it in the past: | ||
1677 | */ | ||
1678 | if (!check_usage_backwards(curr, this, | ||
1679 | LOCK_USED_IN_HARDIRQ, "hard")) | ||
1680 | return 0; | ||
1681 | #endif | ||
1682 | if (hardirq_verbose(this->class)) | ||
1683 | ret = 2; | ||
1684 | break; | ||
1685 | case LOCK_ENABLED_SOFTIRQS_READ: | ||
1686 | if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ)) | ||
1687 | return 0; | ||
1688 | #if STRICT_READ_CHECKS | ||
1689 | /* | ||
1690 | * just marked it softirq-read-unsafe, check that no | ||
1691 | * softirq-safe lock in the system ever took it in the past: | ||
1692 | */ | ||
1693 | if (!check_usage_backwards(curr, this, | ||
1694 | LOCK_USED_IN_SOFTIRQ, "soft")) | ||
1695 | return 0; | ||
1696 | #endif | ||
1697 | if (softirq_verbose(this->class)) | ||
1698 | ret = 2; | ||
1699 | break; | ||
1700 | #endif | ||
1701 | case LOCK_USED: | ||
1702 | /* | ||
1703 | * Add it to the global list of classes: | ||
1704 | */ | ||
1705 | list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes); | ||
1706 | debug_atomic_dec(&nr_unused_locks); | ||
1707 | break; | ||
1708 | default: | ||
1709 | debug_locks_off(); | ||
1710 | WARN_ON(1); | ||
1711 | return 0; | ||
1712 | } | ||
1713 | |||
1714 | __raw_spin_unlock(&hash_lock); | ||
1715 | |||
1716 | /* | ||
1717 | * We must printk outside of the hash_lock: | ||
1718 | */ | ||
1719 | if (ret == 2) { | ||
1720 | printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); | ||
1721 | print_lock(this); | ||
1722 | print_irqtrace_events(curr); | ||
1723 | dump_stack(); | ||
1724 | } | ||
1725 | |||
1726 | return ret; | ||
1727 | } | ||
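The big switch above is essentially a symmetric exclusion table: once a class has been observed as irq-safe (taken inside an interrupt handler) it must never also be observed as irq-unsafe (taken with that interrupt type enabled), and vice versa, with some of the read-side checks guarded by STRICT_READ_CHECKS. The following stand-alone sketch, with hypothetical names and only the hardirq pair, shows the shape of the rule that valid_state() enforces:

#include <stdio.h>

/* Hypothetical, simplified mirror of two of the usage bits above. */
enum usage_bit {
	USED_IN_HARDIRQ  = 1 << 0,	/* taken while servicing a hardirq */
	ENABLED_HARDIRQS = 1 << 1,	/* taken with hardirqs enabled     */
};

/* Returns 0 (bug) once a class has accumulated both bits of the pair. */
static int mark(unsigned int *usage_mask, enum usage_bit new_bit)
{
	enum usage_bit bad_bit =
		(new_bit == USED_IN_HARDIRQ) ? ENABLED_HARDIRQS : USED_IN_HARDIRQ;

	if (*usage_mask & bad_bit)
		return 0;			/* irq-safe vs. irq-unsafe conflict */
	*usage_mask |= new_bit;
	return 1;
}

int main(void)
{
	unsigned int mask = 0;

	mark(&mask, ENABLED_HARDIRQS);			/* lock once taken with irqs on */
	printf("%d\n", mark(&mask, USED_IN_HARDIRQ));	/* prints 0: conflict detected  */
	return 0;
}
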
1728 | |||
1729 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
1730 | /* | ||
1731 | * Mark all held locks with a usage bit: | ||
1732 | */ | ||
1733 | static int | ||
1734 | mark_held_locks(struct task_struct *curr, int hardirq, unsigned long ip) | ||
1735 | { | ||
1736 | enum lock_usage_bit usage_bit; | ||
1737 | struct held_lock *hlock; | ||
1738 | int i; | ||
1739 | |||
1740 | for (i = 0; i < curr->lockdep_depth; i++) { | ||
1741 | hlock = curr->held_locks + i; | ||
1742 | |||
1743 | if (hardirq) { | ||
1744 | if (hlock->read) | ||
1745 | usage_bit = LOCK_ENABLED_HARDIRQS_READ; | ||
1746 | else | ||
1747 | usage_bit = LOCK_ENABLED_HARDIRQS; | ||
1748 | } else { | ||
1749 | if (hlock->read) | ||
1750 | usage_bit = LOCK_ENABLED_SOFTIRQS_READ; | ||
1751 | else | ||
1752 | usage_bit = LOCK_ENABLED_SOFTIRQS; | ||
1753 | } | ||
1754 | if (!mark_lock(curr, hlock, usage_bit, ip)) | ||
1755 | return 0; | ||
1756 | } | ||
1757 | |||
1758 | return 1; | ||
1759 | } | ||
1760 | |||
1761 | /* | ||
1762 | * Debugging helper: via this flag we know that we are in | ||
1763 | * 'early bootup code', and will warn about any invalid irqs-on event: | ||
1764 | */ | ||
1765 | static int early_boot_irqs_enabled; | ||
1766 | |||
1767 | void early_boot_irqs_off(void) | ||
1768 | { | ||
1769 | early_boot_irqs_enabled = 0; | ||
1770 | } | ||
1771 | |||
1772 | void early_boot_irqs_on(void) | ||
1773 | { | ||
1774 | early_boot_irqs_enabled = 1; | ||
1775 | } | ||
1776 | |||
1777 | /* | ||
1778 | * Hardirqs will be enabled: | ||
1779 | */ | ||
1780 | void trace_hardirqs_on(void) | ||
1781 | { | ||
1782 | struct task_struct *curr = current; | ||
1783 | unsigned long ip; | ||
1784 | |||
1785 | if (unlikely(!debug_locks || current->lockdep_recursion)) | ||
1786 | return; | ||
1787 | |||
1788 | if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled))) | ||
1789 | return; | ||
1790 | |||
1791 | if (unlikely(curr->hardirqs_enabled)) { | ||
1792 | debug_atomic_inc(&redundant_hardirqs_on); | ||
1793 | return; | ||
1794 | } | ||
1795 | /* we'll do an OFF -> ON transition: */ | ||
1796 | curr->hardirqs_enabled = 1; | ||
1797 | ip = (unsigned long) __builtin_return_address(0); | ||
1798 | |||
1799 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | ||
1800 | return; | ||
1801 | if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) | ||
1802 | return; | ||
1803 | /* | ||
1804 | * We are going to turn hardirqs on, so set the | ||
1805 | * usage bit for all held locks: | ||
1806 | */ | ||
1807 | if (!mark_held_locks(curr, 1, ip)) | ||
1808 | return; | ||
1809 | /* | ||
1810 | * If we have softirqs enabled, then set the usage | ||
1811 | * bit for all held locks. (disabled hardirqs prevented | ||
1812 | * this bit from being set before) | ||
1813 | */ | ||
1814 | if (curr->softirqs_enabled) | ||
1815 | if (!mark_held_locks(curr, 0, ip)) | ||
1816 | return; | ||
1817 | |||
1818 | curr->hardirq_enable_ip = ip; | ||
1819 | curr->hardirq_enable_event = ++curr->irq_events; | ||
1820 | debug_atomic_inc(&hardirqs_on_events); | ||
1821 | } | ||
1822 | |||
1823 | EXPORT_SYMBOL(trace_hardirqs_on); | ||
1824 | |||
1825 | /* | ||
1826 | * Hardirqs were disabled: | ||
1827 | */ | ||
1828 | void trace_hardirqs_off(void) | ||
1829 | { | ||
1830 | struct task_struct *curr = current; | ||
1831 | |||
1832 | if (unlikely(!debug_locks || current->lockdep_recursion)) | ||
1833 | return; | ||
1834 | |||
1835 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | ||
1836 | return; | ||
1837 | |||
1838 | if (curr->hardirqs_enabled) { | ||
1839 | /* | ||
1840 | * We have done an ON -> OFF transition: | ||
1841 | */ | ||
1842 | curr->hardirqs_enabled = 0; | ||
1843 | curr->hardirq_disable_ip = _RET_IP_; | ||
1844 | curr->hardirq_disable_event = ++curr->irq_events; | ||
1845 | debug_atomic_inc(&hardirqs_off_events); | ||
1846 | } else | ||
1847 | debug_atomic_inc(&redundant_hardirqs_off); | ||
1848 | } | ||
1849 | |||
1850 | EXPORT_SYMBOL(trace_hardirqs_off); | ||
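These two hooks do pure bookkeeping; they get reached because, with CONFIG_TRACE_IRQFLAGS enabled, the generic irq-flags macros are layered on top of the raw architecture operations. Below is a simplified sketch of that layering (the real macros live in <linux/irqflags.h> and handle more cases); the ordering is chosen so the hooks themselves always run with hardirqs disabled:

/* Simplified sketch of the CONFIG_TRACE_IRQFLAGS wrappers: */
#define local_irq_enable() \
	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

#define local_irq_disable() \
	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)

#define local_irq_restore(flags)				\
	do {							\
		if (raw_irqs_disabled_flags(flags)) {		\
			raw_local_irq_restore(flags);		\
			trace_hardirqs_off();			\
		} else {					\
			trace_hardirqs_on();			\
			raw_local_irq_restore(flags);		\
		}						\
	} while (0)
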
1851 | |||
1852 | /* | ||
1853 | * Softirqs will be enabled: | ||
1854 | */ | ||
1855 | void trace_softirqs_on(unsigned long ip) | ||
1856 | { | ||
1857 | struct task_struct *curr = current; | ||
1858 | |||
1859 | if (unlikely(!debug_locks)) | ||
1860 | return; | ||
1861 | |||
1862 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | ||
1863 | return; | ||
1864 | |||
1865 | if (curr->softirqs_enabled) { | ||
1866 | debug_atomic_inc(&redundant_softirqs_on); | ||
1867 | return; | ||
1868 | } | ||
1869 | |||
1870 | /* | ||
1871 | * We'll do an OFF -> ON transition: | ||
1872 | */ | ||
1873 | curr->softirqs_enabled = 1; | ||
1874 | curr->softirq_enable_ip = ip; | ||
1875 | curr->softirq_enable_event = ++curr->irq_events; | ||
1876 | debug_atomic_inc(&softirqs_on_events); | ||
1877 | /* | ||
1878 | * We are going to turn softirqs on, so set the | ||
1879 | * usage bit for all held locks, if hardirqs are | ||
1880 | * enabled too: | ||
1881 | */ | ||
1882 | if (curr->hardirqs_enabled) | ||
1883 | mark_held_locks(curr, 0, ip); | ||
1884 | } | ||
1885 | |||
1886 | /* | ||
1887 | * Softirqs were disabled: | ||
1888 | */ | ||
1889 | void trace_softirqs_off(unsigned long ip) | ||
1890 | { | ||
1891 | struct task_struct *curr = current; | ||
1892 | |||
1893 | if (unlikely(!debug_locks)) | ||
1894 | return; | ||
1895 | |||
1896 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | ||
1897 | return; | ||
1898 | |||
1899 | if (curr->softirqs_enabled) { | ||
1900 | /* | ||
1901 | * We have done an ON -> OFF transition: | ||
1902 | */ | ||
1903 | curr->softirqs_enabled = 0; | ||
1904 | curr->softirq_disable_ip = ip; | ||
1905 | curr->softirq_disable_event = ++curr->irq_events; | ||
1906 | debug_atomic_inc(&softirqs_off_events); | ||
1907 | DEBUG_LOCKS_WARN_ON(!softirq_count()); | ||
1908 | } else | ||
1909 | debug_atomic_inc(&redundant_softirqs_off); | ||
1910 | } | ||
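The softirq hooks above are driven from the BH-disable path rather than from the irq-flags macros. Conceptually the callers look like the sketch below; the real local_bh_disable()/local_bh_enable() live in kernel/softirq.c, also manage the preempt count and irq state, and differ in detail:

/* Conceptual sketch only - not the real kernel/softirq.c code: */
static void sketch_local_bh_disable(void)
{
	if (!softirq_count())		/* entering the first BH-off section */
		trace_softirqs_off(_RET_IP_);
	/* ... add SOFTIRQ_OFFSET to the preempt count ... */
}

static void sketch_local_bh_enable(void)
{
	/* ... subtract SOFTIRQ_OFFSET from the preempt count ... */
	if (!softirq_count())		/* left the last BH-off section */
		trace_softirqs_on(_RET_IP_);
}
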
1911 | |||
1912 | #endif | ||
1913 | |||
1914 | /* | ||
1915 | * Initialize a lock instance's lock-class mapping info: | ||
1916 | */ | ||
1917 | void lockdep_init_map(struct lockdep_map *lock, const char *name, | ||
1918 | struct lock_class_key *key) | ||
1919 | { | ||
1920 | if (unlikely(!debug_locks)) | ||
1921 | return; | ||
1922 | |||
1923 | if (DEBUG_LOCKS_WARN_ON(!key)) | ||
1924 | return; | ||
1925 | if (DEBUG_LOCKS_WARN_ON(!name)) | ||
1926 | return; | ||
1927 | /* | ||
1928 | * Sanity check, the lock-class key must be persistent: | ||
1929 | */ | ||
1930 | if (!static_obj(key)) { | ||
1931 | printk("BUG: key %p not in .data!\n", key); | ||
1932 | DEBUG_LOCKS_WARN_ON(1); | ||
1933 | return; | ||
1934 | } | ||
1935 | lock->name = name; | ||
1936 | lock->key = key; | ||
1937 | memset(lock->class, 0, sizeof(lock->class[0])*MAX_LOCKDEP_SUBCLASSES); | ||
1938 | } | ||
1939 | |||
1940 | EXPORT_SYMBOL_GPL(lockdep_init_map); | ||
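The static_obj() check above is why every lock class needs a key with static storage duration; spin_lock_init()-style macros hide this by declaring one static key per initialization site. A hand-rolled caller of lockdep_init_map() would look roughly like this (struct my_dev and my_dev_init() are made-up names); note that all instances initialized here share one class, because they share one key:

#include <linux/lockdep.h>

struct my_dev {
	struct lockdep_map dep_map;
	/* ... */
};

/*
 * One key per lock *class*. It must live in static storage (kernel
 * image or module image), never on the stack or in kmalloc()ed
 * memory, otherwise the static_obj() check above triggers.
 */
static struct lock_class_key my_dev_lock_key;

static void my_dev_init(struct my_dev *dev)
{
	lockdep_init_map(&dev->dep_map, "my_dev_lock", &my_dev_lock_key);
}
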
1941 | |||
1942 | /* | ||
1943 | * This gets called for every mutex_lock*()/spin_lock*() operation. | ||
1944 | * We maintain the dependency maps and validate the locking attempt: | ||
1945 | */ | ||
1946 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | ||
1947 | int trylock, int read, int check, int hardirqs_off, | ||
1948 | unsigned long ip) | ||
1949 | { | ||
1950 | struct task_struct *curr = current; | ||
1951 | struct held_lock *hlock; | ||
1952 | struct lock_class *class; | ||
1953 | unsigned int depth, id; | ||
1954 | int chain_head = 0; | ||
1955 | u64 chain_key; | ||
1956 | |||
1957 | if (unlikely(!debug_locks)) | ||
1958 | return 0; | ||
1959 | |||
1960 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | ||
1961 | return 0; | ||
1962 | |||
1963 | if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { | ||
1964 | debug_locks_off(); | ||
1965 | printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n"); | ||
1966 | printk("turning off the locking correctness validator.\n"); | ||
1967 | return 0; | ||
1968 | } | ||
1969 | |||
1970 | class = lock->class[subclass]; | ||
1971 | /* not cached yet? */ | ||
1972 | if (unlikely(!class)) { | ||
1973 | class = register_lock_class(lock, subclass); | ||
1974 | if (!class) | ||
1975 | return 0; | ||
1976 | } | ||
1977 | debug_atomic_inc((atomic_t *)&class->ops); | ||
1978 | if (very_verbose(class)) { | ||
1979 | printk("\nacquire class [%p] %s", class->key, class->name); | ||
1980 | if (class->name_version > 1) | ||
1981 | printk("#%d", class->name_version); | ||
1982 | printk("\n"); | ||
1983 | dump_stack(); | ||
1984 | } | ||
1985 | |||
1986 | /* | ||
1987 | * Add the lock to the list of currently held locks. | ||
1988 | * (we don't increase the depth just yet, up until the | ||
1989 | * dependency checks are done) | ||
1990 | */ | ||
1991 | depth = curr->lockdep_depth; | ||
1992 | if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) | ||
1993 | return 0; | ||
1994 | |||
1995 | hlock = curr->held_locks + depth; | ||
1996 | |||
1997 | hlock->class = class; | ||
1998 | hlock->acquire_ip = ip; | ||
1999 | hlock->instance = lock; | ||
2000 | hlock->trylock = trylock; | ||
2001 | hlock->read = read; | ||
2002 | hlock->check = check; | ||
2003 | hlock->hardirqs_off = hardirqs_off; | ||
2004 | |||
2005 | if (check != 2) | ||
2006 | goto out_calc_hash; | ||
2007 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
2008 | /* | ||
2009 | * If this is a non-trylock acquisition in a hardirq or softirq | ||
2010 | * context, then mark the lock as used in that context: | ||
2011 | */ | ||
2012 | if (!trylock) { | ||
2013 | if (read) { | ||
2014 | if (curr->hardirq_context) | ||
2015 | if (!mark_lock(curr, hlock, | ||
2016 | LOCK_USED_IN_HARDIRQ_READ, ip)) | ||
2017 | return 0; | ||
2018 | if (curr->softirq_context) | ||
2019 | if (!mark_lock(curr, hlock, | ||
2020 | LOCK_USED_IN_SOFTIRQ_READ, ip)) | ||
2021 | return 0; | ||
2022 | } else { | ||
2023 | if (curr->hardirq_context) | ||
2024 | if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ, ip)) | ||
2025 | return 0; | ||
2026 | if (curr->softirq_context) | ||
2027 | if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ, ip)) | ||
2028 | return 0; | ||
2029 | } | ||
2030 | } | ||
2031 | if (!hardirqs_off) { | ||
2032 | if (read) { | ||
2033 | if (!mark_lock(curr, hlock, | ||
2034 | LOCK_ENABLED_HARDIRQS_READ, ip)) | ||
2035 | return 0; | ||
2036 | if (curr->softirqs_enabled) | ||
2037 | if (!mark_lock(curr, hlock, | ||
2038 | LOCK_ENABLED_SOFTIRQS_READ, ip)) | ||
2039 | return 0; | ||
2040 | } else { | ||
2041 | if (!mark_lock(curr, hlock, | ||
2042 | LOCK_ENABLED_HARDIRQS, ip)) | ||
2043 | return 0; | ||
2044 | if (curr->softirqs_enabled) | ||
2045 | if (!mark_lock(curr, hlock, | ||
2046 | LOCK_ENABLED_SOFTIRQS, ip)) | ||
2047 | return 0; | ||
2048 | } | ||
2049 | } | ||
2050 | #endif | ||
2051 | /* mark it as used: */ | ||
2052 | if (!mark_lock(curr, hlock, LOCK_USED, ip)) | ||
2053 | return 0; | ||
2054 | out_calc_hash: | ||
2055 | /* | ||
2056 | * Calculate the chain hash: it's the combined hash of all the | ||
2057 | * lock keys along the dependency chain. We save the hash value | ||
2058 | * at every step so that we can get the current hash easily | ||
2059 | * after unlock. The chain hash is then used to cache dependency | ||
2060 | * results. | ||
2061 | * | ||
2062 | * The 'key ID' is the most compact key value we can use to | ||
2063 | * drive the hash, not class->key. | ||
2064 | */ | ||
2065 | id = class - lock_classes; | ||
2066 | if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) | ||
2067 | return 0; | ||
2068 | |||
2069 | chain_key = curr->curr_chain_key; | ||
2070 | if (!depth) { | ||
2071 | if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) | ||
2072 | return 0; | ||
2073 | chain_head = 1; | ||
2074 | } | ||
2075 | |||
2076 | hlock->prev_chain_key = chain_key; | ||
2077 | |||
2078 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
2079 | /* | ||
2080 | * Keep track of points where we cross into an interrupt context: | ||
2081 | */ | ||
2082 | hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) + | ||
2083 | curr->softirq_context; | ||
2084 | if (depth) { | ||
2085 | struct held_lock *prev_hlock; | ||
2086 | |||
2087 | prev_hlock = curr->held_locks + depth-1; | ||
2088 | /* | ||
2089 | * If we cross into another context, reset the | ||
2090 | * hash key (this also prevents the checking and the | ||
2091 | * adding of the dependency to 'prev'): | ||
2092 | */ | ||
2093 | if (prev_hlock->irq_context != hlock->irq_context) { | ||
2094 | chain_key = 0; | ||
2095 | chain_head = 1; | ||
2096 | } | ||
2097 | } | ||
2098 | #endif | ||
2099 | chain_key = iterate_chain_key(chain_key, id); | ||
2100 | curr->curr_chain_key = chain_key; | ||
2101 | |||
2102 | /* | ||
2103 | * Trylock needs to maintain the stack of held locks, but it | ||
2104 | * does not add new dependencies, because trylock can be done | ||
2105 | * in any order. | ||
2106 | * | ||
2107 | * We look up the chain_key and do the O(N^2) check and update of | ||
2108 | * the dependencies only if this is a new dependency chain. | ||
2109 | * (If lookup_chain_cache() returns with 1 it acquires | ||
2110 | * hash_lock for us) | ||
2111 | */ | ||
2112 | if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) { | ||
2113 | /* | ||
2114 | * Check whether last held lock: | ||
2115 | * | ||
2116 | * - is irq-safe, if this lock is irq-unsafe | ||
2117 | * - is softirq-safe, if this lock is hardirq-unsafe | ||
2118 | * | ||
2119 | * And check whether the new lock's dependency graph | ||
2120 | * could lead back to the previous lock. | ||
2121 | * | ||
2122 | * Any of these scenarios could lead to a deadlock. If all | ||
2123 | * validations pass, the new dependencies are added below. | ||
2124 | */ | ||
2125 | int ret = check_deadlock(curr, hlock, lock, read); | ||
2126 | |||
2127 | if (!ret) | ||
2128 | return 0; | ||
2129 | /* | ||
2130 | * Mark recursive read, as we jump over it when | ||
2131 | * building dependencies (just like we jump over | ||
2132 | * trylock entries): | ||
2133 | */ | ||
2134 | if (ret == 2) | ||
2135 | hlock->read = 2; | ||
2136 | /* | ||
2137 | * Add dependency only if this lock is not the head | ||
2138 | * of the chain, and if it's not a secondary read-lock: | ||
2139 | */ | ||
2140 | if (!chain_head && ret != 2) | ||
2141 | if (!check_prevs_add(curr, hlock)) | ||
2142 | return 0; | ||
2143 | __raw_spin_unlock(&hash_lock); | ||
2144 | } | ||
2145 | curr->lockdep_depth++; | ||
2146 | check_chain_key(curr); | ||
2147 | if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { | ||
2148 | debug_locks_off(); | ||
2149 | printk("BUG: MAX_LOCK_DEPTH too low!\n"); | ||
2150 | printk("turning off the locking correctness validator.\n"); | ||
2151 | return 0; | ||
2152 | } | ||
2153 | if (unlikely(curr->lockdep_depth > max_lockdep_depth)) | ||
2154 | max_lockdep_depth = curr->lockdep_depth; | ||
2155 | |||
2156 | return 1; | ||
2157 | } | ||
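The chain-key handling above boils down to a running 64-bit hash: it starts at 0 with no locks held, folds in each class id in acquisition order, is snapshotted into every held_lock so it can be rewound on release, and is reset when crossing into a different irq context. Below is a small user-space model of that bookkeeping, using a stand-in mixing function since the real iterate_chain_key() is defined elsewhere in this file:

#include <stdint.h>
#include <stdio.h>

#define MAX_DEPTH 48

static uint64_t chain_key;		/* current chain hash      */
static uint64_t prev_key[MAX_DEPTH];	/* per-held-lock snapshots */
static int depth;

/* Stand-in for iterate_chain_key(); only the accumulate/rewind
 * structure matters here, not the mixing function itself. */
static uint64_t mix(uint64_t key, unsigned int class_id)
{
	return (key << 7) ^ (key >> 57) ^ class_id;
}

static void toy_acquire(unsigned int class_id)
{
	prev_key[depth++] = chain_key;	/* like hlock->prev_chain_key */
	chain_key = mix(chain_key, class_id);
}

static void toy_release(void)
{
	chain_key = prev_key[--depth];	/* rewind to the snapshot */
}

int main(void)
{
	toy_acquire(1);
	toy_acquire(2);			/* chain (1, 2) has a unique key */
	printf("%llx\n", (unsigned long long)chain_key);
	toy_release();
	toy_release();			/* back to 0: no locks held */
	printf("%llx\n", (unsigned long long)chain_key);
	return 0;
}
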
2158 | |||
2159 | static int | ||
2160 | print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock, | ||
2161 | unsigned long ip) | ||
2162 | { | ||
2163 | if (!debug_locks_off()) | ||
2164 | return 0; | ||
2165 | if (debug_locks_silent) | ||
2166 | return 0; | ||
2167 | |||
2168 | printk("\n=====================================\n"); | ||
2169 | printk( "[ BUG: bad unlock balance detected! ]\n"); | ||
2170 | printk( "-------------------------------------\n"); | ||
2171 | printk("%s/%d is trying to release lock (", | ||
2172 | curr->comm, curr->pid); | ||
2173 | print_lockdep_cache(lock); | ||
2174 | printk(") at:\n"); | ||
2175 | print_ip_sym(ip); | ||
2176 | printk("but there are no more locks to release!\n"); | ||
2177 | printk("\nother info that might help us debug this:\n"); | ||
2178 | lockdep_print_held_locks(curr); | ||
2179 | |||
2180 | printk("\nstack backtrace:\n"); | ||
2181 | dump_stack(); | ||
2182 | |||
2183 | return 0; | ||
2184 | } | ||
2185 | |||
2186 | /* | ||
2187 | * Common debugging checks for both nested and non-nested unlock: | ||
2188 | */ | ||
2189 | static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, | ||
2190 | unsigned long ip) | ||
2191 | { | ||
2192 | if (unlikely(!debug_locks)) | ||
2193 | return 0; | ||
2194 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | ||
2195 | return 0; | ||
2196 | |||
2197 | if (curr->lockdep_depth <= 0) | ||
2198 | return print_unlock_inbalance_bug(curr, lock, ip); | ||
2199 | |||
2200 | return 1; | ||
2201 | } | ||
2202 | |||
2203 | /* | ||
2204 | * Remove the lock from the list of currently held locks in a | ||
2205 | * potentially non-nested (out of order) manner. This is a | ||
2206 | * relatively rare operation, as all the unlock APIs default | ||
2207 | * to nested mode (which uses lock_release()): | ||
2208 | */ | ||
2209 | static int | ||
2210 | lock_release_non_nested(struct task_struct *curr, | ||
2211 | struct lockdep_map *lock, unsigned long ip) | ||
2212 | { | ||
2213 | struct held_lock *hlock, *prev_hlock; | ||
2214 | unsigned int depth; | ||
2215 | int i; | ||
2216 | |||
2217 | /* | ||
2218 | * Check whether the lock exists in the current stack | ||
2219 | * of held locks: | ||
2220 | */ | ||
2221 | depth = curr->lockdep_depth; | ||
2222 | if (DEBUG_LOCKS_WARN_ON(!depth)) | ||
2223 | return 0; | ||
2224 | |||
2225 | prev_hlock = NULL; | ||
2226 | for (i = depth-1; i >= 0; i--) { | ||
2227 | hlock = curr->held_locks + i; | ||
2228 | /* | ||
2229 | * We must not cross into another context: | ||
2230 | */ | ||
2231 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) | ||
2232 | break; | ||
2233 | if (hlock->instance == lock) | ||
2234 | goto found_it; | ||
2235 | prev_hlock = hlock; | ||
2236 | } | ||
2237 | return print_unlock_inbalance_bug(curr, lock, ip); | ||
2238 | |||
2239 | found_it: | ||
2240 | /* | ||
2241 | * We have the right lock to unlock, 'hlock' points to it. | ||
2242 | * Now we remove it from the stack, and add back the other | ||
2243 | * entries (if any), recalculating the hash along the way: | ||
2244 | */ | ||
2245 | curr->lockdep_depth = i; | ||
2246 | curr->curr_chain_key = hlock->prev_chain_key; | ||
2247 | |||
2248 | for (i++; i < depth; i++) { | ||
2249 | hlock = curr->held_locks + i; | ||
2250 | if (!__lock_acquire(hlock->instance, | ||
2251 | hlock->class->subclass, hlock->trylock, | ||
2252 | hlock->read, hlock->check, hlock->hardirqs_off, | ||
2253 | hlock->acquire_ip)) | ||
2254 | return 0; | ||
2255 | } | ||
2256 | |||
2257 | if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) | ||
2258 | return 0; | ||
2259 | return 1; | ||
2260 | } | ||
2261 | |||
2262 | /* | ||
2263 | * Remove the lock from the list of currently held locks - this gets | ||
2264 | * called on mutex_unlock()/spin_unlock*() (or on a failed | ||
2265 | * mutex_lock_interruptible()). This is done for unlocks that nest | ||
2266 | * perfectly. (i.e. the current top of the lock-stack is unlocked) | ||
2267 | */ | ||
2268 | static int lock_release_nested(struct task_struct *curr, | ||
2269 | struct lockdep_map *lock, unsigned long ip) | ||
2270 | { | ||
2271 | struct held_lock *hlock; | ||
2272 | unsigned int depth; | ||
2273 | |||
2274 | /* | ||
2275 | * Pop off the top of the lock stack: | ||
2276 | */ | ||
2277 | depth = curr->lockdep_depth - 1; | ||
2278 | hlock = curr->held_locks + depth; | ||
2279 | |||
2280 | /* | ||
2281 | * Is the unlock non-nested: | ||
2282 | */ | ||
2283 | if (hlock->instance != lock) | ||
2284 | return lock_release_non_nested(curr, lock, ip); | ||
2285 | curr->lockdep_depth--; | ||
2286 | |||
2287 | if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0))) | ||
2288 | return 0; | ||
2289 | |||
2290 | curr->curr_chain_key = hlock->prev_chain_key; | ||
2291 | |||
2292 | #ifdef CONFIG_DEBUG_LOCKDEP | ||
2293 | hlock->prev_chain_key = 0; | ||
2294 | hlock->class = NULL; | ||
2295 | hlock->acquire_ip = 0; | ||
2296 | hlock->irq_context = 0; | ||
2297 | #endif | ||
2298 | return 1; | ||
2299 | } | ||
2300 | |||
2301 | /* | ||
2302 | * Remove the lock from the list of currently held locks - this gets | ||
2303 | * called on mutex_unlock()/spin_unlock*() (or on a failed | ||
2304 | * mutex_lock_interruptible()). Both perfectly nested and | ||
2305 | * out-of-order unlocks are handled here, via the two helpers above. | ||
2306 | */ | ||
2307 | static void | ||
2308 | __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) | ||
2309 | { | ||
2310 | struct task_struct *curr = current; | ||
2311 | |||
2312 | if (!check_unlock(curr, lock, ip)) | ||
2313 | return; | ||
2314 | |||
2315 | if (nested) { | ||
2316 | if (!lock_release_nested(curr, lock, ip)) | ||
2317 | return; | ||
2318 | } else { | ||
2319 | if (!lock_release_non_nested(curr, lock, ip)) | ||
2320 | return; | ||
2321 | } | ||
2322 | |||
2323 | check_chain_key(curr); | ||
2324 | } | ||
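For illustration, the non-nested path is what runs when locks are released out of acquisition order, which is legal but comparatively rare; a sketch with two hypothetical spinlocks:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void out_of_order_release(void)
{
	spin_lock(&lock_a);
	spin_lock(&lock_b);
	spin_unlock(&lock_a);	/* not the top of the held-lock stack:
				 * lock_release_nested() falls back to
				 * lock_release_non_nested() */
	spin_unlock(&lock_b);	/* plain nested release of the new top */
}
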
2325 | |||
2326 | /* | ||
2327 | * Check whether we follow the irq-flags state precisely: | ||
2328 | */ | ||
2329 | static void check_flags(unsigned long flags) | ||
2330 | { | ||
2331 | #if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS) | ||
2332 | if (!debug_locks) | ||
2333 | return; | ||
2334 | |||
2335 | if (irqs_disabled_flags(flags)) | ||
2336 | DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled); | ||
2337 | else | ||
2338 | DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled); | ||
2339 | |||
2340 | /* | ||
2341 | * We don't accurately track softirq state in e.g. | ||
2342 | * hardirq contexts (such as on 4KSTACKS), so only | ||
2343 | * check if not in hardirq contexts: | ||
2344 | */ | ||
2345 | if (!hardirq_count()) { | ||
2346 | if (softirq_count()) | ||
2347 | DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); | ||
2348 | else | ||
2349 | DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); | ||
2350 | } | ||
2351 | |||
2352 | if (!debug_locks) | ||
2353 | print_irqtrace_events(current); | ||
2354 | #endif | ||
2355 | } | ||
2356 | |||
2357 | /* | ||
2358 | * We are not always called with irqs disabled - do that here, | ||
2359 | * and also avoid lockdep recursion: | ||
2360 | */ | ||
2361 | void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | ||
2362 | int trylock, int read, int check, unsigned long ip) | ||
2363 | { | ||
2364 | unsigned long flags; | ||
2365 | |||
2366 | if (unlikely(current->lockdep_recursion)) | ||
2367 | return; | ||
2368 | |||
2369 | raw_local_irq_save(flags); | ||
2370 | check_flags(flags); | ||
2371 | |||
2372 | current->lockdep_recursion = 1; | ||
2373 | __lock_acquire(lock, subclass, trylock, read, check, | ||
2374 | irqs_disabled_flags(flags), ip); | ||
2375 | current->lockdep_recursion = 0; | ||
2376 | raw_local_irq_restore(flags); | ||
2377 | } | ||
2378 | |||
2379 | EXPORT_SYMBOL_GPL(lock_acquire); | ||
2380 | |||
2381 | void lock_release(struct lockdep_map *lock, int nested, unsigned long ip) | ||
2382 | { | ||
2383 | unsigned long flags; | ||
2384 | |||
2385 | if (unlikely(current->lockdep_recursion)) | ||
2386 | return; | ||
2387 | |||
2388 | raw_local_irq_save(flags); | ||
2389 | check_flags(flags); | ||
2390 | current->lockdep_recursion = 1; | ||
2391 | __lock_release(lock, nested, ip); | ||
2392 | current->lockdep_recursion = 0; | ||
2393 | raw_local_irq_restore(flags); | ||
2394 | } | ||
2395 | |||
2396 | EXPORT_SYMBOL_GPL(lock_release); | ||
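lock_acquire() and lock_release() are the two hooks the lock implementations call on every acquire and release; they are reached through thin wrapper macros, roughly as sketched below (the real definitions are in <linux/lockdep.h> and kernel/spinlock.c and may differ in detail; check == 2 requests full validation):

/* Sketch of the wrapper layer - not the authoritative definitions: */
#define spin_acquire(l, s, t, i)	lock_acquire(l, s, t, 0, 2, i)
#define spin_release(l, n, i)		lock_release(l, n, i)

void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_spin_lock(lock);
}

void __lockfunc _spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}
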
2397 | |||
2398 | /* | ||
2399 | * Used by the testsuite to sanitize the validator state | ||
2400 | * after a simulated failure: | ||
2401 | */ | ||
2402 | |||
2403 | void lockdep_reset(void) | ||
2404 | { | ||
2405 | unsigned long flags; | ||
2406 | |||
2407 | raw_local_irq_save(flags); | ||
2408 | current->curr_chain_key = 0; | ||
2409 | current->lockdep_depth = 0; | ||
2410 | current->lockdep_recursion = 0; | ||
2411 | memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); | ||
2412 | nr_hardirq_chains = 0; | ||
2413 | nr_softirq_chains = 0; | ||
2414 | nr_process_chains = 0; | ||
2415 | debug_locks = 1; | ||
2416 | raw_local_irq_restore(flags); | ||
2417 | } | ||
2418 | |||
2419 | static void zap_class(struct lock_class *class) | ||
2420 | { | ||
2421 | int i; | ||
2422 | |||
2423 | /* | ||
2424 | * Remove all dependencies this lock is | ||
2425 | * involved in: | ||
2426 | */ | ||
2427 | for (i = 0; i < nr_list_entries; i++) { | ||
2428 | if (list_entries[i].class == class) | ||
2429 | list_del_rcu(&list_entries[i].entry); | ||
2430 | } | ||
2431 | /* | ||
2432 | * Unhash the class and remove it from the all_lock_classes list: | ||
2433 | */ | ||
2434 | list_del_rcu(&class->hash_entry); | ||
2435 | list_del_rcu(&class->lock_entry); | ||
2436 | |||
2437 | } | ||
2438 | |||
2439 | static inline int within(void *addr, void *start, unsigned long size) | ||
2440 | { | ||
2441 | return addr >= start && addr < start + size; | ||
2442 | } | ||
2443 | |||
2444 | void lockdep_free_key_range(void *start, unsigned long size) | ||
2445 | { | ||
2446 | struct lock_class *class, *next; | ||
2447 | struct list_head *head; | ||
2448 | unsigned long flags; | ||
2449 | int i; | ||
2450 | |||
2451 | raw_local_irq_save(flags); | ||
2452 | __raw_spin_lock(&hash_lock); | ||
2453 | |||
2454 | /* | ||
2455 | * Unhash all classes that were created by this module: | ||
2456 | */ | ||
2457 | for (i = 0; i < CLASSHASH_SIZE; i++) { | ||
2458 | head = classhash_table + i; | ||
2459 | if (list_empty(head)) | ||
2460 | continue; | ||
2461 | list_for_each_entry_safe(class, next, head, hash_entry) | ||
2462 | if (within(class->key, start, size)) | ||
2463 | zap_class(class); | ||
2464 | } | ||
2465 | |||
2466 | __raw_spin_unlock(&hash_lock); | ||
2467 | raw_local_irq_restore(flags); | ||
2468 | } | ||
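The main caller here is the module unloader: a module's lock_class_key objects live in its image, so classes keyed by them must be zapped before that memory is reused. Roughly, from free_module() in kernel/module.c (the field names are recalled from that era and may differ):

static void free_module(struct module *mod)
{
	/* ... */
	/* Free lock-classes whose keys live inside the module image: */
	lockdep_free_key_range(mod->module_core, mod->core_size);
	/* ... the module's memory is released after this point. */
}
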
2469 | |||
2470 | void lockdep_reset_lock(struct lockdep_map *lock) | ||
2471 | { | ||
2472 | struct lock_class *class, *next, *entry; | ||
2473 | struct list_head *head; | ||
2474 | unsigned long flags; | ||
2475 | int i, j; | ||
2476 | |||
2477 | raw_local_irq_save(flags); | ||
2478 | __raw_spin_lock(&hash_lock); | ||
2479 | |||
2480 | /* | ||
2481 | * Remove all classes this lock has: | ||
2482 | */ | ||
2483 | for (i = 0; i < CLASSHASH_SIZE; i++) { | ||
2484 | head = classhash_table + i; | ||
2485 | if (list_empty(head)) | ||
2486 | continue; | ||
2487 | list_for_each_entry_safe(class, next, head, hash_entry) { | ||
2488 | for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) { | ||
2489 | entry = lock->class[j]; | ||
2490 | if (class == entry) { | ||
2491 | zap_class(class); | ||
2492 | lock->class[j] = NULL; | ||
2493 | break; | ||
2494 | } | ||
2495 | } | ||
2496 | } | ||
2497 | } | ||
2498 | |||
2499 | /* | ||
2500 | * Debug check: in the end all mapped classes should | ||
2501 | * be gone. | ||
2502 | */ | ||
2503 | for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) { | ||
2504 | entry = lock->class[j]; | ||
2505 | if (!entry) | ||
2506 | continue; | ||
2507 | __raw_spin_unlock(&hash_lock); | ||
2508 | DEBUG_LOCKS_WARN_ON(1); | ||
2509 | raw_local_irq_restore(flags); | ||
2510 | return; | ||
2511 | } | ||
2512 | |||
2513 | __raw_spin_unlock(&hash_lock); | ||
2514 | raw_local_irq_restore(flags); | ||
2515 | } | ||
2516 | |||
2517 | void __init lockdep_init(void) | ||
2518 | { | ||
2519 | int i; | ||
2520 | |||
2521 | /* | ||
2522 | * Some architectures have their own start_kernel() | ||
2523 | * code which calls lockdep_init(), while we also | ||
2524 | * call lockdep_init() from start_kernel() itself, | ||
2525 | * and we want to initialize the hashes only once: | ||
2526 | */ | ||
2527 | if (lockdep_initialized) | ||
2528 | return; | ||
2529 | |||
2530 | for (i = 0; i < CLASSHASH_SIZE; i++) | ||
2531 | INIT_LIST_HEAD(classhash_table + i); | ||
2532 | |||
2533 | for (i = 0; i < CHAINHASH_SIZE; i++) | ||
2534 | INIT_LIST_HEAD(chainhash_table + i); | ||
2535 | |||
2536 | lockdep_initialized = 1; | ||
2537 | } | ||
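For context, the generic boot code is expected to call this before the first lock class is ever looked up, since register_lock_class() walks the hash heads initialized here. Roughly, at the very top of start_kernel() in init/main.c (a sketch; details may differ):

asmlinkage void __init start_kernel(void)
{
	/*
	 * Must run before the first lock class is registered, so that
	 * the class and chain hash list heads are valid:
	 */
	lockdep_init();
	local_irq_disable();
	early_boot_irqs_off();
	/* ... the rest of early boot ... */
}
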
2538 | |||
2539 | void __init lockdep_info(void) | ||
2540 | { | ||
2541 | printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); | ||
2542 | |||
2543 | printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES); | ||
2544 | printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH); | ||
2545 | printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS); | ||
2546 | printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE); | ||
2547 | printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES); | ||
2548 | printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS); | ||
2549 | printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE); | ||
2550 | |||
2551 | printk(" memory used by lock dependency info: %lu kB\n", | ||
2552 | (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS + | ||
2553 | sizeof(struct list_head) * CLASSHASH_SIZE + | ||
2554 | sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + | ||
2555 | sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + | ||
2556 | sizeof(struct list_head) * CHAINHASH_SIZE) / 1024); | ||
2557 | |||
2558 | printk(" per task-struct memory footprint: %lu bytes\n", | ||
2559 | sizeof(struct held_lock) * MAX_LOCK_DEPTH); | ||
2560 | |||
2561 | #ifdef CONFIG_DEBUG_LOCKDEP | ||
2562 | if (lockdep_init_error) | ||
2563 | printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n"); | ||
2564 | #endif | ||
2565 | } | ||
2566 | |||
2567 | static inline int in_range(const void *start, const void *addr, const void *end) | ||
2568 | { | ||
2569 | return addr >= start && addr <= end; | ||
2570 | } | ||
2571 | |||
2572 | static void | ||
2573 | print_freed_lock_bug(struct task_struct *curr, const void *mem_from, | ||
2574 | const void *mem_to) | ||
2575 | { | ||
2576 | if (!debug_locks_off()) | ||
2577 | return; | ||
2578 | if (debug_locks_silent) | ||
2579 | return; | ||
2580 | |||
2581 | printk("\n=========================\n"); | ||
2582 | printk( "[ BUG: held lock freed! ]\n"); | ||
2583 | printk( "-------------------------\n"); | ||
2584 | printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n", | ||
2585 | curr->comm, curr->pid, mem_from, mem_to-1); | ||
2586 | lockdep_print_held_locks(curr); | ||
2587 | |||
2588 | printk("\nstack backtrace:\n"); | ||
2589 | dump_stack(); | ||
2590 | } | ||
2591 | |||
2592 | /* | ||
2593 | * Called when kernel memory is freed (or unmapped), or if a lock | ||
2594 | * is destroyed or reinitialized - this code checks whether there is | ||
2595 | * any held lock in the memory range of <from> to <to>: | ||
2596 | */ | ||
2597 | void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) | ||
2598 | { | ||
2599 | const void *mem_to = mem_from + mem_len, *lock_from, *lock_to; | ||
2600 | struct task_struct *curr = current; | ||
2601 | struct held_lock *hlock; | ||
2602 | unsigned long flags; | ||
2603 | int i; | ||
2604 | |||
2605 | if (unlikely(!debug_locks)) | ||
2606 | return; | ||
2607 | |||
2608 | local_irq_save(flags); | ||
2609 | for (i = 0; i < curr->lockdep_depth; i++) { | ||
2610 | hlock = curr->held_locks + i; | ||
2611 | |||
2612 | lock_from = (void *)hlock->instance; | ||
2613 | lock_to = (void *)(hlock->instance + 1); | ||
2614 | |||
2615 | if (!in_range(mem_from, lock_from, mem_to) && | ||
2616 | !in_range(mem_from, lock_to, mem_to)) | ||
2617 | continue; | ||
2618 | |||
2619 | print_freed_lock_bug(curr, mem_from, mem_to); | ||
2620 | break; | ||
2621 | } | ||
2622 | local_irq_restore(flags); | ||
2623 | } | ||
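A typical bug this catches is tearing down an object while one of its embedded locks is still held. Assuming the allocator free paths call into debug_check_no_locks_freed() (kfree() among them), a made-up example of the pattern that triggers the report:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
	/* ... */
};

static void buggy_teardown(struct foo *f)
{
	spin_lock(&f->lock);
	/* ... conclude that this was the last user ... */
	kfree(f);	/* the free path runs debug_check_no_locks_freed()
			 * and the "held lock freed!" report above fires */
	/* any later unlock would touch freed memory - use-after-free */
}
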
2624 | |||
2625 | static void print_held_locks_bug(struct task_struct *curr) | ||
2626 | { | ||
2627 | if (!debug_locks_off()) | ||
2628 | return; | ||
2629 | if (debug_locks_silent) | ||
2630 | return; | ||
2631 | |||
2632 | printk("\n=====================================\n"); | ||
2633 | printk( "[ BUG: lock held at task exit time! ]\n"); | ||
2634 | printk( "-------------------------------------\n"); | ||
2635 | printk("%s/%d is exiting with locks still held!\n", | ||
2636 | curr->comm, curr->pid); | ||
2637 | lockdep_print_held_locks(curr); | ||
2638 | |||
2639 | printk("\nstack backtrace:\n"); | ||
2640 | dump_stack(); | ||
2641 | } | ||
2642 | |||
2643 | void debug_check_no_locks_held(struct task_struct *task) | ||
2644 | { | ||
2645 | if (unlikely(task->lockdep_depth > 0)) | ||
2646 | print_held_locks_bug(task); | ||
2647 | } | ||
2648 | |||
2649 | void debug_show_all_locks(void) | ||
2650 | { | ||
2651 | struct task_struct *g, *p; | ||
2652 | int count = 10; | ||
2653 | int unlock = 1; | ||
2654 | |||
2655 | printk("\nShowing all locks held in the system:\n"); | ||
2656 | |||
2657 | /* | ||
2658 | * Here we try to get the tasklist_lock as hard as possible, | ||
2659 | * if not successful after 2 seconds we ignore it (but keep | ||
2660 | * trying). This is to enable a debug printout even if a | ||
2661 | * tasklist_lock-holding task deadlocks or crashes. | ||
2662 | */ | ||
2663 | retry: | ||
2664 | if (!read_trylock(&tasklist_lock)) { | ||
2665 | if (count == 10) | ||
2666 | printk("hm, tasklist_lock locked, retrying... "); | ||
2667 | if (count) { | ||
2668 | count--; | ||
2669 | printk(" #%d", 10-count); | ||
2670 | mdelay(200); | ||
2671 | goto retry; | ||
2672 | } | ||
2673 | printk(" ignoring it.\n"); | ||
2674 | unlock = 0; | ||
2675 | } | ||
2676 | if (count != 10) | ||
2677 | printk(" locked it.\n"); | ||
2678 | |||
2679 | do_each_thread(g, p) { | ||
2680 | if (p->lockdep_depth) | ||
2681 | lockdep_print_held_locks(p); | ||
2682 | if (!unlock) | ||
2683 | if (read_trylock(&tasklist_lock)) | ||
2684 | unlock = 1; | ||
2685 | } while_each_thread(g, p); | ||
2686 | |||
2687 | printk("\n"); | ||
2688 | printk("=============================================\n\n"); | ||
2689 | |||
2690 | if (unlock) | ||
2691 | read_unlock(&tasklist_lock); | ||
2692 | } | ||
2693 | |||
2694 | EXPORT_SYMBOL_GPL(debug_show_all_locks); | ||
2695 | |||
2696 | void debug_show_held_locks(struct task_struct *task) | ||
2697 | { | ||
2698 | lockdep_print_held_locks(task); | ||
2699 | } | ||
2700 | |||
2701 | EXPORT_SYMBOL_GPL(debug_show_held_locks); | ||
2702 | |||