Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--	kernel/locking/lockdep.c	107
1 file changed, 46 insertions(+), 61 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 6426d071a324..27b992fe8cec 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -434,29 +434,14 @@ static void print_lockdep_off(const char *bug_msg)
 #endif
 }
 
-static int save_trace(struct stack_trace *trace)
+static int save_trace(struct lock_trace *trace)
 {
-	trace->nr_entries = 0;
-	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
-	trace->entries = stack_trace + nr_stack_trace_entries;
-
-	trace->skip = 3;
-
-	save_stack_trace(trace);
-
-	/*
-	 * Some daft arches put -1 at the end to indicate its a full trace.
-	 *
-	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
-	 * complete trace that maxes out the entries provided will be reported
-	 * as incomplete, friggin useless </rant>
-	 */
-	if (trace->nr_entries != 0 &&
-	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
-		trace->nr_entries--;
-
-	trace->max_entries = trace->nr_entries;
-
+	unsigned long *entries = stack_trace + nr_stack_trace_entries;
+	unsigned int max_entries;
+
+	trace->offset = nr_stack_trace_entries;
+	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
+	trace->nr_entries = stack_trace_save(entries, max_entries, 3);
 	nr_stack_trace_entries += trace->nr_entries;
 
 	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
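Context for the hunk above (a sketch, not part of the diff): the replacement API stack_trace_save() stores up to @size return addresses in @store, skips the first @skipnr frames and returns the number of entries it actually saved -- there is no ULONG_MAX terminator, which is what lets the "daft arches" fixup be deleted. The lock_trace type is assumed here to be the two-field layout introduced elsewhere in this series:

	/* Assumed definition from the same series (lives in a header): */
	struct lock_trace {
		unsigned int	nr_entries;
		unsigned int	offset;	/* index of entry 0 in stack_trace[] */
	};

	/* The shared pool the offsets point into (already in lockdep.c): */
	static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
	static unsigned int nr_stack_trace_entries;

	/* From <linux/stacktrace.h>: returns the number of entries saved. */
	unsigned int stack_trace_save(unsigned long *store, unsigned int size,
				      unsigned int skipnr);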
@@ -1207,7 +1192,7 @@ static struct lock_list *alloc_list_entry(void)
 static int add_lock_to_list(struct lock_class *this,
			    struct lock_class *links_to, struct list_head *head,
			    unsigned long ip, int distance,
-			    struct stack_trace *trace)
+			    struct lock_trace *trace)
 {
 	struct lock_list *entry;
 	/*
@@ -1426,6 +1411,13 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
  * checking.
  */
 
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+	unsigned long *entries = stack_trace + trace->offset;
+
+	stack_trace_print(entries, trace->nr_entries, spaces);
+}
+
 /*
  * Print a dependency chain entry (this is only done when a deadlock
  * has been detected):
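The print side added above resolves trace->offset back into the shared pool and hands the slice to stack_trace_print(), whose counterpart declaration in <linux/stacktrace.h> is:

	/* Prints @nr_entries return addresses from @trace via printk(),
	 * each indented by @spaces -- the output counterpart of
	 * stack_trace_save(). */
	void stack_trace_print(const unsigned long *trace,
			       unsigned int nr_entries, int spaces);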
@@ -1438,8 +1430,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
 	printk("\n-> #%u", depth);
 	print_lock_name(target->class);
 	printk(KERN_CONT ":\n");
-	print_stack_trace(&target->trace, 6);
-
+	print_lock_trace(&target->trace, 6);
 	return 0;
 }
 
@@ -1533,10 +1524,9 @@ static inline int class_equal(struct lock_list *entry, void *data)
 }
 
 static noinline int print_circular_bug(struct lock_list *this,
				       struct lock_list *target,
				       struct held_lock *check_src,
-				       struct held_lock *check_tgt,
-				       struct stack_trace *trace)
+				       struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
 	struct lock_list *parent;
@@ -1758,7 +1748,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
 			len += printk("%*s %s", depth, "", usage_str[bit]);
 			len += printk(KERN_CONT " at:\n");
-			print_stack_trace(class->usage_traces + bit, len);
+			print_lock_trace(class->usage_traces + bit, len);
 		}
 	}
 	printk("%*s }\n", depth, "");
@@ -1783,7 +1773,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
 	do {
 		print_lock_class_header(entry->class, depth);
 		printk("%*s ... acquired at:\n", depth, "");
-		print_stack_trace(&entry->trace, 2);
+		print_lock_trace(&entry->trace, 2);
 		printk("\n");
 
 		if (depth == 0 && (entry != root)) {
@@ -1896,14 +1886,14 @@ print_bad_irq_dependency(struct task_struct *curr,
 	print_lock_name(backwards_entry->class);
 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
-	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
+	print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
 
 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
 	print_lock_name(forwards_entry->class);
 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
 	pr_warn("...");
 
-	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
+	print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
 
 	pr_warn("\nother info that might help us debug this:\n\n");
 	print_irq_lock_scenario(backwards_entry, forwards_entry,
@@ -2267,8 +2257,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, struct stack_trace *trace,
-	       int (*save)(struct stack_trace *trace))
+	       struct held_lock *next, int distance, struct lock_trace *trace)
 {
 	struct lock_list *uninitialized_var(target_entry);
 	struct lock_list *entry;
@@ -2306,15 +2295,15 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	this.parent = NULL;
 	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
 	if (unlikely(!ret)) {
-		if (!trace->entries) {
+		if (!trace->nr_entries) {
 			/*
-			 * If @save fails here, the printing might trigger
-			 * a WARN but because of the !nr_entries it should
-			 * not do bad things.
+			 * If save_trace fails here, the printing might
+			 * trigger a WARN but because of the !nr_entries it
+			 * should not do bad things.
 			 */
-			save(trace);
+			save_trace(trace);
 		}
-		return print_circular_bug(&this, target_entry, next, prev, trace);
+		return print_circular_bug(&this, target_entry, next, prev);
 	}
 	else if (unlikely(ret < 0))
 		return print_bfs_bug(ret);
@@ -2362,7 +2351,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		return print_bfs_bug(ret);
 
 
-	if (!trace->entries && !save(trace))
+	if (!trace->nr_entries && !save_trace(trace))
 		return 0;
 
 	/*
@@ -2394,14 +2383,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
+	struct lock_trace trace = { .nr_entries = 0 };
 	int depth = curr->lockdep_depth;
 	struct held_lock *hlock;
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.max_entries = 0,
-		.entries = NULL,
-		.skip = 0,
-	};
 
 	/*
 	 * Debugging checks.
@@ -2427,7 +2411,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 * added:
 		 */
 		if (hlock->read != 2 && hlock->check) {
-			int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
+			int ret = check_prev_add(curr, hlock, next, distance,
+						 &trace);
 			if (!ret)
 				return 0;
 
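Taken together, the check_prev_add()/check_prevs_add() hunks above also change the "trace not captured yet" sentinel: the old code tested the now-removed trace->entries pointer, the new code tests trace->nr_entries == 0, so the caller only has to zero a single field. A condensed sketch of the lazy-capture pattern (loop body elided):

	struct lock_trace trace = { .nr_entries = 0 };	/* nothing saved yet */

	/* ... for each earlier held lock considered by check_prevs_add() ... */
	if (!trace.nr_entries && !save_trace(&trace))
		return 0;	/* stack_trace[] pool exhausted */
	/* every dependency recorded for this @next lock reuses the one trace */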
@@ -2828,6 +2813,10 @@ static inline int validate_chain(struct task_struct *curr,
 {
 	return 1;
 }
+
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+}
 #endif
 
 /*
@@ -2930,7 +2919,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	print_lock(this);
 
 	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
-	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
+	print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
 	pr_warn("\nother info that might help us debug this:\n");
@@ -4788,8 +4777,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
 		return;
 
 	raw_local_irq_save(flags);
-	if (!graph_lock())
-		goto out_irq;
+	arch_spin_lock(&lockdep_lock);
+	current->lockdep_recursion = 1;
 
 	/* closed head */
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -4801,8 +4790,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	 */
 	call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
-	graph_unlock();
-out_irq:
+	current->lockdep_recursion = 0;
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 }
 
@@ -4843,21 +4832,17 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
-	int locked;
 
 	init_data_structures_once();
 
 	raw_local_irq_save(flags);
-	locked = graph_lock();
-	if (!locked)
-		goto out_irq;
-
+	arch_spin_lock(&lockdep_lock);
+	current->lockdep_recursion = 1;
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
 	call_rcu_zapped(pf);
-
-	graph_unlock();
-out_irq:
+	current->lockdep_recursion = 0;
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 
 	/*
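On the last three hunks: graph_lock() returns 0 once debug_locks has been cleared, so the old code would bail out via out_irq and skip freeing the zapped entries after lockdep disabled itself. Taking lockdep_lock directly, with a manual current->lockdep_recursion flip to keep lockdep from recursing into itself, keeps the RCU-deferred freeing working unconditionally (motivation inferred from the code; the commit message is not shown on this page). The critical-section shape shared by all three changed functions:

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	current->lockdep_recursion = 1;	/* lockdep must ignore its own locks */

	/* ... mutate lockdep data structures / queue zapped entries ... */

	current->lockdep_recursion = 0;
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);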