author    Linus Torvalds <torvalds@linux-foundation.org>    2019-05-06 16:11:48 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2019-05-06 16:11:48 -0400
commit    2c6a392cddacde153865b15e8295ad0a35ed3c02 (patch)
tree      558c34595f8987c87d26fc0fa0dc644fca9ef2cd /kernel/locking/lockdep.c
parent    0a499fc5c37e6db096969a83534fd98a2bf2b36c (diff)
parent    3599fe12a125fa7118da2bcc5033d7741fb5f3a1 (diff)
Merge branch 'core-stacktrace-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull stack trace updates from Ingo Molnar:

"So Thomas looked at the stacktrace code recently and noticed a few weirdnesses, and we all know how such stories of crummy kernel code meeting German engineering perfection end: a 45-patch series to clean it all up! :-)

Here are the changes in Thomas's words:

'Struct stack_trace is a sinkhole for input and output parameters which is largely pointless for most usage sites. In fact, if embedded into other data structures it creates indirections and extra storage overhead for no benefit.

Looking at all usage sites makes it clear that they just require an interface which is based on a storage array. That array is either on stack, global or embedded into some other data structure.

Some of the stack depot usage sites are outright wrong, but fortunately the wrongness just causes more stack being used for nothing and does not have functional impact.

Another oddity is the inconsistent termination of the stack trace with ULONG_MAX. It's pointless, as the number of entries is what determines the length of the stored trace. In fact, quite some call sites remove the ULONG_MAX marker afterwards, with or without nasty comments about it. Not all architectures do that, and those which do, do it inconsistently: either conditional on nr_entries == 0 or unconditionally.

The following series cleans that up by:

  1) Removing the ULONG_MAX termination in the architecture code
  2) Removing the ULONG_MAX fixups at the call sites
  3) Providing plain storage array based interfaces for stacktrace and stackdepot
  4) Cleaning up the mess at the call sites, including some related cleanups
  5) Removing the struct stack_trace based interfaces

This is not changing the struct stack_trace interfaces at the architecture level, but it removes the exposure to the generic code.'"

* 'core-stacktrace-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (45 commits)
  x86/stacktrace: Use common infrastructure
  stacktrace: Provide common infrastructure
  lib/stackdepot: Remove obsolete functions
  stacktrace: Remove obsolete functions
  livepatch: Simplify stack trace retrieval
  tracing: Remove the last struct stack_trace usage
  tracing: Simplify stack trace retrieval
  tracing: Make ftrace_trace_userstack() static and conditional
  tracing: Use percpu stack trace buffer more intelligently
  tracing: Simplify stacktrace retrieval in histograms
  lockdep: Simplify stack trace handling
  lockdep: Remove save argument from check_prev_add()
  lockdep: Remove unused trace argument from print_circular_bug()
  drm: Simplify stacktrace handling
  dm persistent data: Simplify stack trace handling
  dm bufio: Simplify stack trace retrieval
  btrfs: ref-verify: Simplify stack trace retrieval
  dma/debug: Simplify stracktrace retrieval
  fault-inject: Simplify stacktrace retrieval
  mm/page_owner: Simplify stack trace handling
  ...
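For orientation, here is a minimal before/after sketch of the interface change this series makes. The function signatures are the ones visible in the diff below (save_stack_trace()/print_stack_trace() versus stack_trace_save()/stack_trace_print()); the demo functions and the buffer are illustrative only, not code from the series:

	/* Before: parameters bundled into struct stack_trace. */
	static unsigned long buf[16];

	static void demo_old(void)
	{
		struct stack_trace trace = {
			.entries	= buf,
			.max_entries	= ARRAY_SIZE(buf),
			.skip		= 1,	/* drop demo_old() itself */
		};

		save_stack_trace(&trace);
		/* some arches also appended a ULONG_MAX terminator here */
		print_stack_trace(&trace, 2);
	}

	/* After: a plain storage array in, an entry count out. */
	static void demo_new(void)
	{
		unsigned int nr;

		nr = stack_trace_save(buf, ARRAY_SIZE(buf), 1);
		stack_trace_print(buf, nr, 2);
	}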
Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--    kernel/locking/lockdep.c    87
1 file changed, 38 insertions(+), 49 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e221be724fe8..91c6b89f04df 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -434,29 +434,14 @@ static void print_lockdep_off(const char *bug_msg)
 #endif
 }
 
-static int save_trace(struct stack_trace *trace)
+static int save_trace(struct lock_trace *trace)
 {
-	trace->nr_entries = 0;
-	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
-	trace->entries = stack_trace + nr_stack_trace_entries;
-
-	trace->skip = 3;
-
-	save_stack_trace(trace);
-
-	/*
-	 * Some daft arches put -1 at the end to indicate its a full trace.
-	 *
-	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
-	 * complete trace that maxes out the entries provided will be reported
-	 * as incomplete, friggin useless </rant>
-	 */
-	if (trace->nr_entries != 0 &&
-	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
-		trace->nr_entries--;
-
-	trace->max_entries = trace->nr_entries;
+	unsigned long *entries = stack_trace + nr_stack_trace_entries;
+	unsigned int max_entries;
 
+	trace->offset = nr_stack_trace_entries;
+	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
+	trace->nr_entries = stack_trace_save(entries, max_entries, 3);
 	nr_stack_trace_entries += trace->nr_entries;
 
 	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
@@ -1207,7 +1192,7 @@ static struct lock_list *alloc_list_entry(void)
 static int add_lock_to_list(struct lock_class *this,
 			    struct lock_class *links_to, struct list_head *head,
 			    unsigned long ip, int distance,
-			    struct stack_trace *trace)
+			    struct lock_trace *trace)
 {
 	struct lock_list *entry;
 	/*
@@ -1426,6 +1411,13 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
  * checking.
  */
 
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+	unsigned long *entries = stack_trace + trace->offset;
+
+	stack_trace_print(entries, trace->nr_entries, spaces);
+}
+
 /*
  * Print a dependency chain entry (this is only done when a deadlock
  * has been detected):
@@ -1438,8 +1430,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
 	printk("\n-> #%u", depth);
 	print_lock_name(target->class);
 	printk(KERN_CONT ":\n");
-	print_stack_trace(&target->trace, 6);
-
+	print_lock_trace(&target->trace, 6);
 	return 0;
 }
 
@@ -1533,10 +1524,9 @@ static inline int class_equal(struct lock_list *entry, void *data)
 }
 
 static noinline int print_circular_bug(struct lock_list *this,
 				       struct lock_list *target,
 				       struct held_lock *check_src,
-				       struct held_lock *check_tgt,
-				       struct stack_trace *trace)
+				       struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
 	struct lock_list *parent;
@@ -1752,7 +1742,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
 			len += printk("%*s %s", depth, "", usage_str[bit]);
 			len += printk(KERN_CONT " at:\n");
-			print_stack_trace(class->usage_traces + bit, len);
+			print_lock_trace(class->usage_traces + bit, len);
 		}
 	}
 	printk("%*s }\n", depth, "");
@@ -1777,7 +1767,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
 	do {
 		print_lock_class_header(entry->class, depth);
 		printk("%*s ... acquired at:\n", depth, "");
-		print_stack_trace(&entry->trace, 2);
+		print_lock_trace(&entry->trace, 2);
 		printk("\n");
 
 		if (depth == 0 && (entry != root)) {
@@ -1890,14 +1880,14 @@ print_bad_irq_dependency(struct task_struct *curr,
 	print_lock_name(backwards_entry->class);
 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
-	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
+	print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
 
 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
 	print_lock_name(forwards_entry->class);
 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
 	pr_warn("...");
 
-	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
+	print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
 
 	pr_warn("\nother info that might help us debug this:\n\n");
 	print_irq_lock_scenario(backwards_entry, forwards_entry,
@@ -2170,8 +2160,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, struct stack_trace *trace,
-	       int (*save)(struct stack_trace *trace))
+	       struct held_lock *next, int distance, struct lock_trace *trace)
 {
 	struct lock_list *uninitialized_var(target_entry);
 	struct lock_list *entry;
@@ -2209,15 +2198,15 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	this.parent = NULL;
 	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
 	if (unlikely(!ret)) {
-		if (!trace->entries) {
+		if (!trace->nr_entries) {
 			/*
-			 * If @save fails here, the printing might trigger
-			 * a WARN but because of the !nr_entries it should
-			 * not do bad things.
+			 * If save_trace fails here, the printing might
+			 * trigger a WARN but because of the !nr_entries it
+			 * should not do bad things.
 			 */
-			save(trace);
+			save_trace(trace);
 		}
-		return print_circular_bug(&this, target_entry, next, prev, trace);
+		return print_circular_bug(&this, target_entry, next, prev);
 	}
 	else if (unlikely(ret < 0))
 		return print_bfs_bug(ret);
@@ -2265,7 +2254,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		return print_bfs_bug(ret);
 
 
-	if (!trace->entries && !save(trace))
+	if (!trace->nr_entries && !save_trace(trace))
 		return 0;
 
 	/*
@@ -2297,14 +2286,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
+	struct lock_trace trace = { .nr_entries = 0 };
 	int depth = curr->lockdep_depth;
 	struct held_lock *hlock;
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.max_entries = 0,
-		.entries = NULL,
-		.skip = 0,
-	};
 
 	/*
 	 * Debugging checks.
@@ -2330,7 +2314,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 * added:
 		 */
 		if (hlock->read != 2 && hlock->check) {
-			int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
+			int ret = check_prev_add(curr, hlock, next, distance,
+						 &trace);
 			if (!ret)
 				return 0;
 
@@ -2731,6 +2716,10 @@ static inline int validate_chain(struct task_struct *curr,
 {
 	return 1;
 }
+
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+}
 #endif
 
 /*
@@ -2827,7 +2816,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	print_lock(this);
 
 	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
-	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
+	print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
 	pr_warn("\nother info that might help us debug this:\n");
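As a footnote to the lockdep hunks above: struct lock_trace replaces the old (entries pointer, max_entries, skip) bundle with just an offset and a length into the shared stack_trace[] array. The following is a self-contained userspace model of that bookkeeping, mirroring the logic of save_trace() and print_lock_trace() in this diff; the struct layout is inferred from the fields the diff uses, and fake_stack_trace_save() is a stand-in for the real kernel helper:

	#include <stdio.h>

	#define MAX_STACK_TRACE_ENTRIES 4096

	struct lock_trace {
		unsigned int nr_entries;	/* length of this trace */
		unsigned int offset;		/* start index in stack_trace[] */
	};

	static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
	static unsigned int nr_stack_trace_entries;

	/* Stand-in for stack_trace_save(): records three fake return addresses. */
	static unsigned int fake_stack_trace_save(unsigned long *store,
						  unsigned int size)
	{
		unsigned int i, n = size < 3 ? size : 3;

		for (i = 0; i < n; i++)
			store[i] = 0x400000UL + i * 0x10;
		return n;
	}

	static int save_trace(struct lock_trace *trace)
	{
		unsigned long *entries = stack_trace + nr_stack_trace_entries;
		unsigned int max_entries;

		/* Claim the next free slice of the shared array. */
		trace->offset = nr_stack_trace_entries;
		max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
		trace->nr_entries = fake_stack_trace_save(entries, max_entries);
		nr_stack_trace_entries += trace->nr_entries;

		return nr_stack_trace_entries < MAX_STACK_TRACE_ENTRIES - 1;
	}

	static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
	{
		unsigned long *entries = stack_trace + trace->offset;
		unsigned int i;

		for (i = 0; i < trace->nr_entries; i++)
			printf("%*s[<%016lx>]\n", (int)spaces, "", entries[i]);
	}

	int main(void)
	{
		struct lock_trace a, b;

		save_trace(&a);			/* fills stack_trace[0..2] */
		save_trace(&b);			/* fills stack_trace[3..5] */
		print_lock_trace(&b, 4);	/* prints only b's three entries */
		return 0;
	}

Traces are append-only: once stored, an (offset, nr_entries) pair stays valid for the lifetime of the lock class, which is why the code can hand out plain indices into the shared array instead of pointers.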