author     Yuyang Du <duyuyang@gmail.com>    2019-05-06 04:19:35 -0400
committer  Ingo Molnar <mingo@kernel.org>    2019-06-03 05:55:50 -0400
commit     8c2c2b449aa50463ba4cc1f33cdfc98750ed03ab
tree       086296a29b9ac514854a0fdff26819243a9cf42a /kernel/locking
parent     b4adfe8e05f15d7e73309c93c2c337df7eb5278f
locking/lockdep: Refactorize check_noncircular and check_redundant
These two functions now handle different check results themselves. A new
check_path function is added to check whether there is a path in the
dependency graph. No functional change.
Signed-off-by: Yuyang Du <duyuyang@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bvanassche@acm.org
Cc: frederic@kernel.org
Cc: ming.lei@redhat.com
Cc: will.deacon@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-20-duyuyang@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/lockdep.c | 118
1 file changed, 74 insertions(+), 44 deletions(-)
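Before the diff itself, a minimal standalone C sketch of the pattern this patch introduces may help: both callers build a source entry for a forward BFS and delegate the reachability question to one shared check_path() helper, differing only in how they interpret the result. The toy graph, the node indices, and bfs_reachable() below are hypothetical stand-ins for lockdep's dependency graph, struct lock_list and __bfs_forwards(); this illustrates the shape of the refactoring, not the kernel code.

#include <stdbool.h>
#include <stdio.h>

#define NNODES 4

/* Toy dependency graph: adj[a][b] means there is an edge a -> b. */
static bool adj[NNODES][NNODES];

/* Stand-in for __bfs_forwards(): is target reachable from src? */
static bool bfs_reachable(int src, int target)
{
        bool seen[NNODES] = { false };
        int queue[NNODES], head = 0, tail = 0;

        seen[src] = true;
        queue[tail++] = src;
        while (head < tail) {
                int n = queue[head++];

                if (n == target)
                        return true;
                for (int m = 0; m < NNODES; m++) {
                        if (adj[n][m] && !seen[m]) {
                                seen[m] = true;  /* each node enqueued once */
                                queue[tail++] = m;
                        }
                }
        }
        return false;
}

/* The shared helper the patch factors out: one path query, used twice. */
static bool check_path(int src, int target)
{
        return bfs_reachable(src, target);
}

/*
 * Adding target -> src closes a cycle iff src already reaches target.
 * Mirrors check_noncircular(): 0 on error (a cycle), 1 when safe.
 */
static int check_noncircular(int src, int target)
{
        if (check_path(src, target)) {
                printf("adding %d -> %d would create a cycle\n", target, src);
                return 0;
        }
        return 1;
}

/*
 * src -> target is redundant iff the graph already implies it.
 * Mirrors check_redundant(): 2 when redundant, 1 when genuinely new.
 */
static int check_redundant(int src, int target)
{
        return check_path(src, target) ? 2 : 1;
}

int main(void)
{
        adj[0][1] = adj[1][2] = true;   /* 0 -> 1 -> 2 */

        /* 0 reaches 2, so an edge 2 -> 0 would close a cycle: returns 0. */
        printf("check_noncircular(0, 2) = %d\n", check_noncircular(0, 2));

        /* 0 -> 2 already follows from 0 -> 1 -> 2: redundant, returns 2. */
        printf("check_redundant(0, 2) = %d\n", check_redundant(0, 2));
        return 0;
}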
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8169706df767..30a1c0e32573 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1683,33 +1683,90 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 }
 
 /*
- * Prove that the dependency graph starting at <entry> can not
- * lead to <target>. Print an error and return 0 if it does.
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. Print an error and return 0 if it does.
  */
 static noinline int
-check_noncircular(struct lock_list *root, struct lock_class *target,
-                  struct lock_list **target_entry)
+check_path(struct lock_class *target, struct lock_list *src_entry,
+           struct lock_list **target_entry)
 {
-        int result;
+        int ret;
+
+        ret = __bfs_forwards(src_entry, (void *)target, class_equal,
+                             target_entry);
+
+        if (unlikely(ret < 0))
+                print_bfs_bug(ret);
+
+        return ret;
+}
+
+/*
+ * Prove that the dependency graph starting at <src> can not
+ * lead to <target>. If it can, there is a circle when adding
+ * <target> -> <src> dependency.
+ *
+ * Print an error and return 0 if it does.
+ */
+static noinline int
+check_noncircular(struct held_lock *src, struct held_lock *target,
+                  struct lock_trace *trace)
+{
+        int ret;
+        struct lock_list *uninitialized_var(target_entry);
+        struct lock_list src_entry = {
+                .class = hlock_class(src),
+                .parent = NULL,
+        };
 
         debug_atomic_inc(nr_cyclic_checks);
 
-        result = __bfs_forwards(root, target, class_equal, target_entry);
+        ret = check_path(hlock_class(target), &src_entry, &target_entry);
 
-        return result;
+        if (unlikely(!ret)) {
+                if (!trace->nr_entries) {
+                        /*
+                         * If save_trace fails here, the printing might
+                         * trigger a WARN but because of the !nr_entries it
+                         * should not do bad things.
+                         */
+                        save_trace(trace);
+                }
+
+                print_circular_bug(&src_entry, target_entry, src, target);
+        }
+
+        return ret;
 }
 
+/*
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. If it can, <src> -> <target> dependency is already
+ * in the graph.
+ *
+ * Print an error and return 2 if it does or 1 if it does not.
+ */
 static noinline int
-check_redundant(struct lock_list *root, struct lock_class *target,
-                struct lock_list **target_entry)
+check_redundant(struct held_lock *src, struct held_lock *target)
 {
-        int result;
+        int ret;
+        struct lock_list *uninitialized_var(target_entry);
+        struct lock_list src_entry = {
+                .class = hlock_class(src),
+                .parent = NULL,
+        };
 
         debug_atomic_inc(nr_redundant_checks);
 
-        result = __bfs_forwards(root, target, class_equal, target_entry);
+        ret = check_path(hlock_class(target), &src_entry, &target_entry);
 
-        return result;
+        if (!ret) {
+                debug_atomic_inc(nr_redundant);
+                ret = 2;
+        } else if (ret < 0)
+                ret = 0;
+
+        return ret;
 }
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -2307,9 +2364,7 @@ static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
                struct held_lock *next, int distance, struct lock_trace *trace)
 {
-        struct lock_list *uninitialized_var(target_entry);
         struct lock_list *entry;
-        struct lock_list this;
         int ret;
 
         if (!hlock_class(prev)->key || !hlock_class(next)->key) {
@@ -2340,25 +2395,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
          * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
          * in the graph whose neighbours are to be checked.
          */
-        this.class = hlock_class(next);
-        this.parent = NULL;
-        ret = check_noncircular(&this, hlock_class(prev), &target_entry);
-        if (unlikely(!ret)) {
-                if (!trace->nr_entries) {
-                        /*
-                         * If save_trace fails here, the printing might
-                         * trigger a WARN but because of the !nr_entries it
-                         * should not do bad things.
-                         */
-                        save_trace(trace);
-                }
-                print_circular_bug(&this, target_entry, next, prev);
+        ret = check_noncircular(next, prev, trace);
+        if (unlikely(ret <= 0))
                 return 0;
-        }
-        else if (unlikely(ret < 0)) {
-                print_bfs_bug(ret);
-                return 0;
-        }
 
         if (!check_irq_usage(curr, prev, next))
                 return 0;
@@ -2392,18 +2431,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         /*
          * Is the <prev> -> <next> link redundant?
          */
-        this.class = hlock_class(prev);
-        this.parent = NULL;
-        ret = check_redundant(&this, hlock_class(next), &target_entry);
-        if (!ret) {
-                debug_atomic_inc(nr_redundant);
-                return 2;
-        }
-        if (ret < 0) {
-                print_bfs_bug(ret);
-                return 0;
-        }
-
+        ret = check_redundant(prev, next);
+        if (ret != 1)
+                return ret;
 
         if (!trace->nr_entries && !save_trace(trace))
                 return 0;
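A note on the resulting calling convention, as the hunks above show it: check_redundant() now folds the BFS error case into its return value (0: abort, 1: the dependency is new, 2: the dependency is redundant), which is what lets check_prev_add() shrink its two error branches to the single "if (ret != 1) return ret;". check_noncircular() keeps the 0-means-failure convention and now prints the circular-bug report itself, so its caller likewise collapses to "if (unlikely(ret <= 0)) return 0;". The same BFS and the same result handling are merely relocated into the callees, consistent with the commit message's "No functional change."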