Diffstat (limited to 'kernel/locking/lockdep.c')
 -rw-r--r--  kernel/locking/lockdep.c  377
 1 file changed, 232 insertions(+), 145 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 34cdcbedda49..d06190fa5082 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -434,29 +434,14 @@ static void print_lockdep_off(const char *bug_msg)
 #endif
 }
 
-static int save_trace(struct stack_trace *trace)
+static int save_trace(struct lock_trace *trace)
 {
-	trace->nr_entries = 0;
-	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
-	trace->entries = stack_trace + nr_stack_trace_entries;
-
-	trace->skip = 3;
-
-	save_stack_trace(trace);
-
-	/*
-	 * Some daft arches put -1 at the end to indicate its a full trace.
-	 *
-	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
-	 * complete trace that maxes out the entries provided will be reported
-	 * as incomplete, friggin useless </rant>
-	 */
-	if (trace->nr_entries != 0 &&
-	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
-		trace->nr_entries--;
-
-	trace->max_entries = trace->nr_entries;
+	unsigned long *entries = stack_trace + nr_stack_trace_entries;
+	unsigned int max_entries;
 
+	trace->offset = nr_stack_trace_entries;
+	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
+	trace->nr_entries = stack_trace_save(entries, max_entries, 3);
 	nr_stack_trace_entries += trace->nr_entries;
 
 	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
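
The new representation is worth spelling out: a lock_trace no longer carries its own entry buffer, it just records where its slice begins in the single shared stack_trace[] array and how many entries stack_trace_save() returned. A minimal userspace sketch of that bookkeeping (the struct layout and the fake saver below are illustrative assumptions, not the kernel's definitions):

#include <stdio.h>

#define MAX_STACK_TRACE_ENTRIES 16	/* tiny, for the example only */

static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
static unsigned int nr_stack_trace_entries;

struct lock_trace {
	unsigned int nr_entries;
	unsigned int offset;		/* index into stack_trace[] */
};

/* stand-in for stack_trace_save(): record three fake return addresses */
static unsigned int fake_stack_trace_save(unsigned long *store, unsigned int size)
{
	unsigned int i, n = size < 3 ? size : 3;

	for (i = 0; i < n; i++)
		store[i] = 0x1000 + i;
	return n;
}

static void save_trace(struct lock_trace *trace)
{
	unsigned long *entries = stack_trace + nr_stack_trace_entries;
	unsigned int max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;

	trace->offset = nr_stack_trace_entries;
	trace->nr_entries = fake_stack_trace_save(entries, max_entries);
	nr_stack_trace_entries += trace->nr_entries;
}

int main(void)
{
	struct lock_trace a, b;

	save_trace(&a);
	save_trace(&b);
	/* b's entries start right after a's in the one shared array */
	printf("a: offset=%u nr=%u, b: offset=%u nr=%u\n",
	       a.offset, a.nr_entries, b.offset, b.nr_entries);
	return 0;
}
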
@@ -516,11 +501,11 @@ static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
 {
 	char c = '.';
 
-	if (class->usage_mask & lock_flag(bit + 2))
+	if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
 		c = '+';
 	if (class->usage_mask & lock_flag(bit)) {
 		c = '-';
-		if (class->usage_mask & lock_flag(bit + 2))
+		if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
 			c = '?';
 	}
 
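
The magic constant 2 is gone because the direction is encoded in bit 1 of the usage bit number, so adding LOCK_USAGE_DIR_MASK turns a LOCK_USED_IN_* bit into its LOCK_ENABLED_* counterpart. A small model of the resulting character legend, assuming that pairing (the helper below is an illustration, not lockdep code):

/*
 *   neither bit set   -> '.'
 *   only ENABLED set  -> '+'
 *   only USED_IN set  -> '-'
 *   both set          -> '?'
 */
char usage_char_model(int used_in_irq, int enabled_in_irq)
{
	char c = '.';

	if (enabled_in_irq)
		c = '+';
	if (used_in_irq) {
		c = '-';
		if (enabled_in_irq)
			c = '?';
	}
	return c;
}
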
@@ -649,6 +634,9 @@ static int static_obj(const void *obj)
 		      end   = (unsigned long) &_end,
 		      addr  = (unsigned long) obj;
 
+	if (arch_is_kernel_initmem_freed(addr))
+		return 0;
+
 	/*
 	 * static variable?
 	 */
@@ -1207,7 +1195,7 @@ static struct lock_list *alloc_list_entry(void)
 static int add_lock_to_list(struct lock_class *this,
 			    struct lock_class *links_to, struct list_head *head,
 			    unsigned long ip, int distance,
-			    struct stack_trace *trace)
+			    struct lock_trace *trace)
 {
 	struct lock_list *entry;
 	/*
@@ -1426,6 +1414,13 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
  * checking.
  */
 
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+	unsigned long *entries = stack_trace + trace->offset;
+
+	stack_trace_print(entries, trace->nr_entries, spaces);
+}
+
 /*
  * Print a dependency chain entry (this is only done when a deadlock
  * has been detected):
@@ -1438,8 +1433,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
 	printk("\n-> #%u", depth);
 	print_lock_name(target->class);
 	printk(KERN_CONT ":\n");
-	print_stack_trace(&target->trace, 6);
-
+	print_lock_trace(&target->trace, 6);
 	return 0;
 }
 
@@ -1533,10 +1527,9 @@ static inline int class_equal(struct lock_list *entry, void *data)
 }
 
 static noinline int print_circular_bug(struct lock_list *this,
 				       struct lock_list *target,
 				       struct held_lock *check_src,
-				       struct held_lock *check_tgt,
-				       struct stack_trace *trace)
+				       struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
 	struct lock_list *parent;
@@ -1676,19 +1669,25 @@ check_redundant(struct lock_list *root, struct lock_class *target,
 }
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+
+static inline int usage_accumulate(struct lock_list *entry, void *mask)
+{
+	*(unsigned long *)mask |= entry->class->usage_mask;
+
+	return 0;
+}
+
 /*
  * Forwards and backwards subgraph searching, for the purposes of
  * proving that two subgraphs can be connected by a new dependency
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
 
-static inline int usage_match(struct lock_list *entry, void *bit)
+static inline int usage_match(struct lock_list *entry, void *mask)
 {
-	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+	return entry->class->usage_mask & *(unsigned long *)mask;
 }
 
-
-
 /*
  * Find a node in the forwards-direction dependency sub-graph starting
  * at @root->class that matches @bit.
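
__bfs_forwards()/__bfs_backwards() now get a pointer to a whole usage mask instead of a single bit: usage_accumulate() ORs every visited class into the caller's mask and keeps walking, while usage_match() stops as soon as any queried bit is present. A toy, self-contained model of the two callbacks (the node type and the linear walk are stand-ins for lockdep's lock_list graph):

#include <stdio.h>

struct toy_class { unsigned long usage_mask; };

static int toy_accumulate(struct toy_class *c, void *mask)
{
	*(unsigned long *)mask |= c->usage_mask;
	return 0;				/* keep walking */
}

static int toy_match(struct toy_class *c, void *mask)
{
	return (c->usage_mask & *(unsigned long *)mask) != 0;	/* stop on hit */
}

int main(void)
{
	struct toy_class graph[] = { { 0x1 }, { 0x4 }, { 0x10 } };
	unsigned long seen = 0, query = 0x4;
	unsigned int i;

	for (i = 0; i < 3; i++)
		toy_accumulate(&graph[i], &seen);
	printf("accumulated mask: %#lx\n", seen);	/* 0x15 */

	for (i = 0; i < 3; i++)
		if (toy_match(&graph[i], &query)) {
			printf("match at node %u\n", i);	/* node 1 */
			break;
		}
	return 0;
}
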
@@ -1700,14 +1699,14 @@ static inline int usage_match(struct lock_list *entry, void *bit)
  * Return <0 on error.
  */
 static int
-find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
 			struct lock_list **target_entry)
 {
 	int result;
 
 	debug_atomic_inc(nr_find_usage_forwards_checks);
 
-	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+	result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
 
 	return result;
 }
@@ -1723,14 +1722,14 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
  * Return <0 on error.
  */
 static int
-find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
 			struct lock_list **target_entry)
 {
 	int result;
 
 	debug_atomic_inc(nr_find_usage_backwards_checks);
 
-	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
+	result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
 
 	return result;
 }
@@ -1752,7 +1751,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
 			len += printk("%*s %s", depth, "", usage_str[bit]);
 			len += printk(KERN_CONT " at:\n");
-			print_stack_trace(class->usage_traces + bit, len);
+			print_lock_trace(class->usage_traces + bit, len);
 		}
 	}
 	printk("%*s }\n", depth, "");
@@ -1777,7 +1776,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
 	do {
 		print_lock_class_header(entry->class, depth);
 		printk("%*s ... acquired at:\n", depth, "");
-		print_stack_trace(&entry->trace, 2);
+		print_lock_trace(&entry->trace, 2);
 		printk("\n");
 
 		if (depth == 0 && (entry != root)) {
@@ -1890,14 +1889,14 @@ print_bad_irq_dependency(struct task_struct *curr,
 	print_lock_name(backwards_entry->class);
 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
-	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
+	print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
 
 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
 	print_lock_name(forwards_entry->class);
 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
 	pr_warn("...");
 
-	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
+	print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
 
 	pr_warn("\nother info that might help us debug this:\n\n");
 	print_irq_lock_scenario(backwards_entry, forwards_entry,
@@ -1922,39 +1921,6 @@ print_bad_irq_dependency(struct task_struct *curr,
 	return 0;
 }
 
-static int
-check_usage(struct task_struct *curr, struct held_lock *prev,
-	    struct held_lock *next, enum lock_usage_bit bit_backwards,
-	    enum lock_usage_bit bit_forwards, const char *irqclass)
-{
-	int ret;
-	struct lock_list this, that;
-	struct lock_list *uninitialized_var(target_entry);
-	struct lock_list *uninitialized_var(target_entry1);
-
-	this.parent = NULL;
-
-	this.class = hlock_class(prev);
-	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
-	if (ret < 0)
-		return print_bfs_bug(ret);
-	if (ret == 1)
-		return ret;
-
-	that.parent = NULL;
-	that.class = hlock_class(next);
-	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
-	if (ret < 0)
-		return print_bfs_bug(ret);
-	if (ret == 1)
-		return ret;
-
-	return print_bad_irq_dependency(curr, &this, &that,
-			target_entry, target_entry1,
-			prev, next,
-			bit_backwards, bit_forwards, irqclass);
-}
-
 static const char *state_names[] = {
 #define LOCKDEP_STATE(__STATE) \
 	__stringify(__STATE),
@@ -1971,9 +1937,19 @@ static const char *state_rnames[] = {
 
 static inline const char *state_name(enum lock_usage_bit bit)
 {
-	return (bit & LOCK_USAGE_READ_MASK) ? state_rnames[bit >> 2] : state_names[bit >> 2];
+	if (bit & LOCK_USAGE_READ_MASK)
+		return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
+	else
+		return state_names[bit >> LOCK_USAGE_DIR_MASK];
 }
 
+/*
+ * The bit number is encoded like:
+ *
+ *  bit0: 0 exclusive, 1 read lock
+ *  bit1: 0 used in irq, 1 irq enabled
+ *  bit2-n: state
+ */
 static int exclusive_bit(int new_bit)
 {
 	int state = new_bit & LOCK_USAGE_STATE_MASK;
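
With that encoding, exclusive_bit() keeps the state bits, flips the direction bit and strips the read bit. A worked example, assuming the conventional numbering where HARDIRQ occupies bit numbers 0-3 and SOFTIRQ 4-7 (the enum values below are written out for illustration, not taken from lockdep.h):

#include <stdio.h>

#define LOCK_USAGE_READ_MASK  1
#define LOCK_USAGE_DIR_MASK   2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))

enum { USED_IN_HARDIRQ = 0, USED_IN_HARDIRQ_READ, ENABLED_HARDIRQ,
       ENABLED_HARDIRQ_READ, USED_IN_SOFTIRQ, USED_IN_SOFTIRQ_READ,
       ENABLED_SOFTIRQ, ENABLED_SOFTIRQ_READ };

/* same computation as exclusive_bit(): flip the direction, drop the read bit */
static int exclusive_bit(int new_bit)
{
	int state = new_bit & LOCK_USAGE_STATE_MASK;
	int dir = new_bit & LOCK_USAGE_DIR_MASK;

	return state | (dir ^ LOCK_USAGE_DIR_MASK);
}

int main(void)
{
	/* USED_IN_HARDIRQ (0) is incompatible with ENABLED_HARDIRQ (2) */
	printf("%d\n", exclusive_bit(USED_IN_HARDIRQ));
	/* ENABLED_SOFTIRQ_READ (7) maps back to USED_IN_SOFTIRQ (4) */
	printf("%d\n", exclusive_bit(ENABLED_SOFTIRQ_READ));
	return 0;
}
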
@@ -1985,45 +1961,160 @@ static int exclusive_bit(int new_bit)
 	return state | (dir ^ LOCK_USAGE_DIR_MASK);
 }
 
+/*
+ * Observe that when given a bitmask where each bitnr is encoded as above, a
+ * right shift of the mask transforms the individual bitnrs as -1 and
+ * conversely, a left shift transforms into +1 for the individual bitnrs.
+ *
+ * So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can
+ * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
+ * instead by subtracting the bit number by 2, or shifting the mask right by 2.
+ *
+ * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
+ *
+ * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
+ * all bits set) and recompose with bitnr1 flipped.
+ */
+static unsigned long invert_dir_mask(unsigned long mask)
+{
+	unsigned long excl = 0;
+
+	/* Invert dir */
+	excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
+	excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
+
+	return excl;
+}
+
+/*
+ * As above, we clear bitnr0 (LOCK_*_READ off) with bitmask ops. First, for all
+ * bits with bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*).
+ * And then mask out all bitnr0.
+ */
+static unsigned long exclusive_mask(unsigned long mask)
+{
+	unsigned long excl = invert_dir_mask(mask);
+
+	/* Strip read */
+	excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
+	excl &= ~LOCKF_IRQ_READ;
+
+	return excl;
+}
+
+/*
+ * Retrieve the _possible_ original mask to which @mask is
+ * exclusive. Ie: this is the opposite of exclusive_mask().
+ * Note that 2 possible original bits can match an exclusive
+ * bit: one has LOCK_USAGE_READ_MASK set, the other has it
+ * cleared. So both are returned for each exclusive bit.
+ */
+static unsigned long original_mask(unsigned long mask)
+{
+	unsigned long excl = invert_dir_mask(mask);
+
+	/* Include read in existing usages */
+	excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
+
+	return excl;
+}
+
+/*
+ * Find the first pair of bit match between an original
+ * usage mask and an exclusive usage mask.
+ */
+static int find_exclusive_match(unsigned long mask,
+				unsigned long excl_mask,
+				enum lock_usage_bit *bitp,
+				enum lock_usage_bit *excl_bitp)
+{
+	int bit, excl;
+
+	for_each_set_bit(bit, &mask, LOCK_USED) {
+		excl = exclusive_bit(bit);
+		if (excl_mask & lock_flag(excl)) {
+			*bitp = bit;
+			*excl_bitp = excl;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Prove that the new dependency does not connect a hardirq-safe(-read)
+ * lock with a hardirq-unsafe lock - to achieve this we search
+ * the backwards-subgraph starting at <prev>, and the
+ * forwards-subgraph starting at <next>:
+ */
 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
-			   struct held_lock *next, enum lock_usage_bit bit)
+			   struct held_lock *next)
 {
+	unsigned long usage_mask = 0, forward_mask, backward_mask;
+	enum lock_usage_bit forward_bit = 0, backward_bit = 0;
+	struct lock_list *uninitialized_var(target_entry1);
+	struct lock_list *uninitialized_var(target_entry);
+	struct lock_list this, that;
+	int ret;
+
 	/*
-	 * Prove that the new dependency does not connect a hardirq-safe
-	 * lock with a hardirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
+	 * Step 1: gather all hard/soft IRQs usages backward in an
+	 * accumulated usage mask.
 	 */
-	if (!check_usage(curr, prev, next, bit,
-			   exclusive_bit(bit), state_name(bit)))
-		return 0;
+	this.parent = NULL;
+	this.class = hlock_class(prev);
+
+	ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
+	if (ret < 0)
+		return print_bfs_bug(ret);
 
-	bit++; /* _READ */
+	usage_mask &= LOCKF_USED_IN_IRQ_ALL;
+	if (!usage_mask)
+		return 1;
 
 	/*
-	 * Prove that the new dependency does not connect a hardirq-safe-read
-	 * lock with a hardirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
+	 * Step 2: find exclusive uses forward that match the previous
+	 * backward accumulated mask.
 	 */
-	if (!check_usage(curr, prev, next, bit,
-			   exclusive_bit(bit), state_name(bit)))
-		return 0;
+	forward_mask = exclusive_mask(usage_mask);
 
-	return 1;
-}
+	that.parent = NULL;
+	that.class = hlock_class(next);
 
-static int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-		   struct held_lock *next)
-{
-#define LOCKDEP_STATE(__STATE)						\
-	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
-		return 0;
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
+	ret = find_usage_forwards(&that, forward_mask, &target_entry1);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
-	return 1;
+	/*
+	 * Step 3: we found a bad match! Now retrieve a lock from the backward
+	 * list whose usage mask matches the exclusive usage mask from the
+	 * lock found on the forward list.
+	 */
+	backward_mask = original_mask(target_entry1->class->usage_mask);
+
+	ret = find_usage_backwards(&this, backward_mask, &target_entry);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (DEBUG_LOCKS_WARN_ON(ret == 1))
+		return 1;
+
+	/*
+	 * Step 4: narrow down to a pair of incompatible usage bits
+	 * and report it.
+	 */
+	ret = find_exclusive_match(target_entry->class->usage_mask,
+				   target_entry1->class->usage_mask,
+				   &backward_bit, &forward_bit);
+	if (DEBUG_LOCKS_WARN_ON(ret == -1))
+		return 1;
+
+	return print_bad_irq_dependency(curr, &this, &that,
+					target_entry, target_entry1,
+					prev, next,
+					backward_bit, forward_bit,
+					state_name(backward_bit));
 }
 
 static void inc_chains(void)
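
The three mask helpers are easiest to follow on concrete numbers. The sketch below hardcodes a two-state (HARDIRQ, SOFTIRQ) layout matching the bit-numbering comment above; the LOCKF_*-style constants are illustrative values, not the ones from lockdep_internals.h:

#include <stdio.h>

#define READ_SHIFT 1	/* LOCK_USAGE_READ_MASK */
#define DIR_SHIFT  2	/* LOCK_USAGE_DIR_MASK */

#define USED_IN_ALL  0x33UL	/* bit numbers 0,1 (hardirq) and 4,5 (softirq) */
#define ENABLED_ALL  0xccUL	/* bit numbers 2,3 and 6,7 */
#define IRQ          0x55UL	/* all non-read bits */
#define IRQ_READ     0xaaUL	/* all read bits */

static unsigned long invert_dir_mask(unsigned long mask)
{
	unsigned long excl = 0;

	excl |= (mask & ENABLED_ALL) >> DIR_SHIFT;
	excl |= (mask & USED_IN_ALL) << DIR_SHIFT;
	return excl;
}

static unsigned long exclusive_mask(unsigned long mask)
{
	unsigned long excl = invert_dir_mask(mask);

	excl |= (excl & IRQ_READ) >> READ_SHIFT;	/* fold read into non-read */
	excl &= ~IRQ_READ;
	return excl;
}

static unsigned long original_mask(unsigned long mask)
{
	unsigned long excl = invert_dir_mask(mask);

	excl |= (excl & IRQ) << READ_SHIFT;		/* re-add the read variants */
	return excl;
}

int main(void)
{
	unsigned long used_in_softirq = 1UL << 4;

	/* USED_IN_SOFTIRQ forbids ENABLED_SOFTIRQ on the other side: bit 6 */
	printf("exclusive: %#lx\n", exclusive_mask(used_in_softirq));
	/* and going back widens to USED_IN_SOFTIRQ(_READ): bits 4 and 5 */
	printf("original:  %#lx\n", original_mask(1UL << 6));
	return 0;
}
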
@@ -2040,9 +2131,8 @@ static void inc_chains(void)
 
 #else
 
-static inline int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-		   struct held_lock *next)
+static inline int check_irq_usage(struct task_struct *curr,
+				  struct held_lock *prev, struct held_lock *next)
 {
 	return 1;
 }
@@ -2170,8 +2260,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, struct stack_trace *trace,
-	       int (*save)(struct stack_trace *trace))
+	       struct held_lock *next, int distance, struct lock_trace *trace)
 {
 	struct lock_list *uninitialized_var(target_entry);
 	struct lock_list *entry;
@@ -2209,20 +2298,20 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	this.parent = NULL;
 	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
 	if (unlikely(!ret)) {
-		if (!trace->entries) {
+		if (!trace->nr_entries) {
 			/*
-			 * If @save fails here, the printing might trigger
-			 * a WARN but because of the !nr_entries it should
-			 * not do bad things.
+			 * If save_trace fails here, the printing might
+			 * trigger a WARN but because of the !nr_entries it
+			 * should not do bad things.
 			 */
-			save(trace);
+			save_trace(trace);
 		}
-		return print_circular_bug(&this, target_entry, next, prev, trace);
+		return print_circular_bug(&this, target_entry, next, prev);
 	}
 	else if (unlikely(ret < 0))
 		return print_bfs_bug(ret);
 
-	if (!check_prev_add_irq(curr, prev, next))
+	if (!check_irq_usage(curr, prev, next))
 		return 0;
 
 	/*
@@ -2265,7 +2354,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		return print_bfs_bug(ret);
 
 
-	if (!trace->entries && !save(trace))
+	if (!trace->nr_entries && !save_trace(trace))
 		return 0;
 
 	/*
@@ -2297,14 +2386,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
+	struct lock_trace trace = { .nr_entries = 0 };
 	int depth = curr->lockdep_depth;
 	struct held_lock *hlock;
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.max_entries = 0,
-		.entries = NULL,
-		.skip = 0,
-	};
 
 	/*
 	 * Debugging checks.
@@ -2330,7 +2414,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 * added:
 		 */
 		if (hlock->read != 2 && hlock->check) {
-			int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
+			int ret = check_prev_add(curr, hlock, next, distance,
+						 &trace);
 			if (!ret)
 				return 0;
 
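
check_prevs_add() now owns a single zero-initialized lock_trace and check_prev_add() calls save_trace() only on first use, so at most one stack dump is captured per new acquisition no matter how many dependencies it adds, and none at all when nothing new is recorded. A standalone sketch of that lazy-capture shape (the types and the capture stand-in are assumptions for illustration):

#include <stdbool.h>
#include <stdio.h>

struct trace_rec {
	unsigned int nr_entries;	/* 0 means "not captured yet" */
	unsigned int offset;
};

static bool expensive_capture(struct trace_rec *t)
{
	t->nr_entries = 3;		/* pretend we walked the stack */
	t->offset = 0;
	return true;
}

/* called once per existing held lock; captures the trace at most once */
static int add_one_dependency(struct trace_rec *t, int need_trace)
{
	if (!need_trace)
		return 1;		/* nothing recorded, nothing captured */
	if (!t->nr_entries && !expensive_capture(t))
		return 0;
	return 1;
}

int main(void)
{
	struct trace_rec trace = { .nr_entries = 0 };
	int i;

	for (i = 0; i < 3; i++)
		add_one_dependency(&trace, 1);
	printf("captured once: nr_entries=%u\n", trace.nr_entries);
	return 0;
}
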
@@ -2731,6 +2816,10 @@ static inline int validate_chain(struct task_struct *curr,
 {
 	return 1;
 }
+
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+}
 #endif
 
 /*
@@ -2784,6 +2873,12 @@ static void check_chain_key(struct task_struct *curr)
 #endif
 }
 
+static int mark_lock(struct task_struct *curr, struct held_lock *this,
+		     enum lock_usage_bit new_bit);
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+
+
 static void
 print_usage_bug_scenario(struct held_lock *lock)
 {
@@ -2827,7 +2922,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	print_lock(this);
 
 	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
-	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
+	print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
 	pr_warn("\nother info that might help us debug this:\n");
@@ -2853,10 +2948,6 @@ valid_state(struct task_struct *curr, struct held_lock *this,
 	return 1;
 }
 
-static int mark_lock(struct task_struct *curr, struct held_lock *this,
-		     enum lock_usage_bit new_bit);
-
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 
 /*
  * print irq inversion bug:
@@ -2936,7 +3027,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 
 	root.parent = NULL;
 	root.class = hlock_class(this);
-	ret = find_usage_forwards(&root, bit, &target_entry);
+	ret = find_usage_forwards(&root, lock_flag(bit), &target_entry);
 	if (ret < 0)
 		return print_bfs_bug(ret);
 	if (ret == 1)
@@ -2960,7 +3051,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 
 	root.parent = NULL;
 	root.class = hlock_class(this);
-	ret = find_usage_backwards(&root, bit, &target_entry);
+	ret = find_usage_backwards(&root, lock_flag(bit), &target_entry);
 	if (ret < 0)
 		return print_bfs_bug(ret);
 	if (ret == 1)
@@ -3015,7 +3106,7 @@ static int (*state_verbose_f[])(struct lock_class *class) = {
 static inline int state_verbose(enum lock_usage_bit bit,
 				struct lock_class *class)
 {
-	return state_verbose_f[bit >> 2](class);
+	return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
 }
 
 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
@@ -3157,7 +3248,7 @@ void lockdep_hardirqs_on(unsigned long ip)
 	/*
 	 * See the fine text that goes along with this variable definition.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
+	if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
 		return;
 
 	/*
@@ -4689,8 +4780,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
 		return;
 
 	raw_local_irq_save(flags);
-	if (!graph_lock())
-		goto out_irq;
+	arch_spin_lock(&lockdep_lock);
+	current->lockdep_recursion = 1;
 
 	/* closed head */
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -4702,8 +4793,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	 */
 	call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
-	graph_unlock();
-out_irq:
+	current->lockdep_recursion = 0;
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 }
 
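
Instead of graph_lock(), which can fail and force the goto-out_irq dance, these teardown paths now take lockdep_lock directly and flag current->lockdep_recursion so lockdep does not recurse into itself while its own data is being freed. The skeleton of the pattern, paraphrased from the two call sites rather than offered as a drop-in helper:

static void lockdep_critical_section(void (*body)(void))
{
	unsigned long flags;

	raw_local_irq_save(flags);		/* no interrupts while holding the raw lock */
	arch_spin_lock(&lockdep_lock);
	current->lockdep_recursion = 1;		/* keep lockdep from checking itself */

	body();

	current->lockdep_recursion = 0;
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);
}
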
@@ -4744,21 +4835,17 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
-	int locked;
 
 	init_data_structures_once();
 
 	raw_local_irq_save(flags);
-	locked = graph_lock();
-	if (!locked)
-		goto out_irq;
-
+	arch_spin_lock(&lockdep_lock);
+	current->lockdep_recursion = 1;
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
 	call_rcu_zapped(pf);
-
-	graph_unlock();
-out_irq:
+	current->lockdep_recursion = 0;
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 
 	/*