Diffstat (limited to 'kernel/locking/lockdep.c')
 kernel/locking/lockdep.c | 267 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 183 insertions(+), 84 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 91c6b89f04df..27b992fe8cec 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -501,11 +501,11 @@ static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
 {
 	char c = '.';
 
-	if (class->usage_mask & lock_flag(bit + 2))
+	if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
 		c = '+';
 	if (class->usage_mask & lock_flag(bit)) {
 		c = '-';
-		if (class->usage_mask & lock_flag(bit + 2))
+		if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
 			c = '?';
 	}
 
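The magic "bit + 2" becomes self-documenting here: lockdep encodes every usage bit as (state << 2 | dir << 1 | read), so adding LOCK_USAGE_DIR_MASK (2) turns a LOCK_USED_IN_* bit into the LOCK_ENABLED_* bit of the same state. A minimal userspace sketch of that encoding (the enum values mirror the layout generated from lockdep_states.h, but this is an illustration, not the kernel headers):

#include <stdio.h>

/* bit0 = read, bit1 = direction (0: used in irq, 1: irq enabled) */
#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2

enum lock_usage_bit {
	LOCK_USED_IN_HARDIRQ      = 0,
	LOCK_USED_IN_HARDIRQ_READ = 1,
	LOCK_ENABLED_HARDIRQ      = 2,
	LOCK_ENABLED_HARDIRQ_READ = 3,
};

#define lock_flag(bit) (1UL << (bit))

int main(void)
{
	unsigned long usage_mask = lock_flag(LOCK_ENABLED_HARDIRQ);
	int bit = LOCK_USED_IN_HARDIRQ;

	/* same test as get_usage_char(): USED_IN bit + DIR_MASK = ENABLED bit */
	if (usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
		printf("'+': lock was held with hardirqs enabled\n");
	return 0;
}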
@@ -1666,19 +1666,25 @@ check_redundant(struct lock_list *root, struct lock_class *target,
 }
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+
+static inline int usage_accumulate(struct lock_list *entry, void *mask)
+{
+	*(unsigned long *)mask |= entry->class->usage_mask;
+
+	return 0;
+}
+
 /*
  * Forwards and backwards subgraph searching, for the purposes of
  * proving that two subgraphs can be connected by a new dependency
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
 
-static inline int usage_match(struct lock_list *entry, void *bit)
+static inline int usage_match(struct lock_list *entry, void *mask)
 {
-	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+	return entry->class->usage_mask & *(unsigned long *)mask;
 }
 
-
-
 /*
  * Find a node in the forwards-direction dependency sub-graph starting
  * at @root->class that matches @bit.
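The switch from a single bit to a mask is what lets one BFS pass answer many questions at once: usage_accumulate() ORs together the usage bits of every class reachable behind a lock, and usage_match() then tests a class against all interesting bits in a single comparison. A standalone sketch of the two callbacks (structures trimmed down to the one field they touch):

#include <stdio.h>

struct lock_class { unsigned long usage_mask; };
struct lock_list  { struct lock_class *class; };

/* OR the visited class's usage bits into the caller's accumulator */
static inline int usage_accumulate(struct lock_list *entry, void *mask)
{
	*(unsigned long *)mask |= entry->class->usage_mask;

	return 0; /* never reports a match, so the walk covers the whole subgraph */
}

/* non-zero iff the class uses any of the bits the caller asked about */
static inline int usage_match(struct lock_list *entry, void *mask)
{
	return entry->class->usage_mask & *(unsigned long *)mask;
}

int main(void)
{
	struct lock_class c = { .usage_mask = 0x5 };
	struct lock_list e = { .class = &c };
	unsigned long accum = 0, query = 0x4;

	usage_accumulate(&e, &accum);
	printf("accumulated %#lx, match %d\n", accum, !!usage_match(&e, &query));
	return 0;
}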
@@ -1690,14 +1696,14 @@ static inline int usage_match(struct lock_list *entry, void *bit)
  * Return <0 on error.
  */
 static int
-find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
 		    struct lock_list **target_entry)
 {
 	int result;
 
 	debug_atomic_inc(nr_find_usage_forwards_checks);
 
-	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+	result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
 
 	return result;
 }
@@ -1713,14 +1719,14 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
  * Return <0 on error.
  */
 static int
-find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
 		     struct lock_list **target_entry)
 {
 	int result;
 
 	debug_atomic_inc(nr_find_usage_backwards_checks);
 
-	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
+	result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
 
 	return result;
 }
@@ -1912,39 +1918,6 @@ print_bad_irq_dependency(struct task_struct *curr,
 	return 0;
 }
 
-static int
-check_usage(struct task_struct *curr, struct held_lock *prev,
-	    struct held_lock *next, enum lock_usage_bit bit_backwards,
-	    enum lock_usage_bit bit_forwards, const char *irqclass)
-{
-	int ret;
-	struct lock_list this, that;
-	struct lock_list *uninitialized_var(target_entry);
-	struct lock_list *uninitialized_var(target_entry1);
-
-	this.parent = NULL;
-
-	this.class = hlock_class(prev);
-	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
-	if (ret < 0)
-		return print_bfs_bug(ret);
-	if (ret == 1)
-		return ret;
-
-	that.parent = NULL;
-	that.class = hlock_class(next);
-	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
-	if (ret < 0)
-		return print_bfs_bug(ret);
-	if (ret == 1)
-		return ret;
-
-	return print_bad_irq_dependency(curr, &this, &that,
-					target_entry, target_entry1,
-					prev, next,
-					bit_backwards, bit_forwards, irqclass);
-}
-
 static const char *state_names[] = {
 #define LOCKDEP_STATE(__STATE) \
 	__stringify(__STATE),
@@ -1961,9 +1934,19 @@ static const char *state_rnames[] = {
 
 static inline const char *state_name(enum lock_usage_bit bit)
 {
-	return (bit & LOCK_USAGE_READ_MASK) ? state_rnames[bit >> 2] : state_names[bit >> 2];
+	if (bit & LOCK_USAGE_READ_MASK)
+		return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
+	else
+		return state_names[bit >> LOCK_USAGE_DIR_MASK];
 }
 
+/*
+ * The bit number is encoded like:
+ *
+ *  bit0: 0 exclusive, 1 read lock
+ *  bit1: 0 used in irq, 1 irq enabled
+ *  bit2-n: state
+ */
 static int exclusive_bit(int new_bit)
 {
 	int state = new_bit & LOCK_USAGE_STATE_MASK;
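With the encoding spelled out, exclusive_bit() reduces to plain bit arithmetic: keep the state bits, flip the direction bit, strip the read bit. A standalone check of the two interesting cases (same illustrative constants as the sketches above, not the kernel headers):

#include <assert.h>

#define LOCK_USAGE_READ_MASK  1
#define LOCK_USAGE_DIR_MASK   2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))

enum { LOCK_USED_IN_HARDIRQ, LOCK_USED_IN_HARDIRQ_READ,
       LOCK_ENABLED_HARDIRQ, LOCK_ENABLED_HARDIRQ_READ };

/* keep state, flip direction, drop the read bit */
static int exclusive_bit(int new_bit)
{
	int state = new_bit & LOCK_USAGE_STATE_MASK;
	int dir = new_bit & LOCK_USAGE_DIR_MASK;

	return state | (dir ^ LOCK_USAGE_DIR_MASK);
}

int main(void)
{
	/* a hardirq-read-safe usage conflicts with hardirq-enabled usage */
	assert(exclusive_bit(LOCK_USED_IN_HARDIRQ_READ) == LOCK_ENABLED_HARDIRQ);
	/* and an enabled-read usage maps back to used-in-hardirq */
	assert(exclusive_bit(LOCK_ENABLED_HARDIRQ_READ) == LOCK_USED_IN_HARDIRQ);
	return 0;
}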
@@ -1975,45 +1958,160 @@ static int exclusive_bit(int new_bit)
 	return state | (dir ^ LOCK_USAGE_DIR_MASK);
 }
 
+/*
+ * Observe that when given a bitmask where each bitnr is encoded as above, a
+ * right shift of the mask transforms the individual bitnrs as -1 and
+ * conversely, a left shift transforms into +1 for the individual bitnrs.
+ *
+ * So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can
+ * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
+ * instead by subtracting the bit number by 2, or shifting the mask right by 2.
+ *
+ * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
+ *
+ * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
+ * all bits set) and recompose with bitnr1 flipped.
+ */
+static unsigned long invert_dir_mask(unsigned long mask)
+{
+	unsigned long excl = 0;
+
+	/* Invert dir */
+	excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
+	excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
+
+	return excl;
+}
+
+/*
+ * As above, we clear bitnr0 (LOCK_*_READ off) with bitmask ops. First, for all
+ * bits with bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*).
+ * And then mask out all bitnr0.
+ */
+static unsigned long exclusive_mask(unsigned long mask)
+{
+	unsigned long excl = invert_dir_mask(mask);
+
+	/* Strip read */
+	excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
+	excl &= ~LOCKF_IRQ_READ;
+
+	return excl;
+}
+
+/*
+ * Retrieve the _possible_ original mask to which @mask is
+ * exclusive. Ie: this is the opposite of exclusive_mask().
+ * Note that 2 possible original bits can match an exclusive
+ * bit: one has LOCK_USAGE_READ_MASK set, the other has it
+ * cleared. So both are returned for each exclusive bit.
+ */
+static unsigned long original_mask(unsigned long mask)
+{
+	unsigned long excl = invert_dir_mask(mask);
+
+	/* Include read in existing usages */
+	excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
+
+	return excl;
+}
+
+/*
+ * Find the first pair of bit match between an original
+ * usage mask and an exclusive usage mask.
+ */
+static int find_exclusive_match(unsigned long mask,
+				unsigned long excl_mask,
+				enum lock_usage_bit *bitp,
+				enum lock_usage_bit *excl_bitp)
+{
+	int bit, excl;
+
+	for_each_set_bit(bit, &mask, LOCK_USED) {
+		excl = exclusive_bit(bit);
+		if (excl_mask & lock_flag(excl)) {
+			*bitp = bit;
+			*excl_bitp = excl;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Prove that the new dependency does not connect a hardirq-safe(-read)
+ * lock with a hardirq-unsafe lock - to achieve this we search
+ * the backwards-subgraph starting at <prev>, and the
+ * forwards-subgraph starting at <next>:
+ */
 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
-			   struct held_lock *next, enum lock_usage_bit bit)
+			   struct held_lock *next)
 {
+	unsigned long usage_mask = 0, forward_mask, backward_mask;
+	enum lock_usage_bit forward_bit = 0, backward_bit = 0;
+	struct lock_list *uninitialized_var(target_entry1);
+	struct lock_list *uninitialized_var(target_entry);
+	struct lock_list this, that;
+	int ret;
+
 	/*
-	 * Prove that the new dependency does not connect a hardirq-safe
-	 * lock with a hardirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
+	 * Step 1: gather all hard/soft IRQs usages backward in an
+	 * accumulated usage mask.
 	 */
-	if (!check_usage(curr, prev, next, bit,
-			   exclusive_bit(bit), state_name(bit)))
-		return 0;
+	this.parent = NULL;
+	this.class = hlock_class(prev);
+
+	ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
+	if (ret < 0)
+		return print_bfs_bug(ret);
 
-	bit++; /* _READ */
+	usage_mask &= LOCKF_USED_IN_IRQ_ALL;
+	if (!usage_mask)
+		return 1;
 
 	/*
-	 * Prove that the new dependency does not connect a hardirq-safe-read
-	 * lock with a hardirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
+	 * Step 2: find exclusive uses forward that match the previous
+	 * backward accumulated mask.
 	 */
-	if (!check_usage(curr, prev, next, bit,
-			   exclusive_bit(bit), state_name(bit)))
-		return 0;
+	forward_mask = exclusive_mask(usage_mask);
 
-	return 1;
-}
+	that.parent = NULL;
+	that.class = hlock_class(next);
 
-static int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-		   struct held_lock *next)
-{
-#define LOCKDEP_STATE(__STATE) \
-	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
-		return 0;
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
+	ret = find_usage_forwards(&that, forward_mask, &target_entry1);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
-	return 1;
+	/*
+	 * Step 3: we found a bad match! Now retrieve a lock from the backward
+	 * list whose usage mask matches the exclusive usage mask from the
+	 * lock found on the forward list.
+	 */
+	backward_mask = original_mask(target_entry1->class->usage_mask);
+
+	ret = find_usage_backwards(&this, backward_mask, &target_entry);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (DEBUG_LOCKS_WARN_ON(ret == 1))
+		return 1;
+
+	/*
+	 * Step 4: narrow down to a pair of incompatible usage bits
+	 * and report it.
+	 */
+	ret = find_exclusive_match(target_entry->class->usage_mask,
+				   target_entry1->class->usage_mask,
+				   &backward_bit, &forward_bit);
+	if (DEBUG_LOCKS_WARN_ON(ret == -1))
+		return 1;
+
+	return print_bad_irq_dependency(curr, &this, &that,
+					target_entry, target_entry1,
+					prev, next,
+					backward_bit, forward_bit,
+					state_name(backward_bit));
 }
 
 static void inc_chains(void)
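The payoff: where the old code ran one backward plus one forward BFS per irq state and per read variant, check_irq_usage() now proves every irq-safe -> irq-unsafe scenario with a single backward walk and a single forward walk, and the three mask transforms carry the combinatorics. A standalone model over a reduced two-state (hardirq/softirq) bitmap; the real LOCKF_* constants are generated in lockdep_internals.h, so the values below are illustrative only:

#include <assert.h>

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2

enum { LOCK_USED_IN_HARDIRQ, LOCK_USED_IN_HARDIRQ_READ,
       LOCK_ENABLED_HARDIRQ, LOCK_ENABLED_HARDIRQ_READ,
       LOCK_USED_IN_SOFTIRQ, LOCK_USED_IN_SOFTIRQ_READ,
       LOCK_ENABLED_SOFTIRQ, LOCK_ENABLED_SOFTIRQ_READ };

#define LOCKF_USED_IN_IRQ_ALL 0x33UL /* USED_IN_* and USED_IN_*_READ */
#define LOCKF_ENABLED_IRQ_ALL 0xccUL /* ENABLED_* and ENABLED_*_READ */
#define LOCKF_IRQ             0x55UL /* all non-read bits            */
#define LOCKF_IRQ_READ        0xaaUL /* all read bits                */

static unsigned long invert_dir_mask(unsigned long mask)
{
	unsigned long excl = 0;

	/* shifting the mask by 2 flips the direction bit of every bitnr */
	excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
	excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
	return excl;
}

static unsigned long exclusive_mask(unsigned long mask)
{
	unsigned long excl = invert_dir_mask(mask);

	/* strip read: fold *_READ bits onto their non-read neighbours */
	excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
	excl &= ~LOCKF_IRQ_READ;
	return excl;
}

static unsigned long original_mask(unsigned long mask)
{
	unsigned long excl = invert_dir_mask(mask);

	/* include read: each original may be the read or non-read bit */
	excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
	return excl;
}

int main(void)
{
	/* Step 1 found a hardirq-read-safe lock behind prev ... */
	unsigned long usage_mask = 1UL << LOCK_USED_IN_HARDIRQ_READ;

	/* ... so step 2 must search forward for hardirq-enabled usage */
	assert(exclusive_mask(usage_mask) == 1UL << LOCK_ENABLED_HARDIRQ);

	/* step 3 widens the offender back to both possible originals */
	assert(original_mask(1UL << LOCK_ENABLED_HARDIRQ) ==
	       ((1UL << LOCK_USED_IN_HARDIRQ) |
		(1UL << LOCK_USED_IN_HARDIRQ_READ)));
	return 0;
}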
@@ -2030,9 +2128,8 @@ static void inc_chains(void)
 
 #else
 
-static inline int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-		   struct held_lock *next)
+static inline int check_irq_usage(struct task_struct *curr,
+				  struct held_lock *prev, struct held_lock *next)
 {
 	return 1;
 }
@@ -2211,7 +2308,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	else if (unlikely(ret < 0))
 		return print_bfs_bug(ret);
 
-	if (!check_prev_add_irq(curr, prev, next))
+	if (!check_irq_usage(curr, prev, next))
 		return 0;
 
 	/*
@@ -2773,6 +2870,12 @@ static void check_chain_key(struct task_struct *curr)
 #endif
 }
 
+static int mark_lock(struct task_struct *curr, struct held_lock *this,
+		     enum lock_usage_bit new_bit);
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+
+
 static void
 print_usage_bug_scenario(struct held_lock *lock)
 {
@@ -2842,10 +2945,6 @@ valid_state(struct task_struct *curr, struct held_lock *this,
 	return 1;
 }
 
-static int mark_lock(struct task_struct *curr, struct held_lock *this,
-		     enum lock_usage_bit new_bit);
-
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 
 /*
  * print irq inversion bug:
@@ -2925,7 +3024,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 
 	root.parent = NULL;
 	root.class = hlock_class(this);
-	ret = find_usage_forwards(&root, bit, &target_entry);
+	ret = find_usage_forwards(&root, lock_flag(bit), &target_entry);
 	if (ret < 0)
 		return print_bfs_bug(ret);
 	if (ret == 1)
@@ -2949,7 +3048,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 
 	root.parent = NULL;
 	root.class = hlock_class(this);
-	ret = find_usage_backwards(&root, bit, &target_entry);
+	ret = find_usage_backwards(&root, lock_flag(bit), &target_entry);
 	if (ret < 0)
 		return print_bfs_bug(ret);
 	if (ret == 1)
@@ -3004,7 +3103,7 @@ static int (*state_verbose_f[])(struct lock_class *class) = {
 static inline int state_verbose(enum lock_usage_bit bit,
 				struct lock_class *class)
 {
-	return state_verbose_f[bit >> 2](class);
+	return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
 }
 
 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
@@ -3146,7 +3245,7 @@ void lockdep_hardirqs_on(unsigned long ip)
 	/*
 	 * See the fine text that goes along with this variable definition.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
+	if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
 		return;
 
 	/*
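The last hunk is a micro-cleanup: DEBUG_LOCKS_WARN_ON() already wraps its condition in unlikely() internally, so the caller's extra unlikely() added nothing. A reduced userspace model of the macro shape (the real definition lives in include/linux/debug_locks.h and also checks oops_in_progress and debug_locks_off(); this sketch keeps only the branch-hint structure):

#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

/* reduced model: the condition is already branch-hinted inside the macro */
#define DEBUG_LOCKS_WARN_ON(c)				\
({							\
	int __ret = !!(c);				\
							\
	if (unlikely(__ret))				\
		printf("lockdep warning: %s\n", #c);	\
	__ret;						\
})

int main(void)
{
	int early_boot_irqs_disabled = 1;

	/* no outer unlikely() needed, matching the cleaned-up call site */
	if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
		return 1;
	return 0;
}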