Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/lockdep.c            | 113
 -rw-r--r--  kernel/lockdep_internals.h  |   3
 2 files changed, 65 insertions(+), 51 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e14d383dcb0b..d3c72ad8d09e 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -124,6 +124,15 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 unsigned long nr_lock_classes;
 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
+static inline struct lock_class *hlock_class(struct held_lock *hlock)
+{
+	if (!hlock->class_idx) {
+		DEBUG_LOCKS_WARN_ON(1);
+		return NULL;
+	}
+	return lock_classes + hlock->class_idx - 1;
+}
+
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
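The new hlock_class() accessor above is the heart of the change: struct held_lock no longer caches a struct lock_class pointer, only a small index into the static lock_classes[] array. The index is stored off by one so that 0 can serve as the "unused/released" poison value (see the hlock->class_idx = 0 hunk in lock_release_nested() further down). A minimal user-space sketch of that encoding, with illustrative helper names rather than the kernel's, just to show the round trip:

/* sketch: 1-based index encoding, with 0 reserved to mean "no class" */
#include <assert.h>
#include <stddef.h>

struct lock_class { int unused; };
static struct lock_class lock_classes[8];

static unsigned int class_to_idx(struct lock_class *class)
{
	return (unsigned int)(class - lock_classes) + 1;	/* encode */
}

static struct lock_class *idx_to_class(unsigned int idx)
{
	return idx ? lock_classes + idx - 1 : NULL;		/* decode; 0 -> NULL */
}

int main(void)
{
	for (unsigned int i = 0; i < 8; i++)
		assert(idx_to_class(class_to_idx(&lock_classes[i])) == &lock_classes[i]);
	assert(idx_to_class(0) == NULL);	/* the poison value decodes to NULL */
	return 0;
}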
@@ -222,7 +231,7 @@ static void lock_release_holdtime(struct held_lock *hlock)
 
 	holdtime = sched_clock() - hlock->holdtime_stamp;
 
-	stats = get_lock_stats(hlock->class);
+	stats = get_lock_stats(hlock_class(hlock));
 	if (hlock->read)
 		lock_time_inc(&stats->read_holdtime, holdtime);
 	else
@@ -518,7 +527,7 @@ static void print_lockdep_cache(struct lockdep_map *lock)
 
 static void print_lock(struct held_lock *hlock)
 {
-	print_lock_name(hlock->class);
+	print_lock_name(hlock_class(hlock));
 	printk(", at: ");
 	print_ip_sym(hlock->acquire_ip);
 }
@@ -948,7 +957,7 @@ static noinline int print_circular_bug_tail(void)
 	if (debug_locks_silent)
 		return 0;
 
-	this.class = check_source->class;
+	this.class = hlock_class(check_source);
 	if (!save_trace(&this.trace))
 		return 0;
 
@@ -1057,7 +1066,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	 * Check this lock's dependency list:
 	 */
 	list_for_each_entry(entry, &source->locks_after, entry) {
-		if (entry->class == check_target->class)
+		if (entry->class == hlock_class(check_target))
 			return print_circular_bug_header(entry, depth+1);
 		debug_atomic_inc(&nr_cyclic_checks);
 		if (!check_noncircular(entry->class, depth+1))
@@ -1150,6 +1159,11 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 		return 2;
 	}
 
+	if (!source && debug_locks_off_graph_unlock()) {
+		WARN_ON(1);
+		return 0;
+	}
+
 	/*
 	 * Check this lock's dependency list:
 	 */
@@ -1189,9 +1203,9 @@ print_bad_irq_dependency(struct task_struct *curr,
 	printk("\nand this task is already holding:\n");
 	print_lock(prev);
 	printk("which would create a new lock dependency:\n");
-	print_lock_name(prev->class);
+	print_lock_name(hlock_class(prev));
 	printk(" ->");
-	print_lock_name(next->class);
+	print_lock_name(hlock_class(next));
 	printk("\n");
 
 	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
@@ -1232,12 +1246,12 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 	find_usage_bit = bit_backwards;
 	/* fills in <backwards_match> */
-	ret = find_usage_backwards(prev->class, 0);
+	ret = find_usage_backwards(hlock_class(prev), 0);
 	if (!ret || ret == 1)
 		return ret;
 
 	find_usage_bit = bit_forwards;
-	ret = find_usage_forwards(next->class, 0);
+	ret = find_usage_forwards(hlock_class(next), 0);
 	if (!ret || ret == 1)
 		return ret;
 	/* ret == 2 */
@@ -1362,7 +1376,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
 
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		prev = curr->held_locks + i;
-		if (prev->class != next->class)
+		if (hlock_class(prev) != hlock_class(next))
 			continue;
 		/*
 		 * Allow read-after-read recursion of the same
@@ -1415,7 +1429,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 */
 	check_source = next;
 	check_target = prev;
-	if (!(check_noncircular(next->class, 0)))
+	if (!(check_noncircular(hlock_class(next), 0)))
 		return print_circular_bug_tail();
 
 	if (!check_prev_add_irq(curr, prev, next))
@@ -1439,8 +1453,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * chains - the second one will be new, but L1 already has
 	 * L2 added to its dependency list, due to the first chain.)
 	 */
-	list_for_each_entry(entry, &prev->class->locks_after, entry) {
-		if (entry->class == next->class) {
+	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+		if (entry->class == hlock_class(next)) {
 			if (distance == 1)
 				entry->distance = 1;
 			return 2;
@@ -1451,26 +1465,28 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * Ok, all validations passed, add the new lock
 	 * to the previous lock's dependency list:
 	 */
-	ret = add_lock_to_list(prev->class, next->class,
-			&prev->class->locks_after, next->acquire_ip, distance);
+	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
+			&hlock_class(prev)->locks_after,
+			next->acquire_ip, distance);
 
 	if (!ret)
 		return 0;
 
-	ret = add_lock_to_list(next->class, prev->class,
-			&next->class->locks_before, next->acquire_ip, distance);
+	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
+			&hlock_class(next)->locks_before,
+			next->acquire_ip, distance);
 	if (!ret)
 		return 0;
 
 	/*
 	 * Debugging printouts:
 	 */
-	if (verbose(prev->class) || verbose(next->class)) {
+	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
 		graph_unlock();
 		printk("\n new dependency: ");
-		print_lock_name(prev->class);
+		print_lock_name(hlock_class(prev));
 		printk(" => ");
-		print_lock_name(next->class);
+		print_lock_name(hlock_class(next));
 		printk("\n");
 		dump_stack();
 		return graph_lock();
@@ -1567,7 +1583,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 				     struct held_lock *hlock,
 				     u64 chain_key)
 {
-	struct lock_class *class = hlock->class;
+	struct lock_class *class = hlock_class(hlock);
 	struct list_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 	struct held_lock *hlock_curr, *hlock_next;
@@ -1640,7 +1656,7 @@ cache_hit:
 	if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
 		chain->base = cn;
 		for (j = 0; j < chain->depth - 1; j++, i++) {
-			int lock_id = curr->held_locks[i].class - lock_classes;
+			int lock_id = curr->held_locks[i].class_idx - 1;
 			chain_hlocks[chain->base + j] = lock_id;
 		}
 		chain_hlocks[chain->base + j] = class - lock_classes;
@@ -1736,7 +1752,7 @@ static void check_chain_key(struct task_struct *curr)
 			WARN_ON(1);
 			return;
 		}
-		id = hlock->class - lock_classes;
+		id = hlock->class_idx - 1;
 		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
 			return;
 
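These two chain-id hunks rely on the same identity: because class_idx is defined as (class - lock_classes) + 1, the 0-based id that used to be computed by pointer subtraction is now just class_idx - 1, with no need to touch lock_classes[] at all. Extending the first sketch above (class_to_idx() is the hypothetical helper from there):

/* sketch: the old and new id computations agree for any registered class */
static void check_id_identity(void)
{
	struct lock_class *class = &lock_classes[3];
	unsigned int class_idx = class_to_idx(class);	/* (class - lock_classes) + 1 */

	assert(class_idx - 1 == (unsigned int)(class - lock_classes));
}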
@@ -1781,7 +1797,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	print_lock(this);
 
 	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
-	print_stack_trace(this->class->usage_traces + prev_bit, 1);
+	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
 	printk("\nother info that might help us debug this:\n");
@@ -1800,7 +1816,7 @@ static inline int
 valid_state(struct task_struct *curr, struct held_lock *this,
 	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
 {
-	if (unlikely(this->class->usage_mask & (1 << bad_bit)))
+	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
 		return print_usage_bug(curr, this, bad_bit, new_bit);
 	return 1;
 }
@@ -1839,7 +1855,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 	lockdep_print_held_locks(curr);
 
 	printk("\nthe first lock's dependencies:\n");
-	print_lock_dependencies(this->class, 0);
+	print_lock_dependencies(hlock_class(this), 0);
 
 	printk("\nthe second lock's dependencies:\n");
 	print_lock_dependencies(other, 0);
@@ -1862,7 +1878,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 
 	find_usage_bit = bit;
 	/* fills in <forwards_match> */
-	ret = find_usage_forwards(this->class, 0);
+	ret = find_usage_forwards(hlock_class(this), 0);
 	if (!ret || ret == 1)
 		return ret;
 
@@ -1881,7 +1897,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 
 	find_usage_bit = bit;
 	/* fills in <backwards_match> */
-	ret = find_usage_backwards(this->class, 0);
+	ret = find_usage_backwards(hlock_class(this), 0);
 	if (!ret || ret == 1)
 		return ret;
 
@@ -1947,7 +1963,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 					LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
 			return 0;
 #endif
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_USED_IN_SOFTIRQ:
@@ -1972,7 +1988,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 					LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
 			return 0;
 #endif
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_USED_IN_HARDIRQ_READ:
@@ -1985,7 +2001,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (!check_usage_forwards(curr, this,
 					LOCK_ENABLED_HARDIRQS, "hard"))
 			return 0;
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_USED_IN_SOFTIRQ_READ:
@@ -1998,7 +2014,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (!check_usage_forwards(curr, this,
 					LOCK_ENABLED_SOFTIRQS, "soft"))
 			return 0;
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_ENABLED_HARDIRQS:
@@ -2024,7 +2040,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 					LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
 			return 0;
 #endif
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_ENABLED_SOFTIRQS:
@@ -2050,7 +2066,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 					LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
 			return 0;
 #endif
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_ENABLED_HARDIRQS_READ:
@@ -2065,7 +2081,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 					LOCK_USED_IN_HARDIRQ, "hard"))
 			return 0;
 #endif
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_ENABLED_SOFTIRQS_READ:
@@ -2080,7 +2096,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 					LOCK_USED_IN_SOFTIRQ, "soft"))
 			return 0;
 #endif
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	default:
@@ -2396,7 +2412,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	 * If already set then do not dirty the cacheline,
 	 * nor do any checks:
 	 */
-	if (likely(this->class->usage_mask & new_mask))
+	if (likely(hlock_class(this)->usage_mask & new_mask))
 		return 1;
 
 	if (!graph_lock())
@@ -2404,14 +2420,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	/*
 	 * Make sure we didnt race:
 	 */
-	if (unlikely(this->class->usage_mask & new_mask)) {
+	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
 		graph_unlock();
 		return 1;
 	}
 
-	this->class->usage_mask |= new_mask;
+	hlock_class(this)->usage_mask |= new_mask;
 
-	if (!save_trace(this->class->usage_traces + new_bit))
+	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
 		return 0;
 
 	switch (new_bit) {
@@ -2545,8 +2561,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		return 0;
 
 	hlock = curr->held_locks + depth;
-
-	hlock->class = class;
+	if (DEBUG_LOCKS_WARN_ON(!class))
+		return 0;
+	hlock->class_idx = class - lock_classes + 1;
 	hlock->acquire_ip = ip;
 	hlock->instance = lock;
 	hlock->trylock = trylock;
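Note that this hunk also adds a DEBUG_LOCKS_WARN_ON(!class) guard before the encode. With the old pointer field, storing a NULL class was harmless until someone dereferenced it; with the index field, NULL - lock_classes + 1 would silently produce a bogus index, so the encode side has to reject NULL up front. A sketch of that guarded encode (hypothetical helper, same illustrative setup as the first sketch):

/* sketch: guarded encode -- a NULL class must never reach the subtraction */
static int set_class_idx(unsigned int *class_idx, struct lock_class *class)
{
	if (!class)	/* NULL - lock_classes would yield a garbage index */
		return 0;
	*class_idx = (unsigned int)(class - lock_classes) + 1;
	return 1;
}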
@@ -2690,7 +2707,7 @@ __lock_set_subclass(struct lockdep_map *lock,
 
 found_it:
 	class = register_lock_class(lock, subclass, 0);
-	hlock->class = class;
+	hlock->class_idx = class - lock_classes + 1;
 
 	curr->lockdep_depth = i;
 	curr->curr_chain_key = hlock->prev_chain_key;
@@ -2698,7 +2715,7 @@ found_it:
 	for (; i < depth; i++) {
 		hlock = curr->held_locks + i;
 		if (!__lock_acquire(hlock->instance,
-				hlock->class->subclass, hlock->trylock,
+				hlock_class(hlock)->subclass, hlock->trylock,
 				hlock->read, hlock->check, hlock->hardirqs_off,
 				hlock->acquire_ip))
 			return 0;
@@ -2759,7 +2776,7 @@ found_it:
 	for (i++; i < depth; i++) {
 		hlock = curr->held_locks + i;
 		if (!__lock_acquire(hlock->instance,
-				hlock->class->subclass, hlock->trylock,
+				hlock_class(hlock)->subclass, hlock->trylock,
 				hlock->read, hlock->check, hlock->hardirqs_off,
 				hlock->acquire_ip))
 			return 0;
@@ -2804,7 +2821,7 @@ static int lock_release_nested(struct task_struct *curr,
 
 #ifdef CONFIG_DEBUG_LOCKDEP
 	hlock->prev_chain_key = 0;
-	hlock->class = NULL;
+	hlock->class_idx = 0;
 	hlock->acquire_ip = 0;
 	hlock->irq_context = 0;
 #endif
@@ -3000,9 +3017,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 found_it:
 	hlock->waittime_stamp = sched_clock();
 
-	point = lock_contention_point(hlock->class, ip);
+	point = lock_contention_point(hlock_class(hlock), ip);
 
-	stats = get_lock_stats(hlock->class);
+	stats = get_lock_stats(hlock_class(hlock));
 	if (point < ARRAY_SIZE(stats->contention_point))
 		stats->contention_point[i]++;
 	if (lock->cpu != smp_processor_id())
@@ -3048,7 +3065,7 @@ found_it:
 		hlock->holdtime_stamp = now;
 	}
 
-	stats = get_lock_stats(hlock->class);
+	stats = get_lock_stats(hlock_class(hlock));
 	if (waittime) {
 		if (hlock->read)
 			lock_time_inc(&stats->read_waittime, waittime);
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 68d44ec77ab5..55db193d366d 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -17,9 +17,6 @@
  */
 #define MAX_LOCKDEP_ENTRIES	8192UL
 
-#define MAX_LOCKDEP_KEYS_BITS	11
-#define MAX_LOCKDEP_KEYS	(1UL << MAX_LOCKDEP_KEYS_BITS)
-
 #define MAX_LOCKDEP_CHAINS_BITS	14
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
 
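MAX_LOCKDEP_KEYS_BITS and MAX_LOCKDEP_KEYS are deleted here yet still used by lock_classes[] in kernel/lockdep.c, and this view is limited to kernel/, so their new home is not shown. Presumably they move to a shared header (include/linux/lockdep.h would be the natural spot) so that struct held_lock can size its new class_idx field from the same constant; roughly along these lines, keeping the values shown above since the actual new definition is not visible in this diff:

/* sketch, assuming the constants move to a shared header such as include/linux/lockdep.h */
#define MAX_LOCKDEP_KEYS_BITS	11
#define MAX_LOCKDEP_KEYS	(1UL << MAX_LOCKDEP_KEYS_BITS)

struct held_lock {
	/* ... */
	unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;	/* 1-based; 0 = unused */
	/* ... */
};

Packing the class reference into a small bitfield alongside the other held_lock flag bits, rather than keeping a full pointer, is what lets this patch shrink struct held_lock, of which every task carries an array.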