Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--  kernel/lockdep.c  281
1 file changed, 228 insertions(+), 53 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f2852a510232..298c9276dfdb 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -490,6 +490,18 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
 	usage[i] = '\0';
 }
 
+static int __print_lock_name(struct lock_class *class)
+{
+	char str[KSYM_NAME_LEN];
+	const char *name;
+
+	name = class->name;
+	if (!name)
+		name = __get_key_name(class->key, str);
+
+	return printk("%s", name);
+}
+
 static void print_lock_name(struct lock_class *class)
 {
 	char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
@@ -639,6 +651,16 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	}
 #endif
 
+	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+		debug_locks_off();
+		printk(KERN_ERR
+			"BUG: looking up invalid subclass: %u\n", subclass);
+		printk(KERN_ERR
+			"turning off the locking correctness validator.\n");
+		dump_stack();
+		return NULL;
+	}
+
 	/*
 	 * Static locks do not have their class-keys yet - for them the key
 	 * is the lock object itself:
@@ -774,7 +796,9 @@ out_unlock_set:
 	raw_local_irq_restore(flags);
 
 	if (!subclass || force)
-		lock->class_cache = class;
+		lock->class_cache[0] = class;
+	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+		lock->class_cache[subclass] = class;
 
 	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
 		return NULL;
@@ -1041,6 +1065,56 @@ print_circular_bug_entry(struct lock_list *target, int depth)
 	return 0;
 }
 
+static void
+print_circular_lock_scenario(struct held_lock *src,
+			     struct held_lock *tgt,
+			     struct lock_list *prt)
+{
+	struct lock_class *source = hlock_class(src);
+	struct lock_class *target = hlock_class(tgt);
+	struct lock_class *parent = prt->class;
+
+	/*
+	 * A direct locking problem where unsafe_class lock is taken
+	 * directly by safe_class lock, then all we need to show
+	 * is the deadlock scenario, as it is obvious that the
+	 * unsafe lock is taken under the safe lock.
+	 *
+	 * But if there is a chain instead, where the safe lock takes
+	 * an intermediate lock (middle_class) where this lock is
+	 * not the same as the safe lock, then the lock chain is
+	 * used to describe the problem. Otherwise we would need
+	 * to show a different CPU case for each link in the chain
+	 * from the safe_class lock to the unsafe_class lock.
+	 */
+	if (parent != source) {
+		printk("Chain exists of:\n ");
+		__print_lock_name(source);
+		printk(" --> ");
+		__print_lock_name(parent);
+		printk(" --> ");
+		__print_lock_name(target);
+		printk("\n\n");
+	}
+
+	printk(" Possible unsafe locking scenario:\n\n");
+	printk("       CPU0                    CPU1\n");
+	printk("       ----                    ----\n");
+	printk("  lock(");
+	__print_lock_name(target);
+	printk(");\n");
+	printk("                               lock(");
+	__print_lock_name(parent);
+	printk(");\n");
+	printk("                               lock(");
+	__print_lock_name(target);
+	printk(");\n");
+	printk("  lock(");
+	__print_lock_name(source);
+	printk(");\n");
+	printk("\n *** DEADLOCK ***\n\n");
+}
+
 /*
  * When a circular dependency is detected, print the
  * header first:
@@ -1084,6 +1158,7 @@ static noinline int print_circular_bug(struct lock_list *this,
 {
 	struct task_struct *curr = current;
 	struct lock_list *parent;
+	struct lock_list *first_parent;
 	int depth;
 
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
@@ -1097,6 +1172,7 @@ static noinline int print_circular_bug(struct lock_list *this,
 	print_circular_bug_header(target, depth, check_src, check_tgt);
 
 	parent = get_lock_parent(target);
+	first_parent = parent;
 
 	while (parent) {
 		print_circular_bug_entry(parent, --depth);
@@ -1104,6 +1180,9 @@ static noinline int print_circular_bug(struct lock_list *this,
 	}
 
 	printk("\nother info that might help us debug this:\n\n");
+	print_circular_lock_scenario(check_src, check_tgt,
+				     first_parent);
+
 	lockdep_print_held_locks(curr);
 
 	printk("\nstack backtrace:\n");
@@ -1302,7 +1381,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
 		printk("\n");
 
 		if (depth == 0 && (entry != root)) {
-			printk("lockdep:%s bad BFS generated tree\n", __func__);
+			printk("lockdep:%s bad path found in chain graph\n", __func__);
 			break;
 		}
 
@@ -1313,6 +1392,62 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
 	return;
 }
 
+static void
+print_irq_lock_scenario(struct lock_list *safe_entry,
+			struct lock_list *unsafe_entry,
+			struct lock_class *prev_class,
+			struct lock_class *next_class)
+{
+	struct lock_class *safe_class = safe_entry->class;
+	struct lock_class *unsafe_class = unsafe_entry->class;
+	struct lock_class *middle_class = prev_class;
+
+	if (middle_class == safe_class)
+		middle_class = next_class;
+
+	/*
+	 * A direct locking problem where unsafe_class lock is taken
+	 * directly by safe_class lock, then all we need to show
+	 * is the deadlock scenario, as it is obvious that the
+	 * unsafe lock is taken under the safe lock.
+	 *
+	 * But if there is a chain instead, where the safe lock takes
+	 * an intermediate lock (middle_class) where this lock is
+	 * not the same as the safe lock, then the lock chain is
+	 * used to describe the problem. Otherwise we would need
+	 * to show a different CPU case for each link in the chain
+	 * from the safe_class lock to the unsafe_class lock.
+	 */
+	if (middle_class != unsafe_class) {
+		printk("Chain exists of:\n ");
+		__print_lock_name(safe_class);
+		printk(" --> ");
+		__print_lock_name(middle_class);
+		printk(" --> ");
+		__print_lock_name(unsafe_class);
+		printk("\n\n");
+	}
+
+	printk(" Possible interrupt unsafe locking scenario:\n\n");
+	printk("       CPU0                    CPU1\n");
+	printk("       ----                    ----\n");
+	printk("  lock(");
+	__print_lock_name(unsafe_class);
+	printk(");\n");
+	printk("                               local_irq_disable();\n");
+	printk("                               lock(");
+	__print_lock_name(safe_class);
+	printk(");\n");
+	printk("                               lock(");
+	__print_lock_name(middle_class);
+	printk(");\n");
+	printk("  <Interrupt>\n");
+	printk("    lock(");
+	__print_lock_name(safe_class);
+	printk(");\n");
+	printk("\n *** DEADLOCK ***\n\n");
+}
+
 static int
 print_bad_irq_dependency(struct task_struct *curr,
 			struct lock_list *prev_root,
@@ -1364,6 +1499,9 @@ print_bad_irq_dependency(struct task_struct *curr,
 	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
 
 	printk("\nother info that might help us debug this:\n\n");
+	print_irq_lock_scenario(backwards_entry, forwards_entry,
+				hlock_class(prev), hlock_class(next));
+
 	lockdep_print_held_locks(curr);
 
 	printk("\nthe dependencies between %s-irq-safe lock", irqclass);
@@ -1527,6 +1665,26 @@ static inline void inc_chains(void)
 
 #endif
 
+static void
+print_deadlock_scenario(struct held_lock *nxt,
+			struct held_lock *prv)
+{
+	struct lock_class *next = hlock_class(nxt);
+	struct lock_class *prev = hlock_class(prv);
+
+	printk(" Possible unsafe locking scenario:\n\n");
+	printk("       CPU0\n");
+	printk("       ----\n");
+	printk("  lock(");
+	__print_lock_name(prev);
+	printk(");\n");
+	printk("  lock(");
+	__print_lock_name(next);
+	printk(");\n");
+	printk("\n *** DEADLOCK ***\n\n");
+	printk(" May be due to missing lock nesting notation\n\n");
+}
+
 static int
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 		   struct held_lock *next)
@@ -1545,6 +1703,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 	print_lock(prev);
 
 	printk("\nother info that might help us debug this:\n");
+	print_deadlock_scenario(next, prev);
 	lockdep_print_held_locks(curr);
 
 	printk("\nstack backtrace:\n");
@@ -1814,7 +1973,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	struct list_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 	struct held_lock *hlock_curr, *hlock_next;
-	int i, j, n, cn;
+	int i, j;
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
@@ -1874,15 +2033,9 @@ cache_hit:
 	}
 	i++;
 	chain->depth = curr->lockdep_depth + 1 - i;
-	cn = nr_chain_hlocks;
-	while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
-		n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
-		if (n == cn)
-			break;
-		cn = n;
-	}
-	if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
-		chain->base = cn;
+	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
+		chain->base = nr_chain_hlocks;
+		nr_chain_hlocks += chain->depth;
 		for (j = 0; j < chain->depth - 1; j++, i++) {
 			int lock_id = curr->held_locks[i].class_idx - 1;
 			chain_hlocks[chain->base + j] = lock_id;
@@ -1999,6 +2152,24 @@ static void check_chain_key(struct task_struct *curr)
 #endif
 }
 
+static void
+print_usage_bug_scenario(struct held_lock *lock)
+{
+	struct lock_class *class = hlock_class(lock);
+
+	printk(" Possible unsafe locking scenario:\n\n");
+	printk("       CPU0\n");
+	printk("       ----\n");
+	printk("  lock(");
+	__print_lock_name(class);
+	printk(");\n");
+	printk("  <Interrupt>\n");
+	printk("    lock(");
+	__print_lock_name(class);
+	printk(");\n");
+	printk("\n *** DEADLOCK ***\n\n");
+}
+
 static int
 print_usage_bug(struct task_struct *curr, struct held_lock *this,
 		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
@@ -2027,6 +2198,8 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 
 	print_irqtrace_events(curr);
 	printk("\nother info that might help us debug this:\n");
+	print_usage_bug_scenario(this);
+
 	lockdep_print_held_locks(curr);
 
 	printk("\nstack backtrace:\n");
@@ -2061,6 +2234,10 @@ print_irq_inversion_bug(struct task_struct *curr,
 			struct held_lock *this, int forwards,
 			const char *irqclass)
 {
+	struct lock_list *entry = other;
+	struct lock_list *middle = NULL;
+	int depth;
+
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
@@ -2079,6 +2256,25 @@ print_irq_inversion_bug(struct task_struct *curr,
 	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
 	printk("\nother info that might help us debug this:\n");
+
+	/* Find a middle lock (if one exists) */
+	depth = get_lock_depth(other);
+	do {
+		if (depth == 0 && (entry != root)) {
+			printk("lockdep:%s bad path found in chain graph\n", __func__);
+			break;
+		}
+		middle = entry;
+		entry = get_lock_parent(entry);
+		depth--;
+	} while (entry && entry != root && (depth >= 0));
+	if (forwards)
+		print_irq_lock_scenario(root, other,
+			middle ? middle->class : root->class, other->class);
+	else
+		print_irq_lock_scenario(other, root,
+			middle ? middle->class : other->class, root->class);
+
 	lockdep_print_held_locks(curr);
 
 	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
@@ -2280,22 +2476,6 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 }
 
 /*
- * Debugging helper: via this flag we know that we are in
- * 'early bootup code', and will warn about any invalid irqs-on event:
- */
-static int early_boot_irqs_enabled;
-
-void early_boot_irqs_off(void)
-{
-	early_boot_irqs_enabled = 0;
-}
-
-void early_boot_irqs_on(void)
-{
-	early_boot_irqs_enabled = 1;
-}
-
-/*
  * Hardirqs will be enabled:
  */
 void trace_hardirqs_on_caller(unsigned long ip)
@@ -2307,13 +2487,13 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
-	if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
+	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
 		return;
 
 	if (unlikely(curr->hardirqs_enabled)) {
 		/*
 		 * Neither irq nor preemption are disabled here
-		 * so this is racy by nature but loosing one hit
+		 * so this is racy by nature but losing one hit
 		 * in a stat is not a big deal.
 		 */
 		__debug_atomic_inc(redundant_hardirqs_on);
@@ -2624,7 +2804,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	if (!graph_lock())
 		return 0;
 	/*
-	 * Make sure we didnt race:
+	 * Make sure we didn't race:
 	 */
 	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
 		graph_unlock();
@@ -2679,7 +2859,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	lock->class_cache = NULL;
+	int i;
+
+	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+		lock->class_cache[i] = NULL;
+
 #ifdef CONFIG_LOCK_STAT
 	lock->cpu = raw_smp_processor_id();
 #endif
@@ -2739,21 +2923,13 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
 
-	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
-		debug_locks_off();
-		printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
-		printk("turning off the locking correctness validator.\n");
-		dump_stack();
-		return 0;
-	}
-
 	if (lock->key == &__lockdep_no_validate__)
 		check = 1;
 
-	if (!subclass)
-		class = lock->class_cache;
+	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+		class = lock->class_cache[subclass];
 	/*
-	 * Not cached yet or subclass?
+	 * Not cached?
 	 */
 	if (unlikely(!class)) {
 		class = register_lock_class(lock, subclass, 0);
@@ -2918,7 +3094,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 		return 1;
 
 	if (hlock->references) {
-		struct lock_class *class = lock->class_cache;
+		struct lock_class *class = lock->class_cache[0];
 
 		if (!class)
 			class = look_up_lock_class(lock, 0);
@@ -3250,7 +3426,7 @@ int lock_is_held(struct lockdep_map *lock)
 	int ret = 0;
 
 	if (unlikely(current->lockdep_recursion))
-		return ret;
+		return 1; /* avoid false negative lockdep_assert_held() */
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
@@ -3559,7 +3735,12 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		if (list_empty(head))
 			continue;
 		list_for_each_entry_safe(class, next, head, hash_entry) {
-			if (unlikely(class == lock->class_cache)) {
+			int match = 0;
+
+			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
+				match |= class == lock->class_cache[j];
+
+			if (unlikely(match)) {
 				if (debug_locks_off_graph_unlock())
 					WARN_ON(1);
 				goto out_restore;
@@ -3775,7 +3956,7 @@ EXPORT_SYMBOL_GPL(debug_show_all_locks);
  * Careful: only use this function if you are sure that
  * the task cannot run in parallel!
  */
-void __debug_show_held_locks(struct task_struct *task)
+void debug_show_held_locks(struct task_struct *task)
 {
 	if (unlikely(!debug_locks)) {
 		printk("INFO: lockdep is turned off.\n");
@@ -3783,12 +3964,6 @@ void __debug_show_held_locks(struct task_struct *task)
 	}
 	lockdep_print_held_locks(task);
 }
-EXPORT_SYMBOL_GPL(__debug_show_held_locks);
-
-void debug_show_held_locks(struct task_struct *task)
-{
-	__debug_show_held_locks(task);
-}
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
 void lockdep_sys_exit(void)
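
For reference, here is a minimal, hypothetical sketch (not part of this diff) of the kind of AB-BA lock ordering that the new print_circular_lock_scenario() report describes; the lock and function names (lock_a, lock_b, path_one, path_two) are illustrative only. With CONFIG_PROVE_LOCKING enabled, the second acquisition order closes a cycle in the chain graph and lockdep prints the "Possible unsafe locking scenario" table added above.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void path_one(void)
{
	spin_lock(&lock_a);
	spin_lock(&lock_b);	/* records the dependency a -> b */
	spin_unlock(&lock_b);
	spin_unlock(&lock_a);
}

static void path_two(void)
{
	spin_lock(&lock_b);
	spin_lock(&lock_a);	/* b -> a completes the cycle; lockdep warns */
	spin_unlock(&lock_a);
	spin_unlock(&lock_b);
}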