-rw-r--r--  include/linux/lockdep.h |  53
-rw-r--r--  kernel/lockdep.c        | 247
-rw-r--r--  kernel/sysctl.c         |  22
-rw-r--r--  lib/Kconfig.debug       |  11
4 files changed, 333 insertions, 0 deletions
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 14c937d345cb..8f946f614f8e 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -9,6 +9,7 @@
 #define __LINUX_LOCKDEP_H
 
 struct task_struct;
+struct lockdep_map;
 
 #ifdef CONFIG_LOCKDEP
 
@@ -114,8 +115,32 @@ struct lock_class {
 
        const char *name;
        int name_version;
+
+#ifdef CONFIG_LOCK_STAT
+       unsigned long contention_point[4];
+#endif
+};
+
+#ifdef CONFIG_LOCK_STAT
+struct lock_time {
+       s64 min;
+       s64 max;
+       s64 total;
+       unsigned long nr;
 };
 
+struct lock_class_stats {
+       unsigned long contention_point[4];
+       struct lock_time read_waittime;
+       struct lock_time write_waittime;
+       struct lock_time read_holdtime;
+       struct lock_time write_holdtime;
+};
+
+struct lock_class_stats lock_stats(struct lock_class *class);
+void clear_lock_stats(struct lock_class *class);
+#endif
+
 /*
  * Map the lock object (the lock instance) to the lock-class object.
  * This is embedded into specific lock instances:
@@ -165,6 +190,10 @@ struct held_lock {
        unsigned long acquire_ip;
        struct lockdep_map *instance;
 
+#ifdef CONFIG_LOCK_STAT
+       u64 waittime_stamp;
+       u64 holdtime_stamp;
+#endif
        /*
         * The lock-stack is unified in that the lock chains of interrupt
         * contexts nest ontop of process context chains, but we 'separate'
@@ -281,6 +310,30 @@ struct lock_class_key { };
 
 #endif /* !LOCKDEP */
 
+#ifdef CONFIG_LOCK_STAT
+
+extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
+extern void lock_acquired(struct lockdep_map *lock);
+
+#define LOCK_CONTENDED(_lock, try, lock)                       \
+do {                                                           \
+       if (!try(_lock)) {                                      \
+               lock_contended(&(_lock)->dep_map, _RET_IP_);    \
+               lock(_lock);                                    \
+               lock_acquired(&(_lock)->dep_map);               \
+       }                                                       \
+} while (0)
+
+#else /* CONFIG_LOCK_STAT */
+
+#define lock_contended(lockdep_map, ip) do {} while (0)
+#define lock_acquired(lockdep_map) do {} while (0)
+
+#define LOCK_CONTENDED(_lock, try, lock) \
+       lock(_lock)
+
+#endif /* CONFIG_LOCK_STAT */
+
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
 extern void early_init_irq_lock_class(void);
 #else
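How a lock type consumes this macro is worth spelling out. Below is a minimal sketch, not part of this patch: my_mutex, my_mutex_trylock() and my_mutex_lock_slow() are hypothetical names standing in for a real lock implementation that embeds a dep_map.

/*
 * Hedged sketch of a LOCK_CONTENDED user. All names here are
 * illustrative, not kernel APIs.
 */
struct my_mutex {
        struct lockdep_map dep_map;
        /* ... the actual lock word lives here ... */
};

static int my_mutex_trylock(struct my_mutex *m);        /* uncontended fast path */
static void my_mutex_lock_slow(struct my_mutex *m);     /* blocking slow path */

static void my_mutex_lock(struct my_mutex *m)
{
        /*
         * With CONFIG_LOCK_STAT=y this expands to:
         *
         *      if (!my_mutex_trylock(m)) {
         *              lock_contended(&m->dep_map, _RET_IP_);  // wait clock starts
         *              my_mutex_lock_slow(m);
         *              lock_acquired(&m->dep_map);             // wait clock stops
         *      }
         *
         * With CONFIG_LOCK_STAT=n it collapses to a plain
         * my_mutex_lock_slow(m) call.
         */
        LOCK_CONTENDED(m, my_mutex_trylock, my_mutex_lock_slow);
}

The trylock-first shape is the point: the uncontended path never touches the instrumentation, and only a failed trylock starts the wait clock.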
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 87ac36425070..70ca4db28aff 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,6 +42,20 @@
 
 #include "lockdep_internals.h"
 
+#ifdef CONFIG_PROVE_LOCKING
+int prove_locking = 1;
+module_param(prove_locking, int, 0644);
+#else
+#define prove_locking 0
+#endif
+
+#ifdef CONFIG_LOCK_STAT
+int lock_stat = 1;
+module_param(lock_stat, int, 0644);
+#else
+#define lock_stat 0
+#endif
+
 /*
  * lockdep_lock: protects the lockdep graph, the hashes and the
  * class/list/hash allocators.
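The #else stubs make prove_locking and lock_stat compile-time constants when the corresponding option is off, so every runtime test against them is folded away by the compiler. A standalone toy showing the same idiom (plain C, illustrative names only):

#include <stdio.h>

/*
 * Toy version of the gating idiom. With FEATURE_STAT undefined,
 * "feature_stat" is the constant 0 and the branch below is dead code
 * that any optimizing compiler removes entirely.
 */
#define FEATURE_STAT 1

#ifdef FEATURE_STAT
static int feature_stat = 1;            /* runtime-writable knob */
#else
#define feature_stat 0                  /* compile-time constant */
#endif

static void record_stat(void)
{
        if (!feature_stat)
                return;                 /* folded away when compiled out */
        printf("stat recorded\n");
}

int main(void)
{
        record_stat();
        return 0;
}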
@@ -104,6 +118,70 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 unsigned long nr_lock_classes;
 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
+#ifdef CONFIG_LOCK_STAT
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+
+static int lock_contention_point(struct lock_class *class, unsigned long ip)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
+               if (class->contention_point[i] == 0) {
+                       class->contention_point[i] = ip;
+                       break;
+               }
+               if (class->contention_point[i] == ip)
+                       break;
+       }
+
+       return i;
+}
+
+static void lock_time_inc(struct lock_time *lt, s64 time)
+{
+       if (time > lt->max)
+               lt->max = time;
+
+       if (time < lt->min || !lt->min)
+               lt->min = time;
+
+       lt->total += time;
+       lt->nr++;
+}
+
+static struct lock_class_stats *get_lock_stats(struct lock_class *class)
+{
+       return &get_cpu_var(lock_stats)[class - lock_classes];
+}
+
+static void put_lock_stats(struct lock_class_stats *stats)
+{
+       put_cpu_var(lock_stats);
+}
+
+static void lock_release_holdtime(struct held_lock *hlock)
+{
+       struct lock_class_stats *stats;
+       s64 holdtime;
+
+       if (!lock_stat)
+               return;
+
+       holdtime = sched_clock() - hlock->holdtime_stamp;
+
+       stats = get_lock_stats(hlock->class);
+       if (hlock->read)
+               lock_time_inc(&stats->read_holdtime, holdtime);
+       else
+               lock_time_inc(&stats->write_holdtime, holdtime);
+       put_lock_stats(stats);
+}
+#else
+static inline void lock_release_holdtime(struct held_lock *hlock)
+{
+}
+#endif
+
 /*
  * We keep a global list of all lock classes. The list only grows,
  * never shrinks. The list is only accessed with the lockdep
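Note that struct lock_time stores only min, max, total and nr; the mean is derived at report time. A sketch of that derivation (lock_time_avg() is hypothetical, not part of this patch; an in-tree version would use do_div(), since 64-bit division isn't native on 32-bit targets):

/* Hypothetical reporting helper: mean time in sched_clock() units (ns). */
static inline s64 lock_time_avg(const struct lock_time *lt)
{
        return lt->nr ? lt->total / (s64)lt->nr : 0;
}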
@@ -2221,6 +2299,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        int chain_head = 0;
        u64 chain_key;
 
+       if (!prove_locking)
+               check = 1;
+
        if (unlikely(!debug_locks))
                return 0;
 
@@ -2271,6 +2352,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        hlock->read = read;
        hlock->check = check;
        hlock->hardirqs_off = hardirqs_off;
+#ifdef CONFIG_LOCK_STAT
+       hlock->waittime_stamp = 0;
+       hlock->holdtime_stamp = sched_clock();
+#endif
 
        if (check == 2 && !mark_irqflags(curr, hlock))
                return 0;
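Together with the lock_contended()/lock_acquired() hooks added further down, the two stamps give each held lock the lifecycle below; a worked timeline, times illustrative:

/*
 * Worked example for one contended write lock (sched_clock() units):
 *
 *   t=100  __lock_acquire()   holdtime_stamp = 100, waittime_stamp = 0
 *   t=100  trylock fails
 *   t=100  lock_contended()   waittime_stamp = 100
 *   t=160  lock_acquired()    write_waittime += 60 (160 - 100);
 *                             holdtime_stamp = 160 (re-stamped, so the
 *                             hold time won't include the wait)
 *   t=220  lock_release()     write_holdtime += 60 (220 - 160)
 *
 * An uncontended acquire never runs lock_contended()/lock_acquired(),
 * so only the holdtime_stamp taken here is used at release time.
 */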
@@ -2411,6 +2496,8 @@ lock_release_non_nested(struct task_struct *curr,
                return print_unlock_inbalance_bug(curr, lock, ip);
 
 found_it:
+       lock_release_holdtime(hlock);
+
        /*
         * We have the right lock to unlock, 'hlock' points to it.
         * Now we remove it from the stack, and add back the other
@@ -2463,6 +2550,8 @@ static int lock_release_nested(struct task_struct *curr,
 
        curr->curr_chain_key = hlock->prev_chain_key;
 
+       lock_release_holdtime(hlock);
+
 #ifdef CONFIG_DEBUG_LOCKDEP
        hlock->prev_chain_key = 0;
        hlock->class = NULL;
@@ -2537,6 +2626,9 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
        unsigned long flags;
 
+       if (unlikely(!lock_stat && !prove_locking))
+               return;
+
        if (unlikely(current->lockdep_recursion))
                return;
 
@@ -2556,6 +2648,9 @@ void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 {
        unsigned long flags;
 
+       if (unlikely(!lock_stat && !prove_locking))
+               return;
+
        if (unlikely(current->lockdep_recursion))
                return;
 
@@ -2569,6 +2664,158 @@ void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 
 EXPORT_SYMBOL_GPL(lock_release);
 
+#ifdef CONFIG_LOCK_STAT
+static int
+print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
+                       unsigned long ip)
+{
+       if (!debug_locks_off())
+               return 0;
+       if (debug_locks_silent)
+               return 0;
+
+       printk("\n=================================\n");
+       printk(  "[ BUG: bad contention detected! ]\n");
+       printk(  "---------------------------------\n");
+       printk("%s/%d is trying to contend lock (",
+               curr->comm, curr->pid);
+       print_lockdep_cache(lock);
+       printk(") at:\n");
+       print_ip_sym(ip);
+       printk("but there are no locks held!\n");
+       printk("\nother info that might help us debug this:\n");
+       lockdep_print_held_locks(curr);
+
+       printk("\nstack backtrace:\n");
+       dump_stack();
+
+       return 0;
+}
+
+static void
+__lock_contended(struct lockdep_map *lock, unsigned long ip)
+{
+       struct task_struct *curr = current;
+       struct held_lock *hlock, *prev_hlock;
+       struct lock_class_stats *stats;
+       unsigned int depth;
+       int i, point;
+
+       depth = curr->lockdep_depth;
+       if (DEBUG_LOCKS_WARN_ON(!depth))
+               return;
+
+       prev_hlock = NULL;
+       for (i = depth-1; i >= 0; i--) {
+               hlock = curr->held_locks + i;
+               /*
+                * We must not cross into another context:
+                */
+               if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+                       break;
+               if (hlock->instance == lock)
+                       goto found_it;
+               prev_hlock = hlock;
+       }
+       print_lock_contention_bug(curr, lock, ip);
+       return;
+
+found_it:
+       hlock->waittime_stamp = sched_clock();
+
+       point = lock_contention_point(hlock->class, ip);
+
+       stats = get_lock_stats(hlock->class);
+       if (point < ARRAY_SIZE(stats->contention_point))
+               stats->contention_point[point]++;
+       put_lock_stats(stats);
+}
+
+static void
+__lock_acquired(struct lockdep_map *lock)
+{
+       struct task_struct *curr = current;
+       struct held_lock *hlock, *prev_hlock;
+       struct lock_class_stats *stats;
+       unsigned int depth;
+       u64 now;
+       s64 waittime;
+       int i;
+
+       depth = curr->lockdep_depth;
+       if (DEBUG_LOCKS_WARN_ON(!depth))
+               return;
+
+       prev_hlock = NULL;
+       for (i = depth-1; i >= 0; i--) {
+               hlock = curr->held_locks + i;
+               /*
+                * We must not cross into another context:
+                */
+               if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+                       break;
+               if (hlock->instance == lock)
+                       goto found_it;
+               prev_hlock = hlock;
+       }
+       print_lock_contention_bug(curr, lock, _RET_IP_);
+       return;
+
+found_it:
+       if (!hlock->waittime_stamp)
+               return;
+
+       now = sched_clock();
+       waittime = now - hlock->waittime_stamp;
+       hlock->holdtime_stamp = now;
+
+       stats = get_lock_stats(hlock->class);
+       if (hlock->read)
+               lock_time_inc(&stats->read_waittime, waittime);
+       else
+               lock_time_inc(&stats->write_waittime, waittime);
+       put_lock_stats(stats);
+}
+
+void lock_contended(struct lockdep_map *lock, unsigned long ip)
+{
+       unsigned long flags;
+
+       if (unlikely(!lock_stat))
+               return;
+
+       if (unlikely(current->lockdep_recursion))
+               return;
+
+       raw_local_irq_save(flags);
+       check_flags(flags);
+       current->lockdep_recursion = 1;
+       __lock_contended(lock, ip);
+       current->lockdep_recursion = 0;
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_contended);
+
+void lock_acquired(struct lockdep_map *lock)
+{
+       unsigned long flags;
+
+       if (unlikely(!lock_stat))
+               return;
+
+       if (unlikely(current->lockdep_recursion))
+               return;
+
+       raw_local_irq_save(flags);
+       check_flags(flags);
+       current->lockdep_recursion = 1;
+       __lock_acquired(lock);
+       current->lockdep_recursion = 0;
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_acquired);
+#endif
+
 /*
  * Used by the testsuite, sanitize the validator state
  * after a simulated failure:
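lockdep.h declares lock_stats() and clear_lock_stats(), but their bodies are not in this diff. Presumably they fold the per-CPU lock_stats slots for a class into a single struct lock_class_stats; a conjectural sketch under that assumption (for_each_possible_cpu() and per_cpu() are standard kernel primitives, the rest is guesswork):

/*
 * Conjectural aggregation of per-CPU stats for one class. The real
 * implementation is not shown in this patch.
 */
static void lock_time_merge(struct lock_time *dst, const struct lock_time *src)
{
        if (src->max > dst->max)
                dst->max = src->max;
        if (src->min && (!dst->min || src->min < dst->min))
                dst->min = src->min;
        dst->total += src->total;
        dst->nr += src->nr;
}

struct lock_class_stats lock_stats(struct lock_class *class)
{
        struct lock_class_stats stats;
        int cpu, i;

        memset(&stats, 0, sizeof(stats));
        for_each_possible_cpu(cpu) {
                struct lock_class_stats *pcs =
                        &per_cpu(lock_stats, cpu)[class - lock_classes];

                for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
                        stats.contention_point[i] += pcs->contention_point[i];

                lock_time_merge(&stats.read_waittime, &pcs->read_waittime);
                lock_time_merge(&stats.write_waittime, &pcs->write_waittime);
                lock_time_merge(&stats.read_holdtime, &pcs->read_holdtime);
                lock_time_merge(&stats.write_holdtime, &pcs->write_holdtime);
        }
        return stats;
}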
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2aaa3f98185d..e69179b1809c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -161,6 +161,8 @@ extern ctl_table inotify_table[];
 int sysctl_legacy_va_layout;
 #endif
 
+extern int prove_locking;
+extern int lock_stat;
 
 /* The default sysctl tables: */
 
@@ -282,6 +284,26 @@ static ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
+#ifdef CONFIG_PROVE_LOCKING
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "prove_locking",
+               .data           = &prove_locking,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
+#endif
+#ifdef CONFIG_LOCK_STAT
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "lock_stat",
+               .data           = &lock_stat,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
+#endif
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "sched_features",
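Because both entries sit in kern_table, the knobs appear as /proc/sys/kernel/prove_locking and /proc/sys/kernel/lock_stat (equivalently, sysctl kernel.lock_stat). A small userspace sketch of flipping them at runtime, illustrative only:

#include <stdio.h>

/* Illustrative toggle for the two sysctls this hunk adds. */
static int write_knob(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;      /* needs root, or the option is compiled out */
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        /* keep the statistics, switch off the pricier dependency checking */
        write_knob("/proc/sys/kernel/lock_stat", "1");
        write_knob("/proc/sys/kernel/prove_locking", "0");
        return 0;
}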
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 640844024ffd..f3e0c2abcbd0 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -283,6 +283,17 @@ config LOCKDEP
        select KALLSYMS
        select KALLSYMS_ALL
 
+config LOCK_STAT
+       bool "Lock usage statistics"
+       depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+       select LOCKDEP
+       select DEBUG_SPINLOCK
+       select DEBUG_MUTEXES
+       select DEBUG_LOCK_ALLOC
+       default n
+       help
+         This feature enables tracking lock contention points.
+
 config DEBUG_LOCKDEP
        bool "Lock dependency engine debugging"
        depends on DEBUG_KERNEL && LOCKDEP
