path: root/kernel/lockdep.c
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-09-01 19:30:29 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-09-18 07:58:57 -0400
commit	0119fee449f501d55924914a90f152540dd4ef9a (patch)
tree	2ed9f5cd1fbdbaf6b237426ec4452d0e7ecf3a08 /kernel/lockdep.c
parent	cb475de3d12df6912bc95048202ae8c280d4cad5 (diff)
lockdep: Comment all warnings
Andrew requested I comment all the lockdep WARN()s to help other people
figure out wth is wrong..

Requested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1315301493.3191.9.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c	156
1 file changed, 147 insertions(+), 9 deletions(-)
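Most of the warnings annotated below funnel through the DEBUG_LOCKS_WARN_ON()
helper rather than a bare WARN_ON(). A rough sketch of that helper, paraphrased
from memory of include/linux/debug_locks.h around this kernel version (check
the tree for the authoritative definition): it warns once, kills lockdep via
debug_locks_off(), and tells the caller whether the condition fired.

#define DEBUG_LOCKS_WARN_ON(c)						\
({									\
	int __ret = 0;							\
									\
	/* skip when an oops is already in progress */			\
	if (!oops_in_progress && unlikely(c)) {				\
		/* turn lockdep off; warn unless running a selftest */	\
		if (debug_locks_off() && !debug_locks_silent)		\
			WARN_ON(1);					\
		__ret = 1;						\
	}								\
	__ret;								\
})

Since debug_locks_off() clears debug_locks, the "if (unlikely(!debug_locks))"
guards visible in several hunks make lockdep go quiet after the first splat,
so each comment added here documents a warning that fires at most once per boot.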
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 91d67ce3a8d5..c081fa967c8f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -96,8 +96,13 @@ static int graph_lock(void)
 
 static inline int graph_unlock(void)
 {
-	if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
+	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
+		/*
+		 * The lockdep graph lock isn't locked while we expect it to
+		 * be, we're confused now, bye!
+		 */
 		return DEBUG_LOCKS_WARN_ON(1);
+	}
 
 	current->lockdep_recursion--;
 	arch_spin_unlock(&lockdep_lock);
@@ -134,6 +139,9 @@ static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 static inline struct lock_class *hlock_class(struct held_lock *hlock)
 {
 	if (!hlock->class_idx) {
+		/*
+		 * Someone passed in garbage, we give up.
+		 */
 		DEBUG_LOCKS_WARN_ON(1);
 		return NULL;
 	}
@@ -687,6 +695,10 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 */
 	list_for_each_entry(class, hash_head, hash_entry) {
 		if (class->key == key) {
+			/*
+			 * Huh! same key, different name? Did someone trample
+			 * on some memory? We're most confused.
+			 */
 			WARN_ON_ONCE(class->name != lock->name);
 			return class;
 		}
@@ -800,6 +812,10 @@ out_unlock_set:
 	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
 		lock->class_cache[subclass] = class;
 
+	/*
+	 * Hash collision, did we smoke some? We found a class with a matching
+	 * hash but the subclass -- which is hashed in -- didn't match.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
 		return NULL;
 
@@ -926,7 +942,7 @@ static inline void mark_lock_accessed(struct lock_list *lock,
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
+	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
 	lock->parent = parent;
 	lock->class->dep_gen_id = lockdep_dependency_gen_id;
 }
@@ -936,7 +952,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock)
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
+	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
 	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
 }
 
@@ -1196,6 +1212,9 @@ static noinline int print_bfs_bug(int ret)
 	if (!debug_locks_off_graph_unlock())
 		return 0;
 
+	/*
+	 * Breadth-first-search failed, graph got corrupted?
+	 */
 	WARN(1, "lockdep bfs error:%d\n", ret);
 
 	return 0;
@@ -1944,6 +1963,11 @@ out_bug:
 	if (!debug_locks_off_graph_unlock())
 		return 0;
 
+	/*
+	 * Clearly we all shouldn't be here, but since we made it we
+	 * can reliable say we messed up our state. See the above two
+	 * gotos for reasons why we could possibly end up here.
+	 */
 	WARN_ON(1);
 
 	return 0;
@@ -1975,6 +1999,11 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	struct held_lock *hlock_curr, *hlock_next;
 	int i, j;
 
+	/*
+	 * We might need to take the graph lock, ensure we've got IRQs
+	 * disabled to make this an IRQ-safe lock.. for recursion reasons
+	 * lockdep won't complain about its own locking errors.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
 	/*
@@ -2126,6 +2155,10 @@ static void check_chain_key(struct task_struct *curr)
 		hlock = curr->held_locks + i;
 		if (chain_key != hlock->prev_chain_key) {
 			debug_locks_off();
+			/*
+			 * We got mighty confused, our chain keys don't match
+			 * with what we expect, someone trample on our task state?
+			 */
 			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
 				curr->lockdep_depth, i,
 				(unsigned long long)chain_key,
@@ -2133,6 +2166,9 @@ static void check_chain_key(struct task_struct *curr)
 			return;
 		}
 		id = hlock->class_idx - 1;
+		/*
+		 * Whoops ran out of static storage again?
+		 */
 		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
 			return;
 
@@ -2144,6 +2180,10 @@ static void check_chain_key(struct task_struct *curr)
 	}
 	if (chain_key != curr->curr_chain_key) {
 		debug_locks_off();
+		/*
+		 * More smoking hash instead of calculating it, damn see these
+		 * numbers float.. I bet that a pink elephant stepped on my memory.
+		 */
 		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
 			curr->lockdep_depth, i,
 			(unsigned long long)chain_key,
@@ -2525,12 +2565,24 @@ void trace_hardirqs_on_caller(unsigned long ip)
 		return;
 	}
 
+	/*
+	 * We're enabling irqs and according to our state above irqs weren't
+	 * already enabled, yet we find the hardware thinks they are in fact
+	 * enabled.. someone messed up their IRQ state tracing.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
+	/*
+	 * See the fine text that goes along with this variable definition.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
 		return;
 
+	/*
+	 * Can't allow enabling interrupts while in an interrupt handler,
+	 * that's general bad form and such. Recursion, limited stack etc..
+	 */
 	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
 		return;
 
@@ -2558,6 +2610,10 @@ void trace_hardirqs_off_caller(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
+	/*
+	 * So we're supposed to get called after you mask local IRQs, but for
+	 * some reason the hardware doesn't quite think you did a proper job.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
@@ -2590,6 +2646,10 @@ void trace_softirqs_on(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
+	/*
+	 * We fancy IRQs being disabled here, see softirq.c, avoids
+	 * funny state and nesting things.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
@@ -2626,6 +2686,9 @@ void trace_softirqs_off(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
+	/*
+	 * We fancy IRQs being disabled here, see softirq.c
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
@@ -2637,6 +2700,9 @@ void trace_softirqs_off(unsigned long ip)
 		curr->softirq_disable_ip = ip;
 		curr->softirq_disable_event = ++curr->irq_events;
 		debug_atomic_inc(softirqs_off_events);
+		/*
+		 * Whoops, we wanted softirqs off, so why aren't they?
+		 */
 		DEBUG_LOCKS_WARN_ON(!softirq_count());
 	} else
 		debug_atomic_inc(redundant_softirqs_off);
@@ -2661,6 +2727,9 @@ static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
 	if (!(gfp_mask & __GFP_FS))
 		return;
 
+	/*
+	 * Oi! Can't be having __GFP_FS allocations with IRQs disabled.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
 		return;
 
@@ -2773,13 +2842,13 @@ static int separate_irq_context(struct task_struct *curr,
 	return 0;
 }
 
-#else
+#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
 
 static inline
 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		enum lock_usage_bit new_bit)
 {
-	WARN_ON(1);
+	WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
 	return 1;
 }
 
@@ -2799,7 +2868,7 @@ void lockdep_trace_alloc(gfp_t gfp_mask)
 {
 }
 
-#endif
+#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
 
 /*
  * Mark a lock with a usage bit, and validate the state transition:
@@ -2880,6 +2949,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	lock->cpu = raw_smp_processor_id();
 #endif
 
+	/*
+	 * Can't be having no nameless bastards around this place!
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!name)) {
 		lock->name = "NULL";
 		return;
@@ -2887,6 +2959,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 
 	lock->name = name;
 
+	/*
+	 * No key, no joy, we need to hash something.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!key))
 		return;
 	/*
@@ -2894,6 +2969,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	 */
 	if (!static_obj(key)) {
 		printk("BUG: key %p not in .data!\n", key);
+		/*
+		 * What it says above ^^^^^, I suggest you read it.
+		 */
 		DEBUG_LOCKS_WARN_ON(1);
 		return;
 	}
@@ -2932,6 +3010,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (unlikely(!debug_locks))
 		return 0;
 
+	/*
+	 * Lockdep should run with IRQs disabled, otherwise we could
+	 * get an interrupt which would want to take locks, which would
+	 * end up in lockdep and have you got a head-ache already?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
 
@@ -2963,6 +3046,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	 * dependency checks are done)
 	 */
 	depth = curr->lockdep_depth;
+	/*
+	 * Ran out of static storage for our per-task lock stack again have we?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
 		return 0;
 
@@ -2981,6 +3067,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	}
 
 	hlock = curr->held_locks + depth;
+	/*
+	 * Plain impossible, we just registered it and checked it weren't no
+	 * NULL like.. I bet this mushroom I ate was good!
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!class))
 		return 0;
 	hlock->class_idx = class_idx;
@@ -3015,11 +3105,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	 * the hash, not class->key.
 	 */
 	id = class - lock_classes;
+	/*
+	 * Whoops, we did it again.. ran straight out of our static allocation.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
 		return 0;
 
 	chain_key = curr->curr_chain_key;
 	if (!depth) {
+		/*
+		 * How can we have a chain hash when we ain't got no keys?!
+		 */
 		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
 			return 0;
 		chain_head = 1;
@@ -3091,6 +3187,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 {
 	if (unlikely(!debug_locks))
 		return 0;
+	/*
+	 * Lockdep should run with IRQs disabled, recursion, head-ache, etc..
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
 
@@ -3120,6 +3219,11 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 	if (!class)
 		return 0;
 
+	/*
+	 * References, but not a lock we're actually ref-counting?
+	 * State got messed up, follow the sites that change ->references
+	 * and try to make sense of it.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
 		return 0;
 
@@ -3142,6 +3246,10 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 	int i;
 
 	depth = curr->lockdep_depth;
+	/*
+	 * This function is about (re)setting the class of a held lock,
+	 * yet we're not actually holding any locks. Naughty user!
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return 0;
 
@@ -3177,6 +3285,10 @@ found_it:
 		return 0;
 	}
 
+	/*
+	 * I took it apart and put it back together again, except now I have
+	 * these 'spare' parts.. where shall I put them.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
 		return 0;
 	return 1;
@@ -3201,6 +3313,10 @@ lock_release_non_nested(struct task_struct *curr,
 	 * of held locks:
 	 */
 	depth = curr->lockdep_depth;
+	/*
+	 * So we're all set to release this lock.. wait what lock? We don't
+	 * own any locks, you've been drinking again?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return 0;
 
@@ -3253,6 +3369,10 @@ found_it:
 		return 0;
 	}
 
+	/*
+	 * We had N bottles of beer on the wall, we drank one, but now
+	 * there's not N-1 bottles of beer left on the wall...
+	 */
 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
 		return 0;
 	return 1;
@@ -3283,6 +3403,9 @@ static int lock_release_nested(struct task_struct *curr,
 		return lock_release_non_nested(curr, lock, ip);
 	curr->lockdep_depth--;
 
+	/*
+	 * No more locks, but somehow we've got hash left over, who left it?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
 		return 0;
 
@@ -3365,10 +3488,13 @@ static void check_flags(unsigned long flags)
 	 * check if not in hardirq contexts:
 	 */
 	if (!hardirq_count()) {
-		if (softirq_count())
+		if (softirq_count()) {
+			/* like the above, but with softirqs */
 			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
-		else
+		} else {
+			/* lick the above, does it taste good? */
 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
+		}
 	}
 
 	if (!debug_locks)
@@ -3506,6 +3632,10 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 	int i, contention_point, contending_point;
 
 	depth = curr->lockdep_depth;
+	/*
+	 * Whee, we contended on this lock, except it seems we're not
+	 * actually trying to acquire anything much at all..
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return;
 
@@ -3555,6 +3685,10 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 	int i, cpu;
 
 	depth = curr->lockdep_depth;
+	/*
+	 * Yay, we acquired ownership of this lock we didn't try to
+	 * acquire, how the heck did that happen?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return;
 
@@ -3759,8 +3893,12 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 			match |= class == lock->class_cache[j];
 
 		if (unlikely(match)) {
-			if (debug_locks_off_graph_unlock())
+			if (debug_locks_off_graph_unlock()) {
+				/*
+				 * We all just reset everything, how did it match?
+				 */
 				WARN_ON(1);
+			}
 			goto out_restore;
 		}
 	}
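For readers chasing the graph_unlock() warning in the first hunk: the acquire
side it pairs with looks roughly like the sketch below (a from-memory
approximation of graph_lock() in the same file, not part of this patch). It
shows why finding lockdep_lock unlocked inside graph_unlock() means lockdep's
internal state is already trashed.

static int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	/*
	 * If another CPU already tripped a warning and turned
	 * debug_locks off, don't bother holding the graph lock:
	 */
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* keep lockdep's own locking from recursing into lockdep */
	current->lockdep_recursion++;
	return 1;
}

Every graph_lock() caller is expected to balance it with graph_unlock(), or
with debug_locks_off_graph_unlock() on the error paths seen above, which is
what makes the "isn't locked while we expect it to be" comment a genuine
can't-happen check.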