Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--  kernel/locking/lockdep.c | 116
1 file changed, 30 insertions(+), 86 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index dd13f865ad40..1efada2dd9dd 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -138,7 +138,7 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
  * get freed - this significantly simplifies the debugging code.
  */
 unsigned long nr_lock_classes;
-static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
 static inline struct lock_class *hlock_class(struct held_lock *hlock)
 {
@@ -1391,7 +1391,9 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
 	printk("%*s->", depth, "");
 	print_lock_name(class);
-	printk(KERN_CONT " ops: %lu", class->ops);
+#ifdef CONFIG_DEBUG_LOCKDEP
+	printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
+#endif
 	printk(KERN_CONT " {\n");
 
 	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
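Note that the ops counter is now read through debug_class_ops_read() and printed only when CONFIG_DEBUG_LOCKDEP is set, and that lock_classes[] loses its static qualifier in the first hunk so code outside this file can translate a class pointer into an index. The diff only shows the call sites; a minimal sketch of what such CONFIG_DEBUG_LOCKDEP-only helpers could look like, assuming a per-cpu lockdep_stats structure (names and placement here are illustrative, not taken from this patch):

/*
 * Hypothetical sketch only -- not part of this diff.  One way to back
 * debug_class_ops_read()/debug_class_ops_inc() with per-cpu counters,
 * indexed by a class's position in the now-global lock_classes[] array.
 */
#ifdef CONFIG_DEBUG_LOCKDEP

struct lockdep_stats {
	unsigned long	lock_class_ops[MAX_LOCKDEP_KEYS];
	/* ... other debug counters ... */
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

static inline void debug_class_ops_inc(struct lock_class *class)
{
	/* hot path: a local per-cpu increment, no shared atomic */
	this_cpu_inc(lockdep_stats.lock_class_ops[class - lock_classes]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx = class - lock_classes, cpu;
	unsigned long ops = 0;

	/* slow path: only the splat/print code needs the summed total */
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else /* !CONFIG_DEBUG_LOCKDEP */
static inline void debug_class_ops_inc(struct lock_class *class) { }
#endif

A per-cpu counter keeps the acquire fast path free of shared atomic updates (compare the later hunk that drops atomic_inc((atomic_t *)&class->ops)), at the cost of one counter array per CPU, which is a reasonable trade-off for a debug-only option.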
@@ -2148,76 +2150,6 @@ static int check_no_collision(struct task_struct *curr,
 }
 
 /*
- * This is for building a chain between just two different classes,
- * instead of adding a new hlock upon current, which is done by
- * add_chain_cache().
- *
- * This can be called in any context with two classes, while
- * add_chain_cache() must be done within the lock owener's context
- * since it uses hlock which might be racy in another context.
- */
-static inline int add_chain_cache_classes(unsigned int prev,
-					   unsigned int next,
-					   unsigned int irq_context,
-					   u64 chain_key)
-{
-	struct hlist_head *hash_head = chainhashentry(chain_key);
-	struct lock_chain *chain;
-
-	/*
-	 * Allocate a new chain entry from the static array, and add
-	 * it to the hash:
-	 */
-
-	/*
-	 * We might need to take the graph lock, ensure we've got IRQs
-	 * disabled to make this an IRQ-safe lock.. for recursion reasons
-	 * lockdep won't complain about its own locking errors.
-	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return 0;
-
-	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
-		if (!debug_locks_off_graph_unlock())
-			return 0;
-
-		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
-		dump_stack();
-		return 0;
-	}
-
-	chain = lock_chains + nr_lock_chains++;
-	chain->chain_key = chain_key;
-	chain->irq_context = irq_context;
-	chain->depth = 2;
-	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
-		chain->base = nr_chain_hlocks;
-		nr_chain_hlocks += chain->depth;
-		chain_hlocks[chain->base] = prev - 1;
-		chain_hlocks[chain->base + 1] = next -1;
-	}
-#ifdef CONFIG_DEBUG_LOCKDEP
-	/*
-	 * Important for check_no_collision().
-	 */
-	else {
-		if (!debug_locks_off_graph_unlock())
-			return 0;
-
-		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
-		dump_stack();
-		return 0;
-	}
-#endif
-
-	hlist_add_head_rcu(&chain->entry, hash_head);
-	debug_atomic_inc(chain_lookup_misses);
-	inc_chains();
-
-	return 1;
-}
-
-/*
  * Adds a dependency chain into chain hashtable. And must be called with
  * graph_lock held.
  *
@@ -3262,6 +3194,10 @@ static int __lock_is_held(const struct lockdep_map *lock, int read);
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
  * We maintain the dependency maps and validate the locking attempt:
+ *
+ * The callers must make sure that IRQs are disabled before calling it,
+ * otherwise we could get an interrupt which would want to take locks,
+ * which would end up in lockdep again.
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			  int trylock, int read, int check, int hardirqs_off,
@@ -3279,14 +3215,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (unlikely(!debug_locks))
 		return 0;
 
-	/*
-	 * Lockdep should run with IRQs disabled, otherwise we could
-	 * get an interrupt which would want to take locks, which would
-	 * end up in lockdep and have you got a head-ache already?
-	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return 0;
-
 	if (!prove_locking || lock->key == &__lockdep_no_validate__)
 		check = 0;
 
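With the assertion gone from __lock_acquire()'s hot path, the requirement moves to the callers, as documented by the comment added in the earlier hunk; the exported wrappers already satisfy it by disabling interrupts around the call, and the internal re-entry point, reacquire_held_locks(), gains its own DEBUG_LOCKS_WARN_ON a couple of hunks below. For reference, the caller-side pattern looks roughly like the existing lock_acquire() wrapper (paraphrased for illustration, not code added by this patch):

void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
		  int trylock, int read, int check,
		  struct lockdep_map *nest_lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	/* IRQs stay off for the whole validation, satisfying the new contract */
	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
	__lock_acquire(lock, subclass, trylock, read, check,
		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}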
@@ -3300,7 +3228,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		if (!class)
 			return 0;
 	}
-	atomic_inc((atomic_t *)&class->ops);
+
+	debug_class_ops_inc(class);
+
 	if (very_verbose(class)) {
 		printk("\nacquire class [%px] %s", class->key, class->name);
 		if (class->name_version > 1)
@@ -3543,6 +3473,9 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
 {
 	struct held_lock *hlock;
 
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return 0;
+
 	for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
 		if (!__lock_acquire(hlock->instance,
 				    hlock_class(hlock)->subclass,
@@ -3696,6 +3629,13 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	curr->lockdep_depth = i;
 	curr->curr_chain_key = hlock->prev_chain_key;
 
+	/*
+	 * The most likely case is when the unlock is on the innermost
+	 * lock. In this case, we are done!
+	 */
+	if (i == depth-1)
+		return 1;
+
 	if (reacquire_held_locks(curr, depth, i + 1))
 		return 0;
 
@@ -3703,10 +3643,14 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	 * We had N bottles of beer on the wall, we drank one, but now
 	 * there's not N-1 bottles of beer left on the wall...
 	 */
-	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
-		return 0;
+	DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1);
 
-	return 1;
+	/*
+	 * Since reacquire_held_locks() would have called check_chain_key()
+	 * indirectly via __lock_acquire(), we don't need to do it again
+	 * on return.
+	 */
+	return 0;
 }
 
 static int __lock_is_held(const struct lockdep_map *lock, int read)
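Taken together, the previous two hunks give __lock_release() a fast path for the common case: the return value still gates check_chain_key() in the caller, but the out-of-order path now returns 0 because, as the new comment notes, reacquire_held_locks() has already re-validated the chain key via __lock_acquire(). A worked example, assuming the usual lock_release() wrapper that only checks the chain key when __lock_release() returns nonzero (an assumption about the caller, not shown in this diff):

/*
 * Illustrative example only.  Suppose a task holds A, B and C in that
 * order, so held_locks[] = { A, B, C } and lockdep_depth == 3.
 *
 *  - Releasing C (the innermost lock): i == depth-1, so the new fast
 *    path returns 1 straight away; nothing has to be re-acquired and
 *    the caller performs the chain-key sanity check itself.
 *
 *  - Releasing B (out of order): the entries above it (here just C) are
 *    re-validated through reacquire_held_locks() -> __lock_acquire(),
 *    which recomputes the chain key as a side effect, so
 *    __lock_release() returns 0 and the caller skips the redundant
 *    check:
 *
 *	if (__lock_release(lock, nested, ip))
 *		check_chain_key(current);
 */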
@@ -4122,7 +4066,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
-	if (unlikely(!lock_stat))
+	if (unlikely(!lock_stat || !debug_locks))
 		return;
 
 	if (unlikely(current->lockdep_recursion))
@@ -4142,7 +4086,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
-	if (unlikely(!lock_stat))
+	if (unlikely(!lock_stat || !debug_locks))
 		return;
 
 	if (unlikely(current->lockdep_recursion))