author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-08-11 19:45:46 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-08-11 19:45:46 -0400
commit | 9b4d0bab32e18e4f72781f9fa309a81495b2aff3
tree | ce95c619d06d10bd0b2c0039f76a03deefaab2c2
parent | 7019b1b50097a94d0f8a77b81bee0b19b108c634
parent | 23a0ee908cbfba3264d19729c67c22b20fa73886
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
lockdep: fix debug_lock_alloc
lockdep: increase MAX_LOCKDEP_KEYS
generic-ipi: fix stack and rcu interaction bug in smp_call_function_mask()
lockdep: fix overflow in the hlock shrinkage code
lockdep: rename map_[acquire|release]() => lock_map_[acquire|release]()
lockdep: handle chains involving classes defined in modules
mm: fix mm_take_all_locks() locking order
lockdep: annotate mm_take_all_locks()
lockdep: spin_lock_nest_lock()
lockdep: lock protection locks
lockdep: map_acquire
lockdep: shrink held_lock structure
lockdep: re-annotate scheduler runqueues
lockdep: lock_set_subclass - reset a held lock's subclass
lockdep: change scheduler annotation
debug_locks: set oops_in_progress if we will log messages.
lockdep: fix combinatorial explosion in lock subgraph traversal
-rw-r--r-- | fs/jbd/transaction.c | 4
-rw-r--r-- | fs/jbd2/transaction.c | 4
-rw-r--r-- | include/linux/lockdep.h | 70
-rw-r--r-- | include/linux/rcuclassic.h | 2
-rw-r--r-- | include/linux/spinlock.h | 6
-rw-r--r-- | include/linux/spinlock_api_smp.h | 2
-rw-r--r-- | kernel/lockdep.c | 295
-rw-r--r-- | kernel/lockdep_internals.h | 6
-rw-r--r-- | kernel/lockdep_proc.c | 37
-rw-r--r-- | kernel/sched.c | 21
-rw-r--r-- | kernel/sched_rt.c | 8
-rw-r--r-- | kernel/smp.c | 54
-rw-r--r-- | kernel/spinlock.c | 11
-rw-r--r-- | kernel/workqueue.c | 24
-rw-r--r-- | lib/debug_locks.c | 2
-rw-r--r-- | mm/mmap.c | 20
16 files changed, 413 insertions, 153 deletions
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 8dee32007500..0540ca27a446 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks) | |||
291 | goto out; | 291 | goto out; |
292 | } | 292 | } |
293 | 293 | ||
294 | lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 294 | lock_map_acquire(&handle->h_lockdep_map); |
295 | 295 | ||
296 | out: | 296 | out: |
297 | return handle; | 297 | return handle; |
@@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle) | |||
1448 | spin_unlock(&journal->j_state_lock); | 1448 | spin_unlock(&journal->j_state_lock); |
1449 | } | 1449 | } |
1450 | 1450 | ||
1451 | lock_release(&handle->h_lockdep_map, 1, _THIS_IP_); | 1451 | lock_map_release(&handle->h_lockdep_map); |
1452 | 1452 | ||
1453 | jbd_free_handle(handle); | 1453 | jbd_free_handle(handle); |
1454 | return err; | 1454 | return err; |
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 4f7cadbb19fa..e5d540588fa9 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks) | |||
301 | goto out; | 301 | goto out; |
302 | } | 302 | } |
303 | 303 | ||
304 | lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 304 | lock_map_acquire(&handle->h_lockdep_map); |
305 | out: | 305 | out: |
306 | return handle; | 306 | return handle; |
307 | } | 307 | } |
@@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle) | |||
1279 | spin_unlock(&journal->j_state_lock); | 1279 | spin_unlock(&journal->j_state_lock); |
1280 | } | 1280 | } |
1281 | 1281 | ||
1282 | lock_release(&handle->h_lockdep_map, 1, _THIS_IP_); | 1282 | lock_map_release(&handle->h_lockdep_map); |
1283 | 1283 | ||
1284 | jbd2_free_handle(handle); | 1284 | jbd2_free_handle(handle); |
1285 | return err; | 1285 | return err; |
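The two jbd hunks above are the call sites cleaned up by the `lockdep: map_acquire` rename in this merge: open-coded `lock_acquire()`/`lock_release()` annotations on the journal handle's pseudo-lock become `lock_map_acquire()`/`lock_map_release()`. A minimal sketch of the same pattern for a made-up resource (the `my_*` names are illustrative, not from the patch):

```c
#include <linux/lockdep.h>

/* one class key + map per annotated resource (illustrative names) */
static struct lock_class_key my_resource_key;
static struct lockdep_map my_resource_map;

static void my_resource_setup(void)
{
	lockdep_init_map(&my_resource_map, "my_resource", &my_resource_key, 0);
}

static void my_resource_begin(void)
{
	/* was: lock_acquire(&my_resource_map, 0, 0, 0, 2, _THIS_IP_); */
	lock_map_acquire(&my_resource_map);
}

static void my_resource_end(void)
{
	/* was: lock_release(&my_resource_map, 1, _THIS_IP_); */
	lock_map_release(&my_resource_map);
}
```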
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2486eb4edbf1..331e5f1c2d8e 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -89,6 +89,7 @@ struct lock_class { | |||
89 | 89 | ||
90 | struct lockdep_subclass_key *key; | 90 | struct lockdep_subclass_key *key; |
91 | unsigned int subclass; | 91 | unsigned int subclass; |
92 | unsigned int dep_gen_id; | ||
92 | 93 | ||
93 | /* | 94 | /* |
94 | * IRQ/softirq usage tracking bits: | 95 | * IRQ/softirq usage tracking bits: |
@@ -189,6 +190,14 @@ struct lock_chain { | |||
189 | u64 chain_key; | 190 | u64 chain_key; |
190 | }; | 191 | }; |
191 | 192 | ||
193 | #define MAX_LOCKDEP_KEYS_BITS 13 | ||
194 | /* | ||
195 | * Subtract one because we offset hlock->class_idx by 1 in order | ||
196 | * to make 0 mean no class. This avoids overflowing the class_idx | ||
197 | * bitfield and hitting the BUG in hlock_class(). | ||
198 | */ | ||
199 | #define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) | ||
200 | |||
192 | struct held_lock { | 201 | struct held_lock { |
193 | /* | 202 | /* |
194 | * One-way hash of the dependency chain up to this point. We | 203 | * One-way hash of the dependency chain up to this point. We |
@@ -205,14 +214,14 @@ struct held_lock { | |||
205 | * with zero), here we store the previous hash value: | 214 | * with zero), here we store the previous hash value: |
206 | */ | 215 | */ |
207 | u64 prev_chain_key; | 216 | u64 prev_chain_key; |
208 | struct lock_class *class; | ||
209 | unsigned long acquire_ip; | 217 | unsigned long acquire_ip; |
210 | struct lockdep_map *instance; | 218 | struct lockdep_map *instance; |
211 | 219 | struct lockdep_map *nest_lock; | |
212 | #ifdef CONFIG_LOCK_STAT | 220 | #ifdef CONFIG_LOCK_STAT |
213 | u64 waittime_stamp; | 221 | u64 waittime_stamp; |
214 | u64 holdtime_stamp; | 222 | u64 holdtime_stamp; |
215 | #endif | 223 | #endif |
224 | unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; | ||
216 | /* | 225 | /* |
217 | * The lock-stack is unified in that the lock chains of interrupt | 226 | * The lock-stack is unified in that the lock chains of interrupt |
218 | * contexts nest ontop of process context chains, but we 'separate' | 227 | * contexts nest ontop of process context chains, but we 'separate' |
@@ -226,11 +235,11 @@ struct held_lock { | |||
226 | * The following field is used to detect when we cross into an | 235 | * The following field is used to detect when we cross into an |
227 | * interrupt context: | 236 | * interrupt context: |
228 | */ | 237 | */ |
229 | int irq_context; | 238 | unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ |
230 | int trylock; | 239 | unsigned int trylock:1; |
231 | int read; | 240 | unsigned int read:2; /* see lock_acquire() comment */ |
232 | int check; | 241 | unsigned int check:2; /* see lock_acquire() comment */ |
233 | int hardirqs_off; | 242 | unsigned int hardirqs_off:1; |
234 | }; | 243 | }; |
235 | 244 | ||
236 | /* | 245 | /* |
@@ -294,11 +303,15 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name, | |||
294 | * 2: full validation | 303 | * 2: full validation |
295 | */ | 304 | */ |
296 | extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | 305 | extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
297 | int trylock, int read, int check, unsigned long ip); | 306 | int trylock, int read, int check, |
307 | struct lockdep_map *nest_lock, unsigned long ip); | ||
298 | 308 | ||
299 | extern void lock_release(struct lockdep_map *lock, int nested, | 309 | extern void lock_release(struct lockdep_map *lock, int nested, |
300 | unsigned long ip); | 310 | unsigned long ip); |
301 | 311 | ||
312 | extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, | ||
313 | unsigned long ip); | ||
314 | |||
302 | # define INIT_LOCKDEP .lockdep_recursion = 0, | 315 | # define INIT_LOCKDEP .lockdep_recursion = 0, |
303 | 316 | ||
304 | #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) | 317 | #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) |
@@ -313,8 +326,9 @@ static inline void lockdep_on(void) | |||
313 | { | 326 | { |
314 | } | 327 | } |
315 | 328 | ||
316 | # define lock_acquire(l, s, t, r, c, i) do { } while (0) | 329 | # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) |
317 | # define lock_release(l, n, i) do { } while (0) | 330 | # define lock_release(l, n, i) do { } while (0) |
331 | # define lock_set_subclass(l, s, i) do { } while (0) | ||
318 | # define lockdep_init() do { } while (0) | 332 | # define lockdep_init() do { } while (0) |
319 | # define lockdep_info() do { } while (0) | 333 | # define lockdep_info() do { } while (0) |
320 | # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0) | 334 | # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0) |
@@ -400,9 +414,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) | |||
400 | 414 | ||
401 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 415 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
402 | # ifdef CONFIG_PROVE_LOCKING | 416 | # ifdef CONFIG_PROVE_LOCKING |
403 | # define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) | 417 | # define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
418 | # define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) | ||
404 | # else | 419 | # else |
405 | # define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) | 420 | # define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
421 | # define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) | ||
406 | # endif | 422 | # endif |
407 | # define spin_release(l, n, i) lock_release(l, n, i) | 423 | # define spin_release(l, n, i) lock_release(l, n, i) |
408 | #else | 424 | #else |
@@ -412,11 +428,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) | |||
412 | 428 | ||
413 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 429 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
414 | # ifdef CONFIG_PROVE_LOCKING | 430 | # ifdef CONFIG_PROVE_LOCKING |
415 | # define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) | 431 | # define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
416 | # define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i) | 432 | # define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) |
417 | # else | 433 | # else |
418 | # define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) | 434 | # define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
419 | # define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i) | 435 | # define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) |
420 | # endif | 436 | # endif |
421 | # define rwlock_release(l, n, i) lock_release(l, n, i) | 437 | # define rwlock_release(l, n, i) lock_release(l, n, i) |
422 | #else | 438 | #else |
@@ -427,9 +443,9 @@ static inline void print_irqtrace_events(struct task_struct *curr) | |||
427 | 443 | ||
428 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 444 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
429 | # ifdef CONFIG_PROVE_LOCKING | 445 | # ifdef CONFIG_PROVE_LOCKING |
430 | # define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) | 446 | # define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
431 | # else | 447 | # else |
432 | # define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) | 448 | # define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
433 | # endif | 449 | # endif |
434 | # define mutex_release(l, n, i) lock_release(l, n, i) | 450 | # define mutex_release(l, n, i) lock_release(l, n, i) |
435 | #else | 451 | #else |
@@ -439,11 +455,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) | |||
439 | 455 | ||
440 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 456 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
441 | # ifdef CONFIG_PROVE_LOCKING | 457 | # ifdef CONFIG_PROVE_LOCKING |
442 | # define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) | 458 | # define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
443 | # define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i) | 459 | # define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) |
444 | # else | 460 | # else |
445 | # define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) | 461 | # define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
446 | # define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i) | 462 | # define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) |
447 | # endif | 463 | # endif |
448 | # define rwsem_release(l, n, i) lock_release(l, n, i) | 464 | # define rwsem_release(l, n, i) lock_release(l, n, i) |
449 | #else | 465 | #else |
@@ -452,4 +468,16 @@ static inline void print_irqtrace_events(struct task_struct *curr) | |||
452 | # define rwsem_release(l, n, i) do { } while (0) | 468 | # define rwsem_release(l, n, i) do { } while (0) |
453 | #endif | 469 | #endif |
454 | 470 | ||
471 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
472 | # ifdef CONFIG_PROVE_LOCKING | ||
473 | # define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) | ||
474 | # else | ||
475 | # define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) | ||
476 | # endif | ||
477 | # define lock_map_release(l) lock_release(l, 1, _THIS_IP_) | ||
478 | #else | ||
479 | # define lock_map_acquire(l) do { } while (0) | ||
480 | # define lock_map_release(l) do { } while (0) | ||
481 | #endif | ||
482 | |||
455 | #endif /* __LINUX_LOCKDEP_H */ | 483 | #endif /* __LINUX_LOCKDEP_H */ |
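The `held_lock` shrinkage above replaces the `struct lock_class *class` pointer with a 13-bit `class_idx`, and `MAX_LOCKDEP_KEYS` gives up one slot so that an all-zero bitfield can mean "no class". A tiny userland stand-in for that encoding (plain C demonstration, not kernel code; decode in the kernel is `hlock_class()` in kernel/lockdep.c below):

```c
#include <stdio.h>

#define MAX_KEYS_BITS 13
#define MAX_KEYS ((1UL << MAX_KEYS_BITS) - 1)	/* one value reserved for "none" */

struct klass { const char *name; };
static struct klass classes[MAX_KEYS];

struct holder {
	unsigned int class_idx : MAX_KEYS_BITS;	/* 0 means no class */
};

int main(void)
{
	struct holder h = { 0 };

	classes[7].name = "example";
	h.class_idx = 7 + 1;				/* encode: index + 1 */
	printf("%s\n", classes[h.class_idx - 1].name);	/* decode: index - 1 */
	return 0;
}
```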
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 8c774905dcfe..4ab843622727 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu); | |||
117 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 117 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
118 | extern struct lockdep_map rcu_lock_map; | 118 | extern struct lockdep_map rcu_lock_map; |
119 | # define rcu_read_acquire() \ | 119 | # define rcu_read_acquire() \ |
120 | lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) | 120 | lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) |
121 | # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) | 121 | # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) |
122 | #else | 122 | #else |
123 | # define rcu_read_acquire() do { } while (0) | 123 | # define rcu_read_acquire() do { } while (0) |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 61e5610ad165..e0c0fccced46 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -183,8 +183,14 @@ do { \ | |||
183 | 183 | ||
184 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 184 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
185 | # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) | 185 | # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) |
186 | # define spin_lock_nest_lock(lock, nest_lock) \ | ||
187 | do { \ | ||
188 | typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ | ||
189 | _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ | ||
190 | } while (0) | ||
186 | #else | 191 | #else |
187 | # define spin_lock_nested(lock, subclass) _spin_lock(lock) | 192 | # define spin_lock_nested(lock, subclass) _spin_lock(lock) |
193 | # define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) | ||
188 | #endif | 194 | #endif |
189 | 195 | ||
190 | #define write_lock(lock) _write_lock(lock) | 196 | #define write_lock(lock) _write_lock(lock) |
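`spin_lock_nest_lock()` is the annotation behind the "lock protection locks" and `mm_take_all_locks()` commits in this merge: when many locks of one class are only ever taken while a single outer lock is held, the outer lock is passed as `nest_lock` so lockdep treats the whole group as serialized by it instead of flagging the repeated same-class acquisitions. A hedged sketch of that pattern, with an illustrative `struct group` that is not from the patch:

```c
#include <linux/mutex.h>
#include <linux/spinlock.h>

#define NR_INNER 16

struct group {
	struct mutex outer;		/* the protection lock */
	spinlock_t   inner[NR_INNER];	/* many locks sharing one class */
};

static void group_init(struct group *g)
{
	int i;

	mutex_init(&g->outer);
	for (i = 0; i < NR_INNER; i++)
		spin_lock_init(&g->inner[i]);	/* one init site -> one class */
}

static void group_lock_all(struct group *g)
{
	int i;

	mutex_lock(&g->outer);
	for (i = 0; i < NR_INNER; i++)
		/* each inner lock is annotated as nesting under g->outer */
		spin_lock_nest_lock(&g->inner[i], &g->outer);
}

static void group_unlock_all(struct group *g)
{
	int i;

	for (i = NR_INNER - 1; i >= 0; i--)
		spin_unlock(&g->inner[i]);
	mutex_unlock(&g->outer);
}
```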
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 8a2307ce7296..d79845d034b5 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr); | |||
22 | void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); | 22 | void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); |
23 | void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) | 23 | void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) |
24 | __acquires(lock); | 24 | __acquires(lock); |
25 | void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) | ||
26 | __acquires(lock); | ||
25 | void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); | 27 | void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); |
26 | void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); | 28 | void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); |
27 | void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); | 29 | void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); |
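The declaration above needs a matching definition in kernel/spinlock.c; that file's 11-line hunk is not part of this excerpt. A hedged sketch of what it plausibly looks like, modeled on the existing `_spin_lock_nested()` and the `spin_acquire_nest()` hook added in include/linux/lockdep.h above:

```c
/* Sketch only: the real hunk lives in kernel/spinlock.c, not shown here. */
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
				     struct lockdep_map *nest_lock)
{
	preempt_disable();
	/* record the acquisition with the nest_lock dependency attached */
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(_spin_lock_nest_lock);
```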
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index d38a64362973..1aa91fd6b06e 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -124,6 +124,15 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; | |||
124 | unsigned long nr_lock_classes; | 124 | unsigned long nr_lock_classes; |
125 | static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; | 125 | static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; |
126 | 126 | ||
127 | static inline struct lock_class *hlock_class(struct held_lock *hlock) | ||
128 | { | ||
129 | if (!hlock->class_idx) { | ||
130 | DEBUG_LOCKS_WARN_ON(1); | ||
131 | return NULL; | ||
132 | } | ||
133 | return lock_classes + hlock->class_idx - 1; | ||
134 | } | ||
135 | |||
127 | #ifdef CONFIG_LOCK_STAT | 136 | #ifdef CONFIG_LOCK_STAT |
128 | static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); | 137 | static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); |
129 | 138 | ||
@@ -222,7 +231,7 @@ static void lock_release_holdtime(struct held_lock *hlock) | |||
222 | 231 | ||
223 | holdtime = sched_clock() - hlock->holdtime_stamp; | 232 | holdtime = sched_clock() - hlock->holdtime_stamp; |
224 | 233 | ||
225 | stats = get_lock_stats(hlock->class); | 234 | stats = get_lock_stats(hlock_class(hlock)); |
226 | if (hlock->read) | 235 | if (hlock->read) |
227 | lock_time_inc(&stats->read_holdtime, holdtime); | 236 | lock_time_inc(&stats->read_holdtime, holdtime); |
228 | else | 237 | else |
@@ -372,6 +381,19 @@ unsigned int nr_process_chains; | |||
372 | unsigned int max_lockdep_depth; | 381 | unsigned int max_lockdep_depth; |
373 | unsigned int max_recursion_depth; | 382 | unsigned int max_recursion_depth; |
374 | 383 | ||
384 | static unsigned int lockdep_dependency_gen_id; | ||
385 | |||
386 | static bool lockdep_dependency_visit(struct lock_class *source, | ||
387 | unsigned int depth) | ||
388 | { | ||
389 | if (!depth) | ||
390 | lockdep_dependency_gen_id++; | ||
391 | if (source->dep_gen_id == lockdep_dependency_gen_id) | ||
392 | return true; | ||
393 | source->dep_gen_id = lockdep_dependency_gen_id; | ||
394 | return false; | ||
395 | } | ||
396 | |||
375 | #ifdef CONFIG_DEBUG_LOCKDEP | 397 | #ifdef CONFIG_DEBUG_LOCKDEP |
376 | /* | 398 | /* |
377 | * We cannot printk in early bootup code. Not even early_printk() | 399 | * We cannot printk in early bootup code. Not even early_printk() |
@@ -505,7 +527,7 @@ static void print_lockdep_cache(struct lockdep_map *lock) | |||
505 | 527 | ||
506 | static void print_lock(struct held_lock *hlock) | 528 | static void print_lock(struct held_lock *hlock) |
507 | { | 529 | { |
508 | print_lock_name(hlock->class); | 530 | print_lock_name(hlock_class(hlock)); |
509 | printk(", at: "); | 531 | printk(", at: "); |
510 | print_ip_sym(hlock->acquire_ip); | 532 | print_ip_sym(hlock->acquire_ip); |
511 | } | 533 | } |
@@ -558,6 +580,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth) | |||
558 | { | 580 | { |
559 | struct lock_list *entry; | 581 | struct lock_list *entry; |
560 | 582 | ||
583 | if (lockdep_dependency_visit(class, depth)) | ||
584 | return; | ||
585 | |||
561 | if (DEBUG_LOCKS_WARN_ON(depth >= 20)) | 586 | if (DEBUG_LOCKS_WARN_ON(depth >= 20)) |
562 | return; | 587 | return; |
563 | 588 | ||
@@ -932,7 +957,7 @@ static noinline int print_circular_bug_tail(void) | |||
932 | if (debug_locks_silent) | 957 | if (debug_locks_silent) |
933 | return 0; | 958 | return 0; |
934 | 959 | ||
935 | this.class = check_source->class; | 960 | this.class = hlock_class(check_source); |
936 | if (!save_trace(&this.trace)) | 961 | if (!save_trace(&this.trace)) |
937 | return 0; | 962 | return 0; |
938 | 963 | ||
@@ -959,6 +984,67 @@ static int noinline print_infinite_recursion_bug(void) | |||
959 | return 0; | 984 | return 0; |
960 | } | 985 | } |
961 | 986 | ||
987 | unsigned long __lockdep_count_forward_deps(struct lock_class *class, | ||
988 | unsigned int depth) | ||
989 | { | ||
990 | struct lock_list *entry; | ||
991 | unsigned long ret = 1; | ||
992 | |||
993 | if (lockdep_dependency_visit(class, depth)) | ||
994 | return 0; | ||
995 | |||
996 | /* | ||
997 | * Recurse this class's dependency list: | ||
998 | */ | ||
999 | list_for_each_entry(entry, &class->locks_after, entry) | ||
1000 | ret += __lockdep_count_forward_deps(entry->class, depth + 1); | ||
1001 | |||
1002 | return ret; | ||
1003 | } | ||
1004 | |||
1005 | unsigned long lockdep_count_forward_deps(struct lock_class *class) | ||
1006 | { | ||
1007 | unsigned long ret, flags; | ||
1008 | |||
1009 | local_irq_save(flags); | ||
1010 | __raw_spin_lock(&lockdep_lock); | ||
1011 | ret = __lockdep_count_forward_deps(class, 0); | ||
1012 | __raw_spin_unlock(&lockdep_lock); | ||
1013 | local_irq_restore(flags); | ||
1014 | |||
1015 | return ret; | ||
1016 | } | ||
1017 | |||
1018 | unsigned long __lockdep_count_backward_deps(struct lock_class *class, | ||
1019 | unsigned int depth) | ||
1020 | { | ||
1021 | struct lock_list *entry; | ||
1022 | unsigned long ret = 1; | ||
1023 | |||
1024 | if (lockdep_dependency_visit(class, depth)) | ||
1025 | return 0; | ||
1026 | /* | ||
1027 | * Recurse this class's dependency list: | ||
1028 | */ | ||
1029 | list_for_each_entry(entry, &class->locks_before, entry) | ||
1030 | ret += __lockdep_count_backward_deps(entry->class, depth + 1); | ||
1031 | |||
1032 | return ret; | ||
1033 | } | ||
1034 | |||
1035 | unsigned long lockdep_count_backward_deps(struct lock_class *class) | ||
1036 | { | ||
1037 | unsigned long ret, flags; | ||
1038 | |||
1039 | local_irq_save(flags); | ||
1040 | __raw_spin_lock(&lockdep_lock); | ||
1041 | ret = __lockdep_count_backward_deps(class, 0); | ||
1042 | __raw_spin_unlock(&lockdep_lock); | ||
1043 | local_irq_restore(flags); | ||
1044 | |||
1045 | return ret; | ||
1046 | } | ||
1047 | |||
962 | /* | 1048 | /* |
963 | * Prove that the dependency graph starting at <entry> can not | 1049 | * Prove that the dependency graph starting at <entry> can not |
964 | * lead to <target>. Print an error and return 0 if it does. | 1050 | * lead to <target>. Print an error and return 0 if it does. |
@@ -968,6 +1054,9 @@ check_noncircular(struct lock_class *source, unsigned int depth) | |||
968 | { | 1054 | { |
969 | struct lock_list *entry; | 1055 | struct lock_list *entry; |
970 | 1056 | ||
1057 | if (lockdep_dependency_visit(source, depth)) | ||
1058 | return 1; | ||
1059 | |||
971 | debug_atomic_inc(&nr_cyclic_check_recursions); | 1060 | debug_atomic_inc(&nr_cyclic_check_recursions); |
972 | if (depth > max_recursion_depth) | 1061 | if (depth > max_recursion_depth) |
973 | max_recursion_depth = depth; | 1062 | max_recursion_depth = depth; |
@@ -977,7 +1066,7 @@ check_noncircular(struct lock_class *source, unsigned int depth) | |||
977 | * Check this lock's dependency list: | 1066 | * Check this lock's dependency list: |
978 | */ | 1067 | */ |
979 | list_for_each_entry(entry, &source->locks_after, entry) { | 1068 | list_for_each_entry(entry, &source->locks_after, entry) { |
980 | if (entry->class == check_target->class) | 1069 | if (entry->class == hlock_class(check_target)) |
981 | return print_circular_bug_header(entry, depth+1); | 1070 | return print_circular_bug_header(entry, depth+1); |
982 | debug_atomic_inc(&nr_cyclic_checks); | 1071 | debug_atomic_inc(&nr_cyclic_checks); |
983 | if (!check_noncircular(entry->class, depth+1)) | 1072 | if (!check_noncircular(entry->class, depth+1)) |
@@ -1011,6 +1100,9 @@ find_usage_forwards(struct lock_class *source, unsigned int depth) | |||
1011 | struct lock_list *entry; | 1100 | struct lock_list *entry; |
1012 | int ret; | 1101 | int ret; |
1013 | 1102 | ||
1103 | if (lockdep_dependency_visit(source, depth)) | ||
1104 | return 1; | ||
1105 | |||
1014 | if (depth > max_recursion_depth) | 1106 | if (depth > max_recursion_depth) |
1015 | max_recursion_depth = depth; | 1107 | max_recursion_depth = depth; |
1016 | if (depth >= RECURSION_LIMIT) | 1108 | if (depth >= RECURSION_LIMIT) |
@@ -1050,6 +1142,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth) | |||
1050 | struct lock_list *entry; | 1142 | struct lock_list *entry; |
1051 | int ret; | 1143 | int ret; |
1052 | 1144 | ||
1145 | if (lockdep_dependency_visit(source, depth)) | ||
1146 | return 1; | ||
1147 | |||
1053 | if (!__raw_spin_is_locked(&lockdep_lock)) | 1148 | if (!__raw_spin_is_locked(&lockdep_lock)) |
1054 | return DEBUG_LOCKS_WARN_ON(1); | 1149 | return DEBUG_LOCKS_WARN_ON(1); |
1055 | 1150 | ||
@@ -1064,6 +1159,11 @@ find_usage_backwards(struct lock_class *source, unsigned int depth) | |||
1064 | return 2; | 1159 | return 2; |
1065 | } | 1160 | } |
1066 | 1161 | ||
1162 | if (!source && debug_locks_off_graph_unlock()) { | ||
1163 | WARN_ON(1); | ||
1164 | return 0; | ||
1165 | } | ||
1166 | |||
1067 | /* | 1167 | /* |
1068 | * Check this lock's dependency list: | 1168 | * Check this lock's dependency list: |
1069 | */ | 1169 | */ |
@@ -1103,9 +1203,9 @@ print_bad_irq_dependency(struct task_struct *curr, | |||
1103 | printk("\nand this task is already holding:\n"); | 1203 | printk("\nand this task is already holding:\n"); |
1104 | print_lock(prev); | 1204 | print_lock(prev); |
1105 | printk("which would create a new lock dependency:\n"); | 1205 | printk("which would create a new lock dependency:\n"); |
1106 | print_lock_name(prev->class); | 1206 | print_lock_name(hlock_class(prev)); |
1107 | printk(" ->"); | 1207 | printk(" ->"); |
1108 | print_lock_name(next->class); | 1208 | print_lock_name(hlock_class(next)); |
1109 | printk("\n"); | 1209 | printk("\n"); |
1110 | 1210 | ||
1111 | printk("\nbut this new dependency connects a %s-irq-safe lock:\n", | 1211 | printk("\nbut this new dependency connects a %s-irq-safe lock:\n", |
@@ -1146,12 +1246,12 @@ check_usage(struct task_struct *curr, struct held_lock *prev, | |||
1146 | 1246 | ||
1147 | find_usage_bit = bit_backwards; | 1247 | find_usage_bit = bit_backwards; |
1148 | /* fills in <backwards_match> */ | 1248 | /* fills in <backwards_match> */ |
1149 | ret = find_usage_backwards(prev->class, 0); | 1249 | ret = find_usage_backwards(hlock_class(prev), 0); |
1150 | if (!ret || ret == 1) | 1250 | if (!ret || ret == 1) |
1151 | return ret; | 1251 | return ret; |
1152 | 1252 | ||
1153 | find_usage_bit = bit_forwards; | 1253 | find_usage_bit = bit_forwards; |
1154 | ret = find_usage_forwards(next->class, 0); | 1254 | ret = find_usage_forwards(hlock_class(next), 0); |
1155 | if (!ret || ret == 1) | 1255 | if (!ret || ret == 1) |
1156 | return ret; | 1256 | return ret; |
1157 | /* ret == 2 */ | 1257 | /* ret == 2 */ |
@@ -1272,18 +1372,32 @@ check_deadlock(struct task_struct *curr, struct held_lock *next, | |||
1272 | struct lockdep_map *next_instance, int read) | 1372 | struct lockdep_map *next_instance, int read) |
1273 | { | 1373 | { |
1274 | struct held_lock *prev; | 1374 | struct held_lock *prev; |
1375 | struct held_lock *nest = NULL; | ||
1275 | int i; | 1376 | int i; |
1276 | 1377 | ||
1277 | for (i = 0; i < curr->lockdep_depth; i++) { | 1378 | for (i = 0; i < curr->lockdep_depth; i++) { |
1278 | prev = curr->held_locks + i; | 1379 | prev = curr->held_locks + i; |
1279 | if (prev->class != next->class) | 1380 | |
1381 | if (prev->instance == next->nest_lock) | ||
1382 | nest = prev; | ||
1383 | |||
1384 | if (hlock_class(prev) != hlock_class(next)) | ||
1280 | continue; | 1385 | continue; |
1386 | |||
1281 | /* | 1387 | /* |
1282 | * Allow read-after-read recursion of the same | 1388 | * Allow read-after-read recursion of the same |
1283 | * lock class (i.e. read_lock(lock)+read_lock(lock)): | 1389 | * lock class (i.e. read_lock(lock)+read_lock(lock)): |
1284 | */ | 1390 | */ |
1285 | if ((read == 2) && prev->read) | 1391 | if ((read == 2) && prev->read) |
1286 | return 2; | 1392 | return 2; |
1393 | |||
1394 | /* | ||
1395 | * We're holding the nest_lock, which serializes this lock's | ||
1396 | * nesting behaviour. | ||
1397 | */ | ||
1398 | if (nest) | ||
1399 | return 2; | ||
1400 | |||
1287 | return print_deadlock_bug(curr, prev, next); | 1401 | return print_deadlock_bug(curr, prev, next); |
1288 | } | 1402 | } |
1289 | return 1; | 1403 | return 1; |
@@ -1329,7 +1443,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1329 | */ | 1443 | */ |
1330 | check_source = next; | 1444 | check_source = next; |
1331 | check_target = prev; | 1445 | check_target = prev; |
1332 | if (!(check_noncircular(next->class, 0))) | 1446 | if (!(check_noncircular(hlock_class(next), 0))) |
1333 | return print_circular_bug_tail(); | 1447 | return print_circular_bug_tail(); |
1334 | 1448 | ||
1335 | if (!check_prev_add_irq(curr, prev, next)) | 1449 | if (!check_prev_add_irq(curr, prev, next)) |
@@ -1353,8 +1467,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1353 | * chains - the second one will be new, but L1 already has | 1467 | * chains - the second one will be new, but L1 already has |
1354 | * L2 added to its dependency list, due to the first chain.) | 1468 | * L2 added to its dependency list, due to the first chain.) |
1355 | */ | 1469 | */ |
1356 | list_for_each_entry(entry, &prev->class->locks_after, entry) { | 1470 | list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) { |
1357 | if (entry->class == next->class) { | 1471 | if (entry->class == hlock_class(next)) { |
1358 | if (distance == 1) | 1472 | if (distance == 1) |
1359 | entry->distance = 1; | 1473 | entry->distance = 1; |
1360 | return 2; | 1474 | return 2; |
@@ -1365,26 +1479,28 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1365 | * Ok, all validations passed, add the new lock | 1479 | * Ok, all validations passed, add the new lock |
1366 | * to the previous lock's dependency list: | 1480 | * to the previous lock's dependency list: |
1367 | */ | 1481 | */ |
1368 | ret = add_lock_to_list(prev->class, next->class, | 1482 | ret = add_lock_to_list(hlock_class(prev), hlock_class(next), |
1369 | &prev->class->locks_after, next->acquire_ip, distance); | 1483 | &hlock_class(prev)->locks_after, |
1484 | next->acquire_ip, distance); | ||
1370 | 1485 | ||
1371 | if (!ret) | 1486 | if (!ret) |
1372 | return 0; | 1487 | return 0; |
1373 | 1488 | ||
1374 | ret = add_lock_to_list(next->class, prev->class, | 1489 | ret = add_lock_to_list(hlock_class(next), hlock_class(prev), |
1375 | &next->class->locks_before, next->acquire_ip, distance); | 1490 | &hlock_class(next)->locks_before, |
1491 | next->acquire_ip, distance); | ||
1376 | if (!ret) | 1492 | if (!ret) |
1377 | return 0; | 1493 | return 0; |
1378 | 1494 | ||
1379 | /* | 1495 | /* |
1380 | * Debugging printouts: | 1496 | * Debugging printouts: |
1381 | */ | 1497 | */ |
1382 | if (verbose(prev->class) || verbose(next->class)) { | 1498 | if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) { |
1383 | graph_unlock(); | 1499 | graph_unlock(); |
1384 | printk("\n new dependency: "); | 1500 | printk("\n new dependency: "); |
1385 | print_lock_name(prev->class); | 1501 | print_lock_name(hlock_class(prev)); |
1386 | printk(" => "); | 1502 | printk(" => "); |
1387 | print_lock_name(next->class); | 1503 | print_lock_name(hlock_class(next)); |
1388 | printk("\n"); | 1504 | printk("\n"); |
1389 | dump_stack(); | 1505 | dump_stack(); |
1390 | return graph_lock(); | 1506 | return graph_lock(); |
@@ -1481,7 +1597,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, | |||
1481 | struct held_lock *hlock, | 1597 | struct held_lock *hlock, |
1482 | u64 chain_key) | 1598 | u64 chain_key) |
1483 | { | 1599 | { |
1484 | struct lock_class *class = hlock->class; | 1600 | struct lock_class *class = hlock_class(hlock); |
1485 | struct list_head *hash_head = chainhashentry(chain_key); | 1601 | struct list_head *hash_head = chainhashentry(chain_key); |
1486 | struct lock_chain *chain; | 1602 | struct lock_chain *chain; |
1487 | struct held_lock *hlock_curr, *hlock_next; | 1603 | struct held_lock *hlock_curr, *hlock_next; |
@@ -1554,7 +1670,7 @@ cache_hit: | |||
1554 | if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { | 1670 | if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { |
1555 | chain->base = cn; | 1671 | chain->base = cn; |
1556 | for (j = 0; j < chain->depth - 1; j++, i++) { | 1672 | for (j = 0; j < chain->depth - 1; j++, i++) { |
1557 | int lock_id = curr->held_locks[i].class - lock_classes; | 1673 | int lock_id = curr->held_locks[i].class_idx - 1; |
1558 | chain_hlocks[chain->base + j] = lock_id; | 1674 | chain_hlocks[chain->base + j] = lock_id; |
1559 | } | 1675 | } |
1560 | chain_hlocks[chain->base + j] = class - lock_classes; | 1676 | chain_hlocks[chain->base + j] = class - lock_classes; |
@@ -1650,7 +1766,7 @@ static void check_chain_key(struct task_struct *curr) | |||
1650 | WARN_ON(1); | 1766 | WARN_ON(1); |
1651 | return; | 1767 | return; |
1652 | } | 1768 | } |
1653 | id = hlock->class - lock_classes; | 1769 | id = hlock->class_idx - 1; |
1654 | if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) | 1770 | if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) |
1655 | return; | 1771 | return; |
1656 | 1772 | ||
@@ -1695,7 +1811,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, | |||
1695 | print_lock(this); | 1811 | print_lock(this); |
1696 | 1812 | ||
1697 | printk("{%s} state was registered at:\n", usage_str[prev_bit]); | 1813 | printk("{%s} state was registered at:\n", usage_str[prev_bit]); |
1698 | print_stack_trace(this->class->usage_traces + prev_bit, 1); | 1814 | print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1); |
1699 | 1815 | ||
1700 | print_irqtrace_events(curr); | 1816 | print_irqtrace_events(curr); |
1701 | printk("\nother info that might help us debug this:\n"); | 1817 | printk("\nother info that might help us debug this:\n"); |
@@ -1714,7 +1830,7 @@ static inline int | |||
1714 | valid_state(struct task_struct *curr, struct held_lock *this, | 1830 | valid_state(struct task_struct *curr, struct held_lock *this, |
1715 | enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) | 1831 | enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) |
1716 | { | 1832 | { |
1717 | if (unlikely(this->class->usage_mask & (1 << bad_bit))) | 1833 | if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) |
1718 | return print_usage_bug(curr, this, bad_bit, new_bit); | 1834 | return print_usage_bug(curr, this, bad_bit, new_bit); |
1719 | return 1; | 1835 | return 1; |
1720 | } | 1836 | } |
@@ -1753,7 +1869,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, | |||
1753 | lockdep_print_held_locks(curr); | 1869 | lockdep_print_held_locks(curr); |
1754 | 1870 | ||
1755 | printk("\nthe first lock's dependencies:\n"); | 1871 | printk("\nthe first lock's dependencies:\n"); |
1756 | print_lock_dependencies(this->class, 0); | 1872 | print_lock_dependencies(hlock_class(this), 0); |
1757 | 1873 | ||
1758 | printk("\nthe second lock's dependencies:\n"); | 1874 | printk("\nthe second lock's dependencies:\n"); |
1759 | print_lock_dependencies(other, 0); | 1875 | print_lock_dependencies(other, 0); |
@@ -1776,7 +1892,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this, | |||
1776 | 1892 | ||
1777 | find_usage_bit = bit; | 1893 | find_usage_bit = bit; |
1778 | /* fills in <forwards_match> */ | 1894 | /* fills in <forwards_match> */ |
1779 | ret = find_usage_forwards(this->class, 0); | 1895 | ret = find_usage_forwards(hlock_class(this), 0); |
1780 | if (!ret || ret == 1) | 1896 | if (!ret || ret == 1) |
1781 | return ret; | 1897 | return ret; |
1782 | 1898 | ||
@@ -1795,7 +1911,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this, | |||
1795 | 1911 | ||
1796 | find_usage_bit = bit; | 1912 | find_usage_bit = bit; |
1797 | /* fills in <backwards_match> */ | 1913 | /* fills in <backwards_match> */ |
1798 | ret = find_usage_backwards(this->class, 0); | 1914 | ret = find_usage_backwards(hlock_class(this), 0); |
1799 | if (!ret || ret == 1) | 1915 | if (!ret || ret == 1) |
1800 | return ret; | 1916 | return ret; |
1801 | 1917 | ||
@@ -1861,7 +1977,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1861 | LOCK_ENABLED_HARDIRQS_READ, "hard-read")) | 1977 | LOCK_ENABLED_HARDIRQS_READ, "hard-read")) |
1862 | return 0; | 1978 | return 0; |
1863 | #endif | 1979 | #endif |
1864 | if (hardirq_verbose(this->class)) | 1980 | if (hardirq_verbose(hlock_class(this))) |
1865 | ret = 2; | 1981 | ret = 2; |
1866 | break; | 1982 | break; |
1867 | case LOCK_USED_IN_SOFTIRQ: | 1983 | case LOCK_USED_IN_SOFTIRQ: |
@@ -1886,7 +2002,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1886 | LOCK_ENABLED_SOFTIRQS_READ, "soft-read")) | 2002 | LOCK_ENABLED_SOFTIRQS_READ, "soft-read")) |
1887 | return 0; | 2003 | return 0; |
1888 | #endif | 2004 | #endif |
1889 | if (softirq_verbose(this->class)) | 2005 | if (softirq_verbose(hlock_class(this))) |
1890 | ret = 2; | 2006 | ret = 2; |
1891 | break; | 2007 | break; |
1892 | case LOCK_USED_IN_HARDIRQ_READ: | 2008 | case LOCK_USED_IN_HARDIRQ_READ: |
@@ -1899,7 +2015,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1899 | if (!check_usage_forwards(curr, this, | 2015 | if (!check_usage_forwards(curr, this, |
1900 | LOCK_ENABLED_HARDIRQS, "hard")) | 2016 | LOCK_ENABLED_HARDIRQS, "hard")) |
1901 | return 0; | 2017 | return 0; |
1902 | if (hardirq_verbose(this->class)) | 2018 | if (hardirq_verbose(hlock_class(this))) |
1903 | ret = 2; | 2019 | ret = 2; |
1904 | break; | 2020 | break; |
1905 | case LOCK_USED_IN_SOFTIRQ_READ: | 2021 | case LOCK_USED_IN_SOFTIRQ_READ: |
@@ -1912,7 +2028,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1912 | if (!check_usage_forwards(curr, this, | 2028 | if (!check_usage_forwards(curr, this, |
1913 | LOCK_ENABLED_SOFTIRQS, "soft")) | 2029 | LOCK_ENABLED_SOFTIRQS, "soft")) |
1914 | return 0; | 2030 | return 0; |
1915 | if (softirq_verbose(this->class)) | 2031 | if (softirq_verbose(hlock_class(this))) |
1916 | ret = 2; | 2032 | ret = 2; |
1917 | break; | 2033 | break; |
1918 | case LOCK_ENABLED_HARDIRQS: | 2034 | case LOCK_ENABLED_HARDIRQS: |
@@ -1938,7 +2054,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1938 | LOCK_USED_IN_HARDIRQ_READ, "hard-read")) | 2054 | LOCK_USED_IN_HARDIRQ_READ, "hard-read")) |
1939 | return 0; | 2055 | return 0; |
1940 | #endif | 2056 | #endif |
1941 | if (hardirq_verbose(this->class)) | 2057 | if (hardirq_verbose(hlock_class(this))) |
1942 | ret = 2; | 2058 | ret = 2; |
1943 | break; | 2059 | break; |
1944 | case LOCK_ENABLED_SOFTIRQS: | 2060 | case LOCK_ENABLED_SOFTIRQS: |
@@ -1964,7 +2080,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1964 | LOCK_USED_IN_SOFTIRQ_READ, "soft-read")) | 2080 | LOCK_USED_IN_SOFTIRQ_READ, "soft-read")) |
1965 | return 0; | 2081 | return 0; |
1966 | #endif | 2082 | #endif |
1967 | if (softirq_verbose(this->class)) | 2083 | if (softirq_verbose(hlock_class(this))) |
1968 | ret = 2; | 2084 | ret = 2; |
1969 | break; | 2085 | break; |
1970 | case LOCK_ENABLED_HARDIRQS_READ: | 2086 | case LOCK_ENABLED_HARDIRQS_READ: |
@@ -1979,7 +2095,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1979 | LOCK_USED_IN_HARDIRQ, "hard")) | 2095 | LOCK_USED_IN_HARDIRQ, "hard")) |
1980 | return 0; | 2096 | return 0; |
1981 | #endif | 2097 | #endif |
1982 | if (hardirq_verbose(this->class)) | 2098 | if (hardirq_verbose(hlock_class(this))) |
1983 | ret = 2; | 2099 | ret = 2; |
1984 | break; | 2100 | break; |
1985 | case LOCK_ENABLED_SOFTIRQS_READ: | 2101 | case LOCK_ENABLED_SOFTIRQS_READ: |
@@ -1994,7 +2110,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1994 | LOCK_USED_IN_SOFTIRQ, "soft")) | 2110 | LOCK_USED_IN_SOFTIRQ, "soft")) |
1995 | return 0; | 2111 | return 0; |
1996 | #endif | 2112 | #endif |
1997 | if (softirq_verbose(this->class)) | 2113 | if (softirq_verbose(hlock_class(this))) |
1998 | ret = 2; | 2114 | ret = 2; |
1999 | break; | 2115 | break; |
2000 | default: | 2116 | default: |
@@ -2310,7 +2426,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, | |||
2310 | * If already set then do not dirty the cacheline, | 2426 | * If already set then do not dirty the cacheline, |
2311 | * nor do any checks: | 2427 | * nor do any checks: |
2312 | */ | 2428 | */ |
2313 | if (likely(this->class->usage_mask & new_mask)) | 2429 | if (likely(hlock_class(this)->usage_mask & new_mask)) |
2314 | return 1; | 2430 | return 1; |
2315 | 2431 | ||
2316 | if (!graph_lock()) | 2432 | if (!graph_lock()) |
@@ -2318,14 +2434,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, | |||
2318 | /* | 2434 | /* |
2319 | * Make sure we didnt race: | 2435 | * Make sure we didnt race: |
2320 | */ | 2436 | */ |
2321 | if (unlikely(this->class->usage_mask & new_mask)) { | 2437 | if (unlikely(hlock_class(this)->usage_mask & new_mask)) { |
2322 | graph_unlock(); | 2438 | graph_unlock(); |
2323 | return 1; | 2439 | return 1; |
2324 | } | 2440 | } |
2325 | 2441 | ||
2326 | this->class->usage_mask |= new_mask; | 2442 | hlock_class(this)->usage_mask |= new_mask; |
2327 | 2443 | ||
2328 | if (!save_trace(this->class->usage_traces + new_bit)) | 2444 | if (!save_trace(hlock_class(this)->usage_traces + new_bit)) |
2329 | return 0; | 2445 | return 0; |
2330 | 2446 | ||
2331 | switch (new_bit) { | 2447 | switch (new_bit) { |
@@ -2405,7 +2521,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map); | |||
2405 | */ | 2521 | */ |
2406 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | 2522 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
2407 | int trylock, int read, int check, int hardirqs_off, | 2523 | int trylock, int read, int check, int hardirqs_off, |
2408 | unsigned long ip) | 2524 | struct lockdep_map *nest_lock, unsigned long ip) |
2409 | { | 2525 | { |
2410 | struct task_struct *curr = current; | 2526 | struct task_struct *curr = current; |
2411 | struct lock_class *class = NULL; | 2527 | struct lock_class *class = NULL; |
@@ -2459,10 +2575,12 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2459 | return 0; | 2575 | return 0; |
2460 | 2576 | ||
2461 | hlock = curr->held_locks + depth; | 2577 | hlock = curr->held_locks + depth; |
2462 | 2578 | if (DEBUG_LOCKS_WARN_ON(!class)) | |
2463 | hlock->class = class; | 2579 | return 0; |
2580 | hlock->class_idx = class - lock_classes + 1; | ||
2464 | hlock->acquire_ip = ip; | 2581 | hlock->acquire_ip = ip; |
2465 | hlock->instance = lock; | 2582 | hlock->instance = lock; |
2583 | hlock->nest_lock = nest_lock; | ||
2466 | hlock->trylock = trylock; | 2584 | hlock->trylock = trylock; |
2467 | hlock->read = read; | 2585 | hlock->read = read; |
2468 | hlock->check = check; | 2586 | hlock->check = check; |
@@ -2574,6 +2692,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, | |||
2574 | return 1; | 2692 | return 1; |
2575 | } | 2693 | } |
2576 | 2694 | ||
2695 | static int | ||
2696 | __lock_set_subclass(struct lockdep_map *lock, | ||
2697 | unsigned int subclass, unsigned long ip) | ||
2698 | { | ||
2699 | struct task_struct *curr = current; | ||
2700 | struct held_lock *hlock, *prev_hlock; | ||
2701 | struct lock_class *class; | ||
2702 | unsigned int depth; | ||
2703 | int i; | ||
2704 | |||
2705 | depth = curr->lockdep_depth; | ||
2706 | if (DEBUG_LOCKS_WARN_ON(!depth)) | ||
2707 | return 0; | ||
2708 | |||
2709 | prev_hlock = NULL; | ||
2710 | for (i = depth-1; i >= 0; i--) { | ||
2711 | hlock = curr->held_locks + i; | ||
2712 | /* | ||
2713 | * We must not cross into another context: | ||
2714 | */ | ||
2715 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) | ||
2716 | break; | ||
2717 | if (hlock->instance == lock) | ||
2718 | goto found_it; | ||
2719 | prev_hlock = hlock; | ||
2720 | } | ||
2721 | return print_unlock_inbalance_bug(curr, lock, ip); | ||
2722 | |||
2723 | found_it: | ||
2724 | class = register_lock_class(lock, subclass, 0); | ||
2725 | hlock->class_idx = class - lock_classes + 1; | ||
2726 | |||
2727 | curr->lockdep_depth = i; | ||
2728 | curr->curr_chain_key = hlock->prev_chain_key; | ||
2729 | |||
2730 | for (; i < depth; i++) { | ||
2731 | hlock = curr->held_locks + i; | ||
2732 | if (!__lock_acquire(hlock->instance, | ||
2733 | hlock_class(hlock)->subclass, hlock->trylock, | ||
2734 | hlock->read, hlock->check, hlock->hardirqs_off, | ||
2735 | hlock->nest_lock, hlock->acquire_ip)) | ||
2736 | return 0; | ||
2737 | } | ||
2738 | |||
2739 | if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) | ||
2740 | return 0; | ||
2741 | return 1; | ||
2742 | } | ||
2743 | |||
2577 | /* | 2744 | /* |
2578 | * Remove the lock to the list of currently held locks in a | 2745 | * Remove the lock to the list of currently held locks in a |
2579 | * potentially non-nested (out of order) manner. This is a | 2746 | * potentially non-nested (out of order) manner. This is a |
@@ -2624,9 +2791,9 @@ found_it: | |||
2624 | for (i++; i < depth; i++) { | 2791 | for (i++; i < depth; i++) { |
2625 | hlock = curr->held_locks + i; | 2792 | hlock = curr->held_locks + i; |
2626 | if (!__lock_acquire(hlock->instance, | 2793 | if (!__lock_acquire(hlock->instance, |
2627 | hlock->class->subclass, hlock->trylock, | 2794 | hlock_class(hlock)->subclass, hlock->trylock, |
2628 | hlock->read, hlock->check, hlock->hardirqs_off, | 2795 | hlock->read, hlock->check, hlock->hardirqs_off, |
2629 | hlock->acquire_ip)) | 2796 | hlock->nest_lock, hlock->acquire_ip)) |
2630 | return 0; | 2797 | return 0; |
2631 | } | 2798 | } |
2632 | 2799 | ||
@@ -2669,7 +2836,7 @@ static int lock_release_nested(struct task_struct *curr, | |||
2669 | 2836 | ||
2670 | #ifdef CONFIG_DEBUG_LOCKDEP | 2837 | #ifdef CONFIG_DEBUG_LOCKDEP |
2671 | hlock->prev_chain_key = 0; | 2838 | hlock->prev_chain_key = 0; |
2672 | hlock->class = NULL; | 2839 | hlock->class_idx = 0; |
2673 | hlock->acquire_ip = 0; | 2840 | hlock->acquire_ip = 0; |
2674 | hlock->irq_context = 0; | 2841 | hlock->irq_context = 0; |
2675 | #endif | 2842 | #endif |
@@ -2738,18 +2905,36 @@ static void check_flags(unsigned long flags) | |||
2738 | #endif | 2905 | #endif |
2739 | } | 2906 | } |
2740 | 2907 | ||
2908 | void | ||
2909 | lock_set_subclass(struct lockdep_map *lock, | ||
2910 | unsigned int subclass, unsigned long ip) | ||
2911 | { | ||
2912 | unsigned long flags; | ||
2913 | |||
2914 | if (unlikely(current->lockdep_recursion)) | ||
2915 | return; | ||
2916 | |||
2917 | raw_local_irq_save(flags); | ||
2918 | current->lockdep_recursion = 1; | ||
2919 | check_flags(flags); | ||
2920 | if (__lock_set_subclass(lock, subclass, ip)) | ||
2921 | check_chain_key(current); | ||
2922 | current->lockdep_recursion = 0; | ||
2923 | raw_local_irq_restore(flags); | ||
2924 | } | ||
2925 | |||
2926 | EXPORT_SYMBOL_GPL(lock_set_subclass); | ||
2927 | |||
2741 | /* | 2928 | /* |
2742 | * We are not always called with irqs disabled - do that here, | 2929 | * We are not always called with irqs disabled - do that here, |
2743 | * and also avoid lockdep recursion: | 2930 | * and also avoid lockdep recursion: |
2744 | */ | 2931 | */ |
2745 | void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | 2932 | void lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
2746 | int trylock, int read, int check, unsigned long ip) | 2933 | int trylock, int read, int check, |
2934 | struct lockdep_map *nest_lock, unsigned long ip) | ||
2747 | { | 2935 | { |
2748 | unsigned long flags; | 2936 | unsigned long flags; |
2749 | 2937 | ||
2750 | if (unlikely(!lock_stat && !prove_locking)) | ||
2751 | return; | ||
2752 | |||
2753 | if (unlikely(current->lockdep_recursion)) | 2938 | if (unlikely(current->lockdep_recursion)) |
2754 | return; | 2939 | return; |
2755 | 2940 | ||
@@ -2758,7 +2943,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2758 | 2943 | ||
2759 | current->lockdep_recursion = 1; | 2944 | current->lockdep_recursion = 1; |
2760 | __lock_acquire(lock, subclass, trylock, read, check, | 2945 | __lock_acquire(lock, subclass, trylock, read, check, |
2761 | irqs_disabled_flags(flags), ip); | 2946 | irqs_disabled_flags(flags), nest_lock, ip); |
2762 | current->lockdep_recursion = 0; | 2947 | current->lockdep_recursion = 0; |
2763 | raw_local_irq_restore(flags); | 2948 | raw_local_irq_restore(flags); |
2764 | } | 2949 | } |
@@ -2770,9 +2955,6 @@ void lock_release(struct lockdep_map *lock, int nested, | |||
2770 | { | 2955 | { |
2771 | unsigned long flags; | 2956 | unsigned long flags; |
2772 | 2957 | ||
2773 | if (unlikely(!lock_stat && !prove_locking)) | ||
2774 | return; | ||
2775 | |||
2776 | if (unlikely(current->lockdep_recursion)) | 2958 | if (unlikely(current->lockdep_recursion)) |
2777 | return; | 2959 | return; |
2778 | 2960 | ||
@@ -2845,9 +3027,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip) | |||
2845 | found_it: | 3027 | found_it: |
2846 | hlock->waittime_stamp = sched_clock(); | 3028 | hlock->waittime_stamp = sched_clock(); |
2847 | 3029 | ||
2848 | point = lock_contention_point(hlock->class, ip); | 3030 | point = lock_contention_point(hlock_class(hlock), ip); |
2849 | 3031 | ||
2850 | stats = get_lock_stats(hlock->class); | 3032 | stats = get_lock_stats(hlock_class(hlock)); |
2851 | if (point < ARRAY_SIZE(stats->contention_point)) | 3033 | if (point < ARRAY_SIZE(stats->contention_point)) |
2852 | stats->contention_point[i]++; | 3034 | stats->contention_point[i]++; |
2853 | if (lock->cpu != smp_processor_id()) | 3035 | if (lock->cpu != smp_processor_id()) |
@@ -2893,7 +3075,7 @@ found_it: | |||
2893 | hlock->holdtime_stamp = now; | 3075 | hlock->holdtime_stamp = now; |
2894 | } | 3076 | } |
2895 | 3077 | ||
2896 | stats = get_lock_stats(hlock->class); | 3078 | stats = get_lock_stats(hlock_class(hlock)); |
2897 | if (waittime) { | 3079 | if (waittime) { |
2898 | if (hlock->read) | 3080 | if (hlock->read) |
2899 | lock_time_inc(&stats->read_waittime, waittime); | 3081 | lock_time_inc(&stats->read_waittime, waittime); |
@@ -2988,6 +3170,7 @@ static void zap_class(struct lock_class *class) | |||
2988 | list_del_rcu(&class->hash_entry); | 3170 | list_del_rcu(&class->hash_entry); |
2989 | list_del_rcu(&class->lock_entry); | 3171 | list_del_rcu(&class->lock_entry); |
2990 | 3172 | ||
3173 | class->key = NULL; | ||
2991 | } | 3174 | } |
2992 | 3175 | ||
2993 | static inline int within(const void *addr, void *start, unsigned long size) | 3176 | static inline int within(const void *addr, void *start, unsigned long size) |
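The `dep_gen_id` field and `lockdep_dependency_visit()` added above are the "fix combinatorial explosion in lock subgraph traversal" change: instead of clearing a visited flag on every class before each walk, one global generation counter is bumped per traversal and compared per node, so each class is expanded at most once. A self-contained sketch of the idea outside the kernel (the `node` type and helpers are illustrative, not kernel code):

```c
#include <stdio.h>

struct node {
	unsigned int dep_gen_id;	/* generation this node was last visited in */
	struct node *next[2];
	int nr_next;
};

static unsigned int dep_gen_id;

static int visited(struct node *n, unsigned int depth)
{
	if (!depth)
		dep_gen_id++;		/* new traversal, new generation */
	if (n->dep_gen_id == dep_gen_id)
		return 1;		/* already counted in this traversal */
	n->dep_gen_id = dep_gen_id;
	return 0;
}

/* counts each reachable node exactly once, like __lockdep_count_forward_deps() */
static unsigned long count_deps(struct node *n, unsigned int depth)
{
	unsigned long ret = 1;
	int i;

	if (visited(n, depth))
		return 0;
	for (i = 0; i < n->nr_next; i++)
		ret += count_deps(n->next[i], depth + 1);
	return ret;
}

int main(void)
{
	struct node c = { 0, { NULL, NULL }, 0 };
	struct node a = { 0, { &c, NULL }, 1 };
	struct node b = { 0, { &c, NULL }, 1 };
	struct node root = { 0, { &a, &b }, 2 };

	/* c is reachable via both a and b but counted once: prints 4 */
	printf("%lu\n", count_deps(&root, 0));
	return 0;
}
```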
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index c3600a091a28..55db193d366d 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -17,9 +17,6 @@ | |||
17 | */ | 17 | */ |
18 | #define MAX_LOCKDEP_ENTRIES 8192UL | 18 | #define MAX_LOCKDEP_ENTRIES 8192UL |
19 | 19 | ||
20 | #define MAX_LOCKDEP_KEYS_BITS 11 | ||
21 | #define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) | ||
22 | |||
23 | #define MAX_LOCKDEP_CHAINS_BITS 14 | 20 | #define MAX_LOCKDEP_CHAINS_BITS 14 |
24 | #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) | 21 | #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) |
25 | 22 | ||
@@ -53,6 +50,9 @@ extern unsigned int nr_process_chains; | |||
53 | extern unsigned int max_lockdep_depth; | 50 | extern unsigned int max_lockdep_depth; |
54 | extern unsigned int max_recursion_depth; | 51 | extern unsigned int max_recursion_depth; |
55 | 52 | ||
53 | extern unsigned long lockdep_count_forward_deps(struct lock_class *); | ||
54 | extern unsigned long lockdep_count_backward_deps(struct lock_class *); | ||
55 | |||
56 | #ifdef CONFIG_DEBUG_LOCKDEP | 56 | #ifdef CONFIG_DEBUG_LOCKDEP |
57 | /* | 57 | /* |
58 | * Various lockdep statistics: | 58 | * Various lockdep statistics: |
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 9b0e940e2545..fa19aee604c2 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -63,34 +63,6 @@ static void l_stop(struct seq_file *m, void *v) | |||
63 | { | 63 | { |
64 | } | 64 | } |
65 | 65 | ||
66 | static unsigned long count_forward_deps(struct lock_class *class) | ||
67 | { | ||
68 | struct lock_list *entry; | ||
69 | unsigned long ret = 1; | ||
70 | |||
71 | /* | ||
72 | * Recurse this class's dependency list: | ||
73 | */ | ||
74 | list_for_each_entry(entry, &class->locks_after, entry) | ||
75 | ret += count_forward_deps(entry->class); | ||
76 | |||
77 | return ret; | ||
78 | } | ||
79 | |||
80 | static unsigned long count_backward_deps(struct lock_class *class) | ||
81 | { | ||
82 | struct lock_list *entry; | ||
83 | unsigned long ret = 1; | ||
84 | |||
85 | /* | ||
86 | * Recurse this class's dependency list: | ||
87 | */ | ||
88 | list_for_each_entry(entry, &class->locks_before, entry) | ||
89 | ret += count_backward_deps(entry->class); | ||
90 | |||
91 | return ret; | ||
92 | } | ||
93 | |||
94 | static void print_name(struct seq_file *m, struct lock_class *class) | 66 | static void print_name(struct seq_file *m, struct lock_class *class) |
95 | { | 67 | { |
96 | char str[128]; | 68 | char str[128]; |
@@ -124,10 +96,10 @@ static int l_show(struct seq_file *m, void *v) | |||
124 | #ifdef CONFIG_DEBUG_LOCKDEP | 96 | #ifdef CONFIG_DEBUG_LOCKDEP |
125 | seq_printf(m, " OPS:%8ld", class->ops); | 97 | seq_printf(m, " OPS:%8ld", class->ops); |
126 | #endif | 98 | #endif |
127 | nr_forward_deps = count_forward_deps(class); | 99 | nr_forward_deps = lockdep_count_forward_deps(class); |
128 | seq_printf(m, " FD:%5ld", nr_forward_deps); | 100 | seq_printf(m, " FD:%5ld", nr_forward_deps); |
129 | 101 | ||
130 | nr_backward_deps = count_backward_deps(class); | 102 | nr_backward_deps = lockdep_count_backward_deps(class); |
131 | seq_printf(m, " BD:%5ld", nr_backward_deps); | 103 | seq_printf(m, " BD:%5ld", nr_backward_deps); |
132 | 104 | ||
133 | get_usage_chars(class, &c1, &c2, &c3, &c4); | 105 | get_usage_chars(class, &c1, &c2, &c3, &c4); |
@@ -229,6 +201,9 @@ static int lc_show(struct seq_file *m, void *v) | |||
229 | 201 | ||
230 | for (i = 0; i < chain->depth; i++) { | 202 | for (i = 0; i < chain->depth; i++) { |
231 | class = lock_chain_get_class(chain, i); | 203 | class = lock_chain_get_class(chain, i); |
204 | if (!class->key) | ||
205 | continue; | ||
206 | |||
232 | seq_printf(m, "[%p] ", class->key); | 207 | seq_printf(m, "[%p] ", class->key); |
233 | print_name(m, class); | 208 | print_name(m, class); |
234 | seq_puts(m, "\n"); | 209 | seq_puts(m, "\n"); |
@@ -350,7 +325,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v) | |||
350 | if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) | 325 | if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) |
351 | nr_hardirq_read_unsafe++; | 326 | nr_hardirq_read_unsafe++; |
352 | 327 | ||
353 | sum_forward_deps += count_forward_deps(class); | 328 | sum_forward_deps += lockdep_count_forward_deps(class); |
354 | } | 329 | } |
355 | #ifdef CONFIG_DEBUG_LOCKDEP | 330 | #ifdef CONFIG_DEBUG_LOCKDEP |
356 | DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused); | 331 | DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused); |
diff --git a/kernel/sched.c b/kernel/sched.c
index 04160d277e7a..ace566bdfc68 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -600,7 +600,6 @@ struct rq { | |||
600 | /* BKL stats */ | 600 | /* BKL stats */ |
601 | unsigned int bkl_count; | 601 | unsigned int bkl_count; |
602 | #endif | 602 | #endif |
603 | struct lock_class_key rq_lock_key; | ||
604 | }; | 603 | }; |
605 | 604 | ||
606 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); | 605 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); |
@@ -2759,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2) | |||
2759 | } else { | 2758 | } else { |
2760 | if (rq1 < rq2) { | 2759 | if (rq1 < rq2) { |
2761 | spin_lock(&rq1->lock); | 2760 | spin_lock(&rq1->lock); |
2762 | spin_lock(&rq2->lock); | 2761 | spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); |
2763 | } else { | 2762 | } else { |
2764 | spin_lock(&rq2->lock); | 2763 | spin_lock(&rq2->lock); |
2765 | spin_lock(&rq1->lock); | 2764 | spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); |
2766 | } | 2765 | } |
2767 | } | 2766 | } |
2768 | update_rq_clock(rq1); | 2767 | update_rq_clock(rq1); |
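With the per-runqueue rq_lock_key and its lockdep_set_class() call removed (the struct rq hunk above and the sched_init() hunk further down), every rq->lock now falls into a single lock class, so the second acquisition in double_rq_lock() would look like a self-deadlock to lockdep. The strict address ordering already rules that out, and spin_lock_nested(..., SINGLE_DEPTH_NESTING) is how that is communicated to the checker. The same pattern for any two same-class locks, with purely illustrative names:

#include <linux/spinlock.h>

struct myobj {                          /* hypothetical object whose locks share one class */
        spinlock_t lock;
};

/* Lock two objects of one lock class in address order; the inner lock is
 * annotated as a nested acquisition so lockdep accepts the second one. */
static void myobj_double_lock(struct myobj *a, struct myobj *b)
{
        if (a == b) {
                spin_lock(&a->lock);
                return;
        }
        if (a < b) {
                spin_lock(&a->lock);
                spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock(&b->lock);
                spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
        }
}

static void myobj_double_unlock(struct myobj *a, struct myobj *b)
{
        spin_unlock(&a->lock);
        if (a != b)
                spin_unlock(&b->lock);
}

The consistent address order is what actually prevents the deadlock; the annotation only documents that fact to lockdep.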
@@ -2805,14 +2804,21 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | |||
2805 | if (busiest < this_rq) { | 2804 | if (busiest < this_rq) { |
2806 | spin_unlock(&this_rq->lock); | 2805 | spin_unlock(&this_rq->lock); |
2807 | spin_lock(&busiest->lock); | 2806 | spin_lock(&busiest->lock); |
2808 | spin_lock(&this_rq->lock); | 2807 | spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); |
2809 | ret = 1; | 2808 | ret = 1; |
2810 | } else | 2809 | } else |
2811 | spin_lock(&busiest->lock); | 2810 | spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); |
2812 | } | 2811 | } |
2813 | return ret; | 2812 | return ret; |
2814 | } | 2813 | } |
2815 | 2814 | ||
2815 | static void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | ||
2816 | __releases(busiest->lock) | ||
2817 | { | ||
2818 | spin_unlock(&busiest->lock); | ||
2819 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | ||
2820 | } | ||
2821 | |||
2816 | /* | 2822 | /* |
2817 | * If dest_cpu is allowed for this process, migrate the task to it. | 2823 | * If dest_cpu is allowed for this process, migrate the task to it. |
2818 | * This is accomplished by forcing the cpu_allowed mask to only | 2824 | * This is accomplished by forcing the cpu_allowed mask to only |
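double_lock_balance() may drop this_rq->lock and re-take it as the nested (SINGLE_DEPTH_NESTING) instance when the lock order has to be inverted. The new double_unlock_balance() pairs with it: after releasing busiest->lock it uses lock_set_subclass() to mark this_rq->lock as an ordinary subclass-0 hold again, since it is once more the only runqueue lock held. A hedged sketch of the same pairing for a generic pair of same-class spinlocks; the node_*() names are made up:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct node {
        spinlock_t lock;
};

/* Caller holds a->lock and wants b->lock too; may have to drop and
 * re-take a->lock (as the nested instance) to respect address order.
 * Returns 1 if a->lock was dropped on the way. */
static int node_lock_second(struct node *a, struct node *b)
{
        int dropped = 0;

        if (!spin_trylock(&b->lock)) {
                if (b < a) {
                        spin_unlock(&a->lock);
                        spin_lock(&b->lock);
                        spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
                        dropped = 1;
                } else {
                        spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
                }
        }
        return dropped;
}

/* Drop b->lock and reset a->lock to subclass 0: it is again the only
 * lock of its class held, so later nested acquisitions under it keep
 * validating correctly. */
static void node_unlock_second(struct node *a, struct node *b)
{
        spin_unlock(&b->lock);
        lock_set_subclass(&a->lock.dep_map, 0, _RET_IP_);
}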
@@ -3637,7 +3643,7 @@ redo: | |||
3637 | ld_moved = move_tasks(this_rq, this_cpu, busiest, | 3643 | ld_moved = move_tasks(this_rq, this_cpu, busiest, |
3638 | imbalance, sd, CPU_NEWLY_IDLE, | 3644 | imbalance, sd, CPU_NEWLY_IDLE, |
3639 | &all_pinned); | 3645 | &all_pinned); |
3640 | spin_unlock(&busiest->lock); | 3646 | double_unlock_balance(this_rq, busiest); |
3641 | 3647 | ||
3642 | if (unlikely(all_pinned)) { | 3648 | if (unlikely(all_pinned)) { |
3643 | cpu_clear(cpu_of(busiest), *cpus); | 3649 | cpu_clear(cpu_of(busiest), *cpus); |
@@ -3752,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
3752 | else | 3758 | else |
3753 | schedstat_inc(sd, alb_failed); | 3759 | schedstat_inc(sd, alb_failed); |
3754 | } | 3760 | } |
3755 | spin_unlock(&target_rq->lock); | 3761 | double_unlock_balance(busiest_rq, target_rq); |
3756 | } | 3762 | } |
3757 | 3763 | ||
3758 | #ifdef CONFIG_NO_HZ | 3764 | #ifdef CONFIG_NO_HZ |
@@ -8000,7 +8006,6 @@ void __init sched_init(void) | |||
8000 | 8006 | ||
8001 | rq = cpu_rq(i); | 8007 | rq = cpu_rq(i); |
8002 | spin_lock_init(&rq->lock); | 8008 | spin_lock_init(&rq->lock); |
8003 | lockdep_set_class(&rq->lock, &rq->rq_lock_key); | ||
8004 | rq->nr_running = 0; | 8009 | rq->nr_running = 0; |
8005 | init_cfs_rq(&rq->cfs, rq); | 8010 | init_cfs_rq(&rq->cfs, rq); |
8006 | init_rt_rq(&rq->rt, rq); | 8011 | init_rt_rq(&rq->rt, rq); |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 908c04f9dad0..6163e4cf885b 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -861,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) | |||
861 | #define RT_MAX_TRIES 3 | 861 | #define RT_MAX_TRIES 3 |
862 | 862 | ||
863 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest); | 863 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest); |
864 | static void double_unlock_balance(struct rq *this_rq, struct rq *busiest); | ||
865 | |||
864 | static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); | 866 | static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); |
865 | 867 | ||
866 | static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) | 868 | static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) |
@@ -1022,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) | |||
1022 | break; | 1024 | break; |
1023 | 1025 | ||
1024 | /* try again */ | 1026 | /* try again */ |
1025 | spin_unlock(&lowest_rq->lock); | 1027 | double_unlock_balance(rq, lowest_rq); |
1026 | lowest_rq = NULL; | 1028 | lowest_rq = NULL; |
1027 | } | 1029 | } |
1028 | 1030 | ||
@@ -1091,7 +1093,7 @@ static int push_rt_task(struct rq *rq) | |||
1091 | 1093 | ||
1092 | resched_task(lowest_rq->curr); | 1094 | resched_task(lowest_rq->curr); |
1093 | 1095 | ||
1094 | spin_unlock(&lowest_rq->lock); | 1096 | double_unlock_balance(rq, lowest_rq); |
1095 | 1097 | ||
1096 | ret = 1; | 1098 | ret = 1; |
1097 | out: | 1099 | out: |
@@ -1197,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq) | |||
1197 | 1199 | ||
1198 | } | 1200 | } |
1199 | skip: | 1201 | skip: |
1200 | spin_unlock(&src_rq->lock); | 1202 | double_unlock_balance(this_rq, src_rq); |
1201 | } | 1203 | } |
1202 | 1204 | ||
1203 | return ret; | 1205 | return ret; |
diff --git a/kernel/smp.c b/kernel/smp.c index 96fc7c0edc59..e6084f6efb4d 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -260,6 +260,41 @@ void __smp_call_function_single(int cpu, struct call_single_data *data) | |||
260 | generic_exec_single(cpu, data); | 260 | generic_exec_single(cpu, data); |
261 | } | 261 | } |
262 | 262 | ||
263 | /* Dummy function */ | ||
264 | static void quiesce_dummy(void *unused) | ||
265 | { | ||
266 | } | ||
267 | |||
268 | /* | ||
269 | * Ensure stack based data used in call function mask is safe to free. | ||
270 | * | ||
271 | * This is needed by smp_call_function_mask when using on-stack data, because | ||
272 | * a single call function queue is shared by all CPUs, and any CPU may pick up | ||
273 | * the data item on the queue at any time before it is deleted. So we need to | ||
274 | * ensure that all CPUs have transitioned through a quiescent state after | ||
275 | * this call. | ||
276 | * | ||
277 | * This is a very slow function, implemented by sending synchronous IPIs to | ||
278 | * all possible CPUs. For this reason, we have to alloc data rather than use | ||
279 | * stack based data even in the case of synchronous calls. The stack based | ||
280 | * data is then just used for deadlock/oom fallback which will be very rare. | ||
281 | * | ||
282 | * If a faster scheme can be made, we could go back to preferring stack based | ||
283 | * data -- the data allocation/free is non-zero cost. | ||
284 | */ | ||
285 | static void smp_call_function_mask_quiesce_stack(cpumask_t mask) | ||
286 | { | ||
287 | struct call_single_data data; | ||
288 | int cpu; | ||
289 | |||
290 | data.func = quiesce_dummy; | ||
291 | data.info = NULL; | ||
292 | data.flags = CSD_FLAG_WAIT; | ||
293 | |||
294 | for_each_cpu_mask(cpu, mask) | ||
295 | generic_exec_single(cpu, &data); | ||
296 | } | ||
297 | |||
263 | /** | 298 | /** |
264 | * smp_call_function_mask(): Run a function on a set of other CPUs. | 299 | * smp_call_function_mask(): Run a function on a set of other CPUs. |
265 | * @mask: The set of cpus to run on. | 300 | * @mask: The set of cpus to run on. |
@@ -285,6 +320,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | |||
285 | cpumask_t allbutself; | 320 | cpumask_t allbutself; |
286 | unsigned long flags; | 321 | unsigned long flags; |
287 | int cpu, num_cpus; | 322 | int cpu, num_cpus; |
323 | int slowpath = 0; | ||
288 | 324 | ||
289 | /* Can deadlock when called with interrupts disabled */ | 325 | /* Can deadlock when called with interrupts disabled */ |
290 | WARN_ON(irqs_disabled()); | 326 | WARN_ON(irqs_disabled()); |
@@ -306,15 +342,16 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | |||
306 | return smp_call_function_single(cpu, func, info, wait); | 342 | return smp_call_function_single(cpu, func, info, wait); |
307 | } | 343 | } |
308 | 344 | ||
309 | if (!wait) { | 345 | data = kmalloc(sizeof(*data), GFP_ATOMIC); |
310 | data = kmalloc(sizeof(*data), GFP_ATOMIC); | 346 | if (data) { |
311 | if (data) | 347 | data->csd.flags = CSD_FLAG_ALLOC; |
312 | data->csd.flags = CSD_FLAG_ALLOC; | 348 | if (wait) |
313 | } | 349 | data->csd.flags |= CSD_FLAG_WAIT; |
314 | if (!data) { | 350 | } else { |
315 | data = &d; | 351 | data = &d; |
316 | data->csd.flags = CSD_FLAG_WAIT; | 352 | data->csd.flags = CSD_FLAG_WAIT; |
317 | wait = 1; | 353 | wait = 1; |
354 | slowpath = 1; | ||
318 | } | 355 | } |
319 | 356 | ||
320 | spin_lock_init(&data->lock); | 357 | spin_lock_init(&data->lock); |
@@ -331,8 +368,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | |||
331 | arch_send_call_function_ipi(mask); | 368 | arch_send_call_function_ipi(mask); |
332 | 369 | ||
333 | /* optionally wait for the CPUs to complete */ | 370 | /* optionally wait for the CPUs to complete */ |
334 | if (wait) | 371 | if (wait) { |
335 | csd_flag_wait(&data->csd); | 372 | csd_flag_wait(&data->csd); |
373 | if (unlikely(slowpath)) | ||
374 | smp_call_function_mask_quiesce_stack(allbutself); | ||
375 | } | ||
336 | 376 | ||
337 | return 0; | 377 | return 0; |
338 | } | 378 | } |
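From the caller's side, the common case this path has to stay safe for is @info pointing at the caller's stack. A hypothetical user showing why such callers pass wait=1; flush_on(), remote_flush() and struct flush_args are invented names:

#include <linux/smp.h>
#include <linux/cpumask.h>

struct flush_args {
        unsigned long addr;
};

static void remote_flush(void *info)
{
        struct flush_args *args = info;

        /* ... act on args->addr on this CPU ... */
        (void)args;
}

static void flush_on(cpumask_t mask, unsigned long addr)
{
        struct flush_args args = { .addr = addr };

        /*
         * wait=1: do not return until every other CPU in @mask has run
         * remote_flush(). Needed both for correctness and because @args
         * lives in this stack frame and must not be reused while a
         * remote CPU may still dereference it.
         */
        smp_call_function_mask(mask, remote_flush, &args, 1);
}

The quiescing logic added above is about the internal call_single_data, which can itself end up on the stack when the kmalloc() fallback triggers; the lifetime of @info remains the caller's responsibility either way.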
diff --git a/kernel/spinlock.c b/kernel/spinlock.c index a1fb54c93cdd..44baeea94ab9 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c | |||
@@ -292,6 +292,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) | |||
292 | } | 292 | } |
293 | 293 | ||
294 | EXPORT_SYMBOL(_spin_lock_nested); | 294 | EXPORT_SYMBOL(_spin_lock_nested); |
295 | |||
295 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) | 296 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) |
296 | { | 297 | { |
297 | unsigned long flags; | 298 | unsigned long flags; |
@@ -314,6 +315,16 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas | |||
314 | 315 | ||
315 | EXPORT_SYMBOL(_spin_lock_irqsave_nested); | 316 | EXPORT_SYMBOL(_spin_lock_irqsave_nested); |
316 | 317 | ||
318 | void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, | ||
319 | struct lockdep_map *nest_lock) | ||
320 | { | ||
321 | preempt_disable(); | ||
322 | spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); | ||
323 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
324 | } | ||
325 | |||
326 | EXPORT_SYMBOL(_spin_lock_nest_lock); | ||
327 | |||
317 | #endif | 328 | #endif |
318 | 329 | ||
319 | void __lockfunc _spin_unlock(spinlock_t *lock) | 330 | void __lockfunc _spin_unlock(spinlock_t *lock) |
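_spin_lock_nest_lock() backs the new spin_lock_nest_lock(lock, nest_lock) annotation used in mm/mmap.c further down: when one outer lock serializes everybody who sweeps an entire set of same-class locks, lockdep can treat those inner acquisitions as protected by the outer lock instead of reporting them as recursive and piling them up in the held-lock table. A usage sketch with invented names (struct parent/child, parent_lock_all()):

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct parent {
        struct mutex big_lock;          /* serializes every "lock everything" sweep */
        struct list_head children;
};

struct child {
        spinlock_t lock;                /* many instances, one lock class */
        struct list_head node;
};

static void parent_lock_all(struct parent *p)
{
        struct child *c;

        mutex_lock(&p->big_lock);
        list_for_each_entry(c, &p->children, node)
                /* same-class locks, declared safe because big_lock is held */
                spin_lock_nest_lock(&c->lock, &p->big_lock);
}

static void parent_unlock_all(struct parent *p)
{
        struct child *c;

        list_for_each_entry(c, &p->children, node)
                spin_unlock(&c->lock);
        mutex_unlock(&p->big_lock);
}

mm_take_all_locks() below is the in-tree user of this pattern, with mmap_sem as the nest lock.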
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4a26a1382df0..4048e92aa04f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) | |||
290 | 290 | ||
291 | BUG_ON(get_wq_data(work) != cwq); | 291 | BUG_ON(get_wq_data(work) != cwq); |
292 | work_clear_pending(work); | 292 | work_clear_pending(work); |
293 | lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 293 | lock_map_acquire(&cwq->wq->lockdep_map); |
294 | lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 294 | lock_map_acquire(&lockdep_map); |
295 | f(work); | 295 | f(work); |
296 | lock_release(&lockdep_map, 1, _THIS_IP_); | 296 | lock_map_release(&lockdep_map); |
297 | lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); | 297 | lock_map_release(&cwq->wq->lockdep_map); |
298 | 298 | ||
299 | if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { | 299 | if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { |
300 | printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " | 300 | printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " |
@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq) | |||
413 | int cpu; | 413 | int cpu; |
414 | 414 | ||
415 | might_sleep(); | 415 | might_sleep(); |
416 | lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 416 | lock_map_acquire(&wq->lockdep_map); |
417 | lock_release(&wq->lockdep_map, 1, _THIS_IP_); | 417 | lock_map_release(&wq->lockdep_map); |
418 | for_each_cpu_mask_nr(cpu, *cpu_map) | 418 | for_each_cpu_mask_nr(cpu, *cpu_map) |
419 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); | 419 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); |
420 | } | 420 | } |
@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work) | |||
441 | if (!cwq) | 441 | if (!cwq) |
442 | return 0; | 442 | return 0; |
443 | 443 | ||
444 | lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 444 | lock_map_acquire(&cwq->wq->lockdep_map); |
445 | lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); | 445 | lock_map_release(&cwq->wq->lockdep_map); |
446 | 446 | ||
447 | prev = NULL; | 447 | prev = NULL; |
448 | spin_lock_irq(&cwq->lock); | 448 | spin_lock_irq(&cwq->lock); |
@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work) | |||
536 | 536 | ||
537 | might_sleep(); | 537 | might_sleep(); |
538 | 538 | ||
539 | lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 539 | lock_map_acquire(&work->lockdep_map); |
540 | lock_release(&work->lockdep_map, 1, _THIS_IP_); | 540 | lock_map_release(&work->lockdep_map); |
541 | 541 | ||
542 | cwq = get_wq_data(work); | 542 | cwq = get_wq_data(work); |
543 | if (!cwq) | 543 | if (!cwq) |
@@ -872,8 +872,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) | |||
872 | if (cwq->thread == NULL) | 872 | if (cwq->thread == NULL) |
873 | return; | 873 | return; |
874 | 874 | ||
875 | lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 875 | lock_map_acquire(&cwq->wq->lockdep_map); |
876 | lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); | 876 | lock_map_release(&cwq->wq->lockdep_map); |
877 | 877 | ||
878 | flush_cpu_workqueue(cwq); | 878 | flush_cpu_workqueue(cwq); |
879 | /* | 879 | /* |
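These workqueue hunks are mechanical substitutions of the new lock_map_acquire()/lock_map_release() helpers for the raw lock_acquire()/lock_release() calls, but the pattern they wrap is worth spelling out: a workqueue is not a lock, yet flushing it must wait for running work, so it carries a lockdep_map that is "acquired" around both activities. A sketch of applying the same annotation to one's own flushable object; struct my_resource and the my_resource_*() helpers are invented, and the sketch assumes lock debugging is configured in (real code guards the map with #ifdef CONFIG_LOCKDEP, as the workqueue code does):

#include <linux/lockdep.h>

struct my_resource {
        struct lockdep_map dep_map;     /* pseudo-lock describing "in use" */
        /* ... the resource itself ... */
};

#define my_resource_init(res)                                           \
do {                                                                    \
        static struct lock_class_key __key;                             \
        lockdep_init_map(&(res)->dep_map, #res, &__key, 0);             \
} while (0)

/* A user "holds" the pseudo-lock for the duration of its work. */
static void my_resource_use(struct my_resource *res)
{
        lock_map_acquire(&res->dep_map);
        /* ... work that may take other, real locks ... */
        lock_map_release(&res->dep_map);
}

/*
 * Flush acquires and releases the same pseudo-lock before waiting, so
 * "flush while holding a lock that a user also takes" shows up as a
 * lockdep deadlock report rather than a rare hang.
 */
static void my_resource_flush(struct my_resource *res)
{
        lock_map_acquire(&res->dep_map);
        lock_map_release(&res->dep_map);
        /* ... wait for all users to finish ... */
}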
diff --git a/lib/debug_locks.c b/lib/debug_locks.c index 0ef01d14727c..0218b4693dd8 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * | 8 | * |
9 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | 9 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
10 | */ | 10 | */ |
11 | #include <linux/kernel.h> | ||
11 | #include <linux/rwsem.h> | 12 | #include <linux/rwsem.h> |
12 | #include <linux/mutex.h> | 13 | #include <linux/mutex.h> |
13 | #include <linux/module.h> | 14 | #include <linux/module.h> |
@@ -37,6 +38,7 @@ int debug_locks_off(void) | |||
37 | { | 38 | { |
38 | if (xchg(&debug_locks, 0)) { | 39 | if (xchg(&debug_locks, 0)) { |
39 | if (!debug_locks_silent) { | 40 | if (!debug_locks_silent) { |
41 | oops_in_progress = 1; | ||
40 | console_verbose(); | 42 | console_verbose(); |
41 | return 1; | 43 | return 1; |
42 | } | 44 | } |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -2273,14 +2273,14 @@ int install_special_mapping(struct mm_struct *mm, | |||
2273 | 2273 | ||
2274 | static DEFINE_MUTEX(mm_all_locks_mutex); | 2274 | static DEFINE_MUTEX(mm_all_locks_mutex); |
2275 | 2275 | ||
2276 | static void vm_lock_anon_vma(struct anon_vma *anon_vma) | 2276 | static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) |
2277 | { | 2277 | { |
2278 | if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) { | 2278 | if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) { |
2279 | /* | 2279 | /* |
2280 | * The LSB of head.next can't change from under us | 2280 | * The LSB of head.next can't change from under us |
2281 | * because we hold the mm_all_locks_mutex. | 2281 | * because we hold the mm_all_locks_mutex. |
2282 | */ | 2282 | */ |
2283 | spin_lock(&anon_vma->lock); | 2283 | spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem); |
2284 | /* | 2284 | /* |
2285 | * We can safely modify head.next after taking the | 2285 | * We can safely modify head.next after taking the |
2286 | * anon_vma->lock. If some other vma in this mm shares | 2286 | * anon_vma->lock. If some other vma in this mm shares |
@@ -2296,7 +2296,7 @@ static void vm_lock_anon_vma(struct anon_vma *anon_vma) | |||
2296 | } | 2296 | } |
2297 | } | 2297 | } |
2298 | 2298 | ||
2299 | static void vm_lock_mapping(struct address_space *mapping) | 2299 | static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) |
2300 | { | 2300 | { |
2301 | if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { | 2301 | if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { |
2302 | /* | 2302 | /* |
@@ -2310,7 +2310,7 @@ static void vm_lock_mapping(struct address_space *mapping) | |||
2310 | */ | 2310 | */ |
2311 | if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) | 2311 | if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) |
2312 | BUG(); | 2312 | BUG(); |
2313 | spin_lock(&mapping->i_mmap_lock); | 2313 | spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem); |
2314 | } | 2314 | } |
2315 | } | 2315 | } |
2316 | 2316 | ||
@@ -2358,11 +2358,17 @@ int mm_take_all_locks(struct mm_struct *mm) | |||
2358 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 2358 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
2359 | if (signal_pending(current)) | 2359 | if (signal_pending(current)) |
2360 | goto out_unlock; | 2360 | goto out_unlock; |
2361 | if (vma->anon_vma) | ||
2362 | vm_lock_anon_vma(vma->anon_vma); | ||
2363 | if (vma->vm_file && vma->vm_file->f_mapping) | 2361 | if (vma->vm_file && vma->vm_file->f_mapping) |
2364 | vm_lock_mapping(vma->vm_file->f_mapping); | 2362 | vm_lock_mapping(mm, vma->vm_file->f_mapping); |
2363 | } | ||
2364 | |||
2365 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | ||
2366 | if (signal_pending(current)) | ||
2367 | goto out_unlock; | ||
2368 | if (vma->anon_vma) | ||
2369 | vm_lock_anon_vma(mm, vma->anon_vma); | ||
2365 | } | 2370 | } |
2371 | |||
2366 | ret = 0; | 2372 | ret = 0; |
2367 | 2373 | ||
2368 | out_unlock: | 2374 | out_unlock: |
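Two things change in mm_take_all_locks() and its helpers: the per-vma locks are now taken with spin_lock_nest_lock() under mmap_sem, and the single loop is split so that every i_mmap_lock is taken before any anon_vma->lock, keeping the two classes in one consistent order. A generic sketch of that two-pass, class-ordered bulk locking; struct item, take_all_locks() and the a_lock/b_lock names are illustrative, and the caller is assumed to hold the outer rw_semaphore:

#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/list.h>

struct item {
        spinlock_t a_lock;              /* class the rest of the code takes first */
        spinlock_t b_lock;              /* class the rest of the code takes second */
        struct list_head node;
};

/* Caller holds @outer (e.g. via down_write()), which serializes all callers. */
static void take_all_locks(struct list_head *items, struct rw_semaphore *outer)
{
        struct item *it;

        /* Pass 1: every lock of the first class ... */
        list_for_each_entry(it, items, node)
                spin_lock_nest_lock(&it->a_lock, outer);

        /* Pass 2: ... then every lock of the second class. */
        list_for_each_entry(it, items, node)
                spin_lock_nest_lock(&it->b_lock, outer);
}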