 include/linux/lockdep.h    | 34
 include/linux/rcuclassic.h |  2
 kernel/lockdep.c           | 26
 3 files changed, 40 insertions(+), 22 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index e431d1d6eaf3..93a8cc02a033 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -211,6 +211,7 @@ struct held_lock {
 	u64				prev_chain_key;
 	unsigned long			acquire_ip;
 	struct lockdep_map		*instance;
+	struct lockdep_map		*nest_lock;
 #ifdef CONFIG_LOCK_STAT
 	u64				waittime_stamp;
 	u64				holdtime_stamp;
@@ -297,7 +298,8 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
  * 2: full validation
  */
 extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-			 int trylock, int read, int check, unsigned long ip);
+			 int trylock, int read, int check,
+			 struct lockdep_map *nest_lock, unsigned long ip);
 
 extern void lock_release(struct lockdep_map *lock, int nested,
 			 unsigned long ip);
@@ -319,7 +321,7 @@ static inline void lockdep_on(void)
 {
 }
 
-# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
+# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
 # define lock_release(l, n, i)			do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
@@ -407,9 +409,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-# define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+# define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
 # else
-# define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+# define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
 # endif
 # define spin_release(l, n, i)			lock_release(l, n, i)
 #else
@@ -419,11 +421,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-# define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
-# define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
+# define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+# define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
 # else
-# define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
-# define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
+# define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+# define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
 # endif
 # define rwlock_release(l, n, i)		lock_release(l, n, i)
 #else
@@ -434,9 +436,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-# define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+# define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
 # else
-# define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+# define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
 # endif
 # define mutex_release(l, n, i)			lock_release(l, n, i)
 #else
@@ -446,11 +448,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-# define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
-# define rwsem_acquire_read(l, s, t, i)		lock_acquire(l, s, t, 1, 2, i)
+# define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+# define rwsem_acquire_read(l, s, t, i)		lock_acquire(l, s, t, 1, 2, NULL, i)
 # else
-# define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
-# define rwsem_acquire_read(l, s, t, i)		lock_acquire(l, s, t, 1, 1, i)
+# define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+# define rwsem_acquire_read(l, s, t, i)		lock_acquire(l, s, t, 1, 1, NULL, i)
 # endif
 # define rwsem_release(l, n, i)			lock_release(l, n, i)
 #else
@@ -461,9 +463,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-# define map_acquire(l)			lock_acquire(l, 0, 0, 0, 2, _THIS_IP_)
+# define map_acquire(l)			lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
 # else
-# define map_acquire(l)			lock_acquire(l, 0, 0, 0, 1, _THIS_IP_)
+# define map_acquire(l)			lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
 # endif
 # define map_release(l)			lock_release(l, 1, _THIS_IP_)
 #else
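
[Note: the existing annotation macros above keep their arity and simply pass NULL for the new argument; only a caller that actually names an outer lock needs the longer form. The one-line sketch below is an illustration only and is not part of this diff; the name spin_acquire_nest is an assumption.]

/* Hypothetical CONFIG_PROVE_LOCKING variant; a !PROVE_LOCKING variant would pass 1 for check. */
# define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
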
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 8c774905dcfe..4ab843622727 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu);
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern struct lockdep_map rcu_lock_map;
 # define rcu_read_acquire()	\
-			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
+			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
 # define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
 #else
 # define rcu_read_acquire()		do { } while (0)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index d3c72ad8d09e..410c3365ad8f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1372,18 +1372,32 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
 	       struct lockdep_map *next_instance, int read)
 {
 	struct held_lock *prev;
+	struct held_lock *nest = NULL;
 	int i;
 
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		prev = curr->held_locks + i;
+
+		if (prev->instance == next->nest_lock)
+			nest = prev;
+
 		if (hlock_class(prev) != hlock_class(next))
 			continue;
+
 		/*
 		 * Allow read-after-read recursion of the same
 		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
 		 */
 		if ((read == 2) && prev->read)
 			return 2;
+
+		/*
+		 * We're holding the nest_lock, which serializes this lock's
+		 * nesting behaviour.
+		 */
+		if (nest)
+			return 2;
+
 		return print_deadlock_bug(curr, prev, next);
 	}
 	return 1;
@@ -2507,7 +2521,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			  int trylock, int read, int check, int hardirqs_off,
-			  unsigned long ip)
+			  struct lockdep_map *nest_lock, unsigned long ip)
 {
 	struct task_struct *curr = current;
 	struct lock_class *class = NULL;
@@ -2566,6 +2580,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->class_idx = class - lock_classes + 1;
 	hlock->acquire_ip = ip;
 	hlock->instance = lock;
+	hlock->nest_lock = nest_lock;
 	hlock->trylock = trylock;
 	hlock->read = read;
 	hlock->check = check;
@@ -2717,7 +2732,7 @@ found_it:
 		if (!__lock_acquire(hlock->instance,
 				hlock_class(hlock)->subclass, hlock->trylock,
 				hlock->read, hlock->check, hlock->hardirqs_off,
-				hlock->acquire_ip))
+				hlock->nest_lock, hlock->acquire_ip))
 			return 0;
 	}
 
@@ -2778,7 +2793,7 @@ found_it:
 		if (!__lock_acquire(hlock->instance,
 				hlock_class(hlock)->subclass, hlock->trylock,
 				hlock->read, hlock->check, hlock->hardirqs_off,
-				hlock->acquire_ip))
+				hlock->nest_lock, hlock->acquire_ip))
 			return 0;
 	}
 
@@ -2915,7 +2930,8 @@ EXPORT_SYMBOL_GPL(lock_set_subclass);
 * and also avoid lockdep recursion:
 */
 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-		  int trylock, int read, int check, unsigned long ip)
+		  int trylock, int read, int check,
+		  struct lockdep_map *nest_lock, unsigned long ip)
 {
 	unsigned long flags;
 
@@ -2930,7 +2946,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
 	current->lockdep_recursion = 1;
 	__lock_acquire(lock, subclass, trylock, read, check,
-		       irqs_disabled_flags(flags), ip);
+		       irqs_disabled_flags(flags), nest_lock, ip);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
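
[Note: the sketch below illustrates the locking pattern this change is meant to validate; the struct, the function, and the spin_lock_nest_lock() helper it assumes are not part of this diff. When the outer lock is held and its lockdep_map is passed as nest_lock, check_deadlock() finds it on the held-lock stack via prev->instance == next->nest_lock and returns 2, so any number of same-class inner locks may be held without triggering a same-class recursion report.]

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct thing {
	spinlock_t		lock;	/* every instance shares one lock class */
	struct list_head	list;
};

static void take_all_things(struct mutex *outer, struct list_head *things)
{
	struct thing *t;

	mutex_lock(outer);	/* the lock named as nest_lock below */
	list_for_each_entry(t, things, list)
		/*
		 * Assumes a wrapper in the spirit of spin_lock_nest_lock()
		 * (not added by this diff) that hands &outer->dep_map to
		 * lock_acquire() as the nest_lock argument.
		 */
		spin_lock_nest_lock(&t->lock, outer);

	/* The matching release path drops each t->lock, then outer. */
}
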