author		Linus Torvalds <torvalds@linux-foundation.org>	2013-09-04 11:18:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-04 11:18:19 -0400
commit		4689550bb278cb142979c313a0d608e802c6711b (patch)
tree		f8776c28f1328ab4077132c636c2706f12c793aa /include/linux
parent		b854e4de0bf88d094476af82c0d5a80f6f2af916 (diff)
parent		15e71911fcc655508e02f767a3d9b8b138051d2b (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core/locking changes from Ingo Molnar:
"Main changes:
- another mutex optimization, from Davidlohr Bueso
- improved lglock lockdep tracking, from Michel Lespinasse
- [ assorted smaller updates, improvements, cleanups. ]"
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
generic-ipi/locking: Fix misleading smp_call_function_any() description
hung_task debugging: Print more info when reporting the problem
mutex: Avoid label warning when !CONFIG_MUTEX_SPIN_ON_OWNER
mutex: Do not unnecessarily deal with waiters
mutex: Fix/document access-once assumption in mutex_can_spin_on_owner()
lglock: Update lockdep annotations to report recursive local locks
lockdep: Introduce lock_acquire_exclusive()/shared() helper macros
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/lockdep.h	92
1 file changed, 23 insertions, 69 deletions
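The lockdep change below hinges on lock_acquire()'s read and check arguments: read is 0 for an exclusive acquisition, 1 for shared, 2 for recursively shared, and check is 2 under CONFIG_PROVE_LOCKING versus 1 otherwise. As an illustration (not part of the patch; the dep_map and _RET_IP_ arguments are just typical caller values), an existing annotation such as the rwsem read-acquire expands to the same call it did before, only now through one shared helper:

	/* Illustrative expansion, assuming CONFIG_PROVE_LOCKING=y */
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
	/* -> lock_acquire_shared(&sem->dep_map, 0, 0, NULL, _RET_IP_) */
	/* -> lock_acquire(&sem->dep_map, 0, 0, 1, 2, NULL, _RET_IP_)  */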
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index f1e877b79ed8..cfc2f119779a 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -365,7 +365,7 @@ extern void lockdep_trace_alloc(gfp_t mask);
 
 #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
 
-#else /* !LOCKDEP */
+#else /* !CONFIG_LOCKDEP */
 
 static inline void lockdep_off(void)
 {
@@ -479,82 +479,36 @@ static inline void print_irqtrace_events(struct task_struct *curr)
  * on the per lock-class debug mode:
  */
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
-# define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
-# else
-# define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
-# define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
-# endif
-# define spin_release(l, n, i)			lock_release(l, n, i)
+#ifdef CONFIG_PROVE_LOCKING
+#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 2, n, i)
+#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 2, n, i)
+#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 2, n, i)
 #else
-# define spin_acquire(l, s, t, i)		do { } while (0)
-# define spin_release(l, n, i)			do { } while (0)
+#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
+#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
+#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)
 #endif
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
-# define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
-# else
-# define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
-# define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
-# endif
-# define rwlock_release(l, n, i)		lock_release(l, n, i)
-#else
-# define rwlock_acquire(l, s, t, i)		do { } while (0)
-# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
-# define rwlock_release(l, n, i)		do { } while (0)
-#endif
+#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
+#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
+#define spin_release(l, n, i)			lock_release(l, n, i)
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
-# define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
-# else
-# define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
-# define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
-# endif
-# define mutex_release(l, n, i)		lock_release(l, n, i)
-#else
-# define mutex_acquire(l, s, t, i)		do { } while (0)
-# define mutex_acquire_nest(l, s, t, n, i)	do { } while (0)
-# define mutex_release(l, n, i)		do { } while (0)
-#endif
+#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
+#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define rwlock_release(l, n, i)			lock_release(l, n, i)
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
-# define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
-# define rwsem_acquire_read(l, s, t, i)		lock_acquire(l, s, t, 1, 2, NULL, i)
-# else
-# define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
-# define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
-# define rwsem_acquire_read(l, s, t, i)		lock_acquire(l, s, t, 1, 1, NULL, i)
-# endif
+#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
+#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
+#define mutex_release(l, n, i)			lock_release(l, n, i)
+
+#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
+#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
+#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
 # define rwsem_release(l, n, i)		lock_release(l, n, i)
-#else
-# define rwsem_acquire(l, s, t, i)		do { } while (0)
-# define rwsem_acquire_nest(l, s, t, n, i)	do { } while (0)
-# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
-# define rwsem_release(l, n, i)		do { } while (0)
-#endif
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
-# define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
-# else
-# define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
-# define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
-# endif
+#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
 # define lock_map_release(l)		lock_release(l, 1, _THIS_IP_)
-#else
-# define lock_map_acquire(l)		do { } while (0)
-# define lock_map_acquire_read(l)	do { } while (0)
-# define lock_map_release(l)		do { } while (0)
-#endif
 
 #ifdef CONFIG_PROVE_LOCKING
 # define might_lock(lock) \
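For context, a brief sketch of what the new helpers buy (hypothetical, not taken from this tree): a lock primitive that wants lockdep coverage can now define its annotation macros in one line each, instead of repeating the per-type #ifdef blocks with hard-coded read/check constants. The "foo_lock" name below is made up purely for illustration.

	/* Hypothetical annotations for an imaginary foo_lock type,
	 * built on the consolidated helpers introduced above. */
	#define foo_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
	#define foo_acquire_read(l, s, t, i)	lock_acquire_shared(l, s, t, NULL, i)
	#define foo_release(l, n, i)		lock_release(l, n, i)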