 include/linux/mutex.h |  8 +
 kernel/mutex.c        | 44 +-
 lib/Kconfig.debug     | 13 +
 3 files changed, 62 insertions, 3 deletions
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index a56b0ccc8a6c..3793ed7feeeb 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -98,6 +98,10 @@ struct ww_acquire_ctx {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
 #endif
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+       unsigned deadlock_inject_interval;
+       unsigned deadlock_inject_countdown;
+#endif
 };
 
 struct ww_mutex {
@@ -283,6 +287,10 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
                         &ww_class->acquire_key, 0);
        mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
 #endif
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+       ctx->deadlock_inject_interval = 1;
+       ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
+#endif
 }
 
 /**
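For context: ww_acquire_init() now also seeds the injection state, starting the interval at 1 and taking the first countdown from the low four bits of the context's stamp, so different acquire contexts hit their first forced backoff after a different number of contended locks. The injected -EDEADLK surfaces through the same acquire/backoff loop that every w/w mutex user already has to implement. A minimal sketch of such a loop follows, assuming the API as it stands after this patch; the names my_class and lock_many are illustrative only and not part of the patch.

/*
 * Illustrative w/w mutex user (not part of this patch): acquires a set
 * of mutexes of one class and handles -EDEADLK -- real or injected by
 * CONFIG_DEBUG_WW_MUTEX_SLOWPATH -- with the usual backoff-and-retry
 * dance.
 */
#include <linux/kernel.h>
#include <linux/mutex.h>        /* the ww_mutex API lives here at this point */

static DEFINE_WW_CLASS(my_class);

static void lock_many(struct ww_mutex **locks, int n)
{
        struct ww_acquire_ctx ctx;
        struct ww_mutex *contended = NULL;
        int i, ret;

        ww_acquire_init(&ctx, &my_class);
retry:
        if (contended) {
                /*
                 * Sleep on the lock we backed off from; nothing is held
                 * here, so this cannot fail, even with injection enabled.
                 */
                ww_mutex_lock_slow(contended, &ctx);
        }

        for (i = 0; i < n; i++) {
                if (locks[i] == contended)
                        continue;       /* already held via ww_mutex_lock_slow() */

                ret = ww_mutex_lock(locks[i], &ctx);
                if (ret == -EDEADLK) {
                        struct ww_mutex *busy = locks[i];

                        /*
                         * Wounded, either for real or by the injected
                         * -EDEADLK: drop everything held so far and start
                         * over, taking the contended lock first next time.
                         */
                        while (i--)
                                if (locks[i] != contended)
                                        ww_mutex_unlock(locks[i]);
                        if (contended)
                                ww_mutex_unlock(contended);

                        contended = busy;
                        goto retry;
                }
        }
        ww_acquire_done(&ctx);

        /*
         * ... caller uses the protected data, then unlocks every mutex
         * and calls ww_acquire_fini(&ctx) ...
         */
}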
diff --git a/kernel/mutex.c b/kernel/mutex.c
index fc801aafe8fd..e581ada5faf4 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -651,22 +651,60 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 
+static inline int
+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+       unsigned tmp;
+
+       if (ctx->deadlock_inject_countdown-- == 0) {
+               tmp = ctx->deadlock_inject_interval;
+               if (tmp > UINT_MAX/4)
+                       tmp = UINT_MAX;
+               else
+                       tmp = tmp*2 + tmp + tmp/2;
+
+               ctx->deadlock_inject_interval = tmp;
+               ctx->deadlock_inject_countdown = tmp;
+               ctx->contending_lock = lock;
+
+               ww_mutex_unlock(lock);
+
+               return -EDEADLK;
+       }
+#endif
+
+       return 0;
+}
+
 int __sched
 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
+       int ret;
+
        might_sleep();
-       return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
+       ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
                                   0, &ctx->dep_map, _RET_IP_, ctx);
+       if (!ret && ctx->acquired > 0)
+               return ww_mutex_deadlock_injection(lock, ctx);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(__ww_mutex_lock);
 
 int __sched
 __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
+       int ret;
+
        might_sleep();
-       return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-                                  0, &ctx->dep_map, _RET_IP_, ctx);
+       ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
+                                 0, &ctx->dep_map, _RET_IP_, ctx);
+
+       if (!ret && ctx->acquired > 0)
+               return ww_mutex_deadlock_injection(lock, ctx);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
 
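The helper only fires when the lock was actually taken (!ret) and the context already holds at least one other mutex (ctx->acquired > 0), i.e. exactly the situation in which a real wound could occur, and it releases the just-acquired mutex before returning -EDEADLK so callers observe the same state as after a genuine wound. Each time it fires, the per-context interval grows by roughly 3.5x (tmp*2 + tmp + tmp/2), saturating at UINT_MAX to avoid overflow, so a retrying acquire sequence always makes forward progress. A standalone user-space sketch of just that arithmetic (illustrative only, not kernel code):

#include <limits.h>
#include <stdio.h>

int main(void)
{
        /* ww_acquire_init() starts each context at interval 1 */
        unsigned tmp = 1;
        int i;

        for (i = 0; i < 8; i++) {
                printf("%u ", tmp);
                if (tmp > UINT_MAX / 4)                 /* saturate instead of overflowing */
                        tmp = UINT_MAX;
                else
                        tmp = tmp * 2 + tmp + tmp / 2;  /* ~3.5x growth per injection */
        }
        printf("\n");   /* prints: 1 3 10 35 122 427 1494 5229 */
        return 0;
}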
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 566cf2bc08ea..7154f799541a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -547,6 +547,19 @@ config DEBUG_MUTEXES
          This feature allows mutex semantics violations to be detected and
          reported.
 
+config DEBUG_WW_MUTEX_SLOWPATH
+       bool "Wait/wound mutex debugging: Slowpath testing"
+       depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+       select DEBUG_LOCK_ALLOC
+       select DEBUG_SPINLOCK
+       select DEBUG_MUTEXES
+       help
+         This feature enables slowpath testing for w/w mutex users by
+         injecting additional -EDEADLK wound/backoff cases. Together with
+         the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
+         will test all possible w/w mutex interface abuse with the
+         exception of simply not acquiring all the required locks.
+
 config DEBUG_LOCK_ALLOC
        bool "Lock debugging: detect incorrect freeing of live locks"
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
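As the help text notes, the injection is most useful together with CONFIG_PROVE_LOCKING, which then flags any interface misuse the forced backoffs provoke. A config fragment one might add for such a test build (a suggestion, not part of this patch; DEBUG_LOCK_ALLOC, DEBUG_SPINLOCK and DEBUG_MUTEXES are pulled in automatically by the select lines above):

CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
CONFIG_PROVE_LOCKING=y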