author    | Daniel Vetter <daniel.vetter@ffwll.ch> | 2013-06-20 07:31:17 -0400
committer | Ingo Molnar <mingo@kernel.org>         | 2013-06-26 06:10:56 -0400
commit    | 230100276955529d5a7c69207421756b9a61a8e5 (patch)
tree      | e9cb48f8e43bd7e3d3cf38ee1f1e2838de112913 /kernel/mutex.c
parent    | 040a0a37100563754bb1fee6ff6427420bcfa609 (diff)
mutex: Add w/w mutex slowpath debugging
Injects EDEADLK conditions at pseudo-random intervals, with
exponential backoff up to UINT_MAX (to ensure that every lock
operation still completes in a reasonable time).
This way we can test the wound slowpath even for ww mutex users
where contention is never expected, and where the ww deadlock
avoidance algorithm is only needed for correctness against
malicious userspace. An example would be protecting kernel
modesetting properties, which, thanks to single-threaded X, aren't
really expected to be contended, ever.
I've looked into using the CONFIG_FAULT_INJECTION
infrastructure, but decided against it for two reasons:
- EDEADLK handling is mandatory for ww mutex users and should
never affect the outcome of a syscall. This is in contrast to -ENOMEM
injection, so fine-grained configurability isn't required.
- The fault injection framework only allows setting a simple
probability for failure. With that, the probability that a ww mutex
acquire stage with N locks never completes (due to too many injected
EDEADLK backoffs) is zero, but the expected number of ww_mutex_lock
operations for the completely uncontended case would be O(exp(N)).
The per-acquire-ctx exponential backoff chosen here instead results
in only O(log N) injection overhead, and hence O(N log N) lock
operations (see the standalone sketch after this list). This way we
can fail with high probability (and so get good test coverage even
for fancy backoff and lock acquisition paths) without running into
pathological cases.
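To make the O(log N) claim concrete: the interval grows by roughly 3.5x
after every injected backoff, so the number of injections over a run of
uncontended lock operations grows only logarithmically. A small standalone
userspace sketch (not part of the patch; the starting interval of 1 and the
one-million-operation run are illustrative assumptions):

#include <stdio.h>

int main(void)
{
	unsigned long long interval = 1;	/* assumed starting interval */
	unsigned long long ops = 0, injections = 0;
	const unsigned long long total_ops = 1000000;	/* arbitrary example run */

	while (ops < total_ops) {
		ops += interval + 1;	/* countdown reaches zero: one injected EDEADLK */
		injections++;
		/* same growth rule as ww_mutex_deadlock_injection() in the diff below */
		interval = interval * 2 + interval + interval / 2;
	}
	printf("%llu lock ops -> %llu injected backoffs\n", ops, injections);
	return 0;
}

With these assumptions the run prints about a dozen injected backoffs for a
million lock operations, i.e. plenty of slowpath coverage at negligible cost.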
Note that EDEADLK will only ever be injected when we actually
managed to acquire the lock. This prevents any behaviour changes for
users that rely on the EALREADY semantics.
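Because an injected -EDEADLK is indistinguishable from one caused by real
contention, it exercises exactly the backoff loop that ww mutex users
already have to implement. A hypothetical caller-side sketch (lock_pair()
is a made-up helper, written against the ww_mutex API added earlier in
this series):

static void lock_pair(struct ww_mutex *a, struct ww_mutex *b,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *contended = NULL;

retry:
	if (contended)
		ww_mutex_lock_slow(contended, ctx);	/* sleep until it is ours */

	if (contended != a && ww_mutex_lock(a, ctx) == -EDEADLK) {
		if (contended)
			ww_mutex_unlock(contended);	/* back off: drop what we hold */
		contended = a;
		goto retry;
	}
	if (contended != b && ww_mutex_lock(b, ctx) == -EDEADLK) {
		ww_mutex_unlock(a);			/* a is held at this point */
		contended = b;
		goto retry;
	}

	ww_acquire_done(ctx);	/* both locks held, acquire phase complete */
}

Since injection only happens after the lock was actually acquired (and the
injection helper drops it again before returning -EDEADLK), callers that
see -EALREADY keep seeing it exactly as before.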
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: rostedt@goodmis.org
Cc: daniel@ffwll.ch
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20130620113117.4001.21681.stgit@patser
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r-- | kernel/mutex.c | 44
1 file changed, 41 insertions, 3 deletions
diff --git a/kernel/mutex.c b/kernel/mutex.c
index fc801aafe8fd..e581ada5faf4 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -651,22 +651,60 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 
+static inline int
+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	unsigned tmp;
+
+	if (ctx->deadlock_inject_countdown-- == 0) {
+		tmp = ctx->deadlock_inject_interval;
+		if (tmp > UINT_MAX/4)
+			tmp = UINT_MAX;
+		else
+			tmp = tmp*2 + tmp + tmp/2;
+
+		ctx->deadlock_inject_interval = tmp;
+		ctx->deadlock_inject_countdown = tmp;
+		ctx->contending_lock = lock;
+
+		ww_mutex_unlock(lock);
+
+		return -EDEADLK;
+	}
+#endif
+
+	return 0;
+}
 
 int __sched
 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
+	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
 				   0, &ctx->dep_map, _RET_IP_, ctx);
+	if (!ret && ctx->acquired > 0)
+		return ww_mutex_deadlock_injection(lock, ctx);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(__ww_mutex_lock);
 
 int __sched
 __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx);
+	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
+				  0, &ctx->dep_map, _RET_IP_, ctx);
+
+	if (!ret && ctx->acquired > 0)
+		return ww_mutex_deadlock_injection(lock, ctx);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
 
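The deadlock_inject_interval and deadlock_inject_countdown fields used by
ww_mutex_deadlock_injection() live in struct ww_acquire_ctx and have to be
seeded when the context is initialized; that part of the change is in
include/linux/mutex.h and therefore outside this diffstat. A rough sketch
of what the ww_acquire_init() side could look like (the exact seed and the
use of ctx->stamp are assumptions, not shown in this hunk):

#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	/*
	 * Illustrative only: start with a short interval and a pseudo-random
	 * countdown so different contexts inject at different points.
	 */
	ctx->deadlock_inject_interval = 1;
	ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
#endif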