author    Davidlohr Bueso <dave@stgolabs.net>  2015-01-06 14:45:06 -0500
committer Ingo Molnar <mingo@kernel.org>       2015-01-14 09:07:30 -0500
commit    4bd19084faa61a8c68586e74f03f5776179f65c2 (patch)
tree      c240de51529bea98656f06fcdb9a0af383b6b29a /kernel/locking/mutex.c
parent    e42f678a0237f84f0004fbaf0fad0b844751eadd (diff)
locking/mutex: Introduce ww_mutex_set_context_slowpath()
... which is equivalent to the fastpath counterpart. This mainly allows
getting some WW-specific code out of generic mutex paths.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1420573509-24774-4-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking/mutex.c')
-rw-r--r--  kernel/locking/mutex.c  44
1 file changed, 26 insertions(+), 18 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 6db3d0dea6da..c67a60b61625 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -147,7 +147,7 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
 }
 
 /*
- * after acquiring lock with fastpath or when we lost out in contested
+ * After acquiring lock with fastpath or when we lost out in contested
  * slowpath, set ctx and wake up any waiters so they can recheck.
  *
  * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
@@ -191,6 +191,30 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
 	spin_unlock_mutex(&lock->base.wait_lock, flags);
 }
 
+/*
+ * After acquiring lock in the slowpath set ctx and wake up any
+ * waiters so they can recheck.
+ *
+ * Callers must hold the mutex wait_lock.
+ */
+static __always_inline void
+ww_mutex_set_context_slowpath(struct ww_mutex *lock,
+			      struct ww_acquire_ctx *ctx)
+{
+	struct mutex_waiter *cur;
+
+	ww_mutex_lock_acquired(lock, ctx);
+	lock->ctx = ctx;
+
+	/*
+	 * Give any possible sleeping processes the chance to wake up,
+	 * so they can recheck if they have to back off.
+	 */
+	list_for_each_entry(cur, &lock->base.wait_list, list) {
+		debug_mutex_wake_waiter(&lock->base, cur);
+		wake_up_process(cur->task);
+	}
+}
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
@@ -576,23 +600,7 @@ skip_wait:
 
 	if (use_ww_ctx) {
 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-		struct mutex_waiter *cur;
-
-		/*
-		 * This branch gets optimized out for the common case,
-		 * and is only important for ww_mutex_lock.
-		 */
-		ww_mutex_lock_acquired(ww, ww_ctx);
-		ww->ctx = ww_ctx;
-
-		/*
-		 * Give any possible sleeping processes the chance to wake up,
-		 * so they can recheck if they have to back off.
-		 */
-		list_for_each_entry(cur, &lock->wait_list, list) {
-			debug_mutex_wake_waiter(lock, cur);
-			wake_up_process(cur->task);
-		}
+		ww_mutex_set_context_slowpath(ww, ww_ctx);
 	}
 
 	spin_unlock_mutex(&lock->wait_lock, flags);
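
For context: both ww_mutex_set_context_fastpath() and the new
ww_mutex_set_context_slowpath() publish lock->ctx so that contending waiters
can recheck whether they must back off (-EDEADLK). Below is a minimal
caller-side sketch of that acquire-context protocol, using the ww_mutex API
from <linux/ww_mutex.h>; demo_ww_class, lock_pair() and the two-lock shape
are illustrative assumptions, not code from this commit.

#include <linux/kernel.h>	/* swap() */
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

/*
 * Acquire two ww_mutexes (assumed ww_mutex_init()ed against demo_ww_class)
 * in an arbitrary order without deadlocking.
 */
static void lock_pair(struct ww_mutex *a, struct ww_mutex *b,
		      struct ww_acquire_ctx *ctx)
{
	ww_acquire_init(ctx, &demo_ww_class);

	if (ww_mutex_lock(a, ctx) == -EDEADLK)
		/* We hold nothing yet, so simply wait for 'a'. */
		ww_mutex_lock_slow(a, ctx);

	while (ww_mutex_lock(b, ctx) == -EDEADLK) {
		/*
		 * An older context owns 'b': back off by dropping 'a',
		 * sleep until 'b' becomes ours, then retry the other
		 * lock with the roles swapped.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, ctx);
		swap(a, b);
	}

	ww_acquire_done(ctx);	/* everything for this context is now held */
}

The caller later releases with ww_mutex_unlock() on both mutexes followed by
ww_acquire_fini(ctx). The fastpath/slowpath helpers in this patch are what
make the holder's ctx visible to the ww_mutex_lock() calls above when they
decide between waiting and returning -EDEADLK.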