author		Peter Zijlstra <peterz@infradead.org>	2014-01-29 06:51:42 -0500
committer	Ingo Molnar <mingo@kernel.org>		2014-03-11 07:14:56 -0400
commit		fb0527bd5ea99bfeb2dd91e3c1433ecf745d6b99
tree		b3ab4c067c035688d4295fdcadf00170465db7df /kernel/locking/mutex.c
parent		1d8fe7dc8078b23e060ec62ccb4cdc1ac3c41bf8
locking/mutexes: Introduce cancelable MCS lock for adaptive spinning
Since we want a task waiting for a mutex_lock() to go to sleep and
reschedule on need_resched(), we must be able to abort the
mcs_spin_lock() around the adaptive spin. Therefore, implement a
cancelable MCS lock.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: chegu_vinod@hp.com
Cc: paulmck@linux.vnet.ibm.com
Cc: Waiman.Long@hp.com
Cc: torvalds@linux-foundation.org
Cc: tglx@linutronix.de
Cc: riel@redhat.com
Cc: akpm@linux-foundation.org
Cc: davidlohr@hp.com
Cc: hpa@zytor.com
Cc: andi@firstfloor.org
Cc: aswin@hp.com
Cc: scott.norton@hp.com
Cc: Jason Low <jason.low2@hp.com>
Link: http://lkml.kernel.org/n/tip-62hcl5wxydmjzd182zhvk89m@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking/mutex.c')
-rw-r--r--	kernel/locking/mutex.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
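The shape of the change: taking the spin queue becomes a try-operation, so a task whose spin should yield to need_resched() can bail out to the sleeping slowpath instead of blocking inside mcs_spin_lock(). Below is a minimal userspace sketch of that pattern in C11 atomics; all identifiers in it are invented for illustration, and the kernel's real osq_lock() queues per-CPU nodes rather than spinning on a single word.

/*
 * Illustrative userspace analogue only -- not the kernel's osq
 * implementation; every name below is invented for this sketch.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct spin_queue {
	atomic_int locked;			/* 0 = free, 1 = held */
};

static atomic_bool resched_pending;		/* stand-in for need_resched() */

static bool need_resched_stub(void)
{
	return atomic_load_explicit(&resched_pending, memory_order_relaxed);
}

/*
 * Try to take the queue, but give up as soon as a reschedule is
 * pending -- mirroring how osq_lock() returns false so the mutex
 * code can fall back to the sleeping slowpath.
 */
static bool cancelable_spin_lock(struct spin_queue *q)
{
	int expected = 0;

	while (!atomic_compare_exchange_weak_explicit(&q->locked, &expected, 1,
						      memory_order_acquire,
						      memory_order_relaxed)) {
		expected = 0;			/* CAS rewrote it on failure */
		if (need_resched_stub())
			return false;		/* abort: caller takes the slowpath */
	}
	return true;
}

static void cancelable_spin_unlock(struct spin_queue *q)
{
	atomic_store_explicit(&q->locked, 0, memory_order_release);
}

The diff below applies exactly this shape to the mutex spin path: a failed osq_lock() jumps to the slowpath label, and both exit paths release the queue with osq_unlock().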
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index dc3d6f2bbe2a..2670b84067d6 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -53,7 +53,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	INIT_LIST_HEAD(&lock->wait_list);
 	mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	lock->mcs_lock = NULL;
+	lock->osq = NULL;
 #endif
 
 	debug_mutex_init(lock, name, key);
@@ -403,7 +403,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	if (!mutex_can_spin_on_owner(lock))
 		goto slowpath;
 
-	mcs_spin_lock(&lock->mcs_lock, &node);
+	if (!osq_lock(&lock->osq))
+		goto slowpath;
+
 	for (;;) {
 		struct task_struct *owner;
 
@@ -442,7 +444,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		}
 
 		mutex_set_owner(lock);
-		mcs_spin_unlock(&lock->mcs_lock, &node);
+		osq_unlock(&lock->osq);
 		preempt_enable();
 		return 0;
 	}
@@ -464,7 +466,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 */
 		arch_mutex_cpu_relax();
 	}
-	mcs_spin_unlock(&lock->mcs_lock, &node);
+	osq_unlock(&lock->osq);
 slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);