author    Jason Low <jason.low2@hp.com>  2014-01-28 14:13:13 -0500
committer Ingo Molnar <mingo@kernel.org>  2014-03-11 07:14:53 -0400
commit    47667fa1502e4d759df87e9cc7fbc0f202483361 (patch)
tree      d2355a95e004334147e00650ec4d638389d1ab87 /kernel/locking
parent    46af29e479cc0c1c63633007993af5292c2c3e75 (diff)
locking/mutexes: Modify the way optimistic spinners are queued
The mutex->spin_mlock was introduced in order to ensure that only 1 thread spins for lock acquisition at a time, to reduce cache line contention. When lock->owner is NULL and lock->count is still not 1, the spinner(s) will continually release and obtain the lock->spin_mlock. This can generate quite a bit of overhead/contention, and might also just delay the spinner from getting the lock.

This patch modifies the way optimistic spinners are queued by queuing before entering the optimistic spinning loop, as opposed to acquiring before every call to mutex_spin_on_owner(). So in situations where the spinner requires a few extra spins before obtaining the lock, there will only be 1 spinner trying to get the lock, and it will avoid the overhead of unnecessarily unlocking and locking the spin_mlock.

Signed-off-by: Jason Low <jason.low2@hp.com>
Cc: tglx@linutronix.de
Cc: riel@redhat.com
Cc: akpm@linux-foundation.org
Cc: davidlohr@hp.com
Cc: hpa@zytor.com
Cc: andi@firstfloor.org
Cc: aswin@hp.com
Cc: scott.norton@hp.com
Cc: chegu_vinod@hp.com
Cc: Waiman.Long@hp.com
Cc: paulmck@linux.vnet.ibm.com
Cc: torvalds@linux-foundation.org
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1390936396-3962-3-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
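To make the structural change concrete outside the kernel, here is a minimal userspace C sketch of the same idea, assuming C11 atomics in place of the kernel's mcs_spinlock API. Every identifier in it (mcs_node, mcs_lock, try_fastpath, worth_spinning, optimistic_spin) is a hypothetical stand-in rather than a kernel symbol; the point is only the shape of the loop: the queue lock is taken once before the spin loop, each bail-out condition becomes a break, and the queue lock is released once after the loop, instead of being cycled on every iteration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy MCS-style queue lock: each waiter spins on its own node. */
struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_bool locked;
};

struct mcs_lock {
	_Atomic(struct mcs_node *) tail;
};

static void mcs_acquire(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store(&node->next, NULL);
	atomic_store(&node->locked, false);
	prev = atomic_exchange(&lock->tail, node);	/* join the queue tail */
	if (prev) {
		atomic_store(&prev->next, node);
		while (!atomic_load(&node->locked))
			;	/* spin until our predecessor hands over */
	}
}

static void mcs_release(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load(&node->next);

	if (!next) {
		struct mcs_node *expected = node;

		/* No successor queued: try to reset the tail and leave. */
		if (atomic_compare_exchange_strong(&lock->tail, &expected, NULL))
			return;
		while (!(next = atomic_load(&node->next)))
			;	/* successor is mid-enqueue; wait for its link */
	}
	atomic_store(&next->locked, true);	/* hand the queue lock over */
}

/* Hypothetical stand-ins for the mutex fast path and the owner checks. */
static atomic_int mutex_count = 1;	/* 1 == unlocked, like lock->count */

static bool try_fastpath(void)
{
	int expected = 1;

	/* Mirrors atomic_cmpxchg(&lock->count, 1, 0) in the diff below. */
	return atomic_compare_exchange_strong(&mutex_count, &expected, 0);
}

static bool worth_spinning(void)
{
	/* Placeholder for the mutex_spin_on_owner()/need_resched() checks. */
	return false;
}

/*
 * Post-patch shape: queue on the MCS lock once, spin until the mutex is
 * taken or spinning stops being useful, then release the MCS lock once.
 * Before the patch the acquire/release pair sat inside the loop body, so
 * waiters kept cycling the queue lock on every iteration.
 */
static bool optimistic_spin(struct mcs_lock *queue)
{
	struct mcs_node node;
	bool acquired = false;

	mcs_acquire(queue, &node);
	for (;;) {
		if (try_fastpath()) {
			acquired = true;
			break;
		}
		if (!worth_spinning())
			break;		/* give up and fall back to the slowpath */
	}
	mcs_release(queue, &node);
	return acquired;
}

int main(void)
{
	struct mcs_lock queue = { .tail = NULL };

	printf("mutex taken on the fast path: %d\n", optimistic_spin(&queue));
	return 0;
}

With this shape, a spinner that needs a few extra iterations keeps its place at the head of the queue instead of re-contending for the queue tail each time around, which is exactly the overhead the patch removes.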
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/mutex.c  15
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index e6d646b18d6c..82dad2ccd40b 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -403,9 +403,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	if (!mutex_can_spin_on_owner(lock))
 		goto slowpath;
 
+	mcs_spin_lock(&lock->mcs_lock, &node);
 	for (;;) {
 		struct task_struct *owner;
-		struct mcs_spinlock node;
 
 		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			struct ww_mutex *ww;
@@ -420,19 +420,16 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			 * performed the optimistic spinning cannot be done.
 			 */
 			if (ACCESS_ONCE(ww->ctx))
-				goto slowpath;
+				break;
 		}
 
 		/*
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
-		mcs_spin_lock(&lock->mcs_lock, &node);
 		owner = ACCESS_ONCE(lock->owner);
-		if (owner && !mutex_spin_on_owner(lock, owner)) {
-			mcs_spin_unlock(&lock->mcs_lock, &node);
-			goto slowpath;
-		}
+		if (owner && !mutex_spin_on_owner(lock, owner))
+			break;
 
 		if ((atomic_read(&lock->count) == 1) &&
 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
@@ -449,7 +446,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			preempt_enable();
 			return 0;
 		}
-		mcs_spin_unlock(&lock->mcs_lock, &node);
 
 		/*
 		 * When there's no owner, we might have preempted between the
@@ -458,7 +454,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * the owner complete.
 		 */
 		if (!owner && (need_resched() || rt_task(task)))
-			goto slowpath;
+			break;
 
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
@@ -468,6 +464,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 */
 		arch_mutex_cpu_relax();
 	}
+	mcs_spin_unlock(&lock->mcs_lock, &node);
 slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);