author     Davidlohr Bueso <davidlohr.bueso@hp.com>   2013-06-28 16:13:18 -0400
committer  Ingo Molnar <mingo@kernel.org>             2013-07-23 05:48:37 -0400
commit     ec83f425dbca47e19c6737e8e7db0d0924a5de1b (patch)
tree       43f42b0e69847b012d835bda331c7252584bfd46 /kernel/mutex.c
parent     b59f2b4d30c187160df597bae41cabe497b6acf4 (diff)
mutex: Do not unnecessarily deal with waiters
Upon entering the slowpath, we immediately attempt to acquire
the lock by checking if it is already unlocked. If we are lucky
enough that this is the case, then we don't need to deal with
any waiter-related logic.
Furthermore, any checks for an empty wait_list are unnecessary,
as we already know that count is non-negative and hence no one
is waiting for the lock.
Move the count check and xchg calls to be done before any
waiters are set up, including waiter debugging. Upon failure to
acquire the lock, the xchg sets the counter to 0, instead of -1
as it was originally. This can be done here since we set it back
to -1 right at the beginning of the wait loop, so other waiters
are woken up when the lock is released.
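
For illustration only, here is a minimal userspace sketch of that
early re-check, written with C11 atomics; 'demo_mutex' and
'demo_try_lock_before_queueing' are made-up names, not kernel
interfaces:

  #include <stdatomic.h>
  #include <stdbool.h>

  /*
   * Illustrative only: same counter convention as the mutex fastpath,
   * 1 = unlocked, 0 = locked with no waiters, -1 = locked, waiters
   * may exist.
   */
  struct demo_mutex {
          atomic_int count;
  };

  /*
   * The check that now runs before any waiter bookkeeping: if nobody
   * appears to be waiting (count >= 0) and the exchange observes 1,
   * the lock is ours. Exchanging to 0 rather than -1 is safe because
   * a task that does go on to wait sets the counter back to -1 at the
   * top of its wait loop, so the eventual unlock still takes the
   * wake-up path.
   */
  static bool demo_try_lock_before_queueing(struct demo_mutex *m)
  {
          return atomic_load(&m->count) >= 0 &&
                 atomic_exchange(&m->count, 0) == 1;
  }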
When tested on an 8-socket (80 core) system against a vanilla
3.10-rc1 kernel, this patch provides some small performance
benefits (+2-6%). While these could be considered within the
noise level, the average percentages were stable across multiple
runs and no performance regressions were seen. The two big
winners, for small numbers of users (10-100), were the short and
compute workloads, which gained +19.36% and +15.76% in jobs per
minute respectively.
Also change some break statements to 'goto slowpath', which IMO
makes the code a little more intuitive to read.
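
A condensed, purely illustrative view of the resulting control
flow; the helpers below are stand-ins, not kernel functions:

  #include <stdbool.h>

  static bool must_stop_spinning(void) { return true; }  /* e.g. owner slept */
  static bool try_fast_acquire(void)   { return false; }
  static void wait_for_lock(void)      { }  /* re-check once, then queue */

  static int lock_sketch(void)
  {
          for (;;) {
                  if (must_stop_spinning())
                          goto slowpath;          /* was: break */
                  if (try_fast_acquire())
                          goto done;              /* no waiter setup at all */
          }
  slowpath:
          wait_for_lock();
  done:
          return 0;                               /* preempt_enable() + return */
  }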
Signed-off-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1372450398.2106.1.camel@buesod1.americas.hpqcorp.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/mutex.c')
 -rw-r--r--  kernel/mutex.c | 41
 1 file changed, 18 insertions, 23 deletions
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 7ff48c55a98b..386ad5da47a5 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -463,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			 * performed the optimistic spinning cannot be done.
 			 */
 			if (ACCESS_ONCE(ww->ctx))
-				break;
+				goto slowpath;
 		}
 
 		/*
@@ -474,7 +474,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		owner = ACCESS_ONCE(lock->owner);
 		if (owner && !mutex_spin_on_owner(lock, owner)) {
 			mspin_unlock(MLOCK(lock), &node);
-			break;
+			goto slowpath;
 		}
 
 		if ((atomic_read(&lock->count) == 1) &&
@@ -489,8 +489,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 			mutex_set_owner(lock);
 			mspin_unlock(MLOCK(lock), &node);
-			preempt_enable();
-			return 0;
+			goto done;
 		}
 		mspin_unlock(MLOCK(lock), &node);
 
@@ -501,7 +500,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * the owner complete.
 		 */
 		if (!owner && (need_resched() || rt_task(task)))
-			break;
+			goto slowpath;
 
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
@@ -515,6 +514,10 @@ slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
+	/* once more, can we acquire the lock? */
+	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+		goto skip_wait;
+
 	debug_mutex_lock_common(lock, &waiter);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
@@ -522,9 +525,6 @@ slowpath:
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
-	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
-		goto done;
-
 	lock_contended(&lock->dep_map, ip);
 
 	for (;;) {
@@ -538,7 +538,7 @@ slowpath:
 		 * other waiters:
 		 */
 		if (MUTEX_SHOW_NO_WAITER(lock) &&
-		(atomic_xchg(&lock->count, -1) == 1))
+		    (atomic_xchg(&lock->count, -1) == 1))
 			break;
 
 		/*
@@ -563,24 +563,25 @@ slowpath:
 		schedule_preempt_disabled();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	/* set it to 0 if there are no waiters left: */
+	if (likely(list_empty(&lock->wait_list)))
+		atomic_set(&lock->count, 0);
+	debug_mutex_free_waiter(&waiter);
 
-done:
+skip_wait:
+	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
-	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, current_thread_info());
 	mutex_set_owner(lock);
 
 	if (!__builtin_constant_p(ww_ctx == NULL)) {
-		struct ww_mutex *ww = container_of(lock,
-						   struct ww_mutex,
-						   base);
+		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 		struct mutex_waiter *cur;
 
 		/*
 		 * This branch gets optimized out for the common case,
 		 * and is only important for ww_mutex_lock.
 		 */
-
 		ww_mutex_lock_acquired(ww, ww_ctx);
 		ww->ctx = ww_ctx;
 
@@ -594,15 +595,9 @@ done:
 		}
 	}
 
-	/* set it to 0 if there are no waiters left: */
-	if (likely(list_empty(&lock->wait_list)))
-		atomic_set(&lock->count, 0);
-
 	spin_unlock_mutex(&lock->wait_lock, flags);
-
-	debug_mutex_free_waiter(&waiter);
+done:
 	preempt_enable();
-
 	return 0;
 
 err: