author     Linus Torvalds <torvalds@linux-foundation.org>    2013-09-04 11:18:19 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2013-09-04 11:18:19 -0400
commit     4689550bb278cb142979c313a0d608e802c6711b
tree       f8776c28f1328ab4077132c636c2706f12c793aa /kernel/mutex.c
parent     b854e4de0bf88d094476af82c0d5a80f6f2af916
parent     15e71911fcc655508e02f767a3d9b8b138051d2b
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core/locking changes from Ingo Molnar:
"Main changes:
- another mutex optimization, from Davidlohr Bueso
- improved lglock lockdep tracking, from Michel Lespinasse
- [ assorted smaller updates, improvements, cleanups. ]"
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
generic-ipi/locking: Fix misleading smp_call_function_any() description
hung_task debugging: Print more info when reporting the problem
mutex: Avoid label warning when !CONFIG_MUTEX_SPIN_ON_OWNER
mutex: Do not unnecessarily deal with waiters
mutex: Fix/document access-once assumption in mutex_can_spin_on_owner()
lglock: Update lockdep annotations to report recursive local locks
lockdep: Introduce lock_acquire_exclusive()/shared() helper macros
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--    kernel/mutex.c    43
1 file changed, 20 insertions(+), 23 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index a52ee7bb830d..6d647aedffea 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -209,11 +209,13 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
  */
 static inline int mutex_can_spin_on_owner(struct mutex *lock)
 {
+        struct task_struct *owner;
         int retval = 1;
 
         rcu_read_lock();
-        if (lock->owner)
-                retval = lock->owner->on_cpu;
+        owner = ACCESS_ONCE(lock->owner);
+        if (owner)
+                retval = owner->on_cpu;
         rcu_read_unlock();
         /*
          * if lock->owner is not set, the mutex owner may have just acquired
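The hunk above is the "mutex: Fix/document access-once assumption in mutex_can_spin_on_owner()" change from the shortlog: lock->owner is loaded exactly once into a local variable, so the compiler cannot reload the pointer between the NULL check and the dereference of ->on_cpu. Below is a minimal userspace sketch of the same pattern; the volatile-cast ACCESS_ONCE() stand-in and the struct task / struct fake_mutex / can_spin_on_owner() names are made up for illustration, not kernel code.

#include <stdio.h>

/* Userspace stand-in for the kernel's ACCESS_ONCE(): force one volatile read. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct task { int on_cpu; };
struct fake_mutex { struct task *owner; };

static int can_spin_on_owner(struct fake_mutex *lock)
{
        struct task *owner;
        int retval = 1;

        /* One racy load; 'owner' is then stable for both the check and the deref. */
        owner = ACCESS_ONCE(lock->owner);
        if (owner)
                retval = owner->on_cpu;
        return retval;
}

int main(void)
{
        struct task t = { .on_cpu = 1 };
        struct fake_mutex m = { .owner = &t };

        printf("owner running:  %d\n", can_spin_on_owner(&m));  /* 1: worth spinning */
        t.on_cpu = 0;
        printf("owner sleeping: %d\n", can_spin_on_owner(&m));  /* 0: do not spin */
        m.owner = NULL;
        printf("no owner:       %d\n", can_spin_on_owner(&m));  /* 1: default */
        return 0;
}

Without the single load, a sufficiently aggressive compiler could legally re-read lock->owner after the NULL test and dereference a pointer that another CPU has since cleared; that is the race the added local and ACCESS_ONCE() close off.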
@@ -461,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                          * performed the optimistic spinning cannot be done.
                          */
                         if (ACCESS_ONCE(ww->ctx))
-                                break;
+                                goto slowpath;
                 }
 
                 /*
@@ -472,7 +474,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 owner = ACCESS_ONCE(lock->owner);
                 if (owner && !mutex_spin_on_owner(lock, owner)) {
                         mspin_unlock(MLOCK(lock), &node);
-                        break;
+                        goto slowpath;
                 }
 
                 if ((atomic_read(&lock->count) == 1) &&
@@ -499,7 +501,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                  * the owner complete.
                  */
                 if (!owner && (need_resched() || rt_task(task)))
-                        break;
+                        goto slowpath;
 
                 /*
                  * The cpu_relax() call is a compiler barrier which forces
@@ -513,6 +515,10 @@ slowpath:
 #endif
         spin_lock_mutex(&lock->wait_lock, flags);
 
+        /* once more, can we acquire the lock? */
+        if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+                goto skip_wait;
+
         debug_mutex_lock_common(lock, &waiter);
         debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
@@ -520,9 +526,6 @@ slowpath:
         list_add_tail(&waiter.list, &lock->wait_list);
         waiter.task = task;
 
-        if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
-                goto done;
-
         lock_contended(&lock->dep_map, ip);
 
         for (;;) {
@@ -536,7 +539,7 @@ slowpath:
                  * other waiters:
                  */
                 if (MUTEX_SHOW_NO_WAITER(lock) &&
-                   (atomic_xchg(&lock->count, -1) == 1))
+                    (atomic_xchg(&lock->count, -1) == 1))
                         break;
 
                 /*
@@ -561,24 +564,25 @@ slowpath:
                 schedule_preempt_disabled();
                 spin_lock_mutex(&lock->wait_lock, flags);
         }
+        mutex_remove_waiter(lock, &waiter, current_thread_info());
+        /* set it to 0 if there are no waiters left: */
+        if (likely(list_empty(&lock->wait_list)))
+                atomic_set(&lock->count, 0);
+        debug_mutex_free_waiter(&waiter);
 
-done:
+skip_wait:
+        /* got the lock - cleanup and rejoice! */
         lock_acquired(&lock->dep_map, ip);
-        /* got the lock - rejoice! */
-        mutex_remove_waiter(lock, &waiter, current_thread_info());
         mutex_set_owner(lock);
 
         if (!__builtin_constant_p(ww_ctx == NULL)) {
-                struct ww_mutex *ww = container_of(lock,
-                                                   struct ww_mutex,
-                                                   base);
+                struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
                 struct mutex_waiter *cur;
 
                 /*
                  * This branch gets optimized out for the common case,
                  * and is only important for ww_mutex_lock.
                  */
-
                 ww_mutex_lock_acquired(ww, ww_ctx);
                 ww->ctx = ww_ctx;
 
@@ -592,15 +596,8 @@ done:
                 }
         }
 
-        /* set it to 0 if there are no waiters left: */
-        if (likely(list_empty(&lock->wait_list)))
-                atomic_set(&lock->count, 0);
-
         spin_unlock_mutex(&lock->wait_lock, flags);
-
-        debug_mutex_free_waiter(&waiter);
         preempt_enable();
-
         return 0;
 
 err:
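Taken together, the goto slowpath and skip_wait hunks are the "mutex: Do not unnecessarily deal with waiters" optimization: when optimistic spinning fails, the task jumps straight to the slowpath, retries the lock once more with an xchg, and only touches the wait list if that retry fails; the waiter teardown now happens before the skip_wait label, so the fast exit skips it entirely. Note that the early retry exchanges the count to 0 rather than -1, presumably because the task is not yet queued on the wait list, while the retry inside the wait loop still uses -1. Below is a rough userspace sketch of that control flow, assuming the kernel's counter convention (1 = unlocked, 0 = locked, -1 = locked with waiters); lock_slowpath(), owner_thread() and the sched_yield() loop are simplified stand-ins, not the real implementation (build with cc -pthread).

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int count = 1;    /* 1 = unlocked, 0 = locked, -1 = locked, waiters */

static void lock_slowpath(void)
{
        /* once more, can we acquire the lock? (mirrors the new skip_wait test) */
        if (atomic_exchange(&count, 0) == 1) {
                puts("extra try succeeded - skipping waiter bookkeeping");
                goto skip_wait;
        }

        puts("contended - queueing as a waiter");
        while (atomic_exchange(&count, -1) != 1)
                sched_yield();          /* the kernel sleeps here; we just yield */
        atomic_store(&count, 0);        /* no waiters left in this toy example */
        puts("owner released the lock - removing waiter");

skip_wait:
        puts("lock acquired");
}

static void *owner_thread(void *arg)
{
        (void)arg;
        usleep(1000);                   /* pretend to hold the lock briefly ... */
        atomic_store(&count, 1);        /* ... then release it */
        return NULL;
}

int main(void)
{
        pthread_t owner;

        lock_slowpath();                /* uncontended: takes the skip_wait path */

        pthread_create(&owner, NULL, owner_thread, NULL);
        lock_slowpath();                /* lock still held: exercises the wait loop */
        pthread_join(owner, NULL);
        return 0;
}

The payoff mirrored here is the same as in the patch: in the common lightly contended case the lock is grabbed on the extra try, so the task never adds and removes a waiter, and the fast exit through skip_wait does none of the wait-list or debug bookkeeping.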