Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--  kernel/mutex.c  47
1 file changed, 22 insertions, 25 deletions
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ff05f4bd86eb..6d647aedffea 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -209,11 +209,13 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
  */
 static inline int mutex_can_spin_on_owner(struct mutex *lock)
 {
+	struct task_struct *owner;
 	int retval = 1;
 
 	rcu_read_lock();
-	if (lock->owner)
-		retval = lock->owner->on_cpu;
+	owner = ACCESS_ONCE(lock->owner);
+	if (owner)
+		retval = owner->on_cpu;
 	rcu_read_unlock();
 	/*
 	 * if lock->owner is not set, the mutex owner may have just acquired
@@ -461,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			 * performed the optimistic spinning cannot be done.
 			 */
 			if (ACCESS_ONCE(ww->ctx))
-				break;
+				goto slowpath;
 		}
 
 		/*
@@ -472,7 +474,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		owner = ACCESS_ONCE(lock->owner);
 		if (owner && !mutex_spin_on_owner(lock, owner)) {
 			mspin_unlock(MLOCK(lock), &node);
-			break;
+			goto slowpath;
 		}
 
 		if ((atomic_read(&lock->count) == 1) &&
@@ -499,7 +501,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * the owner complete.
 		 */
 		if (!owner && (need_resched() || rt_task(task)))
-			break;
+			goto slowpath;
 
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
@@ -513,6 +515,10 @@ slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
+	/* once more, can we acquire the lock? */
+	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+		goto skip_wait;
+
 	debug_mutex_lock_common(lock, &waiter);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
@@ -520,9 +526,6 @@ slowpath:
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
-	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
-		goto done;
-
 	lock_contended(&lock->dep_map, ip);
 
 	for (;;) {
@@ -536,7 +539,7 @@ slowpath:
 		 * other waiters:
 		 */
 		if (MUTEX_SHOW_NO_WAITER(lock) &&
-		   (atomic_xchg(&lock->count, -1) == 1))
+		    (atomic_xchg(&lock->count, -1) == 1))
 			break;
 
 		/*
@@ -561,24 +564,25 @@ slowpath:
 		schedule_preempt_disabled();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	/* set it to 0 if there are no waiters left: */
+	if (likely(list_empty(&lock->wait_list)))
+		atomic_set(&lock->count, 0);
+	debug_mutex_free_waiter(&waiter);
 
-done:
+skip_wait:
+	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
-	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, current_thread_info());
 	mutex_set_owner(lock);
 
 	if (!__builtin_constant_p(ww_ctx == NULL)) {
-		struct ww_mutex *ww = container_of(lock,
-						   struct ww_mutex,
-						   base);
+		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 		struct mutex_waiter *cur;
 
 		/*
 		 * This branch gets optimized out for the common case,
 		 * and is only important for ww_mutex_lock.
 		 */
-
 		ww_mutex_lock_acquired(ww, ww_ctx);
 		ww->ctx = ww_ctx;
 
@@ -592,15 +596,8 @@ done:
 		}
 	}
 
-	/* set it to 0 if there are no waiters left: */
-	if (likely(list_empty(&lock->wait_list)))
-		atomic_set(&lock->count, 0);
-
 	spin_unlock_mutex(&lock->wait_lock, flags);
-
-	debug_mutex_free_waiter(&waiter);
 	preempt_enable();
-
 	return 0;
 
 err:
@@ -686,7 +683,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	might_sleep();
 	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
 				  0, &ctx->dep_map, _RET_IP_, ctx);
-	if (!ret && ctx->acquired > 0)
+	if (!ret && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
@@ -702,7 +699,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
 				  0, &ctx->dep_map, _RET_IP_, ctx);
 
-	if (!ret && ctx->acquired > 0)
+	if (!ret && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
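
Note on the first hunk: mutex_can_spin_on_owner() now takes a single ACCESS_ONCE() snapshot of lock->owner, so the NULL test and the ->on_cpu dereference act on the same pointer and the owner cannot disappear between the two; the remaining hunks route every optimistic-spin bail-out to the slowpath label, retry the atomic_xchg() trylock once under wait_lock, and jump to skip_wait so a lucky acquirer never touches the wait list. For reference, this is roughly how the helper reads after the patch; the closing comment and the return fall outside the hunk shown above and are filled in here as a sketch, not quoted from the diff:

static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	rcu_read_lock();
	/* one snapshot: the NULL check and the dereference see the same pointer */
	owner = ACCESS_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * if lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has been released.
	 */
	return retval;
}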
