Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--  kernel/mutex.c | 41
1 files changed, 18 insertions, 23 deletions
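The patch below reworks the slowpath of __mutex_lock_common(): every failed optimistic-spin attempt now jumps to the slowpath label, a successful spin exits through a shared done label, and a last-moment acquisition taken right after grabbing wait_lock jumps past all waiter setup and teardown via a new skip_wait label. As a rough, self-contained illustration of that "try once more, then skip the waiter bookkeeping" pattern -- this is not the kernel code itself, and the names toy_lock, toy_lock_acquire and nr_waiters are invented for the sketch:

#include <stdatomic.h>
#include <stdio.h>

/* count: 1 = unlocked, 0 = locked, -1 = locked and waiters may be queued */
struct toy_lock {
        atomic_int count;
        int nr_waiters;         /* stand-in for the kernel's wait list; a real
                                   implementation would guard this with a wait_lock */
};

static void toy_lock_acquire(struct toy_lock *lock)
{
        /* assume the fastpath and the optimistic spin have already failed */

        /* once more, can we acquire the lock? (cf. MUTEX_SHOW_NO_WAITER) */
        if (lock->nr_waiters == 0 && atomic_exchange(&lock->count, 0) == 1)
                goto skip_wait;

        /* only now pay for the waiter bookkeeping */
        lock->nr_waiters++;
        while (atomic_exchange(&lock->count, -1) != 1)
                ;       /* the real slowpath sleeps here instead of spinning */
        lock->nr_waiters--;

        /* set it to 0 if there are no waiters left */
        if (lock->nr_waiters == 0)
                atomic_store(&lock->count, 0);

skip_wait:
        /* got the lock - common exit shared by both paths */
        printf("lock acquired, queued waiters: %d\n", lock->nr_waiters);
}

int main(void)
{
        struct toy_lock lock = { .count = 1 };

        toy_lock_acquire(&lock);        /* uncontended: takes the skip_wait shortcut */
        return 0;
}

The point, as in the patch, is that a caller who gets the lock at the last moment never touches the waiter bookkeeping at all.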
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 7ff48c55a98b..386ad5da47a5 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -463,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                          * performed the optimistic spinning cannot be done.
                          */
                        if (ACCESS_ONCE(ww->ctx))
-                               break;
+                               goto slowpath;
                }
 
                /*
@@ -474,7 +474,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner)) {
                        mspin_unlock(MLOCK(lock), &node);
-                       break;
+                       goto slowpath;
                }
 
                if ((atomic_read(&lock->count) == 1) &&
@@ -489,8 +489,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
                        mutex_set_owner(lock);
                        mspin_unlock(MLOCK(lock), &node);
-                       preempt_enable();
-                       return 0;
+                       goto done;
                }
                mspin_unlock(MLOCK(lock), &node);
 
@@ -501,7 +500,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 * the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
-                       break;
+                       goto slowpath;
 
                /*
                 * The cpu_relax() call is a compiler barrier which forces
@@ -515,6 +514,10 @@ slowpath:
 #endif
        spin_lock_mutex(&lock->wait_lock, flags);
 
+       /* once more, can we acquire the lock? */
+       if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+               goto skip_wait;
+
        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
@@ -522,9 +525,6 @@ slowpath:
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;
 
-       if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
-               goto done;
-
        lock_contended(&lock->dep_map, ip);
 
        for (;;) {
@@ -538,7 +538,7 @@ slowpath:
                 * other waiters:
                 */
                if (MUTEX_SHOW_NO_WAITER(lock) &&
-                  (atomic_xchg(&lock->count, -1) == 1))
+                   (atomic_xchg(&lock->count, -1) == 1))
                        break;
 
                /*
@@ -563,24 +563,25 @@ slowpath:
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }
+       mutex_remove_waiter(lock, &waiter, current_thread_info());
+       /* set it to 0 if there are no waiters left: */
+       if (likely(list_empty(&lock->wait_list)))
+               atomic_set(&lock->count, 0);
+       debug_mutex_free_waiter(&waiter);
 
-done:
+skip_wait:
+       /* got the lock - cleanup and rejoice! */
        lock_acquired(&lock->dep_map, ip);
-       /* got the lock - rejoice! */
-       mutex_remove_waiter(lock, &waiter, current_thread_info());
        mutex_set_owner(lock);
 
        if (!__builtin_constant_p(ww_ctx == NULL)) {
-               struct ww_mutex *ww = container_of(lock,
-                                                  struct ww_mutex,
-                                                  base);
+               struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
                struct mutex_waiter *cur;
 
                /*
                 * This branch gets optimized out for the common case,
                 * and is only important for ww_mutex_lock.
                 */
-
                ww_mutex_lock_acquired(ww, ww_ctx);
                ww->ctx = ww_ctx;
 
@@ -594,15 +595,9 @@ done:
                }
        }
 
-       /* set it to 0 if there are no waiters left: */
-       if (likely(list_empty(&lock->wait_list)))
-               atomic_set(&lock->count, 0);
-
        spin_unlock_mutex(&lock->wait_lock, flags);
-
-       debug_mutex_free_waiter(&waiter);
+done:
        preempt_enable();
-
        return 0;
 
 err: