diff options
-rw-r--r--	kernel/locking/mutex.c	15
1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index e6d646b18d6c..82dad2ccd40b 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -403,9 +403,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	if (!mutex_can_spin_on_owner(lock))
 		goto slowpath;
 
+	mcs_spin_lock(&lock->mcs_lock, &node);
 	for (;;) {
 		struct task_struct *owner;
-		struct mcs_spinlock  node;
 
 		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			struct ww_mutex *ww;
@@ -420,19 +420,16 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			 * performed the optimistic spinning cannot be done.
 			 */
 			if (ACCESS_ONCE(ww->ctx))
-				goto slowpath;
+				break;
 		}
 
 		/*
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
-		mcs_spin_lock(&lock->mcs_lock, &node);
 		owner = ACCESS_ONCE(lock->owner);
-		if (owner && !mutex_spin_on_owner(lock, owner)) {
-			mcs_spin_unlock(&lock->mcs_lock, &node);
-			goto slowpath;
-		}
+		if (owner && !mutex_spin_on_owner(lock, owner))
+			break;
 
 		if ((atomic_read(&lock->count) == 1) &&
 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
@@ -449,7 +446,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			preempt_enable();
 			return 0;
 		}
-		mcs_spin_unlock(&lock->mcs_lock, &node);
 
 		/*
 		 * When there's no owner, we might have preempted between the
@@ -458,7 +454,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * the owner complete.
 		 */
 		if (!owner && (need_resched() || rt_task(task)))
-			goto slowpath;
+			break;
 
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
@@ -468,6 +464,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 */
 		arch_mutex_cpu_relax();
 	}
+	mcs_spin_unlock(&lock->mcs_lock, &node);
 slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);