Diffstat (limited to 'kernel/locking/rtmutex.c')
 kernel/locking/rtmutex.c | 55 +++++++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 41 insertions(+), 14 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index bab66cbe3b37..7d63bc5dd9b2 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1488,15 +1488,23 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
 /*
  * Futex variant with full deadlock detection.
+ * Futex variants must not use the fast-path, see __rt_mutex_futex_unlock().
  */
-int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
+int __sched rt_mutex_timed_futex_lock(struct rt_mutex *lock,
                               struct hrtimer_sleeper *timeout)
 {
         might_sleep();
 
-        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
-                                       RT_MUTEX_FULL_CHAINWALK,
-                                       rt_mutex_slowlock);
+        return rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE,
+                                 timeout, RT_MUTEX_FULL_CHAINWALK);
+}
+
+/*
+ * Futex variant, must not use fastpath.
+ */
+int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+        return rt_mutex_slowtrylock(lock);
 }
 
 /**
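The lock-side hunk above drops the rt_mutex_timed_fastlock() wrapper and adds a trylock that likewise goes straight to the slowpath. A minimal caller sketch under stated assumptions: the surrounding function is hypothetical and not part of this commit, only rt_mutex_futex_trylock() and rt_mutex_timed_futex_lock() come from the diff, and the return conventions (trylock nonzero on success, timed lock 0 or a negative error such as -EDEADLK/-ETIMEDOUT/-EINTR) are the usual rt-mutex ones rather than anything this page states.

/*
 * Hypothetical caller sketch, not part of this diff: take the rt_mutex
 * backing a PI futex only through the slowpath-only futex entry points.
 */
static int example_futex_pi_lock(struct rt_mutex *pi_mutex,
                                 struct hrtimer_sleeper *timeout)
{
        /* Opportunistic attempt; always funnels into rt_mutex_slowtrylock(). */
        if (rt_mutex_futex_trylock(pi_mutex))
                return 0;

        /*
         * Block with full deadlock detection (RT_MUTEX_FULL_CHAINWALK),
         * optionally bounded by @timeout; returns 0 on success or a
         * negative error code otherwise.
         */
        return rt_mutex_timed_futex_lock(pi_mutex, timeout);
}

Because neither entry point uses the cmpxchg fast path, everything the futex side does with the lock presumably happens under lock->wait_lock, which is what the new unlock code in the next hunk appears to rely on.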
@@ -1555,19 +1563,38 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
 /**
- * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
- * @lock: the rt_mutex to be unlocked
- *
- * Returns: true/false indicating whether priority adjustment is
- * required or not.
+ * Futex variant, that since futex variants do not use the fast-path, can be
+ * simple and will not need to retry.
  */
-bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
-                                   struct wake_q_head *wqh)
+bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
+                                     struct wake_q_head *wake_q)
 {
-        if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
-                return false;
+        lockdep_assert_held(&lock->wait_lock);
+
+        debug_rt_mutex_unlock(lock);
+
+        if (!rt_mutex_has_waiters(lock)) {
+                lock->owner = NULL;
+                return false; /* done */
+        }
+
+        mark_wakeup_next_waiter(wake_q, lock);
+        return true; /* deboost and wakeups */
+}
 
-        return rt_mutex_slowunlock(lock, wqh);
+void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
+{
+        DEFINE_WAKE_Q(wake_q);
+        bool deboost;
+
+        raw_spin_lock_irq(&lock->wait_lock);
+        deboost = __rt_mutex_futex_unlock(lock, &wake_q);
+        raw_spin_unlock_irq(&lock->wait_lock);
+
+        if (deboost) {
+                wake_up_q(&wake_q);
+                rt_mutex_adjust_prio(current);
+        }
 }
 
 /**
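On the unlock side, the hunk splits the old single function in two: __rt_mutex_futex_unlock() expects the caller to hold lock->wait_lock and only queues the next waiter into a wake_q, while rt_mutex_futex_unlock() takes wait_lock itself and performs the wakeup and deboost afterwards. A hedged usage sketch of the first variant, assuming a caller that has its own work to do under wait_lock; the function name and the bookkeeping comment are hypothetical, only the rt-mutex and wake_q calls appear in this diff:

/*
 * Hypothetical usage sketch, not part of this diff: an unlock path that
 * already holds lock->wait_lock queues the wakeup and deboosts only after
 * dropping the lock, mirroring rt_mutex_futex_unlock() above.
 */
static void example_futex_pi_unlock(struct rt_mutex *lock)
{
        DEFINE_WAKE_Q(wake_q);
        bool deboost;

        raw_spin_lock_irq(&lock->wait_lock);
        /* ... futex-side bookkeeping would go here, under wait_lock ... */
        deboost = __rt_mutex_futex_unlock(lock, &wake_q);
        raw_spin_unlock_irq(&lock->wait_lock);

        /* Wake the new owner and shed inherited priority outside the lock. */
        if (deboost) {
                wake_up_q(&wake_q);
                rt_mutex_adjust_prio(current);
        }
}

Clearing lock->owner directly when there are no waiters works here only because, as the comment added in the first hunk says, futex variants never take the cmpxchg fast path, so no locker can slip in without holding wait_lock.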