aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/locking/rtmutex.c
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2017-03-22 06:35:51 -0400
committerThomas Gleixner <tglx@linutronix.de>2017-03-23 14:10:07 -0400
commit5293c2efda37775346885c7e924d4ef7018ea60b (patch)
tree77868b67d43f614989c465d06415d6e1a90f5c4b /kernel/locking/rtmutex.c
parentfffa954fb528963c2fb7b0c0084eb77e2be7ab52 (diff)
futex,rt_mutex: Provide futex specific rt_mutex API
Part of what makes futex_unlock_pi() intricate is that rt_mutex_futex_unlock() -> rt_mutex_slowunlock() can drop rt_mutex::wait_lock. This means it cannot rely on the atomicity of wait_lock, which would be preferred in order to not rely on hb->lock so much. The reason rt_mutex_slowunlock() needs to drop wait_lock is because it can race with the rt_mutex fastpath, however futexes have their own fast path. Since futexes already have a bunch of separate rt_mutex accessors, complete that set and implement a rt_mutex variant without fastpath for them. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: juri.lelli@arm.com Cc: bigeasy@linutronix.de Cc: xlpang@redhat.com Cc: rostedt@goodmis.org Cc: mathieu.desnoyers@efficios.com Cc: jdesfossez@efficios.com Cc: dvhart@infradead.org Cc: bristot@redhat.com Link: http://lkml.kernel.org/r/20170322104151.702962446@infradead.org Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r--kernel/locking/rtmutex.c55
1 file changed, 41 insertions, 14 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index bab66cbe3b37..7d63bc5dd9b2 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1488,15 +1488,23 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
1488 1488
1489/* 1489/*
1490 * Futex variant with full deadlock detection. 1490 * Futex variant with full deadlock detection.
1491 * Futex variants must not use the fast-path, see __rt_mutex_futex_unlock().
1491 */ 1492 */
1492int rt_mutex_timed_futex_lock(struct rt_mutex *lock, 1493int __sched rt_mutex_timed_futex_lock(struct rt_mutex *lock,
1493 struct hrtimer_sleeper *timeout) 1494 struct hrtimer_sleeper *timeout)
1494{ 1495{
1495 might_sleep(); 1496 might_sleep();
1496 1497
1497 return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, 1498 return rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE,
1498 RT_MUTEX_FULL_CHAINWALK, 1499 timeout, RT_MUTEX_FULL_CHAINWALK);
1499 rt_mutex_slowlock); 1500}
1501
1502/*
1503 * Futex variant, must not use fastpath.
1504 */
1505int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
1506{
1507 return rt_mutex_slowtrylock(lock);
1500} 1508}
1501 1509
1502/** 1510/**
@@ -1555,19 +1563,38 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
1555EXPORT_SYMBOL_GPL(rt_mutex_unlock); 1563EXPORT_SYMBOL_GPL(rt_mutex_unlock);
1556 1564
1557/** 1565/**
1558 * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock 1566 * Futex variant, that since futex variants do not use the fast-path, can be
1559 * @lock: the rt_mutex to be unlocked 1567 * simple and will not need to retry.
1560 *
1561 * Returns: true/false indicating whether priority adjustment is
1562 * required or not.
1563 */ 1568 */
1564bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock, 1569bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
1565 struct wake_q_head *wqh) 1570 struct wake_q_head *wake_q)
1566{ 1571{
1567 if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) 1572 lockdep_assert_held(&lock->wait_lock);
1568 return false; 1573
1574 debug_rt_mutex_unlock(lock);
1575
1576 if (!rt_mutex_has_waiters(lock)) {
1577 lock->owner = NULL;
1578 return false; /* done */
1579 }
1580
1581 mark_wakeup_next_waiter(wake_q, lock);
1582 return true; /* deboost and wakeups */
1583}
1569 1584
1570 return rt_mutex_slowunlock(lock, wqh); 1585void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
1586{
1587 DEFINE_WAKE_Q(wake_q);
1588 bool deboost;
1589
1590 raw_spin_lock_irq(&lock->wait_lock);
1591 deboost = __rt_mutex_futex_unlock(lock, &wake_q);
1592 raw_spin_unlock_irq(&lock->wait_lock);
1593
1594 if (deboost) {
1595 wake_up_q(&wake_q);
1596 rt_mutex_adjust_prio(current);
1597 }
1571} 1598}
1572 1599
1573/** 1600/**