Diffstat (limited to 'kernel/locking/rtmutex.c')
 kernel/locking/rtmutex.c | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 8faf472c430f..4b1015ef0dc7 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1330,7 +1330,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 
 /*
  * Slow path to release a rt-mutex.
- * Return whether the current task needs to undo a potential priority boosting.
+ *
+ * Return whether the current task needs to call rt_mutex_postunlock().
  */
 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 					struct wake_q_head *wake_q)
@@ -1401,8 +1402,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
-	/* check PI boosting */
-	return true;
+	return true; /* call rt_mutex_postunlock() */
 }
 
 /*
@@ -1449,15 +1449,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
 }
 
 /*
- * Undo pi boosting (if necessary) and wake top waiter.
+ * Performs the wakeup of the top-waiter and re-enables preemption.
  */
-void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost)
+void rt_mutex_postunlock(struct wake_q_head *wake_q)
 {
 	wake_up_q(wake_q);
 
 	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
-	if (deboost)
-		preempt_enable();
+	preempt_enable();
 }
 
 static inline void
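Note: the hunk above makes the preempt_disable()/preempt_enable() pairing unconditional: the slow path always disables preemption before dropping wait_lock, and rt_mutex_postunlock() always re-enables it after the deferred wakeup. A minimal sketch of that contract follows; the toy_* names are illustrative placeholders, not kernel symbols, and the elided bodies stand in for the real rtmutex internals.

#include <linux/preempt.h>
#include <linux/sched/wake_q.h>

static bool toy_slowunlock(struct wake_q_head *wake_q)
{
	/* ... queue the top waiter on wake_q while holding wait_lock ... */
	preempt_disable();	/* stay on the CPU until the wakeup is done */
	/* ... drop wait_lock ... */
	return true;		/* caller must run toy_postunlock() */
}

static void toy_postunlock(struct wake_q_head *wake_q)
{
	wake_up_q(wake_q);	/* perform the deferred wakeups */
	preempt_enable();	/* pairs with toy_slowunlock() */
}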
@@ -1466,14 +1465,12 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
 					   struct wake_q_head *wqh))
 {
 	DEFINE_WAKE_Q(wake_q);
-	bool deboost;
 
 	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
 		return;
 
-	deboost = slowfn(lock, &wake_q);
-
-	rt_mutex_postunlock(&wake_q, deboost);
+	if (slowfn(lock, &wake_q))
+		rt_mutex_postunlock(&wake_q);
 }
 
 /**
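For reference, the fastpath/slowpath split above follows the usual pattern: try a lockless cmpxchg release first, and only fall into the slow path on contention, with the slow path's return value reporting whether rt_mutex_postunlock() still needs to run. An annotated restatement of the new rt_mutex_fastunlock(); the comments are editorial, the logic mirrors the hunk:

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    bool (*slowfn)(struct rt_mutex *lock,
				   struct wake_q_head *wqh))
{
	DEFINE_WAKE_Q(wake_q);

	/* Fast path: no waiters, release the lock with a single cmpxchg. */
	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
		return;

	/* When the slow path returns true it has queued the wakeup and
	 * left preemption disabled; balance both via postunlock. */
	if (slowfn(lock, &wake_q))
		rt_mutex_postunlock(&wake_q);
}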
@@ -1593,19 +1590,20 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
 	 */
 	preempt_disable();
 
-	return true; /* deboost and wakeups */
+	return true; /* call postunlock() */
 }
 
 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
 {
 	DEFINE_WAKE_Q(wake_q);
-	bool deboost;
+	bool postunlock;
 
 	raw_spin_lock_irq(&lock->wait_lock);
-	deboost = __rt_mutex_futex_unlock(lock, &wake_q);
+	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
 	raw_spin_unlock_irq(&lock->wait_lock);
 
-	rt_mutex_postunlock(&wake_q, deboost);
+	if (postunlock)
+		rt_mutex_postunlock(&wake_q);
 }
 
 /**
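The futex variant follows the same shape. An annotated restatement of the new rt_mutex_futex_unlock(); the comments are editorial, the logic mirrors the hunk above:

void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
{
	DEFINE_WAKE_Q(wake_q);
	bool postunlock;

	raw_spin_lock_irq(&lock->wait_lock);
	/* Returns true when it queued a wakeup on wake_q; in that case
	 * it has already called preempt_disable(). */
	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
	raw_spin_unlock_irq(&lock->wait_lock);

	/* Only balance the slow path's preempt_disable() when it ran. */
	if (postunlock)
		rt_mutex_postunlock(&wake_q);
}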