diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2017-03-23 10:56:10 -0400 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2017-04-04 05:44:05 -0400 |
| commit | aa2bfe55366552cb7e93e8709d66e698d79ccc47 (patch) | |
| tree | c88ac6fbcf4a8ef48d40a1d9e40ff40ffeab0758 /kernel/locking | |
| parent | 85e2d4f992868ad78dc8bb2c077b652fcfb3661a (diff) | |
rtmutex: Clean up
Previous patches changed the meaning of the return value of
rt_mutex_slowunlock(); update comments and code to reflect this.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: juri.lelli@arm.com
Cc: bigeasy@linutronix.de
Cc: xlpang@redhat.com
Cc: rostedt@goodmis.org
Cc: mathieu.desnoyers@efficios.com
Cc: jdesfossez@efficios.com
Cc: bristot@redhat.com
Link: http://lkml.kernel.org/r/20170323150216.255058238@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/locking')
| -rw-r--r-- | kernel/locking/rtmutex.c | 28 | ||||
| -rw-r--r-- | kernel/locking/rtmutex_common.h | 2 |
2 files changed, 14 insertions, 16 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 8faf472c430f..4b1015ef0dc7 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c | |||
| @@ -1330,7 +1330,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) | |||
| 1330 | 1330 | ||
| 1331 | /* | 1331 | /* |
| 1332 | * Slow path to release a rt-mutex. | 1332 | * Slow path to release a rt-mutex. |
| 1333 | * Return whether the current task needs to undo a potential priority boosting. | 1333 | * |
| 1334 | * Return whether the current task needs to call rt_mutex_postunlock(). | ||
| 1334 | */ | 1335 | */ |
| 1335 | static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, | 1336 | static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, |
| 1336 | struct wake_q_head *wake_q) | 1337 | struct wake_q_head *wake_q) |
| @@ -1401,8 +1402,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, | |||
| 1401 | 1402 | ||
| 1402 | raw_spin_unlock_irqrestore(&lock->wait_lock, flags); | 1403 | raw_spin_unlock_irqrestore(&lock->wait_lock, flags); |
| 1403 | 1404 | ||
| 1404 | /* check PI boosting */ | 1405 | return true; /* call rt_mutex_postunlock() */ |
| 1405 | return true; | ||
| 1406 | } | 1406 | } |
| 1407 | 1407 | ||
| 1408 | /* | 1408 | /* |
| @@ -1449,15 +1449,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lock, | |||
| 1449 | } | 1449 | } |
| 1450 | 1450 | ||
| 1451 | /* | 1451 | /* |
| 1452 | * Undo pi boosting (if necessary) and wake top waiter. | 1452 | * Performs the wakeup of the top-waiter and re-enables preemption. |
| 1453 | */ | 1453 | */ |
| 1454 | void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost) | 1454 | void rt_mutex_postunlock(struct wake_q_head *wake_q) |
| 1455 | { | 1455 | { |
| 1456 | wake_up_q(wake_q); | 1456 | wake_up_q(wake_q); |
| 1457 | 1457 | ||
| 1458 | /* Pairs with preempt_disable() in rt_mutex_slowunlock() */ | 1458 | /* Pairs with preempt_disable() in rt_mutex_slowunlock() */ |
| 1459 | if (deboost) | 1459 | preempt_enable(); |
| 1460 | preempt_enable(); | ||
| 1461 | } | 1460 | } |
| 1462 | 1461 | ||
| 1463 | static inline void | 1462 | static inline void |
| @@ -1466,14 +1465,12 @@ rt_mutex_fastunlock(struct rt_mutex *lock, | |||
| 1466 | struct wake_q_head *wqh)) | 1465 | struct wake_q_head *wqh)) |
| 1467 | { | 1466 | { |
| 1468 | DEFINE_WAKE_Q(wake_q); | 1467 | DEFINE_WAKE_Q(wake_q); |
| 1469 | bool deboost; | ||
| 1470 | 1468 | ||
| 1471 | if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) | 1469 | if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) |
| 1472 | return; | 1470 | return; |
| 1473 | 1471 | ||
| 1474 | deboost = slowfn(lock, &wake_q); | 1472 | if (slowfn(lock, &wake_q)) |
| 1475 | 1473 | rt_mutex_postunlock(&wake_q); | |
| 1476 | rt_mutex_postunlock(&wake_q, deboost); | ||
| 1477 | } | 1474 | } |
| 1478 | 1475 | ||
| 1479 | /** | 1476 | /** |
| @@ -1593,19 +1590,20 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, | |||
| 1593 | */ | 1590 | */ |
| 1594 | preempt_disable(); | 1591 | preempt_disable(); |
| 1595 | 1592 | ||
| 1596 | return true; /* deboost and wakeups */ | 1593 | return true; /* call postunlock() */ |
| 1597 | } | 1594 | } |
| 1598 | 1595 | ||
| 1599 | void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) | 1596 | void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) |
| 1600 | { | 1597 | { |
| 1601 | DEFINE_WAKE_Q(wake_q); | 1598 | DEFINE_WAKE_Q(wake_q); |
| 1602 | bool deboost; | 1599 | bool postunlock; |
| 1603 | 1600 | ||
| 1604 | raw_spin_lock_irq(&lock->wait_lock); | 1601 | raw_spin_lock_irq(&lock->wait_lock); |
| 1605 | deboost = __rt_mutex_futex_unlock(lock, &wake_q); | 1602 | postunlock = __rt_mutex_futex_unlock(lock, &wake_q); |
| 1606 | raw_spin_unlock_irq(&lock->wait_lock); | 1603 | raw_spin_unlock_irq(&lock->wait_lock); |
| 1607 | 1604 | ||
| 1608 | rt_mutex_postunlock(&wake_q, deboost); | 1605 | if (postunlock) |
| 1606 | rt_mutex_postunlock(&wake_q); | ||
| 1609 | } | 1607 | } |
| 1610 | 1608 | ||
| 1611 | /** | 1609 | /** |
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index a09c02982391..9e36aeddce18 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h | |||
| @@ -122,7 +122,7 @@ extern void rt_mutex_futex_unlock(struct rt_mutex *lock); | |||
| 122 | extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, | 122 | extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, |
| 123 | struct wake_q_head *wqh); | 123 | struct wake_q_head *wqh); |
| 124 | 124 | ||
| 125 | extern void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost); | 125 | extern void rt_mutex_postunlock(struct wake_q_head *wake_q); |
| 126 | 126 | ||
| 127 | #ifdef CONFIG_DEBUG_RT_MUTEXES | 127 | #ifdef CONFIG_DEBUG_RT_MUTEXES |
| 128 | # include "rtmutex-debug.h" | 128 | # include "rtmutex-debug.h" |
