author     Peter Zijlstra <peterz@infradead.org>        2017-03-22 06:35:58 -0400
committer  Thomas Gleixner <tglx@linutronix.de>         2017-03-23 14:10:09 -0400
commit     cfafcd117da0216520568c195cb2f6cd1980c4bb
tree       cce98f12a6bfa27515fb1cabc5bbd6fd55a8459f /kernel/locking
parent     38d589f2fd08f1296aea3ce62bebd185125c6d81
futex: Rework futex_lock_pi() to use rt_mutex_*_proxy_lock()
By changing futex_lock_pi() to use rt_mutex_*_proxy_lock() all wait_list
modifications are done under both hb->lock and wait_lock.
This closes the obvious interleave pattern between futex_lock_pi() and
futex_unlock_pi(), but not entirely so. See below:
Before:

futex_lock_pi()                  futex_unlock_pi()
  unlock hb->lock

                                   lock hb->lock
                                   unlock hb->lock

                                   lock rt_mutex->wait_lock
                                   unlock rt_mutex->wait_lock
                                     -EAGAIN

  lock rt_mutex->wait_lock
  list_add
  unlock rt_mutex->wait_lock

  schedule()

  lock rt_mutex->wait_lock
  list_del
  unlock rt_mutex->wait_lock

                                   <idem>
                                     -EAGAIN

  lock hb->lock
After:

futex_lock_pi()                  futex_unlock_pi()

  lock hb->lock
  lock rt_mutex->wait_lock
  list_add
  unlock rt_mutex->wait_lock
  unlock hb->lock

  schedule()
  lock hb->lock
  unlock hb->lock
                                   lock hb->lock
                                   lock rt_mutex->wait_lock
                                   list_del
                                   unlock rt_mutex->wait_lock

                                   lock rt_mutex->wait_lock
                                   unlock rt_mutex->wait_lock
                                     -EAGAIN

                                   unlock hb->lock
It does, however, solve the earlier starvation/live-lock scenario that was
introduced with the -EAGAIN: unlike the before scenario, where the -EAGAIN
happens while futex_unlock_pi() doesn't hold any locks, in the after
scenario it happens while futex_unlock_pi() actually holds a lock, and the
retry is therefore serialized on that lock.
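
Concretely, the "After" ordering comes from the reworked futex_lock_pi()
slow path in the companion kernel/futex.c change, which is outside this
kernel/locking diffstat. A condensed sketch of that flow (error handling,
timeout setup and the ret == 1 fixup at no_block are elided):

	/*
	 * Sketch of the reworked futex_lock_pi() slow path; q.lock_ptr
	 * points at hb->lock.  Every wait_list modification now runs
	 * with both hb->lock and rt_mutex->wait_lock held.
	 */
	rt_mutex_init_waiter(&rt_waiter);

	/* list_add: called under hb->lock, takes wait_lock internally */
	ret = rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
	if (ret)
		goto no_block;		/* ret == 1: lock acquired outright */

	spin_unlock(q.lock_ptr);	/* drop hb->lock across the sleep */

	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);

	spin_lock(q.lock_ptr);		/* re-take hb->lock */
	/* list_del plus waiter-bit fixup: again under both locks */
	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
		ret = 0;		/* raced, but we got the lock after all */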
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: juri.lelli@arm.com
Cc: bigeasy@linutronix.de
Cc: xlpang@redhat.com
Cc: rostedt@goodmis.org
Cc: mathieu.desnoyers@efficios.com
Cc: jdesfossez@efficios.com
Cc: dvhart@infradead.org
Cc: bristot@redhat.com
Link: http://lkml.kernel.org/r/20170322104152.062785528@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/locking')

 kernel/locking/rtmutex.c        | 26 +++++++-------------------
 kernel/locking/rtmutex_common.h |  1 -
 2 files changed, 7 insertions(+), 20 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 1e8368db276e..48418a1733b8 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1493,19 +1493,6 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
 /*
- * Futex variant with full deadlock detection.
- * Futex variants must not use the fast-path, see __rt_mutex_futex_unlock().
- */
-int __sched rt_mutex_timed_futex_lock(struct rt_mutex *lock,
-                              struct hrtimer_sleeper *timeout)
-{
-        might_sleep();
-
-        return rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE,
-                                 timeout, RT_MUTEX_FULL_CHAINWALK);
-}
-
-/*
  * Futex variant, must not use fastpath.
  */
 int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
@@ -1782,12 +1769,6 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
         /* sleep on the mutex */
         ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
 
-        /*
-         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
-         * have to fix that up.
-         */
-        fixup_rt_mutex_waiters(lock);
-
         raw_spin_unlock_irq(&lock->wait_lock);
 
         return ret;
@@ -1827,6 +1808,13 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
                 fixup_rt_mutex_waiters(lock);
                 cleanup = true;
         }
+
+        /*
+         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+         * have to fix that up.
+         */
+        fixup_rt_mutex_waiters(lock);
+
         raw_spin_unlock_irq(&lock->wait_lock);
 
         return cleanup;
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 35361e4dc773..1e93e15a0e45 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -113,7 +113,6 @@ extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
                                  struct rt_mutex_waiter *waiter);
 
-extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
 extern int rt_mutex_futex_trylock(struct rt_mutex *l);
 
 extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
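
Pieced together from the hunks above, the resulting rt_mutex_cleanup_proxy_lock()
has roughly the shape below (a sketch reconstructed from this diff's context
lines, not a verbatim copy of the file):

bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
                                 struct rt_mutex_waiter *waiter)
{
        bool cleanup = false;

        raw_spin_lock_irq(&lock->wait_lock);

        /* If we did not become the owner, take ourselves off the wait_list. */
        if (rt_mutex_owner(lock) != current) {
                remove_waiter(lock, waiter);
                fixup_rt_mutex_waiters(lock);
                cleanup = true;
        }

        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock_irq(&lock->wait_lock);

        return cleanup;
}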