aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/futex.c
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2017-03-22 06:35:59 -0400
committerThomas Gleixner <tglx@linutronix.de>2017-03-23 14:10:10 -0400
commitbebe5b514345f09be2c15e414d076b02ecb9cce8 (patch)
tree6cc89f5210cc01abf5b0195bfd577e46d08bb8fd /kernel/futex.c
parentcfafcd117da0216520568c195cb2f6cd1980c4bb (diff)
futex: Futex_unlock_pi() determinism
The problem with returning -EAGAIN when the waiter state mismatches is that it becomes very hard to prove a bounded execution time on the operation. And seeing that this is a RT operation, this is somewhat important. While in practice, given the previous patch, it will be very unlikely to ever really take more than one or two rounds, proving so becomes rather hard. However, now that modifying wait_list is done while holding both hb->lock and wait_lock, the scenario can be avoided entirely by acquiring wait_lock while still holding hb->lock. Doing a hand-over, without leaving a hole. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: juri.lelli@arm.com Cc: bigeasy@linutronix.de Cc: xlpang@redhat.com Cc: rostedt@goodmis.org Cc: mathieu.desnoyers@efficios.com Cc: jdesfossez@efficios.com Cc: dvhart@infradead.org Cc: bristot@redhat.com Link: http://lkml.kernel.org/r/20170322104152.112378812@infradead.org Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/futex.c')
-rw-r--r--kernel/futex.c24
1 file changed, 11 insertions, 13 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index eecce7bab86d..4cdc603b00c3 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1398,15 +1398,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
1398 DEFINE_WAKE_Q(wake_q); 1398 DEFINE_WAKE_Q(wake_q);
1399 int ret = 0; 1399 int ret = 0;
1400 1400
1401 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1402 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); 1401 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
1403 if (!new_owner) { 1402 if (WARN_ON_ONCE(!new_owner)) {
1404 /* 1403 /*
1405 * Since we held neither hb->lock nor wait_lock when coming 1404 * As per the comment in futex_unlock_pi() this should not happen.
1406 * into this function, we could have raced with futex_lock_pi()
1407 * such that we might observe @this futex_q waiter, but the
1408 * rt_mutex's wait_list can be empty (either still, or again,
1409 * depending on which side we land).
1410 * 1405 *
1411 * When this happens, give up our locks and try again, giving 1406 * When this happens, give up our locks and try again, giving
1412 * the futex_lock_pi() instance time to complete, either by 1407 * the futex_lock_pi() instance time to complete, either by
@@ -2794,15 +2789,18 @@ retry:
2794 if (pi_state->owner != current) 2789 if (pi_state->owner != current)
2795 goto out_unlock; 2790 goto out_unlock;
2796 2791
2792 get_pi_state(pi_state);
2797 /* 2793 /*
2798 * Grab a reference on the pi_state and drop hb->lock. 2794 * Since modifying the wait_list is done while holding both
2795 * hb->lock and wait_lock, holding either is sufficient to
2796 * observe it.
2799 * 2797 *
2800 * The reference ensures pi_state lives, dropping the hb->lock 2798 * By taking wait_lock while still holding hb->lock, we ensure
2801 * is tricky.. wake_futex_pi() will take rt_mutex::wait_lock to 2799 * there is no point where we hold neither; and therefore
2802 * close the races against futex_lock_pi(), but in case of 2800 * wake_futex_pi() must observe a state consistent with what we
2803 * _any_ fail we'll abort and retry the whole deal. 2801 * observed.
2804 */ 2802 */
2805 get_pi_state(pi_state); 2803 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2806 spin_unlock(&hb->lock); 2804 spin_unlock(&hb->lock);
2807 2805
2808 ret = wake_futex_pi(uaddr, uval, pi_state); 2806 ret = wake_futex_pi(uaddr, uval, pi_state);