Diffstat (limited to 'kernel/futex.c')
-rw-r--r--	kernel/futex.c	32
 1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index fdd312da0992..a0514e01c3eb 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2221,11 +2221,11 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 	 * decrement the counter at queue_unlock() when some error has
 	 * occurred and we don't end up adding the task to the list.
 	 */
-	hb_waiters_inc(hb);
+	hb_waiters_inc(hb); /* implies smp_mb(); (A) */
 
 	q->lock_ptr = &hb->lock;
 
-	spin_lock(&hb->lock); /* implies smp_mb(); (A) */
+	spin_lock(&hb->lock);
 	return hb;
 }
 
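Note on the hunk above: the "implies smp_mb(); (A)" annotation can move from spin_lock() to hb_waiters_inc() because the increment helper itself supplies the full barrier on SMP. A minimal sketch of such a helper, assuming the atomic waiter counter used by the mainline implementation (the helper is not part of this diff):

static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/* Full barrier (A): make the waiter count visible before the futex word is read. */
	smp_mb__after_atomic();
#endif
}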
@@ -2861,35 +2861,39 @@ retry_private:
 	 * and BUG when futex_unlock_pi() interleaves with this.
 	 *
 	 * Therefore acquire wait_lock while holding hb->lock, but drop the
-	 * latter before calling rt_mutex_start_proxy_lock(). This still fully
-	 * serializes against futex_unlock_pi() as that does the exact same
-	 * lock handoff sequence.
+	 * latter before calling __rt_mutex_start_proxy_lock(). This
+	 * interleaves with futex_unlock_pi() -- which does a similar lock
+	 * handoff -- such that the latter can observe the futex_q::pi_state
+	 * before __rt_mutex_start_proxy_lock() is done.
 	 */
 	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
 	spin_unlock(q.lock_ptr);
+	/*
+	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
+	 * such that futex_unlock_pi() is guaranteed to observe the waiter when
+	 * it sees the futex_q::pi_state.
+	 */
 	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
 	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
 
 	if (ret) {
 		if (ret == 1)
 			ret = 0;
-
-		spin_lock(q.lock_ptr);
-		goto no_block;
+		goto cleanup;
 	}
 
-
 	if (unlikely(to))
 		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
 
 	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
 
+cleanup:
 	spin_lock(q.lock_ptr);
 	/*
-	 * If we failed to acquire the lock (signal/timeout), we must
+	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
 	 * first acquire the hb->lock before removing the lock from the
-	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
-	 * wait lists consistent.
+	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
+	 * lists consistent.
 	 *
 	 * In particular; it is important that futex_unlock_pi() can not
 	 * observe this inconsistency.
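Note on the hunk above: the new 'cleanup' label makes the early deadlock return from __rt_mutex_start_proxy_lock() share the normal failure path, which takes hb->lock before touching the rt_mutex wait list. A sketch of the lines that follow this hunk in futex_lock_pi(), hedged because they sit outside the diff context shown here:

	/*
	 * On failure (deadlock/signal/timeout) the rt_waiter is removed with
	 * hb->lock held by the caller and wait_lock taken inside the helper,
	 * so futex_unlock_pi() never observes a pi_state whose waiter has
	 * silently vanished.
	 */
	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
		ret = 0;	/* the lock was acquired after all */

no_block: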
@@ -3013,6 +3017,10 @@ retry:
 	 * there is no point where we hold neither; and therefore
 	 * wake_futex_pi() must observe a state consistent with what we
 	 * observed.
+	 *
+	 * In particular; this forces __rt_mutex_start_proxy() to
+	 * complete such that we're guaranteed to observe the
+	 * rt_waiter. Also see the WARN in wake_futex_pi().
 	 */
 	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 	spin_unlock(&hb->lock);
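Note on the hunk above: the added comment refers to the WARN in wake_futex_pi(). A rough sketch of that guard, for context only and hedged because the wake_futex_pi() hunk is not part of this excerpt:

	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
	if (WARN_ON_ONCE(!new_owner)) {
		/*
		 * The waiter should be visible here per the ordering comment
		 * in futex_unlock_pi(); if not, drop the locks and retry so
		 * the futex_lock_pi() side can finish enqueueing on the
		 * rtmutex or removing itself from the futex queue.
		 */
		ret = -EAGAIN;
		goto out_unlock;
	}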