author     Peter Zijlstra <peterz@infradead.org>   2017-03-22 06:35:54 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2017-03-23 14:10:08 -0400
commit     73d786bd043ebc855f349c81ea805f6b11cbf2aa
tree       c3bf955843c550cafc47c102d590f647794039fc /kernel/futex.c
parent     bf92cf3a5100f5a0d5f9834787b130159397cb22
futex: Rework inconsistent rt_mutex/futex_q state
There is a weird state in the futex_unlock_pi() path when it interleaves
with a concurrent futex_lock_pi() at the point where it drops hb->lock.
In this case, it can happen that the rt_mutex wait_list and the futex_q
disagree on pending waiters; in particular, the rt_mutex will find no pending
waiters where the futex_q thinks there are. When that happens, the rt_mutex
unlock code cannot assign an owner.
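Concretely, one interleaving in which the two views diverge (this timeline is
an illustration, not part of the original changelog):

  T1 (futex_lock_pi)                T2 (futex_unlock_pi)

    queue_me()
      <- futex_q visible on hb->chain
    drops hb->lock
                                      takes hb->lock
                                      futex_top_waiter() -> T1's futex_q
                                      rt_mutex_next_owner() -> NULL
                                        (T1 is not on the rt_mutex
                                         wait_list yet, or no longer)
    rt_mutex_timed_futex_lock()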
The futex-side fixup code then has to clean up the inconsistencies, which
involves quite a bunch of interesting corner cases.
Simplify all this by changing wake_futex_pi() to return -EAGAIN when this
situation occurs. This gives the futex_lock_pi() code the opportunity to
continue, and the retried futex_unlock_pi() will then observe a coherent
state.
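For illustration, futex_unlock_pi() already has an -EAGAIN retry path, which
the new early return reuses; a sketch of its assumed shape (this hunk is not
part of the diff below, and the exact code is reconstructed, not quoted):

	ret = wake_futex_pi(uaddr, uval, top_waiter, hb);
	...
	if (ret == -EAGAIN) {
		/*
		 * wake_futex_pi() dropped pi_mutex.wait_lock before
		 * returning; drop hb->lock as well and retry, giving the
		 * concurrent futex_lock_pi() instance time to settle.
		 */
		spin_unlock(&hb->lock);
		put_futex_key(&key);
		goto retry;
	}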
The only problem is that this breaks RT timeliness guarantees. That
is, consider the following scenario:
  T1 and T2 are both pinned to CPU0. prio(T2) > prio(T1)

    CPU0

    T1
      lock_pi()
      queue_me()  <- Waiter is visible

    preemption

    T2
      unlock_pi()
        loops with -EAGAIN forever
Which is undesirable for PI primitives. Future patches will rectify
this.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: juri.lelli@arm.com
Cc: bigeasy@linutronix.de
Cc: xlpang@redhat.com
Cc: rostedt@goodmis.org
Cc: mathieu.desnoyers@efficios.com
Cc: jdesfossez@efficios.com
Cc: dvhart@infradead.org
Cc: bristot@redhat.com
Link: http://lkml.kernel.org/r/20170322104151.850383690@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/futex.c')
 kernel/futex.c | 50 ++++++++++++++------------------------------------
 1 file changed, 14 insertions(+), 36 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index 3b6dbeecd91b..51a248af1db9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1404,12 +1404,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *top_waiter
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
 	/*
-	 * It is possible that the next waiter (the one that brought
-	 * top_waiter owner to the kernel) timed out and is no longer
-	 * waiting on the lock.
+	 * When we interleave with futex_lock_pi() where it does
+	 * rt_mutex_timed_futex_lock(), we might observe @this futex_q waiter,
+	 * but the rt_mutex's wait_list can be empty (either still, or again,
+	 * depending on which side we land).
+	 *
+	 * When this happens, give up our locks and try again, giving the
+	 * futex_lock_pi() instance time to complete, either by waiting on the
+	 * rtmutex or removing itself from the futex queue.
 	 */
-	if (!new_owner)
-		new_owner = top_waiter->task;
+	if (!new_owner) {
+		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+		return -EAGAIN;
+	}
 
 	/*
 	 * We pass it to the next owner. The WAITERS bit is always
@@ -2332,7 +2339,6 @@ static long futex_wait_restart(struct restart_block *restart);
  */
 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 {
-	struct task_struct *owner;
 	int ret = 0;
 
 	if (locked) {
@@ -2346,43 +2352,15 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 	}
 
 	/*
-	 * Catch the rare case, where the lock was released when we were on the
-	 * way back before we locked the hash bucket.
-	 */
-	if (q->pi_state->owner == current) {
-		/*
-		 * Try to get the rt_mutex now. This might fail as some other
-		 * task acquired the rt_mutex after we removed ourself from the
-		 * rt_mutex waiters list.
-		 */
-		if (rt_mutex_futex_trylock(&q->pi_state->pi_mutex)) {
-			locked = 1;
-			goto out;
-		}
-
-		/*
-		 * pi_state is incorrect, some other task did a lock steal and
-		 * we returned due to timeout or signal without taking the
-		 * rt_mutex. Too late.
-		 */
-		raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
-		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
-		if (!owner)
-			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
-		raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
-		ret = fixup_pi_state_owner(uaddr, q, owner);
-		goto out;
-	}
-
-	/*
 	 * Paranoia check. If we did not take the lock, then we should not be
 	 * the owner of the rt_mutex.
 	 */
-	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
+	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
 		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
 				"pi-state %p\n", ret,
 				q->pi_state->pi_mutex.owner,
 				q->pi_state->owner);
+	}
 
 out:
 	return ret ? ret : locked;
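With both hunks applied, fixup_owner() reduces to roughly the following
(reconstructed; the body of the if (locked) branch is unchanged context that
this diff does not show, so its exact content is an assumption):

	static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
	{
		int ret = 0;

		if (locked) {
			/*
			 * Got the lock; fix up pi_state->owner if we are not
			 * the anticipated owner (assumed context, not shown
			 * in this diff).
			 */
			if (q->pi_state->owner != current)
				ret = fixup_pi_state_owner(uaddr, q, current);
			goto out;
		}

		/*
		 * Paranoia check. If we did not take the lock, then we
		 * should not be the owner of the rt_mutex.
		 */
		if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
			printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
					"pi-state %p\n", ret,
					q->pi_state->pi_mutex.owner,
					q->pi_state->owner);
		}

	out:
		return ret ? ret : locked;
	}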