author		Peter Zijlstra <peterz@infradead.org>	2017-12-08 07:49:39 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2018-01-14 12:49:16 -0500
commit		c1e2f0eaf015fb7076d51a339011f2383e6dd389 (patch)
tree		c993bc9680bf3297d147c1bae503e6e47362bad7
parent		c92a9a461dff6140c539c61e457aa97df29517d6 (diff)
futex: Avoid violating the 10th rule of futex
Julia reported futex state corruption in the following scenario:

   waiter                               waker                             stealer (prio > waiter)

   futex(WAIT_REQUEUE_PI, uaddr, uaddr2,
         timeout=[N ms])
      futex_wait_requeue_pi()
         futex_wait_queue_me()
            freezable_schedule()
            <scheduled out>

                                        futex(LOCK_PI, uaddr2)
                                        futex(CMP_REQUEUE_PI, uaddr,
                                              uaddr2, 1, 0)
                                           /* requeues waiter to uaddr2 */
                                        futex(UNLOCK_PI, uaddr2)
                                           wake_futex_pi()
                                              cmp_futex_value_locked(uaddr2, waiter)
                                              wake_up_q()
          <woken by waker>
          <hrtimer_wakeup() fires,
           clears sleeper->task>

                                                                          futex(LOCK_PI, uaddr2)
                                                                             __rt_mutex_start_proxy_lock()
                                                                                try_to_take_rt_mutex() /* steals lock */
                                                                                   rt_mutex_set_owner(lock, stealer)
                                                                             <preempted>

         <scheduled in>
         rt_mutex_wait_proxy_lock()
            __rt_mutex_slowlock()
               try_to_take_rt_mutex() /* fails, lock held by stealer */
               if (timeout && !timeout->task)
                  return -ETIMEDOUT;
            fixup_owner()
               /* lock wasn't acquired, so,
                  fixup_pi_state_owner skipped */

   return -ETIMEDOUT;

   /* At this point, we've returned -ETIMEDOUT to userspace, but the
    * futex word shows waiter to be the owner, and the pi_mutex has
    * stealer as the owner */

   futex_lock(LOCK_PI, uaddr2)
     -> bails with EDEADLK, futex word says we're owner.

And Julia suggested that what commit:

  73d786bd043e ("futex: Rework inconsistent rt_mutex/futex_q state")

removes from fixup_owner() looks to be just what is needed. And indeed
it is -- I completely missed that requeue_pi could also result in this
case. So we need to restore that, except that subsequent patches, like
commit:

  16ffa12d7425 ("futex: Pull rt_mutex_futex_unlock() out from under hb->lock")

changed all the locking rules. Even without that, the sequence:

-	if (rt_mutex_futex_trylock(&q->pi_state->pi_mutex)) {
-		locked = 1;
-		goto out;
-	}

-	raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
-	owner = rt_mutex_owner(&q->pi_state->pi_mutex);
-	if (!owner)
-		owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
-	raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
-	ret = fixup_pi_state_owner(uaddr, q, owner);

already suggests there were races; otherwise we'd never have to look at
next_owner.

So instead of doing three consecutive wait_lock sections with who knows
what races in between, we do it all in a single section. Additionally,
the use of pi_state->owner in fixup_owner() was only safe because only
the rt_mutex owner would modify it, which this additional case wrecks.

Luckily the values can only change away from, and never back to, the
value we're testing; this means we can do a speculative test and double
check once we have the wait_lock.

Fixes: 73d786bd043e ("futex: Rework inconsistent rt_mutex/futex_q state")
Reported-by: Julia Cartwright <julia@ni.com>
Reported-by: Gratian Crisan <gratian.crisan@ni.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Julia Cartwright <julia@ni.com>
Tested-by: Gratian Crisan <gratian.crisan@ni.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20171208124939.7livp7no2ov65rrc@hirez.programming.kicks-ass.net
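[Editor's note] The "speculative test, double check" trick in the last
paragraph generalizes: because the tested field can only change away from
the value we compare against, a negative lockless test is final, while a
positive one merely demands re-validation under the lock. A minimal
userspace sketch of that pattern, using pthreads and illustrative names
(struct state, fixup_if_owner) rather than kernel code:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Illustrative stand-in for pi_state: 'owner' may concurrently
	 * change away from a given value, but never back to it.
	 */
	struct state {
		pthread_mutex_t wait_lock;
		void *owner;
	};

	static bool fixup_if_owner(struct state *s, void *me)
	{
		/*
		 * Speculative, lockless test. A negative result is final:
		 * owner can change away from 'me' but never back to it,
		 * so skipping the fixup is safe.
		 */
		if (s->owner != me)
			return false;

		/*
		 * A positive result is unstable; re-validate under
		 * wait_lock before acting, as fixup_pi_state_owner()
		 * re-reads pi_state->owner under pi_mutex.wait_lock.
		 */
		pthread_mutex_lock(&s->wait_lock);
		if (s->owner == me) {
			/* ... perform the actual fixup here ... */
		}
		pthread_mutex_unlock(&s->wait_lock);
		return true;
	}

	int main(void)
	{
		struct state s = { PTHREAD_MUTEX_INITIALIZER, NULL };
		int token;

		s.owner = &token;
		printf("fixed up: %d\n", fixup_if_owner(&s, &token));
		return 0;
	}

This is why both speculative reads added to fixup_owner() below are safe
without wait_lock: the misleading-looking case is always re-checked once
the lock is held.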
-rw-r--r--	kernel/futex.c			|  83
-rw-r--r--	kernel/locking/rtmutex.c	|  26
-rw-r--r--	kernel/locking/rtmutex_common.h	|   1
3 files changed, 87 insertions(+), 23 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index 57d0b3657e16..9e69589b9248 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2294,21 +2294,17 @@ static void unqueue_me_pi(struct futex_q *q)
 	spin_unlock(q->lock_ptr);
 }
 
-/*
- * Fixup the pi_state owner with the new owner.
- *
- * Must be called with hash bucket lock held and mm->sem held for non
- * private futexes.
- */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-				struct task_struct *newowner)
+				struct task_struct *argowner)
 {
-	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
 	struct futex_pi_state *pi_state = q->pi_state;
 	u32 uval, uninitialized_var(curval), newval;
-	struct task_struct *oldowner;
+	struct task_struct *oldowner, *newowner;
+	u32 newtid;
 	int ret;
 
+	lockdep_assert_held(q->lock_ptr);
+
 	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 
 	oldowner = pi_state->owner;
@@ -2317,11 +2313,17 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 		newtid |= FUTEX_OWNER_DIED;
 
 	/*
-	 * We are here either because we stole the rtmutex from the
-	 * previous highest priority waiter or we are the highest priority
-	 * waiter but have failed to get the rtmutex the first time.
+	 * We are here because either:
+	 *
+	 *  - we stole the lock and pi_state->owner needs updating to reflect
+	 *    that (@argowner == current),
 	 *
-	 * We have to replace the newowner TID in the user space variable.
+	 * or:
+	 *
+	 *  - someone stole our lock and we need to fix things to point to the
+	 *    new owner (@argowner == NULL).
+	 *
+	 * Either way, we have to replace the TID in the user space variable.
 	 * This must be atomic as we have to preserve the owner died bit here.
 	 *
 	 * Note: We write the user space value _before_ changing the pi_state
@@ -2334,6 +2336,42 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 	 * in the PID check in lookup_pi_state.
 	 */
 retry:
+	if (!argowner) {
+		if (oldowner != current) {
+			/*
+			 * We raced against a concurrent self; things are
+			 * already fixed up. Nothing to do.
+			 */
+			ret = 0;
+			goto out_unlock;
+		}
+
+		if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
+			/* We got the lock after all, nothing to fix. */
+			ret = 0;
+			goto out_unlock;
+		}
+
+		/*
+		 * Since we just failed the trylock; there must be an owner.
+		 */
+		newowner = rt_mutex_owner(&pi_state->pi_mutex);
+		BUG_ON(!newowner);
+	} else {
+		WARN_ON_ONCE(argowner != current);
+		if (oldowner == current) {
+			/*
+			 * We raced against a concurrent self; things are
+			 * already fixed up. Nothing to do.
+			 */
+			ret = 0;
+			goto out_unlock;
+		}
+		newowner = argowner;
+	}
+
+	newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
+
 	if (get_futex_value_locked(&uval, uaddr))
 		goto handle_fault;
 
@@ -2434,9 +2472,9 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 		 * Got the lock. We might not be the anticipated owner if we
 		 * did a lock-steal - fix up the PI-state in that case:
 		 *
-		 * We can safely read pi_state->owner without holding wait_lock
-		 * because we now own the rt_mutex, only the owner will attempt
-		 * to change it.
+		 * Speculative pi_state->owner read (we don't hold wait_lock);
+		 * since we own the lock pi_state->owner == current is the
+		 * stable state, anything else needs more attention.
 		 */
 		if (q->pi_state->owner != current)
 			ret = fixup_pi_state_owner(uaddr, q, current);
@@ -2444,6 +2482,19 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 	}
 
 	/*
+	 * If we didn't get the lock; check if anybody stole it from us. In
+	 * that case, we need to fix up the uval to point to them instead of
+	 * us, otherwise bad things happen. [10]
+	 *
+	 * Another speculative read; pi_state->owner == current is unstable
+	 * but needs our attention.
+	 */
+	if (q->pi_state->owner == current) {
+		ret = fixup_pi_state_owner(uaddr, q, NULL);
+		goto out;
+	}
+
+	/*
 	 * Paranoia check. If we did not take the lock, then we should not be
 	 * the owner of the rt_mutex.
 	 */
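[Editor's note] To make the userspace-visible failure concrete, here is a
hedged sketch of just the waiter leg of Julia's scenario. Assumptions: the
raw futex(2) syscall is used since glibc provides no wrapper, and a real
reproducer would also need the waker and stealer threads with the timing
shown in the commit message; FUTEX_WAIT_REQUEUE_PI takes an absolute
CLOCK_MONOTONIC timeout.

	#define _GNU_SOURCE
	#include <errno.h>
	#include <linux/futex.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <time.h>
	#include <unistd.h>

	static uint32_t uaddr, uaddr2;	/* plain futex word, PI futex word */

	static long futex(uint32_t *w, int op, uint32_t val,
			  const struct timespec *ts, uint32_t *w2)
	{
		return syscall(SYS_futex, w, op, val, ts, w2, 0);
	}

	int main(void)
	{
		struct timespec to;

		/* absolute timeout, a little way into the future */
		clock_gettime(CLOCK_MONOTONIC, &to);
		to.tv_sec += 1;

		/* wait on uaddr; a waker would CMP_REQUEUE_PI us to uaddr2 */
		if (futex(&uaddr, FUTEX_WAIT_REQUEUE_PI, 0, &to, &uaddr2) < 0 &&
		    errno == ETIMEDOUT) {
			/*
			 * Before the fix: if a higher-priority stealer took
			 * uaddr2's rt_mutex in the race window, the futex
			 * word still named us as owner and this LOCK_PI
			 * failed with EDEADLK.
			 */
			if (futex(&uaddr2, FUTEX_LOCK_PI, 0, NULL, NULL) < 0)
				perror("FUTEX_LOCK_PI");
		}
		return 0;
	}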
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 6f3dba6e4e9e..65cc0cb984e6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1290,6 +1290,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	return ret;
 }
 
+static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
+{
+	int ret = try_to_take_rt_mutex(lock, current, NULL);
+
+	/*
+	 * try_to_take_rt_mutex() sets the lock waiters bit
+	 * unconditionally. Clean this up.
+	 */
+	fixup_rt_mutex_waiters(lock);
+
+	return ret;
+}
+
 /*
  * Slow path try-lock function:
  */
@@ -1312,13 +1325,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 	 */
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
-	ret = try_to_take_rt_mutex(lock, current, NULL);
-
-	/*
-	 * try_to_take_rt_mutex() sets the lock waiters bit
-	 * unconditionally. Clean this up.
-	 */
-	fixup_rt_mutex_waiters(lock);
+	ret = __rt_mutex_slowtrylock(lock);
 
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
@@ -1505,6 +1512,11 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
 	return rt_mutex_slowtrylock(lock);
 }
 
+int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+	return __rt_mutex_slowtrylock(lock);
+}
+
 /**
  * rt_mutex_timed_lock - lock a rt_mutex interruptible
  * the timeout structure is provided
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 124e98ca0b17..68686b3ec3c1 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -148,6 +148,7 @@ extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
 				 struct rt_mutex_waiter *waiter);
 
 extern int rt_mutex_futex_trylock(struct rt_mutex *l);
+extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
 
 extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
 extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
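[Editor's note] The rtmutex side of the patch is a common kernel refactor:
split the body of rt_mutex_slowtrylock() into a bare __-prefixed helper
that assumes wait_lock is already held, so that fixup_pi_state_owner(),
which now sits inside a wait_lock section, can trylock without
re-acquiring the (non-recursive) lock. A minimal sketch of that split,
with illustrative names (struct obj, obj_trylock), not kernel code:

	#include <pthread.h>
	#include <stdbool.h>

	struct obj {
		pthread_mutex_t wait_lock;
		int held;
	};

	/* bare helper: caller must already hold obj->wait_lock */
	static bool __obj_trylock(struct obj *o)
	{
		if (o->held)
			return false;
		o->held = 1;
		return true;
	}

	/* full version: takes and drops wait_lock around the helper */
	static bool obj_trylock(struct obj *o)
	{
		bool ret;

		pthread_mutex_lock(&o->wait_lock);
		ret = __obj_trylock(o);
		pthread_mutex_unlock(&o->wait_lock);
		return ret;
	}

	int main(void)
	{
		struct obj o = { PTHREAD_MUTEX_INITIALIZER, 0 };

		return obj_trylock(&o) ? 0 : 1;
	}

Callers already inside the locked section call the __ variant directly;
everyone else keeps the original, self-locking entry point.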