about summary refs log tree commit diff stats
path: root/kernel/futex.c
diff options
context:
space:
mode:
author	Thomas Gleixner <tglx@linutronix.de>	2014-06-11 16:45:38 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2014-06-21 16:26:23 -0400
commit	ccf9e6a80d9e1b9df69c98e6b9745cf49869ee15 (patch)
tree	37da57e39262f0852466b62930dcbc005ce1800b /kernel/futex.c
parent	67792e2cabadbadd1a93f6790fa7bcbd47eca7c3 (diff)
futex: Make unlock_pi more robust
The kernel tries to atomically unlock the futex without checking whether there is kernel state associated with the futex. So if user space manipulated the user space value, this will leave kernel internal state around associated with the owner task. For robustness' sake, look up first whether there are waiters on the futex. If there are waiters, wake the top priority waiter with all the proper sanity checks applied. If there are no waiters, do the atomic release. We do not have to preserve the waiters bit in this case, because a potentially incoming waiter is blocked on the hb->lock and will acquire the futex atomically. Nor do we have to preserve the owner died bit. The caller is the owner and it was supposed to clean up the mess. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Darren Hart <darren@dvhart.com> Cc: Davidlohr Bueso <davidlohr@hp.com> Cc: Kees Cook <kees@outflux.net> Cc: wad@chromium.org Link: http://lkml.kernel.org/r/20140611204237.016987332@linutronix.de Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/futex.c')
-rw-r--r--	kernel/futex.c	76
1 file changed, 25 insertions(+), 51 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index e5c6c404be1d..346d5c280545 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1186,22 +1186,6 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
1186 return 0; 1186 return 0;
1187} 1187}
1188 1188
1189static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
1190{
1191 u32 uninitialized_var(oldval);
1192
1193 /*
1194 * There is no waiter, so we unlock the futex. The owner died
1196 * bit need not be preserved here. We are the owner:
1196 */
1197 if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
1198 return -EFAULT;
1199 if (oldval != uval)
1200 return -EAGAIN;
1201
1202 return 0;
1203}
1204
1205/* 1189/*
1206 * Express the locking dependencies for lockdep: 1190 * Express the locking dependencies for lockdep:
1207 */ 1191 */
@@ -2401,10 +2385,10 @@ uaddr_faulted:
2401 */ 2385 */
2402static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) 2386static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2403{ 2387{
2404 struct futex_hash_bucket *hb; 2388 u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
2405 struct futex_q *this, *next;
2406 union futex_key key = FUTEX_KEY_INIT; 2389 union futex_key key = FUTEX_KEY_INIT;
2407 u32 uval, vpid = task_pid_vnr(current); 2390 struct futex_hash_bucket *hb;
2391 struct futex_q *match;
2408 int ret; 2392 int ret;
2409 2393
2410retry: 2394retry:
@@ -2417,57 +2401,47 @@ retry:
2417 return -EPERM; 2401 return -EPERM;
2418 2402
2419 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE); 2403 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
2420 if (unlikely(ret != 0)) 2404 if (ret)
2421 goto out; 2405 return ret;
2422 2406
2423 hb = hash_futex(&key); 2407 hb = hash_futex(&key);
2424 spin_lock(&hb->lock); 2408 spin_lock(&hb->lock);
2425 2409
2426 /* 2410 /*
2427 * To avoid races, try to do the TID -> 0 atomic transition 2411 * Check waiters first. We do not trust user space values at
2428 * again. If it succeeds then we can return without waking 2412 * all and we at least want to know if user space fiddled
2429 * anyone else up. We only try this if neither the waiters nor 2413 * with the futex value instead of blindly unlocking.
2430 * the owner died bit are set.
2431 */ 2414 */
2432 if (!(uval & ~FUTEX_TID_MASK) && 2415 match = futex_top_waiter(hb, &key);
2433 cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0)) 2416 if (match) {
2434 goto pi_faulted; 2417 ret = wake_futex_pi(uaddr, uval, match);
2435 /*
2436 * Rare case: we managed to release the lock atomically,
2437 * no need to wake anyone else up:
2438 */
2439 if (unlikely(uval == vpid))
2440 goto out_unlock;
2441
2442 /*
2443 * Ok, other tasks may need to be woken up - check waiters
2444 * and do the wakeup if necessary:
2445 */
2446 plist_for_each_entry_safe(this, next, &hb->chain, list) {
2447 if (!match_futex (&this->key, &key))
2448 continue;
2449 ret = wake_futex_pi(uaddr, uval, this);
2450 /* 2418 /*
2451 * The atomic access to the futex value 2419 * The atomic access to the futex value generated a
2452 * generated a pagefault, so retry the 2420 * pagefault, so retry the user-access and the wakeup:
2453 * user-access and the wakeup:
2454 */ 2421 */
2455 if (ret == -EFAULT) 2422 if (ret == -EFAULT)
2456 goto pi_faulted; 2423 goto pi_faulted;
2457 goto out_unlock; 2424 goto out_unlock;
2458 } 2425 }
2426
2459 /* 2427 /*
2460 * No waiters - kernel unlocks the futex: 2428 * We have no kernel internal state, i.e. no waiters in the
2429 * kernel. Waiters which are about to queue themselves are stuck
2430 * on hb->lock. So we can safely ignore them. We do neither
2431 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
2432 * owner.
2461 */ 2433 */
2462 ret = unlock_futex_pi(uaddr, uval); 2434 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
2463 if (ret == -EFAULT)
2464 goto pi_faulted; 2435 goto pi_faulted;
2465 2436
2437 /*
2438 * If uval has changed, let user space handle it.
2439 */
2440 ret = (curval == uval) ? 0 : -EAGAIN;
2441
2466out_unlock: 2442out_unlock:
2467 spin_unlock(&hb->lock); 2443 spin_unlock(&hb->lock);
2468 put_futex_key(&key); 2444 put_futex_key(&key);
2469
2470out:
2471 return ret; 2445 return ret;
2472 2446
2473pi_faulted: 2447pi_faulted: