author     Linus Torvalds <torvalds@linux-foundation.org>  2014-05-31 12:47:55 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-05-31 12:47:55 -0400
commit     a4bf79eb6a42e863e0fccf19f9383c618e8efc43
tree       e147dd000ab188a3b13a2e5485013eb93c5d7bee /kernel
parent     80e0679469a481ab8baa4fe982205f99004a0686
parent     397335f004f41e5fcf7a795e94eb3ab83411a17c
Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core futex/rtmutex fixes from Thomas Gleixner:
 "Three fixlets for long standing issues in the futex/rtmutex code
  unearthed by Dave Jones' syscall fuzzer:

   - Add missing early deadlock detection checks in the futex code

   - Prevent user space from attaching a futex to kernel threads

   - Make the deadlock detector of rtmutex work again

  Looks large, but is more comments than code change"

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  rtmutex: Fix deadlock detector for real
  futex: Prevent attaching to kernel threads
  futex: Add another early deadlock detection check
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex.c           | 52
-rw-r--r--  kernel/locking/rtmutex.c | 32
2 files changed, 67 insertions(+), 17 deletions(-)
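For context before the diff: the simplest member of the bug class the fuzzer provoked can be shown from user space. A minimal, hypothetical repro sketch (raw syscall plus the FUTEX_* constants from the standard headers; it is not part of this commit): FUTEX_LOCK_PI on a futex word that already names the caller as owner should fail with EDEADLK.

/* Hypothetical sketch: self-deadlock on a PI futex.
 * Build: gcc -o pi-deadlk pi-deadlk.c
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static long futex(uint32_t *uaddr, int op, uint32_t val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

int main(void)
{
	/* The PI futex protocol stores the owner's TID in the futex word,
	 * so this word claims the calling thread already holds the lock. */
	uint32_t f = (uint32_t)syscall(SYS_gettid);

	if (futex(&f, FUTEX_LOCK_PI, 0) == -1 && errno == EDEADLK)
		puts("kernel detected the self-deadlock: EDEADLK");
	else
		printf("unexpected result, errno=%d (%s)\n",
		       errno, strerror(errno));
	return 0;
}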
diff --git a/kernel/futex.c b/kernel/futex.c
index 5f589279e462..81dbe773ce4c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -745,7 +745,8 @@ void exit_pi_state_list(struct task_struct *curr)
 
 static int
 lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
-		union futex_key *key, struct futex_pi_state **ps)
+		union futex_key *key, struct futex_pi_state **ps,
+		struct task_struct *task)
 {
 	struct futex_pi_state *pi_state = NULL;
 	struct futex_q *this, *next;
@@ -786,6 +787,16 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 			return -EINVAL;
 		}
 
+		/*
+		 * Protect against a corrupted uval. If uval
+		 * is 0x80000000 then pid is 0 and the waiter
+		 * bit is set. So the deadlock check in the
+		 * calling code has failed and we did not fall
+		 * into the check above due to !pid.
+		 */
+		if (task && pi_state->owner == task)
+			return -EDEADLK;
+
 		atomic_inc(&pi_state->refcount);
 		*ps = pi_state;
 
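The comment's 0x80000000 example relies on the PI futex word layout: the low 30 bits carry the owner's TID, the top bits are flags. A short decode sketch using the masks from <linux/futex.h>:

#include <stdio.h>
#include <stdint.h>
#include <linux/futex.h>	/* FUTEX_WAITERS, FUTEX_OWNER_DIED, FUTEX_TID_MASK */

int main(void)
{
	uint32_t uval = 0x80000000;	/* the corrupted value from the comment */

	/* Decodes to tid=0 with the waiter bit set: no task can own this
	 * lock, so the pid-based checks never fire without the new test. */
	printf("tid=%u waiters=%d owner_died=%d\n",
	       uval & FUTEX_TID_MASK,
	       !!(uval & FUTEX_WAITERS),
	       !!(uval & FUTEX_OWNER_DIED));
	return 0;
}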
@@ -803,6 +814,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 	if (!p)
 		return -ESRCH;
 
+	if (!p->mm) {
+		put_task_struct(p);
+		return -EPERM;
+	}
+
 	/*
 	 * We need to look at the task state flags to figure out,
 	 * whether the task is exiting. To protect against the do_exit
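The new !p->mm test is the "prevent attaching to kernel threads" fix: kernel threads have no user address space, so their task_struct carries a NULL mm. A hypothetical demonstration, assuming PID 2 is kthreadd as on typical systems:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

int main(void)
{
	/* Forge a futex word claiming kthreadd (assumption: PID 2, a
	 * kernel thread) owns the lock, then try to block on it. With
	 * this fix the kernel answers EPERM instead of creating a
	 * pi_state against a kernel thread. */
	uint32_t f = 2;

	if (syscall(SYS_futex, &f, FUTEX_LOCK_PI, 0, NULL, NULL, 0) == -1)
		printf("FUTEX_LOCK_PI: %s\n", strerror(errno)); /* expect EPERM */
	return 0;
}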
@@ -935,7 +951,7 @@ retry:
 	 * We dont have the lock. Look up the PI state (or create it if
 	 * we are the first waiter):
 	 */
-	ret = lookup_pi_state(uval, hb, key, ps);
+	ret = lookup_pi_state(uval, hb, key, ps, task);
 
 	if (unlikely(ret)) {
 		switch (ret) {
@@ -1347,7 +1363,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
  *
  * Return:
  *  0 - failed to acquire the lock atomically;
- *  1 - acquired the lock;
+ * >0 - acquired the lock, return value is vpid of the top_waiter
  * <0 - error
  */
 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
@@ -1358,7 +1374,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
 {
 	struct futex_q *top_waiter = NULL;
 	u32 curval;
-	int ret;
+	int ret, vpid;
 
 	if (get_futex_value_locked(&curval, pifutex))
 		return -EFAULT;
@@ -1386,11 +1402,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
 	 * the contended case or if set_waiters is 1. The pi_state is returned
 	 * in ps in contended cases.
 	 */
+	vpid = task_pid_vnr(top_waiter->task);
 	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
 				   set_waiters);
-	if (ret == 1)
+	if (ret == 1) {
 		requeue_pi_wake_futex(top_waiter, key2, hb2);
-
+		return vpid;
+	}
 	return ret;
 }
 
@@ -1421,7 +1439,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 	struct futex_pi_state *pi_state = NULL;
 	struct futex_hash_bucket *hb1, *hb2;
 	struct futex_q *this, *next;
-	u32 curval2;
 
 	if (requeue_pi) {
 		/*
@@ -1509,16 +1526,25 @@ retry_private:
 	 * At this point the top_waiter has either taken uaddr2 or is
 	 * waiting on it. If the former, then the pi_state will not
 	 * exist yet, look it up one more time to ensure we have a
-	 * reference to it.
+	 * reference to it. If the lock was taken, ret contains the
+	 * vpid of the top waiter task.
 	 */
-	if (ret == 1) {
+	if (ret > 0) {
 		WARN_ON(pi_state);
 		drop_count++;
 		task_count++;
-		ret = get_futex_value_locked(&curval2, uaddr2);
-		if (!ret)
-			ret = lookup_pi_state(curval2, hb2, &key2,
-					      &pi_state);
+		/*
+		 * If we acquired the lock, then the user
+		 * space value of uaddr2 should be vpid. It
+		 * cannot be changed by the top waiter as it
+		 * is blocked on hb2 lock if it tries to do
+		 * so. If something fiddled with it behind our
+		 * back the pi state lookup might unearth
+		 * it. So we rather use the known value than
+		 * rereading and handing potential crap to
+		 * lookup_pi_state.
+		 */
+		ret = lookup_pi_state(ret, hb2, &key2, &pi_state, NULL);
 	}
 
 	switch (ret) {
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index aa4dff04b594..a620d4d08ca6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -343,9 +343,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * top_waiter can be NULL, when we are in the deboosting
 	 * mode!
 	 */
-	if (top_waiter && (!task_has_pi_waiters(task) ||
-			   top_waiter != task_top_pi_waiter(task)))
-		goto out_unlock_pi;
+	if (top_waiter) {
+		if (!task_has_pi_waiters(task))
+			goto out_unlock_pi;
+		/*
+		 * If deadlock detection is off, we stop here if we
+		 * are not the top pi waiter of the task.
+		 */
+		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
+			goto out_unlock_pi;
+	}
 
 	/*
 	 * When deadlock detection is off then we check, if further
@@ -361,7 +368,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		goto retry;
 	}
 
-	/* Deadlock detection */
+	/*
+	 * Deadlock detection. If the lock is the same as the original
+	 * lock which caused us to walk the lock chain or if the
+	 * current lock is owned by the task which initiated the chain
+	 * walk, we detected a deadlock.
+	 */
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
 		raw_spin_unlock(&lock->wait_lock);
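That termination condition is what catches classic ABBA cycles on PI locks. An illustrative user-space sketch with glibc priority-inheritance mutexes, which take their contended path through FUTEX_LOCK_PI and hence through this detector; that glibc surfaces the kernel's EDEADLK from pthread_mutex_lock here is an assumption. Build with -pthread.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>

static pthread_mutex_t a, b;

static void *thr(void *arg)
{
	pthread_mutex_lock(&b);
	sleep(1);				/* let main() take a first */
	if (pthread_mutex_lock(&a) == 0)	/* blocks: a is held by main */
		pthread_mutex_unlock(&a);
	pthread_mutex_unlock(&b);
	return NULL;
}

int main(void)
{
	pthread_mutexattr_t at;
	pthread_t t;
	int err;

	pthread_mutexattr_init(&at);
	/* PI mutexes route contended lock/unlock through FUTEX_LOCK_PI,
	 * i.e. through the rtmutex chain walk patched above. */
	pthread_mutexattr_setprotocol(&at, PTHREAD_PRIO_INHERIT);
	pthread_mutex_init(&a, &at);
	pthread_mutex_init(&b, &at);

	pthread_create(&t, NULL, thr, NULL);
	pthread_mutex_lock(&a);
	sleep(2);				/* thread is now blocked on a */
	err = pthread_mutex_lock(&b);		/* closes the cycle a->b->a */
	printf("main: lock(b) = %s\n", err ? strerror(err) : "acquired?!");
	pthread_mutex_unlock(&a);		/* let the blocked thread finish */
	pthread_join(t, NULL);
	return 0;
}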
@@ -527,6 +539,18 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	unsigned long flags;
 	int chain_walk = 0, res;
 
+	/*
+	 * Early deadlock detection. We really don't want the task to
+	 * enqueue on itself just to untangle the mess later. It's not
+	 * only an optimization. We drop the locks, so another waiter
+	 * can come in before the chain walk detects the deadlock. So
+	 * the other will detect the deadlock and return -EDEADLOCK,
+	 * which is wrong, as the other waiter is not in a deadlock
+	 * situation.
+	 */
+	if (detect_deadlock && owner == task)
+		return -EDEADLK;
+
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	__rt_mutex_adjust_prio(task);
 	waiter->task = task;
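The comment describes a race, not a mere optimization. A stripped-down model (plain C, not kernel code, all names made up) of why the check must run before the waiter is enqueued: once the self-cycle is in the wait graph, an innocent waiter's chain walk revisits its original lock and reports a bogus -EDEADLK.

#include <stdio.h>
#include <stddef.h>

struct task;
struct lock { struct task *owner; };
struct task { struct lock *blocked_on; };

/* Follow owner -> blocked_on edges; deadlock iff we revisit orig. */
static int chain_walk(struct lock *orig)
{
	struct lock *l = orig->owner ? orig->owner->blocked_on : NULL;

	while (l) {
		if (l == orig)
			return 1;			/* cycle: -EDEADLK */
		l = l->owner ? l->owner->blocked_on : NULL;
	}
	return 0;
}

int main(void)
{
	struct lock L = { 0 };
	struct task A = { 0 };

	L.owner = &A;

	/* Without the early check, A enqueues on its own lock ... */
	A.blocked_on = &L;
	/* ... and an innocent waiter B blocking on L afterwards walks
	 * L -> A -> L and gets -EDEADLK although B deadlocks nothing. */
	printf("B's chain walk reports deadlock: %d (wrongly)\n", chain_walk(&L));

	/* With the early check, A is refused before touching the graph: */
	A.blocked_on = NULL;	/* if (owner == task) return -EDEADLK; */
	printf("B's chain walk reports deadlock: %d (correct)\n", chain_walk(&L));
	return 0;
}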