author		Darren Hart <dvhltc@us.ibm.com>	2009-04-03 16:39:52 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2009-04-06 05:14:01 -0400
commit		1a52084d0919c2799258737c21fb328a9de159b5 (patch)
tree		1b5351a83913d1c583912b933ad838d2a024286b /kernel
parent		4b1c486b3587d2abf50bee4a05eb488cd4045f2c (diff)
futex: split out atomic logic from futex_lock_pi()
Refactor the atomic portion of futex_lock_pi() into futex_lock_pi_atomic(). This logic will be needed by requeue_pi, so modularize it to reduce code duplication. The only significant change is that the task to take the lock for is now passed in explicitly; this simplifies the -EDEADLK test, since if the lock is already owned by task t it is a deadlock regardless of whether we are doing requeue pi or not. The corresponding comment is updated accordingly.

Signed-off-by: Darren Hart <dvhltc@us.ibm.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
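As a reading aid (not part of the patch itself), here is a condensed sketch of how a caller consumes the new helper's 0 / 1 / <0 return contract. futex_lock_pi() passes current as the task; the planned requeue_pi code would pass the waiter being requeued instead. All identifiers appear in the diff below, and the jump labels belong to futex_lock_pi(); this is a paraphrase of the caller, not verbatim kernel code, and it omits the -EAGAIN retry for brevity.

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current);
	switch (ret) {
	case 1:
		/* The 0 -> TID cmpxchg (or owner-died takeover) acquired the lock. */
		ret = 0;
		goto out_unlock_put_key;
	case 0:
		/* FUTEX_WAITERS is set and q.pi_state is valid: proceed to wait. */
		break;
	case -EFAULT:
		goto uaddr_faulted;
	case -EDEADLK:
		/* The futex word already holds task's TID: deadlock either way. */
	default:
		goto out_unlock_put_key;
	}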
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/futex.c	224
1 file changed, 130 insertions(+), 94 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index 421fb5e42a10..986b16e44534 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -556,6 +556,127 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 	return 0;
 }
 
+/**
+ * futex_lock_pi_atomic() - atomic work required to acquire a pi aware futex
+ * @uaddr:	the pi futex user address
+ * @hb:		the pi futex hash bucket
+ * @key:	the futex key associated with uaddr and hb
+ * @ps:		the pi_state pointer where we store the result of the lookup
+ * @task:	the task to perform the atomic lock work for.  This will be
+ *		"current" except in the case of requeue pi.
+ *
+ * Returns:
+ *  0 - ready to wait
+ *  1 - acquired the lock
+ * <0 - error
+ *
+ * The hb->lock and futex_key refs shall be held by the caller.
+ */
+static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+				union futex_key *key,
+				struct futex_pi_state **ps,
+				struct task_struct *task)
+{
+	int lock_taken, ret, ownerdied = 0;
+	u32 uval, newval, curval;
+
+retry:
+	ret = lock_taken = 0;
+
+	/*
+	 * To avoid races, we attempt to take the lock here again
+	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
+	 * the locks. It will most likely not succeed.
+	 */
+	newval = task_pid_vnr(task);
+
+	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
+
+	if (unlikely(curval == -EFAULT))
+		return -EFAULT;
+
+	/*
+	 * Detect deadlocks.
+	 */
+	if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
+		return -EDEADLK;
+
+	/*
+	 * Surprise - we got the lock. Just return to userspace:
+	 */
+	if (unlikely(!curval))
+		return 1;
+
+	uval = curval;
+
+	/*
+	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
+	 * to wake at the next unlock.
+	 */
+	newval = curval | FUTEX_WAITERS;
+
+	/*
+	 * There are two cases, where a futex might have no owner (the
+	 * owner TID is 0): OWNER_DIED. We take over the futex in this
+	 * case. We also do an unconditional take over, when the owner
+	 * of the futex died.
+	 *
+	 * This is safe as we are protected by the hash bucket lock !
+	 */
+	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
+		/* Keep the OWNER_DIED bit */
+		newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
+		ownerdied = 0;
+		lock_taken = 1;
+	}
+
+	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
+
+	if (unlikely(curval == -EFAULT))
+		return -EFAULT;
+	if (unlikely(curval != uval))
+		goto retry;
+
+	/*
+	 * We took the lock due to owner died take over.
+	 */
+	if (unlikely(lock_taken))
+		return 1;
+
+	/*
+	 * We dont have the lock. Look up the PI state (or create it if
+	 * we are the first waiter):
+	 */
+	ret = lookup_pi_state(uval, hb, key, ps);
+
+	if (unlikely(ret)) {
+		switch (ret) {
+		case -ESRCH:
+			/*
+			 * No owner found for this futex. Check if the
+			 * OWNER_DIED bit is set to figure out whether
+			 * this is a robust futex or not.
+			 */
+			if (get_futex_value_locked(&curval, uaddr))
+				return -EFAULT;
+
+			/*
+			 * We simply start over in case of a robust
+			 * futex. The code above will take the futex
+			 * and return happy.
+			 */
+			if (curval & FUTEX_OWNER_DIED) {
+				ownerdied = 1;
+				goto retry;
+			}
+		default:
+			break;
+		}
+	}
+
+	return ret;
+}
+
 /*
  * The hash bucket lock must be held when this is called.
  * Afterwards, the futex_q must not be accessed.
@@ -1340,9 +1461,9 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct task_struct *curr = current;
 	struct futex_hash_bucket *hb;
-	u32 uval, newval, curval;
+	u32 uval;
 	struct futex_q q;
-	int ret, lock_taken, ownerdied = 0;
+	int ret;
 
 	if (refill_pi_state_cache())
 		return -ENOMEM;
@@ -1365,81 +1486,15 @@ retry:
 retry_private:
 	hb = queue_lock(&q);
 
-retry_locked:
-	ret = lock_taken = 0;
-
-	/*
-	 * To avoid races, we attempt to take the lock here again
-	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
-	 * the locks. It will most likely not succeed.
-	 */
-	newval = task_pid_vnr(current);
-
-	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
-
-	if (unlikely(curval == -EFAULT))
-		goto uaddr_faulted;
-
-	/*
-	 * Detect deadlocks. In case of REQUEUE_PI this is a valid
-	 * situation and we return success to user space.
-	 */
-	if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
-		ret = -EDEADLK;
-		goto out_unlock_put_key;
-	}
-
-	/*
-	 * Surprise - we got the lock. Just return to userspace:
-	 */
-	if (unlikely(!curval))
-		goto out_unlock_put_key;
-
-	uval = curval;
-
-	/*
-	 * Set the WAITERS flag, so the owner will know it has someone
-	 * to wake at next unlock
-	 */
-	newval = curval | FUTEX_WAITERS;
-
-	/*
-	 * There are two cases, where a futex might have no owner (the
-	 * owner TID is 0): OWNER_DIED. We take over the futex in this
-	 * case. We also do an unconditional take over, when the owner
-	 * of the futex died.
-	 *
-	 * This is safe as we are protected by the hash bucket lock !
-	 */
-	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
-		/* Keep the OWNER_DIED bit */
-		newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
-		ownerdied = 0;
-		lock_taken = 1;
-	}
-
-	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
-
-	if (unlikely(curval == -EFAULT))
-		goto uaddr_faulted;
-	if (unlikely(curval != uval))
-		goto retry_locked;
-
-	/*
-	 * We took the lock due to owner died take over.
-	 */
-	if (unlikely(lock_taken))
-		goto out_unlock_put_key;
-
-	/*
-	 * We dont have the lock. Look up the PI state (or create it if
-	 * we are the first waiter):
-	 */
-	ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);
-
+	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current);
 	if (unlikely(ret)) {
 		switch (ret) {
-
+		case 1:
+			/* We got the lock. */
+			ret = 0;
+			goto out_unlock_put_key;
+		case -EFAULT:
+			goto uaddr_faulted;
 		case -EAGAIN:
 			/*
 			 * Task is exiting and we just wait for the
@@ -1449,25 +1504,6 @@ retry_locked:
 			put_futex_key(fshared, &q.key);
 			cond_resched();
 			goto retry;
-
-		case -ESRCH:
-			/*
-			 * No owner found for this futex. Check if the
-			 * OWNER_DIED bit is set to figure out whether
-			 * this is a robust futex or not.
-			 */
-			if (get_futex_value_locked(&curval, uaddr))
-				goto uaddr_faulted;
-
-			/*
-			 * We simply start over in case of a robust
-			 * futex. The code above will take the futex
-			 * and return happy.
-			 */
-			if (curval & FUTEX_OWNER_DIED) {
-				ownerdied = 1;
-				goto retry_locked;
-			}
 		default:
 			goto out_unlock_put_key;
 		}
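For readers new to PI futexes, the following is a minimal userspace sketch (not from this commit) of the fast-path side of the same protocol: the 0 -> TID cmpxchg that futex_lock_pi_atomic() retries under hb->lock. FUTEX_WAITERS and FUTEX_TID_MASK are the real uapi constants from <linux/futex.h>; try_lock_pi_fast() and its variable names are illustrative assumptions, not kernel code. When the cmpxchg fails, a real caller would issue the FUTEX_LOCK_PI syscall, which is the path that lands in futex_lock_pi() and, with this patch, in futex_lock_pi_atomic().

	/* Userspace illustration of the PI futex fast path: word 0 means unlocked. */
	#include <linux/futex.h>	/* FUTEX_WAITERS, FUTEX_TID_MASK */
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/*
	 * Returns 1 if the futex word went 0 -> TID (lock taken in userspace),
	 * 0 if it is contended and the caller must fall back to FUTEX_LOCK_PI,
	 * where the kernel slow path above takes over.
	 */
	static int try_lock_pi_fast(_Atomic uint32_t *futex_word)
	{
		uint32_t expected = 0;
		uint32_t tid = (uint32_t)syscall(SYS_gettid);

		if (atomic_compare_exchange_strong(futex_word, &expected, tid))
			return 1;

		/* Owner TID lives in the low bits; WAITERS/OWNER_DIED in the high bits. */
		printf("contended: owner tid=%u, waiters=%d\n",
		       expected & FUTEX_TID_MASK, !!(expected & FUTEX_WAITERS));
		return 0;
	}

	int main(void)
	{
		static _Atomic uint32_t futex_word;	/* 0 == unlocked */

		if (try_lock_pi_fast(&futex_word))
			printf("acquired: futex word now holds our TID\n");
		return 0;
	}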