path: root/kernel/futex.c
author    Darren Hart <dvhltc@us.ibm.com>  2009-04-03 16:40:02 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2009-04-06 05:14:02 -0400
commit    dd9739980b50c8cde33e1f8eb08b7e0140bcd61e (patch)
tree      d263d1632397e74c60bc3102853ccc437a65aabf  /kernel/futex.c
parent    1a52084d0919c2799258737c21fb328a9de159b5 (diff)
futex: split out fixup owner logic from futex_lock_pi()
Refactor the post lock acquisition logic from futex_lock_pi(). This code
will be reused in futex_wait_requeue_pi().

Signed-off-by: Darren Hart <dvhltc@us.ibm.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/futex.c')
-rw-r--r--  kernel/futex.c  158
1 file changed, 89 insertions(+), 69 deletions(-)
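The helper's contract is the heart of the refactor: fixup_owner() folds three outcomes into one integer (1: lock taken, 0: lock not taken, <0: fault), and futex_lock_pi() maps that back onto its own wait result. A minimal, self-contained user-space sketch of that caller-side mapping follows (illustration only, not part of the patch; the real helper needs the futex hash bucket and pi_state and cannot run outside the kernel):

#include <errno.h>
#include <stdio.h>

int main(void)
{
	/* The three results fixup_owner() can return, per its kernel-doc:
	 * 1 = lock taken, 0 = lock not taken, -EFAULT = fault. */
	int samples[] = { 1, 0, -EFAULT };

	for (int i = 0; i < 3; i++) {
		int ret = -ETIMEDOUT;	/* wait result before the fixup */
		int res = samples[i];

		/* Caller-side mapping used in futex_lock_pi(): an error is
		 * propagated; a late lock acquisition clears the stale
		 * -ETIMEDOUT/-EINTR; otherwise the wait result stands. */
		if (res)
			ret = (res < 0) ? res : 0;

		printf("res = %2d -> ret = %d\n", res, ret);
	}
	return 0;
}

Running it prints ret = 0 for res = 1 (the lock was acquired after all), ret = -ETIMEDOUT for res = 0 (the wait result is preserved), and ret = -EFAULT for res = -EFAULT.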
diff --git a/kernel/futex.c b/kernel/futex.c
index 986b16e4453..af831fbb7fb 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1256,6 +1256,79 @@ handle_fault:
 static long futex_wait_restart(struct restart_block *restart);
 
 /**
+ * fixup_owner() - Post lock pi_state and corner case management
+ * @uaddr:	user address of the futex
+ * @fshared:	whether the futex is shared (1) or not (0)
+ * @q:		futex_q (contains pi_state and access to the rt_mutex)
+ * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
+ *
+ * After attempting to lock an rt_mutex, this function is called to cleanup
+ * the pi_state owner as well as handle race conditions that may allow us to
+ * acquire the lock. Must be called with the hb lock held.
+ *
+ * Returns:
+ *  1 - success, lock taken
+ *  0 - success, lock not taken
+ * <0 - on error (-EFAULT)
+ */
+static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
+		       int locked)
+{
+	struct task_struct *owner;
+	int ret = 0;
+
+	if (locked) {
+		/*
+		 * Got the lock. We might not be the anticipated owner if we
+		 * did a lock-steal - fix up the PI-state in that case:
+		 */
+		if (q->pi_state->owner != current)
+			ret = fixup_pi_state_owner(uaddr, q, current, fshared);
+		goto out;
+	}
+
+	/*
+	 * Catch the rare case, where the lock was released when we were on the
+	 * way back before we locked the hash bucket.
+	 */
+	if (q->pi_state->owner == current) {
+		/*
+		 * Try to get the rt_mutex now. This might fail as some other
+		 * task acquired the rt_mutex after we removed ourself from the
+		 * rt_mutex waiters list.
+		 */
+		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
+			locked = 1;
+			goto out;
+		}
+
+		/*
+		 * pi_state is incorrect, some other task did a lock steal and
+		 * we returned due to timeout or signal without taking the
+		 * rt_mutex. Too late. We can access the rt_mutex_owner without
+		 * locking, as the other task is now blocked on the hash bucket
+		 * lock. Fix the state up.
+		 */
+		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
+		ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
+		goto out;
+	}
+
+	/*
+	 * Paranoia check. If we did not take the lock, then we should not be
+	 * the owner, nor the pending owner, of the rt_mutex.
+	 */
+	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
+		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
+		       "pi-state %p\n", ret,
+		       q->pi_state->pi_mutex.owner,
+		       q->pi_state->owner);
+
+out:
+	return ret ? ret : locked;
+}
+
+/**
  * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
  * @hb:		the futex hash bucket, must be locked by the caller
  * @q:		the futex_q to queue up on
@@ -1459,11 +1532,10 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 			 int detect, ktime_t *time, int trylock)
 {
 	struct hrtimer_sleeper timeout, *to = NULL;
-	struct task_struct *curr = current;
 	struct futex_hash_bucket *hb;
 	u32 uval;
 	struct futex_q q;
-	int ret;
+	int res, ret;
 
 	if (refill_pi_state_cache())
 		return -ENOMEM;
@@ -1527,71 +1599,21 @@ retry_private:
 	}
 
 	spin_lock(q.lock_ptr);
-
-	if (!ret) {
-		/*
-		 * Got the lock. We might not be the anticipated owner
-		 * if we did a lock-steal - fix up the PI-state in
-		 * that case:
-		 */
-		if (q.pi_state->owner != curr)
-			ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
-	} else {
-		/*
-		 * Catch the rare case, where the lock was released
-		 * when we were on the way back before we locked the
-		 * hash bucket.
-		 */
-		if (q.pi_state->owner == curr) {
-			/*
-			 * Try to get the rt_mutex now. This might
-			 * fail as some other task acquired the
-			 * rt_mutex after we removed ourself from the
-			 * rt_mutex waiters list.
-			 */
-			if (rt_mutex_trylock(&q.pi_state->pi_mutex))
-				ret = 0;
-			else {
-				/*
-				 * pi_state is incorrect, some other
-				 * task did a lock steal and we
-				 * returned due to timeout or signal
-				 * without taking the rt_mutex. Too
-				 * late. We can access the
-				 * rt_mutex_owner without locking, as
-				 * the other task is now blocked on
-				 * the hash bucket lock. Fix the state
-				 * up.
-				 */
-				struct task_struct *owner;
-				int res;
-
-				owner = rt_mutex_owner(&q.pi_state->pi_mutex);
-				res = fixup_pi_state_owner(uaddr, &q, owner,
-							   fshared);
-
-				/* propagate -EFAULT, if the fixup failed */
-				if (res)
-					ret = res;
-			}
-		} else {
-			/*
-			 * Paranoia check. If we did not take the lock
-			 * in the trylock above, then we should not be
-			 * the owner of the rtmutex, neither the real
-			 * nor the pending one:
-			 */
-			if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
-				printk(KERN_ERR "futex_lock_pi: ret = %d "
-				       "pi-mutex: %p pi-state %p\n", ret,
-				       q.pi_state->pi_mutex.owner,
-				       q.pi_state->owner);
-		}
-	}
+	/*
+	 * Fixup the pi_state owner and possibly acquire the lock if we
+	 * haven't already.
+	 */
+	res = fixup_owner(uaddr, fshared, &q, !ret);
+	/*
+	 * If fixup_owner() returned an error, propagate that. If it acquired
+	 * the lock, clear our -ETIMEDOUT or -EINTR.
+	 */
+	if (res)
+		ret = (res < 0) ? res : 0;
 
 	/*
-	 * If fixup_pi_state_owner() faulted and was unable to handle the
-	 * fault, unlock it and return the fault to userspace.
+	 * If fixup_owner() faulted and was unable to handle the fault, unlock
+	 * it and return the fault to userspace.
 	 */
 	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
 		rt_mutex_unlock(&q.pi_state->pi_mutex);
@@ -1599,9 +1621,7 @@ retry_private:
 	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
 
-	if (to)
-		destroy_hrtimer_on_stack(&to->timer);
-	return ret != -EINTR ? ret : -ERESTARTNOINTR;
+	goto out;
 
 out_unlock_put_key:
 	queue_unlock(&q, hb);
@@ -1611,7 +1631,7 @@ out_put_key:
 out:
 	if (to)
 		destroy_hrtimer_on_stack(&to->timer);
-	return ret;
+	return ret != -EINTR ? ret : -ERESTARTNOINTR;
 
 uaddr_faulted:
 	/*