author		Ingo Molnar <mingo@elte.hu>	2008-12-25 07:54:14 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-25 07:54:14 -0500
commit		cc37d3d20604f3759d269247b022616f710aa52d (patch)
tree		72758a50bb9352b992842e9a8f9901aa6193b71d
parent		b594deb0cc54d857828d2e33b2e9d5a9f02f0e89 (diff)
parent		b56863630ddbdea6e22df8835f78f0b1da037103 (diff)
Merge branch 'core/futexes' into core/core
-rw-r--r--	include/linux/futex.h	 3
-rw-r--r--	kernel/futex.c		61
2 files changed, 37 insertions, 27 deletions
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 8f627b9ae2b1..3bf5bb5a34f9 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -25,7 +25,8 @@ union ktime;
 #define FUTEX_WAKE_BITSET	10
 
 #define FUTEX_PRIVATE_FLAG	128
-#define FUTEX_CMD_MASK		~FUTEX_PRIVATE_FLAG
+#define FUTEX_CLOCK_REALTIME	256
+#define FUTEX_CMD_MASK		~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
 
 #define FUTEX_WAIT_PRIVATE	(FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
 #define FUTEX_WAKE_PRIVATE	(FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
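
The userspace-visible effect of the futex.h change: callers can OR FUTEX_CLOCK_REALTIME into the op word, and FUTEX_CMD_MASK now strips it (together with FUTEX_PRIVATE_FLAG) when the command is decoded, so the flag only changes which clock an absolute FUTEX_WAIT_BITSET timeout is measured against. A minimal userspace sketch of how the flag might be used, assuming installed headers that already define FUTEX_CLOCK_REALTIME and FUTEX_BITSET_MATCH_ANY; the futex_wait_realtime() wrapper and variable names are illustrative, not part of the patch:

```c
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical wrapper: wait on *uaddr while it still holds 'val',
 * with an absolute CLOCK_REALTIME deadline. */
static long futex_wait_realtime(uint32_t *uaddr, uint32_t val,
				const struct timespec *abs_deadline)
{
	int op = FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME;

	/* FUTEX_WAIT_BITSET takes an absolute timeout; val3 is the bitset. */
	return syscall(SYS_futex, uaddr, op, val, abs_deadline,
		       NULL, FUTEX_BITSET_MATCH_ANY);
}

int main(void)
{
	uint32_t futex_word = 0;	/* nobody will wake us; expect a timeout */
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;		/* wall-clock deadline one second from now */

	if (futex_wait_realtime(&futex_word, 0, &deadline) == -1)
		perror("futex");	/* ETIMEDOUT once the deadline passes */
	return 0;
}
```

Without FUTEX_CLOCK_REALTIME the same call would be timed against CLOCK_MONOTONIC, so a wall-clock adjustment would not affect when the wait expires.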
diff --git a/kernel/futex.c b/kernel/futex.c
index e10c5c8786a6..b4f87bac91c1 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -92,11 +92,12 @@ struct futex_pi_state {
  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
  * The order of wakup is always to make the first condition true, then
- * wake up q->waiters, then make the second condition true.
+ * wake up q->waiter, then make the second condition true.
  */
 struct futex_q {
 	struct plist_node list;
-	wait_queue_head_t waiters;
+	/* There can only be a single waiter */
+	wait_queue_head_t waiter;
 
 	/* Which hash list lock to use: */
 	spinlock_t *lock_ptr;
@@ -573,7 +574,7 @@ static void wake_futex(struct futex_q *q)
 	 * The lock in wake_up_all() is a crucial memory barrier after the
 	 * plist_del() and also before assigning to q->lock_ptr.
 	 */
-	wake_up_all(&q->waiters);
+	wake_up(&q->waiter);
 	/*
 	 * The waiting task can free the futex_q as soon as this is written,
 	 * without taking any locks. This must come last.
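
The wake_futex() change leans on the invariant just documented in struct futex_q: each futex_q is queued by exactly one task, so q->waiter can never have more than one sleeper and wake_up() suffices where wake_up_all() was used before. As a rough userspace analogue of that reasoning (not the kernel code), a condition variable with a guaranteed single waiter can be signalled rather than broadcast:

```c
#include <pthread.h>
#include <stdio.h>

/* Illustrative only: one dedicated waiter per 'slot', mirroring the
 * one-task-per-futex_q invariant the patch documents. */
struct slot {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int woken;
};

static void *waiter(void *arg)
{
	struct slot *s = arg;

	pthread_mutex_lock(&s->lock);
	while (!s->woken)			/* guard against spurious wakeups */
		pthread_cond_wait(&s->cond, &s->lock);
	pthread_mutex_unlock(&s->lock);
	puts("waiter woken");
	return NULL;
}

int main(void)
{
	static struct slot s = {
		.lock  = PTHREAD_MUTEX_INITIALIZER,
		.cond  = PTHREAD_COND_INITIALIZER,
		.woken = 0,
	};
	pthread_t t;

	pthread_create(&t, NULL, waiter, &s);

	pthread_mutex_lock(&s.lock);
	s.woken = 1;
	/* Exactly one waiter can exist, so waking one is enough;
	 * waking all would only add overhead. */
	pthread_cond_signal(&s.cond);
	pthread_mutex_unlock(&s.lock);

	pthread_join(t, NULL);
	return 0;
}
```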
@@ -930,7 +931,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 {
 	struct futex_hash_bucket *hb;
 
-	init_waitqueue_head(&q->waiters);
+	init_waitqueue_head(&q->waiter);
 
 	get_futex_key_refs(&q->key);
 	hb = hash_futex(&q->key);
@@ -1142,12 +1143,13 @@ handle_fault:
  * In case we must use restart_block to restart a futex_wait,
  * we encode in the 'flags' shared capability
  */
-#define FLAGS_SHARED 1
+#define FLAGS_SHARED		0x01
+#define FLAGS_CLOCKRT		0x02
 
 static long futex_wait_restart(struct restart_block *restart);
 
 static int futex_wait(u32 __user *uaddr, int fshared,
-		      u32 val, ktime_t *abs_time, u32 bitset)
+		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
 {
 	struct task_struct *curr = current;
 	DECLARE_WAITQUEUE(wait, curr);
@@ -1220,7 +1222,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 
 	/* add_wait_queue is the barrier after __set_current_state. */
 	__set_current_state(TASK_INTERRUPTIBLE);
-	add_wait_queue(&q.waiters, &wait);
+	add_wait_queue(&q.waiter, &wait);
 	/*
 	 * !plist_node_empty() is safe here without any lock.
 	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
@@ -1233,8 +1235,10 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 		slack = current->timer_slack_ns;
 		if (rt_task(current))
 			slack = 0;
-		hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
-				      HRTIMER_MODE_ABS);
+		hrtimer_init_on_stack(&t.timer,
+				      clockrt ? CLOCK_REALTIME :
+				      CLOCK_MONOTONIC,
+				      HRTIMER_MODE_ABS);
 		hrtimer_init_sleeper(&t, current);
 		hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);
 
@@ -1289,6 +1293,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 
 		if (fshared)
 			restart->futex.flags |= FLAGS_SHARED;
+		if (clockrt)
+			restart->futex.flags |= FLAGS_CLOCKRT;
 		return -ERESTART_RESTARTBLOCK;
 	}
 
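
The new restart-block bit exists so a wait interrupted by a signal resumes with the same sharedness and clock selection it started with; futex_wait_restart() in the next hunk decodes both bits again. A trivial, purely illustrative sketch of that encode/decode round trip, with hypothetical stand-in names for the restart fields:

```c
#include <assert.h>

/* Illustrative re-statement of the restart-block encoding introduced here:
 * the shared and clock choices survive as two bits in a saved flags word. */
#define FLAGS_SHARED	0x01
#define FLAGS_CLOCKRT	0x02

struct saved_wait {
	unsigned long flags;	/* stand-in for restart->futex.flags */
};

static void save(struct saved_wait *r, int fshared, int clockrt)
{
	r->flags = 0;
	if (fshared)
		r->flags |= FLAGS_SHARED;
	if (clockrt)
		r->flags |= FLAGS_CLOCKRT;
}

static void restore(const struct saved_wait *r, int *fshared, int *clockrt)
{
	*fshared = !!(r->flags & FLAGS_SHARED);
	*clockrt = !!(r->flags & FLAGS_CLOCKRT);
}

int main(void)
{
	struct saved_wait r;
	int fshared, clockrt;

	save(&r, 0, 1);			/* private futex, realtime clock */
	restore(&r, &fshared, &clockrt);
	assert(fshared == 0 && clockrt == 1);
	return 0;
}
```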
@@ -1312,7 +1318,8 @@ static long futex_wait_restart(struct restart_block *restart)
 	if (restart->futex.flags & FLAGS_SHARED)
 		fshared = 1;
 	return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
-				restart->futex.bitset);
+				restart->futex.bitset,
+				restart->futex.flags & FLAGS_CLOCKRT);
 }
 
 
@@ -1558,12 +1565,11 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 
 uaddr_faulted:
 	/*
-	 * We have to r/w *(int __user *)uaddr, but we can't modify it
-	 * non-atomically. Therefore, if get_user below is not
-	 * enough, we need to handle the fault ourselves, while
-	 * still holding the mmap_sem.
-	 *
-	 * ... and hb->lock. :-) --ANK
+	 * We have to r/w *(int __user *)uaddr, and we have to modify it
+	 * atomically. Therefore, if we continue to fault after get_user()
+	 * below, we need to handle the fault ourselves, while still holding
+	 * the mmap_sem. This can occur if the uaddr is under contention as
+	 * we have to drop the mmap_sem in order to call get_user().
 	 */
 	queue_unlock(&q, hb);
 
@@ -1575,7 +1581,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 	}
 
 	ret = get_user(uval, uaddr);
-	if (!ret && (uval != -EFAULT))
+	if (!ret)
 		goto retry;
 
 	if (to)
@@ -1669,12 +1675,11 @@ out:
 
 pi_faulted:
 	/*
-	 * We have to r/w *(int __user *)uaddr, but we can't modify it
-	 * non-atomically. Therefore, if get_user below is not
-	 * enough, we need to handle the fault ourselves, while
-	 * still holding the mmap_sem.
-	 *
-	 * ... and hb->lock. --ANK
+	 * We have to r/w *(int __user *)uaddr, and we have to modify it
+	 * atomically. Therefore, if we continue to fault after get_user()
+	 * below, we need to handle the fault ourselves, while still holding
+	 * the mmap_sem. This can occur if the uaddr is under contention as
+	 * we have to drop the mmap_sem in order to call get_user().
 	 */
 	spin_unlock(&hb->lock);
 
@@ -1687,7 +1692,7 @@ pi_faulted:
 	}
 
 	ret = get_user(uval, uaddr);
-	if (!ret && (uval != -EFAULT))
+	if (!ret)
 		goto retry;
 
 	return ret;
@@ -1905,18 +1910,22 @@ void exit_robust_list(struct task_struct *curr)
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 		u32 __user *uaddr2, u32 val2, u32 val3)
 {
-	int ret = -ENOSYS;
+	int clockrt, ret = -ENOSYS;
 	int cmd = op & FUTEX_CMD_MASK;
 	int fshared = 0;
 
 	if (!(op & FUTEX_PRIVATE_FLAG))
 		fshared = 1;
 
+	clockrt = op & FUTEX_CLOCK_REALTIME;
+	if (clockrt && cmd != FUTEX_WAIT_BITSET)
+		return -ENOSYS;
+
 	switch (cmd) {
 	case FUTEX_WAIT:
 		val3 = FUTEX_BITSET_MATCH_ANY;
 	case FUTEX_WAIT_BITSET:
-		ret = futex_wait(uaddr, fshared, val, timeout, val3);
+		ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
 		break;
 	case FUTEX_WAKE:
 		val3 = FUTEX_BITSET_MATCH_ANY;
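
do_futex() now extracts the clock selection from op before the command switch, rejects FUTEX_CLOCK_REALTIME for everything except FUTEX_WAIT_BITSET, and passes clockrt straight through to futex_wait(). A minimal userspace probe of that validation, assuming a kernel with exactly this check in place (later kernels may accept the flag for more commands):

```c
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	uint32_t word = 0;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 };
	long ret;

	/* With this patch, FUTEX_CLOCK_REALTIME is only honoured for
	 * FUTEX_WAIT_BITSET; combining it with plain FUTEX_WAIT is
	 * rejected before the command is dispatched. */
	ret = syscall(SYS_futex, &word, FUTEX_WAIT | FUTEX_CLOCK_REALTIME,
		      0, &ts, NULL, 0);
	if (ret == -1 && errno == ENOSYS)
		puts("FUTEX_WAIT | FUTEX_CLOCK_REALTIME -> ENOSYS, as expected");
	return 0;
}
```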