| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-21 17:06:17 -0400 |
|-----------|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-21 17:06:17 -0400 |
| commit    | b61f6a57f1919ef8dbd33f864df9b8b361c65b11 (patch) | |
| tree      | 9341bc1a3f6a8fdcd19f97d5bcf1a658ab7fc92c /kernel | |
| parent    | 0575db881d18a4791013fc93ba756ad08b18fb48 (diff) | |
| parent    | fb62db2ba943b1683f1d7181bb2988fce4c60870 (diff) | |
Merge branch 'futexes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'futexes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
futex: Fix kernel-doc notation & typos
futex: Add lock context annotations
futex: Mark restart_block.futex.uaddr[2] __user
futex: Change 3rd arg of fetch_robust_entry() to unsigned int*
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/futex.c        | 35 |
-rw-r--r-- | kernel/futex_compat.c |  2 |
2 files changed, 22 insertions, 15 deletions
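Before the diff itself: the "futex: Add lock context annotations" commit in this pull only adds sparse `__acquires()`/`__releases()` markers to queue_lock(), queue_unlock(), queue_me() and unqueue_me_pi(). Below is a minimal, self-contained sketch of what such annotations express — a pthread mutex and made-up function names stand in for the kernel's hash-bucket lock, and the macros are stubbed out the way the kernel does when sparse (`__CHECKER__`) is not running:

```c
/*
 * Standalone sketch of the sparse lock-context annotations added in this
 * merge.  In the kernel, __acquires()/__releases() come from
 * <linux/compiler.h> and expand to context-tracking attributes only when
 * the code is run through sparse (make C=1); for the compiler they are
 * empty, as stubbed below.  The mutex and function names are illustrative
 * stand-ins, not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

#define __acquires(x)	/* sparse-only: function returns with x held */
#define __releases(x)	/* sparse-only: function is entered with x held and drops it */

static pthread_mutex_t hb_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogous to queue_lock(): returns with the bucket lock held. */
static void example_queue_lock(void)
	__acquires(&hb_lock)
{
	pthread_mutex_lock(&hb_lock);
}

/* Analogous to queue_unlock()/queue_me(): called with the lock held, drops it. */
static void example_queue_unlock(void)
	__releases(&hb_lock)
{
	pthread_mutex_unlock(&hb_lock);
}

int main(void)
{
	example_queue_lock();
	printf("hash bucket locked, queueing waiter\n");
	example_queue_unlock();
	return 0;
}
```

When sparse is enabled, a path that leaves example_queue_lock() without taking the lock, or example_queue_unlock() without dropping it, is reported as a context imbalance — which is all the queue_lock()/queue_unlock()/queue_me()/unqueue_me_pi() hunks below add.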
diff --git a/kernel/futex.c b/kernel/futex.c
index e328f574c97c..a118bf160e0b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -91,6 +91,7 @@ struct futex_pi_state {
 
 /**
  * struct futex_q - The hashed futex queue entry, one per waiting task
+ * @list:	priority-sorted list of tasks waiting on this futex
  * @task:	the task waiting on the futex
  * @lock_ptr:	the hash bucket lock
  * @key:	the key the futex is hashed on
@@ -104,7 +105,7 @@ struct futex_pi_state {
  *
  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
- * The order of wakup is always to make the first condition true, then
+ * The order of wakeup is always to make the first condition true, then
  * the second.
  *
  * PI futexes are typically woken before they are removed from the hash list via
@@ -295,7 +296,7 @@ void put_futex_key(int fshared, union futex_key *key)
  * Slow path to fixup the fault we just took in the atomic write
  * access to @uaddr.
  *
- * We have no generic implementation of a non destructive write to the
+ * We have no generic implementation of a non-destructive write to the
  * user address. We know that we faulted in the atomic pagefault
  * disabled section so we can as well avoid the #PF overhead by
  * calling get_user_pages() right away.
@@ -515,7 +516,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 	 */
 	pi_state = this->pi_state;
 	/*
-	 * Userspace might have messed up non PI and PI futexes
+	 * Userspace might have messed up non-PI and PI futexes
 	 */
 	if (unlikely(!pi_state))
 		return -EINVAL;
@@ -736,8 +737,8 @@ static void wake_futex(struct futex_q *q)
 
 	/*
 	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
-	 * a non futex wake up happens on another CPU then the task
-	 * might exit and p would dereference a non existing task
+	 * a non-futex wake up happens on another CPU then the task
+	 * might exit and p would dereference a non-existing task
 	 * struct. Prevent this by holding a reference on p across the
 	 * wake up.
 	 */
@@ -1131,11 +1132,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
 
 /**
  * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
- * uaddr1:	source futex user address
- * uaddr2:	target futex user address
- * nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
- * nr_requeue:	number of waiters to requeue (0-INT_MAX)
- * requeue_pi:	if we are attempting to requeue from a non-pi futex to a
+ * @uaddr1:	source futex user address
+ * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
+ * @uaddr2:	target futex user address
+ * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
+ * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
+ * @cmpval:	@uaddr1 expected value (or %NULL)
+ * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
  *		pi futex (pi to pi requeue is not supported)
  *
  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
@@ -1360,6 +1363,7 @@ out:
 
 /* The key must be already stored in q->key. */
 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
+	__acquires(&hb->lock)
 {
 	struct futex_hash_bucket *hb;
 
@@ -1372,6 +1376,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 
 static inline void
 queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
+	__releases(&hb->lock)
 {
 	spin_unlock(&hb->lock);
 }
@@ -1389,6 +1394,7 @@ queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
  * an example).
  */
 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
+	__releases(&hb->lock)
 {
 	int prio;
 
@@ -1469,6 +1475,7 @@ retry:
  * and dropped here.
  */
 static void unqueue_me_pi(struct futex_q *q)
+	__releases(q->lock_ptr)
 {
 	WARN_ON(plist_node_empty(&q->list));
 	plist_del(&q->list, &q->list.plist);
@@ -1841,7 +1848,7 @@ retry:
 
 	restart = &current_thread_info()->restart_block;
 	restart->fn = futex_wait_restart;
-	restart->futex.uaddr = (u32 *)uaddr;
+	restart->futex.uaddr = uaddr;
 	restart->futex.val = val;
 	restart->futex.time = abs_time->tv64;
 	restart->futex.bitset = bitset;
@@ -1865,7 +1872,7 @@ out:
 
 static long futex_wait_restart(struct restart_block *restart)
 {
-	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
+	u32 __user *uaddr = restart->futex.uaddr;
 	int fshared = 0;
 	ktime_t t, *tp = NULL;
 
@@ -2459,7 +2466,7 @@ retry:
  */
 static inline int fetch_robust_entry(struct robust_list __user **entry,
 				     struct robust_list __user * __user *head,
-				     int *pi)
+				     unsigned int *pi)
 {
 	unsigned long uentry;
 
@@ -2648,7 +2655,7 @@ static int __init futex_init(void)
 	 * of the complex code paths. Also we want to prevent
 	 * registration of robust lists in that case. NULL is
 	 * guaranteed to fault and we get -EFAULT on functional
-	 * implementation, the non functional ones will return
+	 * implementation, the non-functional ones will return
 	 * -ENOSYS.
	 */
	curval = cmpxchg_futex_value_locked(NULL, 0, 0);
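The two futex_wait()/futex_wait_restart() hunks above only drop casts: once `restart_block.futex.uaddr` is declared as `u32 __user *` (the "Mark restart_block.futex.uaddr[2] __user" commit), the conversions to and from a plain `u32 *` become unnecessary. A rough standalone illustration of the idea, with `__user` stubbed out and a hypothetical struct in place of the kernel's restart_block:

```c
/*
 * Illustration of the __user address-space annotation.  In the kernel it
 * expands, under sparse, to an address_space attribute so user and kernel
 * pointers cannot be mixed silently; for the compiler it is empty, as
 * stubbed here.  struct wait_restart_example is hypothetical.
 */
#include <stdint.h>

#define __user	/* sparse-only address-space marker */

typedef uint32_t u32;

struct wait_restart_example {
	u32 __user *uaddr;	/* typed as a user pointer, so no casts below */
	u32 val;
};

/* Analogous to the futex_wait() side: stash the user address for a restart. */
static void stash(struct wait_restart_example *r, u32 __user *uaddr, u32 val)
{
	r->uaddr = uaddr;	/* previously needed a (u32 *) cast */
	r->val = val;
}

/* Analogous to futex_wait_restart(): read it back. */
static u32 __user *restore(const struct wait_restart_example *r)
{
	return r->uaddr;	/* previously needed a (u32 __user *) cast back */
}

int main(void)
{
	struct wait_restart_example r;
	u32 dummy = 0;

	stash(&r, &dummy, 1);	/* &dummy stands in for a real user-space address */
	return restore(&r) == &dummy ? 0 : 1;
}
```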
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index d49afb2395e5..06da4dfc339b 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -19,7 +19,7 @@
  */
 static inline int
 fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
-		   compat_uptr_t __user *head, int *pi)
+		   compat_uptr_t __user *head, unsigned int *pi)
 {
 	if (get_user(*uentry, head))
 		return -EFAULT;
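Finally, both files change the last parameter of fetch_robust_entry() from `int *` to `unsigned int *`, matching the type its callers actually pass. The value it fills in comes from the robust-list ABI, where bit 0 of each user-space entry pointer flags a PI futex. A simplified, self-contained sketch of that decode step (the kernel version reads the value with get_user() from a `__user` pointer rather than taking it directly):

```c
/*
 * Simplified sketch of the decode performed by fetch_robust_entry():
 * each robust-list entry is a user-space pointer whose low bit says
 * whether the futex is PI.  Standalone illustration, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

struct robust_list;			/* opaque here */

static int example_fetch_robust_entry(struct robust_list **entry,
				       uintptr_t uentry, unsigned int *pi)
{
	*entry = (struct robust_list *)(uentry & ~(uintptr_t)1);	/* strip the flag bit */
	*pi = (unsigned int)(uentry & 1);				/* bit 0: PI futex? */
	return 0;
}

int main(void)
{
	struct robust_list *entry;
	unsigned int pi;

	/* 0x1001 is a made-up, tagged entry value: address 0x1000 with the PI bit set. */
	example_fetch_robust_entry(&entry, 0x1001, &pi);
	printf("entry=%p pi=%u\n", (void *)entry, pi);
	return 0;
}
```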