aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/futex.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2009-09-07 02:19:51 -0400
committerIngo Molnar <mingo@elte.hu>2009-09-07 02:19:51 -0400
commita1922ed661ab2c1637d0b10cde933bd9cd33d965 (patch)
tree0f1777542b385ebefd30b3586d830fd8ed6fda5b /kernel/futex.c
parent75e33751ca8bbb72dd6f1a74d2810ddc8cbe4bdf (diff)
parentd28daf923ac5e4a0d7cecebae56f3e339189366b (diff)
Merge branch 'tracing/core' into tracing/hw-breakpoints
Conflicts: arch/Kconfig kernel/trace/trace.h Merge reason: resolve the conflicts, plus adapt to the new ring-buffer APIs. Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/futex.c')
-rw-r--r--kernel/futex.c74
1 file changed, 47 insertions, 27 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index 80b5ce716596..e18cfbdc7190 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -247,6 +247,7 @@ again:
247 if (err < 0) 247 if (err < 0)
248 return err; 248 return err;
249 249
250 page = compound_head(page);
250 lock_page(page); 251 lock_page(page);
251 if (!page->mapping) { 252 if (!page->mapping) {
252 unlock_page(page); 253 unlock_page(page);
@@ -284,6 +285,25 @@ void put_futex_key(int fshared, union futex_key *key)
284 drop_futex_key_refs(key); 285 drop_futex_key_refs(key);
285} 286}
286 287
288/*
289 * fault_in_user_writeable - fault in user address and verify RW access
290 * @uaddr: pointer to faulting user space address
291 *
292 * Slow path to fixup the fault we just took in the atomic write
293 * access to @uaddr.
294 *
295 * We have no generic implementation of a non-destructive write to the
296 * user address. We know that we faulted in the atomic pagefault
297 * disabled section so we can as well avoid the #PF overhead by
298 * calling get_user_pages() right away.
299 */
300static int fault_in_user_writeable(u32 __user *uaddr)
301{
302 int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
303 1, 1, 0, NULL, NULL);
304 return ret < 0 ? ret : 0;
305}
306
287/** 307/**
288 * futex_top_waiter() - Return the highest priority waiter on a futex 308 * futex_top_waiter() - Return the highest priority waiter on a futex
289 * @hb: the hash bucket the futex_q's reside in 309 * @hb: the hash bucket the futex_q's reside in
@@ -896,7 +916,6 @@ retry:
896retry_private: 916retry_private:
897 op_ret = futex_atomic_op_inuser(op, uaddr2); 917 op_ret = futex_atomic_op_inuser(op, uaddr2);
898 if (unlikely(op_ret < 0)) { 918 if (unlikely(op_ret < 0)) {
899 u32 dummy;
900 919
901 double_unlock_hb(hb1, hb2); 920 double_unlock_hb(hb1, hb2);
902 921
@@ -914,7 +933,7 @@ retry_private:
914 goto out_put_keys; 933 goto out_put_keys;
915 } 934 }
916 935
917 ret = get_user(dummy, uaddr2); 936 ret = fault_in_user_writeable(uaddr2);
918 if (ret) 937 if (ret)
919 goto out_put_keys; 938 goto out_put_keys;
920 939
@@ -991,15 +1010,19 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
991 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue 1010 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
992 * q: the futex_q 1011 * q: the futex_q
993 * key: the key of the requeue target futex 1012 * key: the key of the requeue target futex
1013 * hb: the hash_bucket of the requeue target futex
994 * 1014 *
995 * During futex_requeue, with requeue_pi=1, it is possible to acquire the 1015 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
996 * target futex if it is uncontended or via a lock steal. Set the futex_q key 1016 * target futex if it is uncontended or via a lock steal. Set the futex_q key
997 * to the requeue target futex so the waiter can detect the wakeup on the right 1017 * to the requeue target futex so the waiter can detect the wakeup on the right
998 * futex, but remove it from the hb and NULL the rt_waiter so it can detect 1018 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
999 * atomic lock acquisition. Must be called with the q->lock_ptr held. 1019 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1020 * to protect access to the pi_state to fixup the owner later. Must be called
1021 * with both q->lock_ptr and hb->lock held.
1000 */ 1022 */
1001static inline 1023static inline
1002void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key) 1024void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1025 struct futex_hash_bucket *hb)
1003{ 1026{
1004 drop_futex_key_refs(&q->key); 1027 drop_futex_key_refs(&q->key);
1005 get_futex_key_refs(key); 1028 get_futex_key_refs(key);
@@ -1011,6 +1034,11 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key)
1011 WARN_ON(!q->rt_waiter); 1034 WARN_ON(!q->rt_waiter);
1012 q->rt_waiter = NULL; 1035 q->rt_waiter = NULL;
1013 1036
1037 q->lock_ptr = &hb->lock;
1038#ifdef CONFIG_DEBUG_PI_LIST
1039 q->list.plist.lock = &hb->lock;
1040#endif
1041
1014 wake_up_state(q->task, TASK_NORMAL); 1042 wake_up_state(q->task, TASK_NORMAL);
1015} 1043}
1016 1044
@@ -1069,7 +1097,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1069 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, 1097 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1070 set_waiters); 1098 set_waiters);
1071 if (ret == 1) 1099 if (ret == 1)
1072 requeue_pi_wake_futex(top_waiter, key2); 1100 requeue_pi_wake_futex(top_waiter, key2, hb2);
1073 1101
1074 return ret; 1102 return ret;
1075} 1103}
@@ -1204,7 +1232,7 @@ retry_private:
1204 double_unlock_hb(hb1, hb2); 1232 double_unlock_hb(hb1, hb2);
1205 put_futex_key(fshared, &key2); 1233 put_futex_key(fshared, &key2);
1206 put_futex_key(fshared, &key1); 1234 put_futex_key(fshared, &key1);
1207 ret = get_user(curval2, uaddr2); 1235 ret = fault_in_user_writeable(uaddr2);
1208 if (!ret) 1236 if (!ret)
1209 goto retry; 1237 goto retry;
1210 goto out; 1238 goto out;
@@ -1228,8 +1256,15 @@ retry_private:
1228 if (!match_futex(&this->key, &key1)) 1256 if (!match_futex(&this->key, &key1))
1229 continue; 1257 continue;
1230 1258
1231 WARN_ON(!requeue_pi && this->rt_waiter); 1259 /*
1232 WARN_ON(requeue_pi && !this->rt_waiter); 1260 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1261 * be paired with each other and no other futex ops.
1262 */
1263 if ((requeue_pi && !this->rt_waiter) ||
1264 (!requeue_pi && this->rt_waiter)) {
1265 ret = -EINVAL;
1266 break;
1267 }
1233 1268
1234 /* 1269 /*
1235 * Wake nr_wake waiters. For requeue_pi, if we acquired the 1270 * Wake nr_wake waiters. For requeue_pi, if we acquired the
@@ -1254,7 +1289,7 @@ retry_private:
1254 this->task, 1); 1289 this->task, 1);
1255 if (ret == 1) { 1290 if (ret == 1) {
1256 /* We got the lock. */ 1291 /* We got the lock. */
1257 requeue_pi_wake_futex(this, &key2); 1292 requeue_pi_wake_futex(this, &key2, hb2);
1258 continue; 1293 continue;
1259 } else if (ret) { 1294 } else if (ret) {
1260 /* -EDEADLK */ 1295 /* -EDEADLK */
@@ -1482,7 +1517,7 @@ retry:
1482handle_fault: 1517handle_fault:
1483 spin_unlock(q->lock_ptr); 1518 spin_unlock(q->lock_ptr);
1484 1519
1485 ret = get_user(uval, uaddr); 1520 ret = fault_in_user_writeable(uaddr);
1486 1521
1487 spin_lock(q->lock_ptr); 1522 spin_lock(q->lock_ptr);
1488 1523
@@ -1807,7 +1842,6 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
1807{ 1842{
1808 struct hrtimer_sleeper timeout, *to = NULL; 1843 struct hrtimer_sleeper timeout, *to = NULL;
1809 struct futex_hash_bucket *hb; 1844 struct futex_hash_bucket *hb;
1810 u32 uval;
1811 struct futex_q q; 1845 struct futex_q q;
1812 int res, ret; 1846 int res, ret;
1813 1847
@@ -1909,16 +1943,9 @@ out:
1909 return ret != -EINTR ? ret : -ERESTARTNOINTR; 1943 return ret != -EINTR ? ret : -ERESTARTNOINTR;
1910 1944
1911uaddr_faulted: 1945uaddr_faulted:
1912 /*
1913 * We have to r/w *(int __user *)uaddr, and we have to modify it
1914 * atomically. Therefore, if we continue to fault after get_user()
1915 * below, we need to handle the fault ourselves, while still holding
1916 * the mmap_sem. This can occur if the uaddr is under contention as
1917 * we have to drop the mmap_sem in order to call get_user().
1918 */
1919 queue_unlock(&q, hb); 1946 queue_unlock(&q, hb);
1920 1947
1921 ret = get_user(uval, uaddr); 1948 ret = fault_in_user_writeable(uaddr);
1922 if (ret) 1949 if (ret)
1923 goto out_put_key; 1950 goto out_put_key;
1924 1951
@@ -2013,17 +2040,10 @@ out:
2013 return ret; 2040 return ret;
2014 2041
2015pi_faulted: 2042pi_faulted:
2016 /*
2017 * We have to r/w *(int __user *)uaddr, and we have to modify it
2018 * atomically. Therefore, if we continue to fault after get_user()
2019 * below, we need to handle the fault ourselves, while still holding
2020 * the mmap_sem. This can occur if the uaddr is under contention as
2021 * we have to drop the mmap_sem in order to call get_user().
2022 */
2023 spin_unlock(&hb->lock); 2043 spin_unlock(&hb->lock);
2024 put_futex_key(fshared, &key); 2044 put_futex_key(fshared, &key);
2025 2045
2026 ret = get_user(uval, uaddr); 2046 ret = fault_in_user_writeable(uaddr);
2027 if (!ret) 2047 if (!ret)
2028 goto retry; 2048 goto retry;
2029 2049