aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/futex.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/futex.c')
-rw-r--r-- kernel/futex.c 46
1 files changed, 25 insertions, 21 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index 80b5ce716596..0672ff88f159 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -247,6 +247,7 @@ again:
247 if (err < 0) 247 if (err < 0)
248 return err; 248 return err;
249 249
250 page = compound_head(page);
250 lock_page(page); 251 lock_page(page);
251 if (!page->mapping) { 252 if (!page->mapping) {
252 unlock_page(page); 253 unlock_page(page);
@@ -284,6 +285,25 @@ void put_futex_key(int fshared, union futex_key *key)
284 drop_futex_key_refs(key); 285 drop_futex_key_refs(key);
285} 286}
286 287
288/*
289 * fault_in_user_writeable - fault in user address and verify RW access
290 * @uaddr: pointer to faulting user space address
291 *
292 * Slow path to fixup the fault we just took in the atomic write
293 * access to @uaddr.
294 *
295 * We have no generic implementation of a non destructive write to the
296 * user address. We know that we faulted in the atomic pagefault
297 * disabled section so we can as well avoid the #PF overhead by
298 * calling get_user_pages() right away.
299 */
300static int fault_in_user_writeable(u32 __user *uaddr)
301{
302 int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
303 1, 1, 0, NULL, NULL);
304 return ret < 0 ? ret : 0;
305}
306
287/** 307/**
288 * futex_top_waiter() - Return the highest priority waiter on a futex 308 * futex_top_waiter() - Return the highest priority waiter on a futex
289 * @hb: the hash bucket the futex_q's reside in 309 * @hb: the hash bucket the futex_q's reside in
@@ -896,7 +916,6 @@ retry:
896retry_private: 916retry_private:
897 op_ret = futex_atomic_op_inuser(op, uaddr2); 917 op_ret = futex_atomic_op_inuser(op, uaddr2);
898 if (unlikely(op_ret < 0)) { 918 if (unlikely(op_ret < 0)) {
899 u32 dummy;
900 919
901 double_unlock_hb(hb1, hb2); 920 double_unlock_hb(hb1, hb2);
902 921
@@ -914,7 +933,7 @@ retry_private:
914 goto out_put_keys; 933 goto out_put_keys;
915 } 934 }
916 935
917 ret = get_user(dummy, uaddr2); 936 ret = fault_in_user_writeable(uaddr2);
918 if (ret) 937 if (ret)
919 goto out_put_keys; 938 goto out_put_keys;
920 939
@@ -1204,7 +1223,7 @@ retry_private:
1204 double_unlock_hb(hb1, hb2); 1223 double_unlock_hb(hb1, hb2);
1205 put_futex_key(fshared, &key2); 1224 put_futex_key(fshared, &key2);
1206 put_futex_key(fshared, &key1); 1225 put_futex_key(fshared, &key1);
1207 ret = get_user(curval2, uaddr2); 1226 ret = fault_in_user_writeable(uaddr2);
1208 if (!ret) 1227 if (!ret)
1209 goto retry; 1228 goto retry;
1210 goto out; 1229 goto out;
@@ -1482,7 +1501,7 @@ retry:
1482handle_fault: 1501handle_fault:
1483 spin_unlock(q->lock_ptr); 1502 spin_unlock(q->lock_ptr);
1484 1503
1485 ret = get_user(uval, uaddr); 1504 ret = fault_in_user_writeable(uaddr);
1486 1505
1487 spin_lock(q->lock_ptr); 1506 spin_lock(q->lock_ptr);
1488 1507
@@ -1807,7 +1826,6 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
1807{ 1826{
1808 struct hrtimer_sleeper timeout, *to = NULL; 1827 struct hrtimer_sleeper timeout, *to = NULL;
1809 struct futex_hash_bucket *hb; 1828 struct futex_hash_bucket *hb;
1810 u32 uval;
1811 struct futex_q q; 1829 struct futex_q q;
1812 int res, ret; 1830 int res, ret;
1813 1831
@@ -1909,16 +1927,9 @@ out:
1909 return ret != -EINTR ? ret : -ERESTARTNOINTR; 1927 return ret != -EINTR ? ret : -ERESTARTNOINTR;
1910 1928
1911uaddr_faulted: 1929uaddr_faulted:
1912 /*
1913 * We have to r/w *(int __user *)uaddr, and we have to modify it
1914 * atomically. Therefore, if we continue to fault after get_user()
1915 * below, we need to handle the fault ourselves, while still holding
1916 * the mmap_sem. This can occur if the uaddr is under contention as
1917 * we have to drop the mmap_sem in order to call get_user().
1918 */
1919 queue_unlock(&q, hb); 1930 queue_unlock(&q, hb);
1920 1931
1921 ret = get_user(uval, uaddr); 1932 ret = fault_in_user_writeable(uaddr);
1922 if (ret) 1933 if (ret)
1923 goto out_put_key; 1934 goto out_put_key;
1924 1935
@@ -2013,17 +2024,10 @@ out:
2013 return ret; 2024 return ret;
2014 2025
2015pi_faulted: 2026pi_faulted:
2016 /*
2017 * We have to r/w *(int __user *)uaddr, and we have to modify it
2018 * atomically. Therefore, if we continue to fault after get_user()
2019 * below, we need to handle the fault ourselves, while still holding
2020 * the mmap_sem. This can occur if the uaddr is under contention as
2021 * we have to drop the mmap_sem in order to call get_user().
2022 */
2023 spin_unlock(&hb->lock); 2027 spin_unlock(&hb->lock);
2024 put_futex_key(fshared, &key); 2028 put_futex_key(fshared, &key);
2025 2029
2026 ret = get_user(uval, uaddr); 2030 ret = fault_in_user_writeable(uaddr);
2027 if (!ret) 2031 if (!ret)
2028 goto retry; 2032 goto retry;
2029 2033