author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-09-26 13:32:21 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-30 06:35:36 -0400
commit		61270708ecf1cda148e84fbf6e0703ee5aa81814
tree		4c5bf27f99e5371da667e8c4105d2839e664409a /kernel
parent		38d47c1b7075bd7ec3881141bb3629da58f88dab
futex: reduce mmap_sem usage
Now that we rely on get_user_pages() for the shared key handling, move all
the mmap_sem locking tightly around the slow paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
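
In short, callers such as futex_wait(), futex_wake(), futex_requeue() and the PI paths no longer bracket the whole operation with futex_lock_mm()/futex_unlock_mm(); instead get_futex_key() takes mm->mmap_sem only around get_user_pages(), and futex_handle_fault() takes it unconditionally around its own fault fixup. The following stand-alone user-space sketch illustrates that narrowed-lock pattern under stated assumptions only (pthread_rwlock_t stands in for mm->mmap_sem, lookup_page() for get_user_pages()); it is not kernel code.

/*
 * Illustration only -- not kernel code. pthread_rwlock_t stands in for
 * mm->mmap_sem, lookup_page() for get_user_pages().
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t address_space_lock = PTHREAD_RWLOCK_INITIALIZER;

/* stand-in for get_user_pages(): the only step that really needs the lock */
static int lookup_page(unsigned long address, unsigned long *page)
{
	*page = address & ~0xfffUL;	/* pretend we resolved the backing page */
	return 0;
}

/* old shape: the whole futex operation ran under the read lock */
static int futex_op_wide_lock(unsigned long uaddr)
{
	unsigned long page;
	int err;

	pthread_rwlock_rdlock(&address_space_lock);	/* futex_lock_mm() */
	err = lookup_page(uaddr, &page);
	/* ... hash the key, take hash-bucket locks, maybe sleep ... */
	pthread_rwlock_unlock(&address_space_lock);	/* futex_unlock_mm() */
	return err;
}

/* new shape: the read lock brackets only the slow-path lookup */
static int futex_op_narrow_lock(unsigned long uaddr)
{
	unsigned long page;
	int err;

	pthread_rwlock_rdlock(&address_space_lock);
	err = lookup_page(uaddr, &page);
	pthread_rwlock_unlock(&address_space_lock);
	if (err)
		return err;
	/* ... hash the key, take hash-bucket locks, maybe sleep ... */
	return 0;
}

int main(void)
{
	printf("wide: %d, narrow: %d\n",
	       futex_op_wide_lock(0x1000), futex_op_narrow_lock(0x1000));
	return 0;
}

The payoff is the same in both settings: the address-space lock is held only for the short slow-path lookup, so sleeping with a futex queued no longer pins mmap_sem.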
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/futex.c	83
1 file changed, 4 insertions(+), 79 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index a4c39fa0a7a3..6a726684217e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -123,24 +123,6 @@ struct futex_hash_bucket {
 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
 
 /*
- * Take mm->mmap_sem, when futex is shared
- */
-static inline void futex_lock_mm(struct rw_semaphore *fshared)
-{
-	if (fshared)
-		down_read(fshared);
-}
-
-/*
- * Release mm->mmap_sem, when the futex is shared
- */
-static inline void futex_unlock_mm(struct rw_semaphore *fshared)
-{
-	if (fshared)
-		up_read(fshared);
-}
-
-/*
  * We hash on the keys returned from get_futex_key (see below).
  */
 static struct futex_hash_bucket *hash_futex(union futex_key *key)
@@ -250,7 +232,9 @@ static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
 	}
 
 again:
+	down_read(&mm->mmap_sem);
 	err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
+	up_read(&mm->mmap_sem);
 	if (err < 0)
 		return err;
 
@@ -327,8 +311,7 @@ static int futex_handle_fault(unsigned long address,
 	if (attempt > 2)
 		return ret;
 
-	if (!fshared)
-		down_read(&mm->mmap_sem);
+	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 	if (vma && address >= vma->vm_start &&
 	    (vma->vm_flags & VM_WRITE)) {
@@ -348,8 +331,7 @@ static int futex_handle_fault(unsigned long address,
 				current->min_flt++;
 		}
 	}
-	if (!fshared)
-		up_read(&mm->mmap_sem);
+	up_read(&mm->mmap_sem);
 	return ret;
 }
 
@@ -719,8 +701,6 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
 	if (!bitset)
 		return -EINVAL;
 
-	futex_lock_mm(fshared);
-
 	ret = get_futex_key(uaddr, fshared, &key);
 	if (unlikely(ret != 0))
 		goto out;
@@ -749,7 +729,6 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
 	spin_unlock(&hb->lock);
 out:
 	put_futex_key(fshared, &key);
-	futex_unlock_mm(fshared);
 	return ret;
 }
 
@@ -769,8 +748,6 @@ futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
 	int ret, op_ret, attempt = 0;
 
 retryfull:
-	futex_lock_mm(fshared);
-
 	ret = get_futex_key(uaddr1, fshared, &key1);
 	if (unlikely(ret != 0))
 		goto out;
@@ -821,12 +798,6 @@ retry:
 			goto retry;
 		}
 
-		/*
-		 * If we would have faulted, release mmap_sem,
-		 * fault it in and start all over again.
-		 */
-		futex_unlock_mm(fshared);
-
 		ret = get_user(dummy, uaddr2);
 		if (ret)
 			return ret;
@@ -864,7 +835,6 @@ retry:
 out:
 	put_futex_key(fshared, &key2);
 	put_futex_key(fshared, &key1);
-	futex_unlock_mm(fshared);
 
 	return ret;
 }
@@ -884,8 +854,6 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
 	int ret, drop_count = 0;
 
 retry:
-	futex_lock_mm(fshared);
-
 	ret = get_futex_key(uaddr1, fshared, &key1);
 	if (unlikely(ret != 0))
 		goto out;
@@ -908,12 +876,6 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
 			if (hb1 != hb2)
 				spin_unlock(&hb2->lock);
 
-			/*
-			 * If we would have faulted, release mmap_sem, fault
-			 * it in and start all over again.
-			 */
-			futex_unlock_mm(fshared);
-
 			ret = get_user(curval, uaddr1);
 
 			if (!ret)
@@ -967,7 +929,6 @@ out_unlock:
 out:
 	put_futex_key(fshared, &key2);
 	put_futex_key(fshared, &key1);
-	futex_unlock_mm(fshared);
 	return ret;
 }
 
@@ -1211,8 +1172,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	q.pi_state = NULL;
 	q.bitset = bitset;
 retry:
-	futex_lock_mm(fshared);
-
 	q.key = FUTEX_KEY_INIT;
 	ret = get_futex_key(uaddr, fshared, &q.key);
 	if (unlikely(ret != 0))
@@ -1245,12 +1204,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	if (unlikely(ret)) {
 		queue_unlock(&q, hb);
 
-		/*
-		 * If we would have faulted, release mmap_sem, fault it in and
-		 * start all over again.
-		 */
-		futex_unlock_mm(fshared);
-
 		ret = get_user(uval, uaddr);
 
 		if (!ret)
@@ -1265,12 +1218,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	queue_me(&q, hb);
 
 	/*
-	 * Now the futex is queued and we have checked the data, we
-	 * don't want to hold mmap_sem while we sleep.
-	 */
-	futex_unlock_mm(fshared);
-
-	/*
 	 * There might have been scheduling since the queue_me(), as we
 	 * cannot hold a spinlock across the get_user() in case it
 	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
@@ -1355,7 +1302,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 out_release_sem:
 	put_futex_key(fshared, &q.key);
-	futex_unlock_mm(fshared);
 	return ret;
 }
 
@@ -1404,8 +1350,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 	q.pi_state = NULL;
 retry:
-	futex_lock_mm(fshared);
-
 	q.key = FUTEX_KEY_INIT;
 	ret = get_futex_key(uaddr, fshared, &q.key);
 	if (unlikely(ret != 0))
@@ -1495,7 +1439,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 			 * exit to complete.
 			 */
 			queue_unlock(&q, hb);
-			futex_unlock_mm(fshared);
 			cond_resched();
 			goto retry;
 
@@ -1527,12 +1470,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	 */
 	queue_me(&q, hb);
 
-	/*
-	 * Now the futex is queued and we have checked the data, we
-	 * don't want to hold mmap_sem while we sleep.
-	 */
-	futex_unlock_mm(fshared);
-
 	WARN_ON(!q.pi_state);
 	/*
 	 * Block on the PI mutex:
@@ -1545,7 +1482,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		ret = ret ? 0 : -EWOULDBLOCK;
 	}
 
-	futex_lock_mm(fshared);
 	spin_lock(q.lock_ptr);
 
 	if (!ret) {
@@ -1611,7 +1547,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
-	futex_unlock_mm(fshared);
 
 	if (to)
 		destroy_hrtimer_on_stack(&to->timer);
@@ -1622,7 +1557,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 out_release_sem:
 	put_futex_key(fshared, &q.key);
-	futex_unlock_mm(fshared);
 	if (to)
 		destroy_hrtimer_on_stack(&to->timer);
 	return ret;
@@ -1646,8 +1580,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		goto retry_unlocked;
 	}
 
-	futex_unlock_mm(fshared);
-
 	ret = get_user(uval, uaddr);
 	if (!ret && (uval != -EFAULT))
 		goto retry;
@@ -1679,10 +1611,6 @@ retry:
 	 */
 	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
 		return -EPERM;
-	/*
-	 * First take all the futex related locks:
-	 */
-	futex_lock_mm(fshared);
 
 	ret = get_futex_key(uaddr, fshared, &key);
 	if (unlikely(ret != 0))
@@ -1742,7 +1670,6 @@ out_unlock:
 	spin_unlock(&hb->lock);
 out:
 	put_futex_key(fshared, &key);
-	futex_unlock_mm(fshared);
 
 	return ret;
 
@@ -1766,8 +1693,6 @@ pi_faulted:
 		goto retry_unlocked;
 	}
 
-	futex_unlock_mm(fshared);
-
 	ret = get_user(uval, uaddr);
 	if (!ret && (uval != -EFAULT))
 		goto retry;