Diffstat (limited to 'kernel/futex.c')
-rw-r--r--   kernel/futex.c   138
1 file changed, 66 insertions, 72 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index 45490bec5831..5c3f45d07c53 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -121,6 +121,24 @@ static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
 static struct vfsmount *futex_mnt;
 
 /*
+ * Take mm->mmap_sem, when futex is shared
+ */
+static inline void futex_lock_mm(struct rw_semaphore *fshared)
+{
+	if (fshared)
+		down_read(fshared);
+}
+
+/*
+ * Release mm->mmap_sem, when the futex is shared
+ */
+static inline void futex_unlock_mm(struct rw_semaphore *fshared)
+{
+	if (fshared)
+		up_read(fshared);
+}
+
+/*
  * We hash on the keys returned from get_futex_key (see below).
  */
 static struct futex_hash_bucket *hash_futex(union futex_key *key)
@@ -287,7 +305,18 @@ void drop_futex_key_refs(union futex_key *key)
 }
 EXPORT_SYMBOL_GPL(drop_futex_key_refs);
 
-static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
+static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
+{
+	u32 curval;
+
+	pagefault_disable();
+	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
+	pagefault_enable();
+
+	return curval;
+}
+
+static int get_futex_value_locked(u32 *dest, u32 __user *from)
 {
 	int ret;
 
@@ -620,9 +649,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 
 	newval = FUTEX_WAITERS | new_owner->pid;
 
-	pagefault_disable();
-	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-	pagefault_enable();
+	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
 
 	if (curval == -EFAULT)
 		ret = -EFAULT;
@@ -659,9 +686,7 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
 	 * There is no waiter, so we unlock the futex. The owner died
 	 * bit has not to be preserved here. We are the owner:
 	 */
-	pagefault_disable();
-	oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
-	pagefault_enable();
+	oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
 
 	if (oldval == -EFAULT)
 		return oldval;
@@ -700,8 +725,7 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
 	union futex_key key;
 	int ret;
 
-	if (fshared)
-		down_read(fshared);
+	futex_lock_mm(fshared);
 
 	ret = get_futex_key(uaddr, fshared, &key);
 	if (unlikely(ret != 0))
@@ -725,8 +749,7 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 	spin_unlock(&hb->lock);
 out:
-	if (fshared)
-		up_read(fshared);
+	futex_unlock_mm(fshared);
 	return ret;
 }
 
@@ -746,8 +769,7 @@ futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
 	int ret, op_ret, attempt = 0;
 
 retryfull:
-	if (fshared)
-		down_read(fshared);
+	futex_lock_mm(fshared);
 
 	ret = get_futex_key(uaddr1, fshared, &key1);
 	if (unlikely(ret != 0))
@@ -793,7 +815,7 @@ retry:
 		 */
 		if (attempt++) {
 			ret = futex_handle_fault((unsigned long)uaddr2,
-						fshared, attempt);
+						 fshared, attempt);
 			if (ret)
 				goto out;
 			goto retry;
@@ -803,8 +825,7 @@ retry:
 		 * If we would have faulted, release mmap_sem,
 		 * fault it in and start all over again.
 		 */
-		if (fshared)
-			up_read(fshared);
+		futex_unlock_mm(fshared);
 
 		ret = get_user(dummy, uaddr2);
 		if (ret)
@@ -841,8 +862,8 @@ retry:
 	if (hb1 != hb2)
 		spin_unlock(&hb2->lock);
 out:
-	if (fshared)
-		up_read(fshared);
+	futex_unlock_mm(fshared);
+
 	return ret;
 }
 
@@ -861,8 +882,7 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
 	int ret, drop_count = 0;
 
 retry:
-	if (fshared)
-		down_read(fshared);
+	futex_lock_mm(fshared);
 
 	ret = get_futex_key(uaddr1, fshared, &key1);
 	if (unlikely(ret != 0))
@@ -890,8 +910,7 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
 		 * If we would have faulted, release mmap_sem, fault
 		 * it in and start all over again.
 		 */
-		if (fshared)
-			up_read(fshared);
+		futex_unlock_mm(fshared);
 
 		ret = get_user(curval, uaddr1);
 
@@ -944,8 +963,7 @@ out_unlock:
 	drop_futex_key_refs(&key1);
 
 out:
-	if (fshared)
-		up_read(fshared);
+	futex_unlock_mm(fshared);
 	return ret;
 }
 
@@ -1113,10 +1131,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 	while (!ret) {
 		newval = (uval & FUTEX_OWNER_DIED) | newtid;
 
-		pagefault_disable();
-		curval = futex_atomic_cmpxchg_inatomic(uaddr,
-						       uval, newval);
-		pagefault_enable();
+		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
 
 		if (curval == -EFAULT)
 			ret = -EFAULT;
@@ -1134,6 +1149,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 #define ARG3_SHARED 1
 
 static long futex_wait_restart(struct restart_block *restart);
+
 static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 		      u32 val, ktime_t *abs_time)
 {
@@ -1148,8 +1164,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 	q.pi_state = NULL;
 retry:
-	if (fshared)
-		down_read(fshared);
+	futex_lock_mm(fshared);
 
 	ret = get_futex_key(uaddr, fshared, &q.key);
 	if (unlikely(ret != 0))
@@ -1186,8 +1201,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 		 * If we would have faulted, release mmap_sem, fault it in and
 		 * start all over again.
 		 */
-		if (fshared)
-			up_read(fshared);
+		futex_unlock_mm(fshared);
 
 		ret = get_user(uval, uaddr);
 
@@ -1206,8 +1220,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	 * Now the futex is queued and we have checked the data, we
 	 * don't want to hold mmap_sem while we sleep.
 	 */
-	if (fshared)
-		up_read(fshared);
+	futex_unlock_mm(fshared);
 
 	/*
 	 * There might have been scheduling since the queue_me(), as we
@@ -1285,8 +1298,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	queue_unlock(&q, hb);
 
 out_release_sem:
-	if (fshared)
-		up_read(fshared);
+	futex_unlock_mm(fshared);
 	return ret;
 }
 
@@ -1333,8 +1345,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 	q.pi_state = NULL;
 retry:
-	if (fshared)
-		down_read(fshared);
+	futex_lock_mm(fshared);
 
 	ret = get_futex_key(uaddr, fshared, &q.key);
 	if (unlikely(ret != 0))
@@ -1353,9 +1364,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	 */
 	newval = current->pid;
 
-	pagefault_disable();
-	curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
-	pagefault_enable();
+	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
 
 	if (unlikely(curval == -EFAULT))
 		goto uaddr_faulted;
@@ -1398,9 +1407,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		lock_taken = 1;
 	}
 
-	pagefault_disable();
-	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-	pagefault_enable();
+	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
 
 	if (unlikely(curval == -EFAULT))
 		goto uaddr_faulted;
@@ -1428,8 +1435,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		 * exit to complete.
 		 */
 		queue_unlock(&q, hb);
-		if (fshared)
-			up_read(fshared);
+		futex_unlock_mm(fshared);
 		cond_resched();
 		goto retry;
 
@@ -1465,8 +1471,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	 * Now the futex is queued and we have checked the data, we
 	 * don't want to hold mmap_sem while we sleep.
 	 */
-	if (fshared)
-		up_read(fshared);
+	futex_unlock_mm(fshared);
 
 	WARN_ON(!q.pi_state);
 	/*
@@ -1480,8 +1485,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		ret = ret ? 0 : -EWOULDBLOCK;
 	}
 
-	if (fshared)
-		down_read(fshared);
+	futex_lock_mm(fshared);
 	spin_lock(q.lock_ptr);
 
 	if (!ret) {
@@ -1518,8 +1522,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
-	if (fshared)
-		up_read(fshared);
+	futex_unlock_mm(fshared);
 
 	return ret != -EINTR ? ret : -ERESTARTNOINTR;
 
@@ -1527,8 +1530,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	queue_unlock(&q, hb);
 
 out_release_sem:
-	if (fshared)
-		up_read(fshared);
+	futex_unlock_mm(fshared);
 	return ret;
 
 uaddr_faulted:
@@ -1550,8 +1552,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		goto retry_unlocked;
 	}
 
-	if (fshared)
-		up_read(fshared);
+	futex_unlock_mm(fshared);
 
 	ret = get_user(uval, uaddr);
 	if (!ret && (uval != -EFAULT))
@@ -1585,8 +1586,7 @@ retry:
 	/*
 	 * First take all the futex related locks:
 	 */
-	if (fshared)
-		down_read(fshared);
+	futex_lock_mm(fshared);
 
 	ret = get_futex_key(uaddr, fshared, &key);
 	if (unlikely(ret != 0))
@@ -1601,11 +1601,9 @@ retry_unlocked:
 	 * again. If it succeeds then we can return without waking
 	 * anyone else up:
 	 */
-	if (!(uval & FUTEX_OWNER_DIED)) {
-		pagefault_disable();
-		uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
-		pagefault_enable();
-	}
+	if (!(uval & FUTEX_OWNER_DIED))
+		uval = cmpxchg_futex_value_locked(uaddr, current->pid, 0);
+
 
 	if (unlikely(uval == -EFAULT))
 		goto pi_faulted;
@@ -1647,8 +1645,7 @@ retry_unlocked:
 out_unlock:
 	spin_unlock(&hb->lock);
 out:
-	if (fshared)
-		up_read(fshared);
+	futex_unlock_mm(fshared);
 
 	return ret;
 
@@ -1671,8 +1668,7 @@ pi_faulted:
 		goto retry_unlocked;
 	}
 
-	if (fshared)
-		up_read(fshared);
+	futex_unlock_mm(fshared);
 
 	ret = get_user(uval, uaddr);
 	if (!ret && (uval != -EFAULT))
@@ -1729,8 +1725,8 @@ static int futex_fd(u32 __user *uaddr, int signal)
 
 	if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) {
 		printk(KERN_WARNING "Process `%s' used FUTEX_FD, which "
-			"will be removed from the kernel in June 2007\n",
-			current->comm);
+		       "will be removed from the kernel in June 2007\n",
+		       current->comm);
 	}
 
 	ret = -EINVAL;
@@ -1908,10 +1904,8 @@ retry:
 		 * Wake robust non-PI futexes here. The wakeup of
 		 * PI futexes happens in exit_pi_state():
 		 */
-		if (!pi) {
-			if (uval & FUTEX_WAITERS)
+		if (!pi && (uval & FUTEX_WAITERS))
 			futex_wake(uaddr, &curr->mm->mmap_sem, 1);
-		}
 	}
 	return 0;
 }
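The futex_lock_mm()/futex_unlock_mm() helpers added at the top of the diff centralize the conditional mmap_sem handling that every call site above used to open-code: as the new comments say, the semaphore is taken only when the futex is shared, so a NULL fshared makes both helpers a no-op. A minimal userspace sketch of that pattern (illustrative only, not part of the patch; pthread_rwlock_t stands in for mm->mmap_sem):

/*
 * Illustrative userspace sketch only -- not from the patch above.
 * pthread_rwlock_t stands in for mm->mmap_sem; a NULL pointer plays
 * the role of a private (non-shared) futex, which needs no locking.
 */
#include <pthread.h>
#include <stdio.h>

static inline void futex_lock_mm(pthread_rwlock_t *fshared)
{
	if (fshared)				/* shared futex: take the "mmap_sem" */
		pthread_rwlock_rdlock(fshared);
}

static inline void futex_unlock_mm(pthread_rwlock_t *fshared)
{
	if (fshared)				/* private futex: nothing to drop */
		pthread_rwlock_unlock(fshared);
}

int main(void)
{
	pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

	/* Shared case: the helpers take and drop the read lock. */
	futex_lock_mm(&mmap_sem);
	printf("shared futex: read lock held\n");
	futex_unlock_mm(&mmap_sem);

	/* Private case: NULL means both helpers are no-ops. */
	futex_lock_mm(NULL);
	printf("private futex: no locking needed\n");
	futex_unlock_mm(NULL);

	return 0;
}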
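cmpxchg_futex_value_locked(), also added near the top of the diff, wraps futex_atomic_cmpxchg_inatomic() in pagefault_disable()/pagefault_enable() because its callers run with a futex hash-bucket spinlock held and must not sleep on a fault; it hands back whatever value was observed at uaddr (or -EFAULT). A userspace analogue of that compare-and-exchange calling convention, using the GCC/Clang __atomic builtin; the cmpxchg_u32() name and the demo are assumptions for illustration, not code from the patch:

/*
 * Illustrative userspace analogue only -- not from the patch above.
 * Mirrors the calling convention of cmpxchg_futex_value_locked():
 * the caller passes the expected and new values and gets back the
 * value actually observed at the address, so observed == expected
 * means the swap happened.  (The kernel helper can also return
 * -EFAULT, which has no equivalent here.)
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t cmpxchg_u32(uint32_t *uaddr, uint32_t expected, uint32_t newval)
{
	uint32_t observed = expected;

	/* On failure the builtin writes the current value into
	 * 'observed'; on success it leaves it equal to 'expected'. */
	__atomic_compare_exchange_n(uaddr, &observed, newval, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return observed;
}

int main(void)
{
	uint32_t futex_word = 0;

	/* Succeeds: the word still holds 0, so it becomes 42. */
	uint32_t cur = cmpxchg_u32(&futex_word, 0, 42);
	printf("expected 0, observed %u, word now %u\n", cur, futex_word);

	/* Fails: the word holds 42, not 7, so it is left untouched. */
	cur = cmpxchg_u32(&futex_word, 7, 99);
	printf("expected 7, observed %u, word now %u\n", cur, futex_word);

	return 0;
}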
