diff options
| author | Len Brown <len.brown@intel.com> | 2009-04-05 02:14:15 -0400 | 
|---|---|---|
| committer | Len Brown <len.brown@intel.com> | 2009-04-05 02:14:15 -0400 | 
| commit | 478c6a43fcbc6c11609f8cee7c7b57223907754f (patch) | |
| tree | a7f7952099da60d33032aed6de9c0c56c9f8779e /kernel/futex.c | |
| parent | 8a3f257c704e02aee9869decd069a806b45be3f1 (diff) | |
| parent | 6bb597507f9839b13498781e481f5458aea33620 (diff) | |
Merge branch 'linus' into release
Conflicts:
	arch/x86/kernel/cpu/cpufreq/longhaul.c
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'kernel/futex.c')
| -rw-r--r-- | kernel/futex.c | 201 | 
1 files changed, 76 insertions, 125 deletions
| diff --git a/kernel/futex.c b/kernel/futex.c index 438701adce23..6b50a024bca2 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
| @@ -114,7 +114,9 @@ struct futex_q { | |||
| 114 | }; | 114 | }; | 
| 115 | 115 | ||
| 116 | /* | 116 | /* | 
| 117 | * Split the global futex_lock into every hash list lock. | 117 | * Hash buckets are shared by all the futex_keys that hash to the same | 
| 118 | * location. Each key may have multiple futex_q structures, one for each task | ||
| 119 | * waiting on a futex. | ||
| 118 | */ | 120 | */ | 
| 119 | struct futex_hash_bucket { | 121 | struct futex_hash_bucket { | 
| 120 | spinlock_t lock; | 122 | spinlock_t lock; | 
| @@ -189,8 +191,7 @@ static void drop_futex_key_refs(union futex_key *key) | |||
| 189 | /** | 191 | /** | 
| 190 | * get_futex_key - Get parameters which are the keys for a futex. | 192 | * get_futex_key - Get parameters which are the keys for a futex. | 
| 191 | * @uaddr: virtual address of the futex | 193 | * @uaddr: virtual address of the futex | 
| 192 | * @shared: NULL for a PROCESS_PRIVATE futex, | 194 | * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED | 
| 193 | * &current->mm->mmap_sem for a PROCESS_SHARED futex | ||
| 194 | * @key: address where result is stored. | 195 | * @key: address where result is stored. | 
| 195 | * | 196 | * | 
| 196 | * Returns a negative error code or 0 | 197 | * Returns a negative error code or 0 | 
| @@ -200,9 +201,7 @@ static void drop_futex_key_refs(union futex_key *key) | |||
| 200 | * offset_within_page). For private mappings, it's (uaddr, current->mm). | 201 | * offset_within_page). For private mappings, it's (uaddr, current->mm). | 
| 201 | * We can usually work out the index without swapping in the page. | 202 | * We can usually work out the index without swapping in the page. | 
| 202 | * | 203 | * | 
| 203 | * fshared is NULL for PROCESS_PRIVATE futexes | 204 | * lock_page() might sleep, the caller should not hold a spinlock. | 
| 204 | * For other futexes, it points to &current->mm->mmap_sem and | ||
| 205 | * caller must have taken the reader lock. but NOT any spinlocks. | ||
| 206 | */ | 205 | */ | 
| 207 | static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) | 206 | static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) | 
| 208 | { | 207 | { | 
| @@ -299,41 +298,6 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from) | |||
| 299 | return ret ? -EFAULT : 0; | 298 | return ret ? -EFAULT : 0; | 
| 300 | } | 299 | } | 
| 301 | 300 | ||
| 302 | /* | ||
| 303 | * Fault handling. | ||
| 304 | */ | ||
| 305 | static int futex_handle_fault(unsigned long address, int attempt) | ||
| 306 | { | ||
| 307 | struct vm_area_struct * vma; | ||
| 308 | struct mm_struct *mm = current->mm; | ||
| 309 | int ret = -EFAULT; | ||
| 310 | |||
| 311 | if (attempt > 2) | ||
| 312 | return ret; | ||
| 313 | |||
| 314 | down_read(&mm->mmap_sem); | ||
| 315 | vma = find_vma(mm, address); | ||
| 316 | if (vma && address >= vma->vm_start && | ||
| 317 | (vma->vm_flags & VM_WRITE)) { | ||
| 318 | int fault; | ||
| 319 | fault = handle_mm_fault(mm, vma, address, 1); | ||
| 320 | if (unlikely((fault & VM_FAULT_ERROR))) { | ||
| 321 | #if 0 | ||
| 322 | /* XXX: let's do this when we verify it is OK */ | ||
| 323 | if (ret & VM_FAULT_OOM) | ||
| 324 | ret = -ENOMEM; | ||
| 325 | #endif | ||
| 326 | } else { | ||
| 327 | ret = 0; | ||
| 328 | if (fault & VM_FAULT_MAJOR) | ||
| 329 | current->maj_flt++; | ||
| 330 | else | ||
| 331 | current->min_flt++; | ||
| 332 | } | ||
| 333 | } | ||
| 334 | up_read(&mm->mmap_sem); | ||
| 335 | return ret; | ||
| 336 | } | ||
| 337 | 301 | ||
| 338 | /* | 302 | /* | 
| 339 | * PI code: | 303 | * PI code: | 
| @@ -589,10 +553,9 @@ static void wake_futex(struct futex_q *q) | |||
| 589 | * The waiting task can free the futex_q as soon as this is written, | 553 | * The waiting task can free the futex_q as soon as this is written, | 
| 590 | * without taking any locks. This must come last. | 554 | * without taking any locks. This must come last. | 
| 591 | * | 555 | * | 
| 592 | * A memory barrier is required here to prevent the following store | 556 | * A memory barrier is required here to prevent the following store to | 
| 593 | * to lock_ptr from getting ahead of the wakeup. Clearing the lock | 557 | * lock_ptr from getting ahead of the wakeup. Clearing the lock at the | 
| 594 | * at the end of wake_up_all() does not prevent this store from | 558 | * end of wake_up() does not prevent this store from moving. | 
| 595 | * moving. | ||
| 596 | */ | 559 | */ | 
| 597 | smp_wmb(); | 560 | smp_wmb(); | 
| 598 | q->lock_ptr = NULL; | 561 | q->lock_ptr = NULL; | 
| @@ -692,9 +655,16 @@ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) | |||
| 692 | } | 655 | } | 
| 693 | } | 656 | } | 
| 694 | 657 | ||
| 658 | static inline void | ||
| 659 | double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) | ||
| 660 | { | ||
| 661 | spin_unlock(&hb1->lock); | ||
| 662 | if (hb1 != hb2) | ||
| 663 | spin_unlock(&hb2->lock); | ||
| 664 | } | ||
| 665 | |||
| 695 | /* | 666 | /* | 
| 696 | * Wake up all waiters hashed on the physical page that is mapped | 667 | * Wake up waiters matching bitset queued on this futex (uaddr). | 
| 697 | * to this virtual address: | ||
| 698 | */ | 668 | */ | 
| 699 | static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) | 669 | static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) | 
| 700 | { | 670 | { | 
| @@ -750,9 +720,9 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, | |||
| 750 | struct futex_hash_bucket *hb1, *hb2; | 720 | struct futex_hash_bucket *hb1, *hb2; | 
| 751 | struct plist_head *head; | 721 | struct plist_head *head; | 
| 752 | struct futex_q *this, *next; | 722 | struct futex_q *this, *next; | 
| 753 | int ret, op_ret, attempt = 0; | 723 | int ret, op_ret; | 
| 754 | 724 | ||
| 755 | retryfull: | 725 | retry: | 
| 756 | ret = get_futex_key(uaddr1, fshared, &key1); | 726 | ret = get_futex_key(uaddr1, fshared, &key1); | 
| 757 | if (unlikely(ret != 0)) | 727 | if (unlikely(ret != 0)) | 
| 758 | goto out; | 728 | goto out; | 
| @@ -763,16 +733,13 @@ retryfull: | |||
| 763 | hb1 = hash_futex(&key1); | 733 | hb1 = hash_futex(&key1); | 
| 764 | hb2 = hash_futex(&key2); | 734 | hb2 = hash_futex(&key2); | 
| 765 | 735 | ||
| 766 | retry: | ||
| 767 | double_lock_hb(hb1, hb2); | 736 | double_lock_hb(hb1, hb2); | 
| 768 | 737 | retry_private: | |
| 769 | op_ret = futex_atomic_op_inuser(op, uaddr2); | 738 | op_ret = futex_atomic_op_inuser(op, uaddr2); | 
| 770 | if (unlikely(op_ret < 0)) { | 739 | if (unlikely(op_ret < 0)) { | 
| 771 | u32 dummy; | 740 | u32 dummy; | 
| 772 | 741 | ||
| 773 | spin_unlock(&hb1->lock); | 742 | double_unlock_hb(hb1, hb2); | 
| 774 | if (hb1 != hb2) | ||
| 775 | spin_unlock(&hb2->lock); | ||
| 776 | 743 | ||
| 777 | #ifndef CONFIG_MMU | 744 | #ifndef CONFIG_MMU | 
| 778 | /* | 745 | /* | 
| @@ -788,26 +755,16 @@ retry: | |||
| 788 | goto out_put_keys; | 755 | goto out_put_keys; | 
| 789 | } | 756 | } | 
| 790 | 757 | ||
| 791 | /* | ||
| 792 | * futex_atomic_op_inuser needs to both read and write | ||
| 793 | * *(int __user *)uaddr2, but we can't modify it | ||
| 794 | * non-atomically. Therefore, if get_user below is not | ||
| 795 | * enough, we need to handle the fault ourselves, while | ||
| 796 | * still holding the mmap_sem. | ||
| 797 | */ | ||
| 798 | if (attempt++) { | ||
| 799 | ret = futex_handle_fault((unsigned long)uaddr2, | ||
| 800 | attempt); | ||
| 801 | if (ret) | ||
| 802 | goto out_put_keys; | ||
| 803 | goto retry; | ||
| 804 | } | ||
| 805 | |||
| 806 | ret = get_user(dummy, uaddr2); | 758 | ret = get_user(dummy, uaddr2); | 
| 807 | if (ret) | 759 | if (ret) | 
| 808 | return ret; | 760 | goto out_put_keys; | 
| 761 | |||
| 762 | if (!fshared) | ||
| 763 | goto retry_private; | ||
| 809 | 764 | ||
| 810 | goto retryfull; | 765 | put_futex_key(fshared, &key2); | 
| 766 | put_futex_key(fshared, &key1); | ||
| 767 | goto retry; | ||
| 811 | } | 768 | } | 
| 812 | 769 | ||
| 813 | head = &hb1->chain; | 770 | head = &hb1->chain; | 
| @@ -834,9 +791,7 @@ retry: | |||
| 834 | ret += op_ret; | 791 | ret += op_ret; | 
| 835 | } | 792 | } | 
| 836 | 793 | ||
| 837 | spin_unlock(&hb1->lock); | 794 | double_unlock_hb(hb1, hb2); | 
| 838 | if (hb1 != hb2) | ||
| 839 | spin_unlock(&hb2->lock); | ||
| 840 | out_put_keys: | 795 | out_put_keys: | 
| 841 | put_futex_key(fshared, &key2); | 796 | put_futex_key(fshared, &key2); | 
| 842 | out_put_key1: | 797 | out_put_key1: | 
| @@ -869,6 +824,7 @@ retry: | |||
| 869 | hb1 = hash_futex(&key1); | 824 | hb1 = hash_futex(&key1); | 
| 870 | hb2 = hash_futex(&key2); | 825 | hb2 = hash_futex(&key2); | 
| 871 | 826 | ||
| 827 | retry_private: | ||
| 872 | double_lock_hb(hb1, hb2); | 828 | double_lock_hb(hb1, hb2); | 
| 873 | 829 | ||
| 874 | if (likely(cmpval != NULL)) { | 830 | if (likely(cmpval != NULL)) { | 
| @@ -877,16 +833,18 @@ retry: | |||
| 877 | ret = get_futex_value_locked(&curval, uaddr1); | 833 | ret = get_futex_value_locked(&curval, uaddr1); | 
| 878 | 834 | ||
| 879 | if (unlikely(ret)) { | 835 | if (unlikely(ret)) { | 
| 880 | spin_unlock(&hb1->lock); | 836 | double_unlock_hb(hb1, hb2); | 
| 881 | if (hb1 != hb2) | ||
| 882 | spin_unlock(&hb2->lock); | ||
| 883 | 837 | ||
| 884 | ret = get_user(curval, uaddr1); | 838 | ret = get_user(curval, uaddr1); | 
| 839 | if (ret) | ||
| 840 | goto out_put_keys; | ||
| 885 | 841 | ||
| 886 | if (!ret) | 842 | if (!fshared) | 
| 887 | goto retry; | 843 | goto retry_private; | 
| 888 | 844 | ||
| 889 | goto out_put_keys; | 845 | put_futex_key(fshared, &key2); | 
| 846 | put_futex_key(fshared, &key1); | ||
| 847 | goto retry; | ||
| 890 | } | 848 | } | 
| 891 | if (curval != *cmpval) { | 849 | if (curval != *cmpval) { | 
| 892 | ret = -EAGAIN; | 850 | ret = -EAGAIN; | 
| @@ -923,9 +881,7 @@ retry: | |||
| 923 | } | 881 | } | 
| 924 | 882 | ||
| 925 | out_unlock: | 883 | out_unlock: | 
| 926 | spin_unlock(&hb1->lock); | 884 | double_unlock_hb(hb1, hb2); | 
| 927 | if (hb1 != hb2) | ||
| 928 | spin_unlock(&hb2->lock); | ||
| 929 | 885 | ||
| 930 | /* drop_futex_key_refs() must be called outside the spinlocks. */ | 886 | /* drop_futex_key_refs() must be called outside the spinlocks. */ | 
| 931 | while (--drop_count >= 0) | 887 | while (--drop_count >= 0) | 
| @@ -1063,7 +1019,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, | |||
| 1063 | struct futex_pi_state *pi_state = q->pi_state; | 1019 | struct futex_pi_state *pi_state = q->pi_state; | 
| 1064 | struct task_struct *oldowner = pi_state->owner; | 1020 | struct task_struct *oldowner = pi_state->owner; | 
| 1065 | u32 uval, curval, newval; | 1021 | u32 uval, curval, newval; | 
| 1066 | int ret, attempt = 0; | 1022 | int ret; | 
| 1067 | 1023 | ||
| 1068 | /* Owner died? */ | 1024 | /* Owner died? */ | 
| 1069 | if (!pi_state->owner) | 1025 | if (!pi_state->owner) | 
| @@ -1076,11 +1032,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, | |||
| 1076 | * in the user space variable. This must be atomic as we have | 1032 | * in the user space variable. This must be atomic as we have | 
| 1077 | * to preserve the owner died bit here. | 1033 | * to preserve the owner died bit here. | 
| 1078 | * | 1034 | * | 
| 1079 | * Note: We write the user space value _before_ changing the | 1035 | * Note: We write the user space value _before_ changing the pi_state | 
| 1080 | * pi_state because we can fault here. Imagine swapped out | 1036 | * because we can fault here. Imagine swapped out pages or a fork | 
| 1081 | * pages or a fork, which was running right before we acquired | 1037 | * that marked all the anonymous memory readonly for cow. | 
| 1082 | * mmap_sem, that marked all the anonymous memory readonly for | ||
| 1083 | * cow. | ||
| 1084 | * | 1038 | * | 
| 1085 | * Modifying pi_state _before_ the user space value would | 1039 | * Modifying pi_state _before_ the user space value would | 
| 1086 | * leave the pi_state in an inconsistent state when we fault | 1040 | * leave the pi_state in an inconsistent state when we fault | 
| @@ -1136,7 +1090,7 @@ retry: | |||
| 1136 | handle_fault: | 1090 | handle_fault: | 
| 1137 | spin_unlock(q->lock_ptr); | 1091 | spin_unlock(q->lock_ptr); | 
| 1138 | 1092 | ||
| 1139 | ret = futex_handle_fault((unsigned long)uaddr, attempt++); | 1093 | ret = get_user(uval, uaddr); | 
| 1140 | 1094 | ||
| 1141 | spin_lock(q->lock_ptr); | 1095 | spin_lock(q->lock_ptr); | 
| 1142 | 1096 | ||
| @@ -1185,10 +1139,11 @@ retry: | |||
| 1185 | if (unlikely(ret != 0)) | 1139 | if (unlikely(ret != 0)) | 
| 1186 | goto out; | 1140 | goto out; | 
| 1187 | 1141 | ||
| 1142 | retry_private: | ||
| 1188 | hb = queue_lock(&q); | 1143 | hb = queue_lock(&q); | 
| 1189 | 1144 | ||
| 1190 | /* | 1145 | /* | 
| 1191 | * Access the page AFTER the futex is queued. | 1146 | * Access the page AFTER the hash-bucket is locked. | 
| 1192 | * Order is important: | 1147 | * Order is important: | 
| 1193 | * | 1148 | * | 
| 1194 | * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); | 1149 | * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); | 
| @@ -1204,20 +1159,23 @@ retry: | |||
| 1204 | * a wakeup when *uaddr != val on entry to the syscall. This is | 1159 | * a wakeup when *uaddr != val on entry to the syscall. This is | 
| 1205 | * rare, but normal. | 1160 | * rare, but normal. | 
| 1206 | * | 1161 | * | 
| 1207 | * for shared futexes, we hold the mmap semaphore, so the mapping | 1162 | * For shared futexes, we hold the mmap semaphore, so the mapping | 
| 1208 | * cannot have changed since we looked it up in get_futex_key. | 1163 | * cannot have changed since we looked it up in get_futex_key. | 
| 1209 | */ | 1164 | */ | 
| 1210 | ret = get_futex_value_locked(&uval, uaddr); | 1165 | ret = get_futex_value_locked(&uval, uaddr); | 
| 1211 | 1166 | ||
| 1212 | if (unlikely(ret)) { | 1167 | if (unlikely(ret)) { | 
| 1213 | queue_unlock(&q, hb); | 1168 | queue_unlock(&q, hb); | 
| 1214 | put_futex_key(fshared, &q.key); | ||
| 1215 | 1169 | ||
| 1216 | ret = get_user(uval, uaddr); | 1170 | ret = get_user(uval, uaddr); | 
| 1171 | if (ret) | ||
| 1172 | goto out_put_key; | ||
| 1217 | 1173 | ||
| 1218 | if (!ret) | 1174 | if (!fshared) | 
| 1219 | goto retry; | 1175 | goto retry_private; | 
| 1220 | goto out; | 1176 | |
| 1177 | put_futex_key(fshared, &q.key); | ||
| 1178 | goto retry; | ||
| 1221 | } | 1179 | } | 
| 1222 | ret = -EWOULDBLOCK; | 1180 | ret = -EWOULDBLOCK; | 
| 1223 | if (unlikely(uval != val)) { | 1181 | if (unlikely(uval != val)) { | 
| @@ -1248,16 +1206,13 @@ retry: | |||
| 1248 | if (!abs_time) | 1206 | if (!abs_time) | 
| 1249 | schedule(); | 1207 | schedule(); | 
| 1250 | else { | 1208 | else { | 
| 1251 | unsigned long slack; | ||
| 1252 | slack = current->timer_slack_ns; | ||
| 1253 | if (rt_task(current)) | ||
| 1254 | slack = 0; | ||
| 1255 | hrtimer_init_on_stack(&t.timer, | 1209 | hrtimer_init_on_stack(&t.timer, | 
| 1256 | clockrt ? CLOCK_REALTIME : | 1210 | clockrt ? CLOCK_REALTIME : | 
| 1257 | CLOCK_MONOTONIC, | 1211 | CLOCK_MONOTONIC, | 
| 1258 | HRTIMER_MODE_ABS); | 1212 | HRTIMER_MODE_ABS); | 
| 1259 | hrtimer_init_sleeper(&t, current); | 1213 | hrtimer_init_sleeper(&t, current); | 
| 1260 | hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack); | 1214 | hrtimer_set_expires_range_ns(&t.timer, *abs_time, | 
| 1215 | current->timer_slack_ns); | ||
| 1261 | 1216 | ||
| 1262 | hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); | 1217 | hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); | 
| 1263 | if (!hrtimer_active(&t.timer)) | 1218 | if (!hrtimer_active(&t.timer)) | 
| @@ -1354,7 +1309,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared, | |||
| 1354 | struct futex_hash_bucket *hb; | 1309 | struct futex_hash_bucket *hb; | 
| 1355 | u32 uval, newval, curval; | 1310 | u32 uval, newval, curval; | 
| 1356 | struct futex_q q; | 1311 | struct futex_q q; | 
| 1357 | int ret, lock_taken, ownerdied = 0, attempt = 0; | 1312 | int ret, lock_taken, ownerdied = 0; | 
| 1358 | 1313 | ||
| 1359 | if (refill_pi_state_cache()) | 1314 | if (refill_pi_state_cache()) | 
| 1360 | return -ENOMEM; | 1315 | return -ENOMEM; | 
| @@ -1374,7 +1329,7 @@ retry: | |||
| 1374 | if (unlikely(ret != 0)) | 1329 | if (unlikely(ret != 0)) | 
| 1375 | goto out; | 1330 | goto out; | 
| 1376 | 1331 | ||
| 1377 | retry_unlocked: | 1332 | retry_private: | 
| 1378 | hb = queue_lock(&q); | 1333 | hb = queue_lock(&q); | 
| 1379 | 1334 | ||
| 1380 | retry_locked: | 1335 | retry_locked: | 
| @@ -1458,6 +1413,7 @@ retry_locked: | |||
| 1458 | * exit to complete. | 1413 | * exit to complete. | 
| 1459 | */ | 1414 | */ | 
| 1460 | queue_unlock(&q, hb); | 1415 | queue_unlock(&q, hb); | 
| 1416 | put_futex_key(fshared, &q.key); | ||
| 1461 | cond_resched(); | 1417 | cond_resched(); | 
| 1462 | goto retry; | 1418 | goto retry; | 
| 1463 | 1419 | ||
| @@ -1564,6 +1520,13 @@ retry_locked: | |||
| 1564 | } | 1520 | } | 
| 1565 | } | 1521 | } | 
| 1566 | 1522 | ||
| 1523 | /* | ||
| 1524 | * If fixup_pi_state_owner() faulted and was unable to handle the | ||
| 1525 | * fault, unlock it and return the fault to userspace. | ||
| 1526 | */ | ||
| 1527 | if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) | ||
| 1528 | rt_mutex_unlock(&q.pi_state->pi_mutex); | ||
| 1529 | |||
| 1567 | /* Unqueue and drop the lock */ | 1530 | /* Unqueue and drop the lock */ | 
| 1568 | unqueue_me_pi(&q); | 1531 | unqueue_me_pi(&q); | 
| 1569 | 1532 | ||
| @@ -1591,22 +1554,18 @@ uaddr_faulted: | |||
| 1591 | */ | 1554 | */ | 
| 1592 | queue_unlock(&q, hb); | 1555 | queue_unlock(&q, hb); | 
| 1593 | 1556 | ||
| 1594 | if (attempt++) { | ||
| 1595 | ret = futex_handle_fault((unsigned long)uaddr, attempt); | ||
| 1596 | if (ret) | ||
| 1597 | goto out_put_key; | ||
| 1598 | goto retry_unlocked; | ||
| 1599 | } | ||
| 1600 | |||
| 1601 | ret = get_user(uval, uaddr); | 1557 | ret = get_user(uval, uaddr); | 
| 1602 | if (!ret) | 1558 | if (ret) | 
| 1603 | goto retry; | 1559 | goto out_put_key; | 
| 1604 | 1560 | ||
| 1605 | if (to) | 1561 | if (!fshared) | 
| 1606 | destroy_hrtimer_on_stack(&to->timer); | 1562 | goto retry_private; | 
| 1607 | return ret; | 1563 | |
| 1564 | put_futex_key(fshared, &q.key); | ||
| 1565 | goto retry; | ||
| 1608 | } | 1566 | } | 
| 1609 | 1567 | ||
| 1568 | |||
| 1610 | /* | 1569 | /* | 
| 1611 | * Userspace attempted a TID -> 0 atomic transition, and failed. | 1570 | * Userspace attempted a TID -> 0 atomic transition, and failed. | 
| 1612 | * This is the in-kernel slowpath: we look up the PI state (if any), | 1571 | * This is the in-kernel slowpath: we look up the PI state (if any), | 
| @@ -1619,7 +1578,7 @@ static int futex_unlock_pi(u32 __user *uaddr, int fshared) | |||
| 1619 | u32 uval; | 1578 | u32 uval; | 
| 1620 | struct plist_head *head; | 1579 | struct plist_head *head; | 
| 1621 | union futex_key key = FUTEX_KEY_INIT; | 1580 | union futex_key key = FUTEX_KEY_INIT; | 
| 1622 | int ret, attempt = 0; | 1581 | int ret; | 
| 1623 | 1582 | ||
| 1624 | retry: | 1583 | retry: | 
| 1625 | if (get_user(uval, uaddr)) | 1584 | if (get_user(uval, uaddr)) | 
| @@ -1635,7 +1594,6 @@ retry: | |||
| 1635 | goto out; | 1594 | goto out; | 
| 1636 | 1595 | ||
| 1637 | hb = hash_futex(&key); | 1596 | hb = hash_futex(&key); | 
| 1638 | retry_unlocked: | ||
| 1639 | spin_lock(&hb->lock); | 1597 | spin_lock(&hb->lock); | 
| 1640 | 1598 | ||
| 1641 | /* | 1599 | /* | 
| @@ -1700,14 +1658,7 @@ pi_faulted: | |||
| 1700 | * we have to drop the mmap_sem in order to call get_user(). | 1658 | * we have to drop the mmap_sem in order to call get_user(). | 
| 1701 | */ | 1659 | */ | 
| 1702 | spin_unlock(&hb->lock); | 1660 | spin_unlock(&hb->lock); | 
| 1703 | 1661 | put_futex_key(fshared, &key); | |
| 1704 | if (attempt++) { | ||
| 1705 | ret = futex_handle_fault((unsigned long)uaddr, attempt); | ||
| 1706 | if (ret) | ||
| 1707 | goto out; | ||
| 1708 | uval = 0; | ||
| 1709 | goto retry_unlocked; | ||
| 1710 | } | ||
| 1711 | 1662 | ||
| 1712 | ret = get_user(uval, uaddr); | 1663 | ret = get_user(uval, uaddr); | 
| 1713 | if (!ret) | 1664 | if (!ret) | 
