diff options
Diffstat (limited to 'kernel/futex.c')
-rw-r--r-- | kernel/futex.c | 137 |
1 file changed, 87 insertions(+), 50 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c index 4949d336d88d..e7a35f1039e7 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -150,7 +150,8 @@ static struct futex_hash_bucket *hash_futex(union futex_key *key) | |||
150 | */ | 150 | */ |
151 | static inline int match_futex(union futex_key *key1, union futex_key *key2) | 151 | static inline int match_futex(union futex_key *key1, union futex_key *key2) |
152 | { | 152 | { |
153 | return (key1->both.word == key2->both.word | 153 | return (key1 && key2 |
154 | && key1->both.word == key2->both.word | ||
154 | && key1->both.ptr == key2->both.ptr | 155 | && key1->both.ptr == key2->both.ptr |
155 | && key1->both.offset == key2->both.offset); | 156 | && key1->both.offset == key2->both.offset); |
156 | } | 157 | } |
@@ -202,8 +203,6 @@ static void drop_futex_key_refs(union futex_key *key) | |||
202 | * @uaddr: virtual address of the futex | 203 | * @uaddr: virtual address of the futex |
203 | * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED | 204 | * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED |
204 | * @key: address where result is stored. | 205 | * @key: address where result is stored. |
205 | * @rw: mapping needs to be read/write (values: VERIFY_READ, | ||
206 | * VERIFY_WRITE) | ||
207 | * | 206 | * |
208 | * Returns a negative error code or 0 | 207 | * Returns a negative error code or 0 |
209 | * The key words are stored in *key on success. | 208 | * The key words are stored in *key on success. |
@@ -215,7 +214,7 @@ static void drop_futex_key_refs(union futex_key *key) | |||
215 | * lock_page() might sleep, the caller should not hold a spinlock. | 214 | * lock_page() might sleep, the caller should not hold a spinlock. |
216 | */ | 215 | */ |
217 | static int | 216 | static int |
218 | get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) | 217 | get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) |
219 | { | 218 | { |
220 | unsigned long address = (unsigned long)uaddr; | 219 | unsigned long address = (unsigned long)uaddr; |
221 | struct mm_struct *mm = current->mm; | 220 | struct mm_struct *mm = current->mm; |
@@ -238,7 +237,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) | |||
238 | * but access_ok() should be faster than find_vma() | 237 | * but access_ok() should be faster than find_vma() |
239 | */ | 238 | */ |
240 | if (!fshared) { | 239 | if (!fshared) { |
241 | if (unlikely(!access_ok(rw, uaddr, sizeof(u32)))) | 240 | if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) |
242 | return -EFAULT; | 241 | return -EFAULT; |
243 | key->private.mm = mm; | 242 | key->private.mm = mm; |
244 | key->private.address = address; | 243 | key->private.address = address; |
@@ -247,7 +246,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) | |||
247 | } | 246 | } |
248 | 247 | ||
249 | again: | 248 | again: |
250 | err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page); | 249 | err = get_user_pages_fast(address, 1, 1, &page); |
251 | if (err < 0) | 250 | if (err < 0) |
252 | return err; | 251 | return err; |
253 | 252 | ||
@@ -303,8 +302,14 @@ void put_futex_key(int fshared, union futex_key *key) | |||
303 | */ | 302 | */ |
304 | static int fault_in_user_writeable(u32 __user *uaddr) | 303 | static int fault_in_user_writeable(u32 __user *uaddr) |
305 | { | 304 | { |
306 | int ret = get_user_pages(current, current->mm, (unsigned long)uaddr, | 305 | struct mm_struct *mm = current->mm; |
307 | 1, 1, 0, NULL, NULL); | 306 | int ret; |
307 | |||
308 | down_read(&mm->mmap_sem); | ||
309 | ret = get_user_pages(current, mm, (unsigned long)uaddr, | ||
310 | 1, 1, 0, NULL, NULL); | ||
311 | up_read(&mm->mmap_sem); | ||
312 | |||
308 | return ret < 0 ? ret : 0; | 313 | return ret < 0 ? ret : 0; |
309 | } | 314 | } |
310 | 315 | ||
@@ -396,9 +401,9 @@ static void free_pi_state(struct futex_pi_state *pi_state) | |||
396 | * and has cleaned up the pi_state already | 401 | * and has cleaned up the pi_state already |
397 | */ | 402 | */ |
398 | if (pi_state->owner) { | 403 | if (pi_state->owner) { |
399 | spin_lock_irq(&pi_state->owner->pi_lock); | 404 | raw_spin_lock_irq(&pi_state->owner->pi_lock); |
400 | list_del_init(&pi_state->list); | 405 | list_del_init(&pi_state->list); |
401 | spin_unlock_irq(&pi_state->owner->pi_lock); | 406 | raw_spin_unlock_irq(&pi_state->owner->pi_lock); |
402 | 407 | ||
403 | rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); | 408 | rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); |
404 | } | 409 | } |
@@ -463,18 +468,18 @@ void exit_pi_state_list(struct task_struct *curr) | |||
463 | * pi_state_list anymore, but we have to be careful | 468 | * pi_state_list anymore, but we have to be careful |
464 | * versus waiters unqueueing themselves: | 469 | * versus waiters unqueueing themselves: |
465 | */ | 470 | */ |
466 | spin_lock_irq(&curr->pi_lock); | 471 | raw_spin_lock_irq(&curr->pi_lock); |
467 | while (!list_empty(head)) { | 472 | while (!list_empty(head)) { |
468 | 473 | ||
469 | next = head->next; | 474 | next = head->next; |
470 | pi_state = list_entry(next, struct futex_pi_state, list); | 475 | pi_state = list_entry(next, struct futex_pi_state, list); |
471 | key = pi_state->key; | 476 | key = pi_state->key; |
472 | hb = hash_futex(&key); | 477 | hb = hash_futex(&key); |
473 | spin_unlock_irq(&curr->pi_lock); | 478 | raw_spin_unlock_irq(&curr->pi_lock); |
474 | 479 | ||
475 | spin_lock(&hb->lock); | 480 | spin_lock(&hb->lock); |
476 | 481 | ||
477 | spin_lock_irq(&curr->pi_lock); | 482 | raw_spin_lock_irq(&curr->pi_lock); |
478 | /* | 483 | /* |
479 | * We dropped the pi-lock, so re-check whether this | 484 | * We dropped the pi-lock, so re-check whether this |
480 | * task still owns the PI-state: | 485 | * task still owns the PI-state: |
@@ -488,15 +493,15 @@ void exit_pi_state_list(struct task_struct *curr) | |||
488 | WARN_ON(list_empty(&pi_state->list)); | 493 | WARN_ON(list_empty(&pi_state->list)); |
489 | list_del_init(&pi_state->list); | 494 | list_del_init(&pi_state->list); |
490 | pi_state->owner = NULL; | 495 | pi_state->owner = NULL; |
491 | spin_unlock_irq(&curr->pi_lock); | 496 | raw_spin_unlock_irq(&curr->pi_lock); |
492 | 497 | ||
493 | rt_mutex_unlock(&pi_state->pi_mutex); | 498 | rt_mutex_unlock(&pi_state->pi_mutex); |
494 | 499 | ||
495 | spin_unlock(&hb->lock); | 500 | spin_unlock(&hb->lock); |
496 | 501 | ||
497 | spin_lock_irq(&curr->pi_lock); | 502 | raw_spin_lock_irq(&curr->pi_lock); |
498 | } | 503 | } |
499 | spin_unlock_irq(&curr->pi_lock); | 504 | raw_spin_unlock_irq(&curr->pi_lock); |
500 | } | 505 | } |
501 | 506 | ||
502 | static int | 507 | static int |
@@ -525,8 +530,25 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, | |||
525 | return -EINVAL; | 530 | return -EINVAL; |
526 | 531 | ||
527 | WARN_ON(!atomic_read(&pi_state->refcount)); | 532 | WARN_ON(!atomic_read(&pi_state->refcount)); |
528 | WARN_ON(pid && pi_state->owner && | 533 | |
529 | pi_state->owner->pid != pid); | 534 | /* |
535 | * When pi_state->owner is NULL then the owner died | ||
536 | * and another waiter is on the fly. pi_state->owner | ||
537 | * is fixed up by the task which acquires | ||
538 | * pi_state->rt_mutex. | ||
539 | * | ||
540 | * We do not check for pid == 0 which can happen when | ||
541 | * the owner died and robust_list_exit() cleared the | ||
542 | * TID. | ||
543 | */ | ||
544 | if (pid && pi_state->owner) { | ||
545 | /* | ||
546 | * Bail out if user space manipulated the | ||
547 | * futex value. | ||
548 | */ | ||
549 | if (pid != task_pid_vnr(pi_state->owner)) | ||
550 | return -EINVAL; | ||
551 | } | ||
530 | 552 | ||
531 | atomic_inc(&pi_state->refcount); | 553 | atomic_inc(&pi_state->refcount); |
532 | *ps = pi_state; | 554 | *ps = pi_state; |
@@ -551,7 +573,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, | |||
551 | * change of the task flags, we do this protected by | 573 | * change of the task flags, we do this protected by |
552 | * p->pi_lock: | 574 | * p->pi_lock: |
553 | */ | 575 | */ |
554 | spin_lock_irq(&p->pi_lock); | 576 | raw_spin_lock_irq(&p->pi_lock); |
555 | if (unlikely(p->flags & PF_EXITING)) { | 577 | if (unlikely(p->flags & PF_EXITING)) { |
556 | /* | 578 | /* |
557 | * The task is on the way out. When PF_EXITPIDONE is | 579 | * The task is on the way out. When PF_EXITPIDONE is |
@@ -560,7 +582,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, | |||
560 | */ | 582 | */ |
561 | int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN; | 583 | int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN; |
562 | 584 | ||
563 | spin_unlock_irq(&p->pi_lock); | 585 | raw_spin_unlock_irq(&p->pi_lock); |
564 | put_task_struct(p); | 586 | put_task_struct(p); |
565 | return ret; | 587 | return ret; |
566 | } | 588 | } |
@@ -579,7 +601,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, | |||
579 | WARN_ON(!list_empty(&pi_state->list)); | 601 | WARN_ON(!list_empty(&pi_state->list)); |
580 | list_add(&pi_state->list, &p->pi_state_list); | 602 | list_add(&pi_state->list, &p->pi_state_list); |
581 | pi_state->owner = p; | 603 | pi_state->owner = p; |
582 | spin_unlock_irq(&p->pi_lock); | 604 | raw_spin_unlock_irq(&p->pi_lock); |
583 | 605 | ||
584 | put_task_struct(p); | 606 | put_task_struct(p); |
585 | 607 | ||
@@ -753,7 +775,14 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) | |||
753 | if (!pi_state) | 775 | if (!pi_state) |
754 | return -EINVAL; | 776 | return -EINVAL; |
755 | 777 | ||
756 | spin_lock(&pi_state->pi_mutex.wait_lock); | 778 | /* |
779 | * If current does not own the pi_state then the futex is | ||
780 | * inconsistent and user space fiddled with the futex value. | ||
781 | */ | ||
782 | if (pi_state->owner != current) | ||
783 | return -EINVAL; | ||
784 | |||
785 | raw_spin_lock(&pi_state->pi_mutex.wait_lock); | ||
757 | new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); | 786 | new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); |
758 | 787 | ||
759 | /* | 788 | /* |
@@ -782,23 +811,23 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) | |||
782 | else if (curval != uval) | 811 | else if (curval != uval) |
783 | ret = -EINVAL; | 812 | ret = -EINVAL; |
784 | if (ret) { | 813 | if (ret) { |
785 | spin_unlock(&pi_state->pi_mutex.wait_lock); | 814 | raw_spin_unlock(&pi_state->pi_mutex.wait_lock); |
786 | return ret; | 815 | return ret; |
787 | } | 816 | } |
788 | } | 817 | } |
789 | 818 | ||
790 | spin_lock_irq(&pi_state->owner->pi_lock); | 819 | raw_spin_lock_irq(&pi_state->owner->pi_lock); |
791 | WARN_ON(list_empty(&pi_state->list)); | 820 | WARN_ON(list_empty(&pi_state->list)); |
792 | list_del_init(&pi_state->list); | 821 | list_del_init(&pi_state->list); |
793 | spin_unlock_irq(&pi_state->owner->pi_lock); | 822 | raw_spin_unlock_irq(&pi_state->owner->pi_lock); |
794 | 823 | ||
795 | spin_lock_irq(&new_owner->pi_lock); | 824 | raw_spin_lock_irq(&new_owner->pi_lock); |
796 | WARN_ON(!list_empty(&pi_state->list)); | 825 | WARN_ON(!list_empty(&pi_state->list)); |
797 | list_add(&pi_state->list, &new_owner->pi_state_list); | 826 | list_add(&pi_state->list, &new_owner->pi_state_list); |
798 | pi_state->owner = new_owner; | 827 | pi_state->owner = new_owner; |
799 | spin_unlock_irq(&new_owner->pi_lock); | 828 | raw_spin_unlock_irq(&new_owner->pi_lock); |
800 | 829 | ||
801 | spin_unlock(&pi_state->pi_mutex.wait_lock); | 830 | raw_spin_unlock(&pi_state->pi_mutex.wait_lock); |
802 | rt_mutex_unlock(&pi_state->pi_mutex); | 831 | rt_mutex_unlock(&pi_state->pi_mutex); |
803 | 832 | ||
804 | return 0; | 833 | return 0; |
@@ -860,7 +889,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) | |||
860 | if (!bitset) | 889 | if (!bitset) |
861 | return -EINVAL; | 890 | return -EINVAL; |
862 | 891 | ||
863 | ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ); | 892 | ret = get_futex_key(uaddr, fshared, &key); |
864 | if (unlikely(ret != 0)) | 893 | if (unlikely(ret != 0)) |
865 | goto out; | 894 | goto out; |
866 | 895 | ||
@@ -906,10 +935,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, | |||
906 | int ret, op_ret; | 935 | int ret, op_ret; |
907 | 936 | ||
908 | retry: | 937 | retry: |
909 | ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ); | 938 | ret = get_futex_key(uaddr1, fshared, &key1); |
910 | if (unlikely(ret != 0)) | 939 | if (unlikely(ret != 0)) |
911 | goto out; | 940 | goto out; |
912 | ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE); | 941 | ret = get_futex_key(uaddr2, fshared, &key2); |
913 | if (unlikely(ret != 0)) | 942 | if (unlikely(ret != 0)) |
914 | goto out_put_key1; | 943 | goto out_put_key1; |
915 | 944 | ||
@@ -1003,7 +1032,7 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, | |||
1003 | plist_add(&q->list, &hb2->chain); | 1032 | plist_add(&q->list, &hb2->chain); |
1004 | q->lock_ptr = &hb2->lock; | 1033 | q->lock_ptr = &hb2->lock; |
1005 | #ifdef CONFIG_DEBUG_PI_LIST | 1034 | #ifdef CONFIG_DEBUG_PI_LIST |
1006 | q->list.plist.lock = &hb2->lock; | 1035 | q->list.plist.spinlock = &hb2->lock; |
1007 | #endif | 1036 | #endif |
1008 | } | 1037 | } |
1009 | get_futex_key_refs(key2); | 1038 | get_futex_key_refs(key2); |
@@ -1028,7 +1057,6 @@ static inline | |||
1028 | void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, | 1057 | void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, |
1029 | struct futex_hash_bucket *hb) | 1058 | struct futex_hash_bucket *hb) |
1030 | { | 1059 | { |
1031 | drop_futex_key_refs(&q->key); | ||
1032 | get_futex_key_refs(key); | 1060 | get_futex_key_refs(key); |
1033 | q->key = *key; | 1061 | q->key = *key; |
1034 | 1062 | ||
@@ -1040,7 +1068,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, | |||
1040 | 1068 | ||
1041 | q->lock_ptr = &hb->lock; | 1069 | q->lock_ptr = &hb->lock; |
1042 | #ifdef CONFIG_DEBUG_PI_LIST | 1070 | #ifdef CONFIG_DEBUG_PI_LIST |
1043 | q->list.plist.lock = &hb->lock; | 1071 | q->list.plist.spinlock = &hb->lock; |
1044 | #endif | 1072 | #endif |
1045 | 1073 | ||
1046 | wake_up_state(q->task, TASK_NORMAL); | 1074 | wake_up_state(q->task, TASK_NORMAL); |
@@ -1169,11 +1197,10 @@ retry: | |||
1169 | pi_state = NULL; | 1197 | pi_state = NULL; |
1170 | } | 1198 | } |
1171 | 1199 | ||
1172 | ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ); | 1200 | ret = get_futex_key(uaddr1, fshared, &key1); |
1173 | if (unlikely(ret != 0)) | 1201 | if (unlikely(ret != 0)) |
1174 | goto out; | 1202 | goto out; |
1175 | ret = get_futex_key(uaddr2, fshared, &key2, | 1203 | ret = get_futex_key(uaddr2, fshared, &key2); |
1176 | requeue_pi ? VERIFY_WRITE : VERIFY_READ); | ||
1177 | if (unlikely(ret != 0)) | 1204 | if (unlikely(ret != 0)) |
1178 | goto out_put_key1; | 1205 | goto out_put_key1; |
1179 | 1206 | ||
@@ -1226,6 +1253,7 @@ retry_private: | |||
1226 | */ | 1253 | */ |
1227 | if (ret == 1) { | 1254 | if (ret == 1) { |
1228 | WARN_ON(pi_state); | 1255 | WARN_ON(pi_state); |
1256 | drop_count++; | ||
1229 | task_count++; | 1257 | task_count++; |
1230 | ret = get_futex_value_locked(&curval2, uaddr2); | 1258 | ret = get_futex_value_locked(&curval2, uaddr2); |
1231 | if (!ret) | 1259 | if (!ret) |
@@ -1304,6 +1332,7 @@ retry_private: | |||
1304 | if (ret == 1) { | 1332 | if (ret == 1) { |
1305 | /* We got the lock. */ | 1333 | /* We got the lock. */ |
1306 | requeue_pi_wake_futex(this, &key2, hb2); | 1334 | requeue_pi_wake_futex(this, &key2, hb2); |
1335 | drop_count++; | ||
1307 | continue; | 1336 | continue; |
1308 | } else if (ret) { | 1337 | } else if (ret) { |
1309 | /* -EDEADLK */ | 1338 | /* -EDEADLK */ |
@@ -1386,7 +1415,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) | |||
1386 | 1415 | ||
1387 | plist_node_init(&q->list, prio); | 1416 | plist_node_init(&q->list, prio); |
1388 | #ifdef CONFIG_DEBUG_PI_LIST | 1417 | #ifdef CONFIG_DEBUG_PI_LIST |
1389 | q->list.plist.lock = &hb->lock; | 1418 | q->list.plist.spinlock = &hb->lock; |
1390 | #endif | 1419 | #endif |
1391 | plist_add(&q->list, &hb->chain); | 1420 | plist_add(&q->list, &hb->chain); |
1392 | q->task = current; | 1421 | q->task = current; |
@@ -1521,18 +1550,18 @@ retry: | |||
1521 | * itself. | 1550 | * itself. |
1522 | */ | 1551 | */ |
1523 | if (pi_state->owner != NULL) { | 1552 | if (pi_state->owner != NULL) { |
1524 | spin_lock_irq(&pi_state->owner->pi_lock); | 1553 | raw_spin_lock_irq(&pi_state->owner->pi_lock); |
1525 | WARN_ON(list_empty(&pi_state->list)); | 1554 | WARN_ON(list_empty(&pi_state->list)); |
1526 | list_del_init(&pi_state->list); | 1555 | list_del_init(&pi_state->list); |
1527 | spin_unlock_irq(&pi_state->owner->pi_lock); | 1556 | raw_spin_unlock_irq(&pi_state->owner->pi_lock); |
1528 | } | 1557 | } |
1529 | 1558 | ||
1530 | pi_state->owner = newowner; | 1559 | pi_state->owner = newowner; |
1531 | 1560 | ||
1532 | spin_lock_irq(&newowner->pi_lock); | 1561 | raw_spin_lock_irq(&newowner->pi_lock); |
1533 | WARN_ON(!list_empty(&pi_state->list)); | 1562 | WARN_ON(!list_empty(&pi_state->list)); |
1534 | list_add(&pi_state->list, &newowner->pi_state_list); | 1563 | list_add(&pi_state->list, &newowner->pi_state_list); |
1535 | spin_unlock_irq(&newowner->pi_lock); | 1564 | raw_spin_unlock_irq(&newowner->pi_lock); |
1536 | return 0; | 1565 | return 0; |
1537 | 1566 | ||
1538 | /* | 1567 | /* |
@@ -1730,7 +1759,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared, | |||
1730 | */ | 1759 | */ |
1731 | retry: | 1760 | retry: |
1732 | q->key = FUTEX_KEY_INIT; | 1761 | q->key = FUTEX_KEY_INIT; |
1733 | ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ); | 1762 | ret = get_futex_key(uaddr, fshared, &q->key); |
1734 | if (unlikely(ret != 0)) | 1763 | if (unlikely(ret != 0)) |
1735 | return ret; | 1764 | return ret; |
1736 | 1765 | ||
@@ -1791,6 +1820,7 @@ static int futex_wait(u32 __user *uaddr, int fshared, | |||
1791 | current->timer_slack_ns); | 1820 | current->timer_slack_ns); |
1792 | } | 1821 | } |
1793 | 1822 | ||
1823 | retry: | ||
1794 | /* Prepare to wait on uaddr. */ | 1824 | /* Prepare to wait on uaddr. */ |
1795 | ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); | 1825 | ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); |
1796 | if (ret) | 1826 | if (ret) |
@@ -1808,9 +1838,14 @@ static int futex_wait(u32 __user *uaddr, int fshared, | |||
1808 | goto out_put_key; | 1838 | goto out_put_key; |
1809 | 1839 | ||
1810 | /* | 1840 | /* |
1811 | * We expect signal_pending(current), but another thread may | 1841 | * We expect signal_pending(current), but we might be the |
1812 | * have handled it for us already. | 1842 | * victim of a spurious wakeup as well. |
1813 | */ | 1843 | */ |
1844 | if (!signal_pending(current)) { | ||
1845 | put_futex_key(fshared, &q.key); | ||
1846 | goto retry; | ||
1847 | } | ||
1848 | |||
1814 | ret = -ERESTARTSYS; | 1849 | ret = -ERESTARTSYS; |
1815 | if (!abs_time) | 1850 | if (!abs_time) |
1816 | goto out_put_key; | 1851 | goto out_put_key; |
@@ -1890,7 +1925,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared, | |||
1890 | q.requeue_pi_key = NULL; | 1925 | q.requeue_pi_key = NULL; |
1891 | retry: | 1926 | retry: |
1892 | q.key = FUTEX_KEY_INIT; | 1927 | q.key = FUTEX_KEY_INIT; |
1893 | ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE); | 1928 | ret = get_futex_key(uaddr, fshared, &q.key); |
1894 | if (unlikely(ret != 0)) | 1929 | if (unlikely(ret != 0)) |
1895 | goto out; | 1930 | goto out; |
1896 | 1931 | ||
@@ -1960,7 +1995,7 @@ retry_private: | |||
1960 | /* Unqueue and drop the lock */ | 1995 | /* Unqueue and drop the lock */ |
1961 | unqueue_me_pi(&q); | 1996 | unqueue_me_pi(&q); |
1962 | 1997 | ||
1963 | goto out; | 1998 | goto out_put_key; |
1964 | 1999 | ||
1965 | out_unlock_put_key: | 2000 | out_unlock_put_key: |
1966 | queue_unlock(&q, hb); | 2001 | queue_unlock(&q, hb); |
@@ -2009,7 +2044,7 @@ retry: | |||
2009 | if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) | 2044 | if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) |
2010 | return -EPERM; | 2045 | return -EPERM; |
2011 | 2046 | ||
2012 | ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE); | 2047 | ret = get_futex_key(uaddr, fshared, &key); |
2013 | if (unlikely(ret != 0)) | 2048 | if (unlikely(ret != 0)) |
2014 | goto out; | 2049 | goto out; |
2015 | 2050 | ||
@@ -2118,9 +2153,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, | |||
2118 | */ | 2153 | */ |
2119 | plist_del(&q->list, &q->list.plist); | 2154 | plist_del(&q->list, &q->list.plist); |
2120 | 2155 | ||
2156 | /* Handle spurious wakeups gracefully */ | ||
2157 | ret = -EWOULDBLOCK; | ||
2121 | if (timeout && !timeout->task) | 2158 | if (timeout && !timeout->task) |
2122 | ret = -ETIMEDOUT; | 2159 | ret = -ETIMEDOUT; |
2123 | else | 2160 | else if (signal_pending(current)) |
2124 | ret = -ERESTARTNOINTR; | 2161 | ret = -ERESTARTNOINTR; |
2125 | } | 2162 | } |
2126 | return ret; | 2163 | return ret; |
@@ -2199,7 +2236,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, | |||
2199 | rt_waiter.task = NULL; | 2236 | rt_waiter.task = NULL; |
2200 | 2237 | ||
2201 | key2 = FUTEX_KEY_INIT; | 2238 | key2 = FUTEX_KEY_INIT; |
2202 | ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE); | 2239 | ret = get_futex_key(uaddr2, fshared, &key2); |
2203 | if (unlikely(ret != 0)) | 2240 | if (unlikely(ret != 0)) |
2204 | goto out; | 2241 | goto out; |
2205 | 2242 | ||