Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c           75
-rw-r--r--  kernel/cpuset.c        24
-rw-r--r--  kernel/futex.c        121
-rw-r--r--  kernel/futex_compat.c  34
4 files changed, 165 insertions, 89 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 70fbf2e83766..f230f9ae01c2 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,56 +16,48 @@
 #include <linux/mutex.h>
 
 /* This protects CPUs going up and down... */
-static DEFINE_MUTEX(cpucontrol);
+static DEFINE_MUTEX(cpu_add_remove_lock);
+static DEFINE_MUTEX(cpu_bitmask_lock);
 
 static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static struct task_struct *lock_cpu_hotplug_owner;
-static int lock_cpu_hotplug_depth;
 
-static int __lock_cpu_hotplug(int interruptible)
-{
-        int ret = 0;
-
-        if (lock_cpu_hotplug_owner != current) {
-                if (interruptible)
-                        ret = mutex_lock_interruptible(&cpucontrol);
-                else
-                        mutex_lock(&cpucontrol);
-        }
-
-        /*
-         * Set only if we succeed in locking
-         */
-        if (!ret) {
-                lock_cpu_hotplug_depth++;
-                lock_cpu_hotplug_owner = current;
-        }
-
-        return ret;
-}
+/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
+static struct task_struct *recursive;
+static int recursive_depth;
 
 void lock_cpu_hotplug(void)
 {
-        __lock_cpu_hotplug(0);
+        struct task_struct *tsk = current;
+
+        if (tsk == recursive) {
+                static int warnings = 10;
+                if (warnings) {
+                        printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");
+                        WARN_ON(1);
+                        warnings--;
+                }
+                recursive_depth++;
+                return;
+        }
+        mutex_lock(&cpu_bitmask_lock);
+        recursive = tsk;
 }
 EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
 
 void unlock_cpu_hotplug(void)
 {
-        if (--lock_cpu_hotplug_depth == 0) {
-                lock_cpu_hotplug_owner = NULL;
-                mutex_unlock(&cpucontrol);
+        WARN_ON(recursive != current);
+        if (recursive_depth) {
+                recursive_depth--;
+                return;
         }
+        mutex_unlock(&cpu_bitmask_lock);
+        recursive = NULL;
 }
 EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
 
-int lock_cpu_hotplug_interruptible(void)
-{
-        return __lock_cpu_hotplug(1);
-}
-EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
 #endif /* CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
@@ -122,9 +114,7 @@ int cpu_down(unsigned int cpu)
         struct task_struct *p;
         cpumask_t old_allowed, tmp;
 
-        if ((err = lock_cpu_hotplug_interruptible()) != 0)
-                return err;
-
+        mutex_lock(&cpu_add_remove_lock);
         if (num_online_cpus() == 1) {
                 err = -EBUSY;
                 goto out;
@@ -150,7 +140,10 @@ int cpu_down(unsigned int cpu)
         cpu_clear(cpu, tmp);
         set_cpus_allowed(current, tmp);
 
+        mutex_lock(&cpu_bitmask_lock);
         p = __stop_machine_run(take_cpu_down, NULL, cpu);
+        mutex_unlock(&cpu_bitmask_lock);
+
         if (IS_ERR(p)) {
                 /* CPU didn't die: tell everyone. Can't complain. */
                 if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
@@ -187,7 +180,7 @@ out_thread:
 out_allowed:
         set_cpus_allowed(current, old_allowed);
 out:
-        unlock_cpu_hotplug();
+        mutex_unlock(&cpu_add_remove_lock);
         return err;
 }
 #endif /*CONFIG_HOTPLUG_CPU*/
@@ -197,9 +190,7 @@ int __devinit cpu_up(unsigned int cpu)
         int ret;
         void *hcpu = (void *)(long)cpu;
 
-        if ((ret = lock_cpu_hotplug_interruptible()) != 0)
-                return ret;
-
+        mutex_lock(&cpu_add_remove_lock);
         if (cpu_online(cpu) || !cpu_present(cpu)) {
                 ret = -EINVAL;
                 goto out;
@@ -214,7 +205,9 @@ int __devinit cpu_up(unsigned int cpu)
         }
 
         /* Arch-specific enabling code. */
+        mutex_lock(&cpu_bitmask_lock);
         ret = __cpu_up(cpu);
+        mutex_unlock(&cpu_bitmask_lock);
         if (ret != 0)
                 goto out_notify;
         BUG_ON(!cpu_online(cpu));
@@ -227,6 +220,6 @@ out_notify:
         blocking_notifier_call_chain(&cpu_chain,
                         CPU_UP_CANCELED, hcpu);
 out:
-        unlock_cpu_hotplug();
+        mutex_unlock(&cpu_add_remove_lock);
         return ret;
 }
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index c232dc077438..1a649f2bb9bb 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -762,6 +762,8 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
  *
  * Call with manage_mutex held. May nest a call to the
  * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
+ * Must not be called holding callback_mutex, because we must
+ * not call lock_cpu_hotplug() while holding callback_mutex.
  */
 
 static void update_cpu_domains(struct cpuset *cur)
@@ -781,7 +783,7 @@ static void update_cpu_domains(struct cpuset *cur)
                 if (is_cpu_exclusive(c))
                         cpus_andnot(pspan, pspan, c->cpus_allowed);
         }
-        if (is_removed(cur) || !is_cpu_exclusive(cur)) {
+        if (!is_cpu_exclusive(cur)) {
                 cpus_or(pspan, pspan, cur->cpus_allowed);
                 if (cpus_equal(pspan, cur->cpus_allowed))
                         return;
@@ -1917,6 +1919,17 @@ static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
         return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
 }
 
+/*
+ * Locking note on the strange update_flag() call below:
+ *
+ * If the cpuset being removed is marked cpu_exclusive, then simulate
+ * turning cpu_exclusive off, which will call update_cpu_domains().
+ * The lock_cpu_hotplug() call in update_cpu_domains() must not be
+ * made while holding callback_mutex. Elsewhere the kernel nests
+ * callback_mutex inside lock_cpu_hotplug() calls. So the reverse
+ * nesting would risk an ABBA deadlock.
+ */
+
 static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 {
         struct cpuset *cs = dentry->d_fsdata;
@@ -1936,11 +1949,16 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
                 mutex_unlock(&manage_mutex);
                 return -EBUSY;
         }
+        if (is_cpu_exclusive(cs)) {
+                int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0");
+                if (retval < 0) {
+                        mutex_unlock(&manage_mutex);
+                        return retval;
+                }
+        }
         parent = cs->parent;
         mutex_lock(&callback_mutex);
         set_bit(CS_REMOVED, &cs->flags);
-        if (is_cpu_exclusive(cs))
-                update_cpu_domains(cs);
         list_del(&cs->sibling); /* delete my sibling from parent->children */
         spin_lock(&cs->dentry->d_lock);
         d = dget(cs->dentry);
diff --git a/kernel/futex.c b/kernel/futex.c
index cf0c8e21d1ab..dda2049692a2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -415,15 +415,15 @@ out_unlock:
  */
 void exit_pi_state_list(struct task_struct *curr)
 {
-        struct futex_hash_bucket *hb;
         struct list_head *next, *head = &curr->pi_state_list;
         struct futex_pi_state *pi_state;
+        struct futex_hash_bucket *hb;
         union futex_key key;
 
         /*
          * We are a ZOMBIE and nobody can enqueue itself on
          * pi_state_list anymore, but we have to be careful
-         * versus waiters unqueueing themselfs
+         * versus waiters unqueueing themselves:
          */
         spin_lock_irq(&curr->pi_lock);
         while (!list_empty(head)) {
@@ -431,21 +431,24 @@ void exit_pi_state_list(struct task_struct *curr)
                 next = head->next;
                 pi_state = list_entry(next, struct futex_pi_state, list);
                 key = pi_state->key;
+                hb = hash_futex(&key);
                 spin_unlock_irq(&curr->pi_lock);
 
-                hb = hash_futex(&key);
                 spin_lock(&hb->lock);
 
                 spin_lock_irq(&curr->pi_lock);
+                /*
+                 * We dropped the pi-lock, so re-check whether this
+                 * task still owns the PI-state:
+                 */
                 if (head->next != next) {
                         spin_unlock(&hb->lock);
                         continue;
                 }
 
-                list_del_init(&pi_state->list);
-
                 WARN_ON(pi_state->owner != curr);
-
+                WARN_ON(list_empty(&pi_state->list));
+                list_del_init(&pi_state->list);
                 pi_state->owner = NULL;
                 spin_unlock_irq(&curr->pi_lock);
 
@@ -470,7 +473,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
         head = &hb->chain;
 
         list_for_each_entry_safe(this, next, head, list) {
-                if (match_futex (&this->key, &me->key)) {
+                if (match_futex(&this->key, &me->key)) {
                         /*
                          * Another waiter already exists - bump up
                          * the refcount and return its pi_state:
@@ -482,6 +485,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
                         if (unlikely(!pi_state))
                                 return -EINVAL;
 
+                        WARN_ON(!atomic_read(&pi_state->refcount));
+
                         atomic_inc(&pi_state->refcount);
                         me->pi_state = pi_state;
 
@@ -490,10 +495,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
         }
 
         /*
-         * We are the first waiter - try to look up the real owner and
-         * attach the new pi_state to it:
+         * We are the first waiter - try to look up the real owner and attach
+         * the new pi_state to it, but bail out when the owner died bit is set
+         * and TID = 0:
          */
         pid = uval & FUTEX_TID_MASK;
+        if (!pid && (uval & FUTEX_OWNER_DIED))
+                return -ESRCH;
         p = futex_find_get_task(pid);
         if (!p)
                 return -ESRCH;
@@ -510,6 +518,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
         pi_state->key = me->key;
 
         spin_lock_irq(&p->pi_lock);
+        WARN_ON(!list_empty(&pi_state->list));
         list_add(&pi_state->list, &p->pi_state_list);
         pi_state->owner = p;
         spin_unlock_irq(&p->pi_lock);
@@ -573,20 +582,29 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
          * kept enabled while there is PI state around. We must also
          * preserve the owner died bit.)
          */
-        newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid;
+        if (!(uval & FUTEX_OWNER_DIED)) {
+                newval = FUTEX_WAITERS | new_owner->pid;
 
-        inc_preempt_count();
-        curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-        dec_preempt_count();
+                inc_preempt_count();
+                curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
+                dec_preempt_count();
+                if (curval == -EFAULT)
+                        return -EFAULT;
+                if (curval != uval)
+                        return -EINVAL;
+        }
 
-        if (curval == -EFAULT)
-                return -EFAULT;
-        if (curval != uval)
-                return -EINVAL;
+        spin_lock_irq(&pi_state->owner->pi_lock);
+        WARN_ON(list_empty(&pi_state->list));
+        list_del_init(&pi_state->list);
+        spin_unlock_irq(&pi_state->owner->pi_lock);
 
-        list_del_init(&pi_state->owner->pi_state_list);
+        spin_lock_irq(&new_owner->pi_lock);
+        WARN_ON(!list_empty(&pi_state->list));
         list_add(&pi_state->list, &new_owner->pi_state_list);
         pi_state->owner = new_owner;
+        spin_unlock_irq(&new_owner->pi_lock);
+
         rt_mutex_unlock(&pi_state->pi_mutex);
 
         return 0;
@@ -1236,6 +1254,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
                 /* Owner died? */
                 if (q.pi_state->owner != NULL) {
                         spin_lock_irq(&q.pi_state->owner->pi_lock);
+                        WARN_ON(list_empty(&q.pi_state->list));
                         list_del_init(&q.pi_state->list);
                         spin_unlock_irq(&q.pi_state->owner->pi_lock);
                 } else
@@ -1244,6 +1263,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
                 q.pi_state->owner = current;
 
                 spin_lock_irq(&current->pi_lock);
+                WARN_ON(!list_empty(&q.pi_state->list));
                 list_add(&q.pi_state->list, &current->pi_state_list);
                 spin_unlock_irq(&current->pi_lock);
 
@@ -1427,9 +1447,11 @@ retry_locked:
          * again. If it succeeds then we can return without waking
          * anyone else up:
          */
-        inc_preempt_count();
-        uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
-        dec_preempt_count();
+        if (!(uval & FUTEX_OWNER_DIED)) {
+                inc_preempt_count();
+                uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
+                dec_preempt_count();
+        }
 
         if (unlikely(uval == -EFAULT))
                 goto pi_faulted;
@@ -1462,9 +1484,11 @@ retry_locked:
         /*
          * No waiters - kernel unlocks the futex:
          */
-        ret = unlock_futex_pi(uaddr, uval);
-        if (ret == -EFAULT)
-                goto pi_faulted;
+        if (!(uval & FUTEX_OWNER_DIED)) {
+                ret = unlock_futex_pi(uaddr, uval);
+                if (ret == -EFAULT)
+                        goto pi_faulted;
+        }
 
 out_unlock:
         spin_unlock(&hb->lock);
@@ -1683,9 +1707,9 @@ err_unlock:
  * Process a futex-list entry, check whether it's owned by the
  * dying task, and do notification if so:
  */
-int handle_futex_death(u32 __user *uaddr, struct task_struct *curr)
+int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
 {
-        u32 uval, nval;
+        u32 uval, nval, mval;
 
 retry:
         if (get_user(uval, uaddr))
@@ -1702,21 +1726,45 @@ retry:
                  * thread-death.) The rest of the cleanup is done in
                  * userspace.
                  */
-                nval = futex_atomic_cmpxchg_inatomic(uaddr, uval,
-                                                     uval | FUTEX_OWNER_DIED);
+                mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
+                nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
+
                 if (nval == -EFAULT)
                         return -1;
 
                 if (nval != uval)
                         goto retry;
 
-                if (uval & FUTEX_WAITERS)
-                        futex_wake(uaddr, 1);
+                /*
+                 * Wake robust non-PI futexes here. The wakeup of
+                 * PI futexes happens in exit_pi_state():
+                 */
+                if (!pi) {
+                        if (uval & FUTEX_WAITERS)
+                                futex_wake(uaddr, 1);
+                }
         }
         return 0;
 }
 
 /*
+ * Fetch a robust-list pointer. Bit 0 signals PI futexes:
+ */
+static inline int fetch_robust_entry(struct robust_list __user **entry,
+                                     struct robust_list __user **head, int *pi)
+{
+        unsigned long uentry;
+
+        if (get_user(uentry, (unsigned long *)head))
+                return -EFAULT;
+
+        *entry = (void *)(uentry & ~1UL);
+        *pi = uentry & 1;
+
+        return 0;
+}
+
+/*
  * Walk curr->robust_list (very carefully, it's a userspace list!)
  * and mark any locks found there dead, and notify any waiters.
  *
@@ -1726,14 +1774,14 @@ void exit_robust_list(struct task_struct *curr)
 {
         struct robust_list_head __user *head = curr->robust_list;
         struct robust_list __user *entry, *pending;
-        unsigned int limit = ROBUST_LIST_LIMIT;
+        unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
         unsigned long futex_offset;
 
         /*
          * Fetch the list head (which was registered earlier, via
          * sys_set_robust_list()):
          */
-        if (get_user(entry, &head->list.next))
+        if (fetch_robust_entry(&entry, &head->list.next, &pi))
                 return;
         /*
          * Fetch the relative futex offset:
@@ -1744,10 +1792,11 @@ void exit_robust_list(struct task_struct *curr)
          * Fetch any possibly pending lock-add first, and handle it
          * if it exists:
          */
-        if (get_user(pending, &head->list_op_pending))
+        if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
                 return;
+
         if (pending)
-                handle_futex_death((void *)pending + futex_offset, curr);
+                handle_futex_death((void *)pending + futex_offset, curr, pip);
 
         while (entry != &head->list) {
                 /*
@@ -1756,12 +1805,12 @@ void exit_robust_list(struct task_struct *curr)
                  */
                 if (entry != pending)
                         if (handle_futex_death((void *)entry + futex_offset,
-                                               curr))
+                                               curr, pi))
                                 return;
                 /*
                  * Fetch the next entry in the list:
                  */
-                if (get_user(entry, &entry->next))
+                if (fetch_robust_entry(&entry, &entry->next, &pi))
                         return;
                 /*
                  * Avoid excessively long or circular lists:
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index d1d92b441fb7..d1aab1a452cc 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -12,6 +12,23 @@
 
 #include <asm/uaccess.h>
 
+
+/*
+ * Fetch a robust-list pointer. Bit 0 signals PI futexes:
+ */
+static inline int
+fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
+                   compat_uptr_t *head, int *pi)
+{
+        if (get_user(*uentry, head))
+                return -EFAULT;
+
+        *entry = compat_ptr((*uentry) & ~1);
+        *pi = (unsigned int)(*uentry) & 1;
+
+        return 0;
+}
+
 /*
  * Walk curr->robust_list (very carefully, it's a userspace list!)
  * and mark any locks found there dead, and notify any waiters.
@@ -22,17 +39,16 @@ void compat_exit_robust_list(struct task_struct *curr)
 {
         struct compat_robust_list_head __user *head = curr->compat_robust_list;
         struct robust_list __user *entry, *pending;
+        unsigned int limit = ROBUST_LIST_LIMIT, pi;
         compat_uptr_t uentry, upending;
-        unsigned int limit = ROBUST_LIST_LIMIT;
         compat_long_t futex_offset;
 
         /*
          * Fetch the list head (which was registered earlier, via
          * sys_set_robust_list()):
          */
-        if (get_user(uentry, &head->list.next))
+        if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
                 return;
-        entry = compat_ptr(uentry);
         /*
          * Fetch the relative futex offset:
          */
@@ -42,11 +58,11 @@ void compat_exit_robust_list(struct task_struct *curr)
          * Fetch any possibly pending lock-add first, and handle it
          * if it exists:
          */
-        if (get_user(upending, &head->list_op_pending))
+        if (fetch_robust_entry(&upending, &pending,
+                               &head->list_op_pending, &pi))
                 return;
-        pending = compat_ptr(upending);
         if (upending)
-                handle_futex_death((void *)pending + futex_offset, curr);
+                handle_futex_death((void *)pending + futex_offset, curr, pi);
 
         while (compat_ptr(uentry) != &head->list) {
                 /*
@@ -55,15 +71,15 @@ void compat_exit_robust_list(struct task_struct *curr)
                  */
                 if (entry != pending)
                         if (handle_futex_death((void *)entry + futex_offset,
-                                               curr))
+                                               curr, pi))
                                 return;
 
                 /*
                  * Fetch the next entry in the list:
                  */
-                if (get_user(uentry, (compat_uptr_t *)&entry->next))
+                if (fetch_robust_entry(&uentry, &entry,
+                        (compat_uptr_t *)&entry->next, &pi))
                         return;
-                entry = compat_ptr(uentry);
                 /*
                  * Avoid excessively long or circular lists:
                  */