author     John David Anglin <dave@hiauly1.hia.nrc.ca>    2011-10-09 16:40:10 -0400
committer  James Bottomley <JBottomley@Parallels.com>     2012-02-27 10:35:08 -0500
commit     8b232816057702d5c9ffeac1a65118f504524fea (patch)
tree       7cc6c65fbd2613c1216c2625ed27e2f1476a2997
parent     500dd2370e77c9551ba298bdeeb91b02d8402199 (diff)
[PARISC] futex: Use same lock set as lws calls
In debugging the failure of the glibc tst-cond18 test on parisc, I
realized that futexes need to use the same locks as the lws calls.
This fixes all the pthread 'cond' tests.  Sadly, there are still
problems with thread cancellation.

[jejb: checkpatch fixes]
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
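For context, the helpers added in the diff below hash the futex address into the
lws_lock_start[] lock table so that the futex path and the LWS syscall path
serialize on the same lock. The following stand-alone user-space sketch (the
lws_lock_index() helper name and the sample addresses are illustrative only,
not part of the patch) shows how bits 4..7 of the address pick one of the
sixteen four-word lock slots:

    /*
     * Minimal sketch (not kernel code): demonstrates the address hashing
     * used by the new _futex_spin_lock_irqsave() helper.  Bits 4..7 of the
     * futex address select one of sixteen locks, spaced four 32-bit words
     * apart in the lws_lock_start[] table that syscall.S also uses.
     */
    #include <stdio.h>
    #include <stdint.h>

    /* Same arithmetic as in the patch: ((long)uaddr & 0xf0) >> 2 */
    static long lws_lock_index(uintptr_t uaddr)
    {
            return ((long)uaddr & 0xf0) >> 2;
    }

    int main(void)
    {
            /* made-up addresses for illustration */
            uintptr_t addrs[] = { 0x40001000, 0x40001010, 0x400010f4, 0x40002000 };

            for (unsigned int i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
                    printf("uaddr %#lx -> lws_lock_start[%ld]\n",
                           (unsigned long)addrs[i], lws_lock_index(addrs[i]));

            /* 0x...1000 and 0x...2000 share index 0; 0x...1010 maps to 4,
             * and 0x...10f4 to 60, the sixteenth four-word slot. */
            return 0;
    }

Because the index depends only on those address bits, any user word and the LWS
fast-path operating on the same word always contend for the same lock, which is
exactly what the patch relies on.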
-rw-r--r--  arch/parisc/include/asm/futex.h  31
1 file changed, 27 insertions(+), 4 deletions(-)
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 2388bdb32832..49df14805a9b 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -8,6 +8,29 @@
 #include <asm/atomic.h>
 #include <asm/errno.h>
 
+/* The following has to match the LWS code in syscall.S. We have
+   sixteen four-word locks. */
+
+static inline void
+_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
+{
+	extern u32 lws_lock_start[];
+	long index = ((long)uaddr & 0xf0) >> 2;
+	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+	local_irq_save(*flags);
+	arch_spin_lock(s);
+}
+
+static inline void
+_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
+{
+	extern u32 lws_lock_start[];
+	long index = ((long)uaddr & 0xf0) >> 2;
+	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+	arch_spin_unlock(s);
+	local_irq_restore(*flags);
+}
+
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
@@ -26,7 +49,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 
 	pagefault_disable();
 
-	_atomic_spin_lock_irqsave(uaddr, flags);
+	_futex_spin_lock_irqsave(uaddr, &flags);
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -71,7 +94,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 		ret = -ENOSYS;
 	}
 
-	_atomic_spin_unlock_irqrestore(uaddr, flags);
+	_futex_spin_unlock_irqrestore(uaddr, &flags);
 
 	pagefault_enable();
 
@@ -113,7 +136,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	 * address. This should scale to a couple of CPUs.
 	 */
 
-	_atomic_spin_lock_irqsave(uaddr, flags);
+	_futex_spin_lock_irqsave(uaddr, &flags);
 
 	ret = get_user(val, uaddr);
 
@@ -122,7 +145,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 
 	*uval = val;
 
-	_atomic_spin_unlock_irqrestore(uaddr, flags);
+	_futex_spin_unlock_irqrestore(uaddr, &flags);
 
 	return ret;
 }