author     Carlos O'Donell <carlos@systemhalted.org>      2011-07-08 17:27:00 -0400
committer  James Bottomley <JBottomley@Parallels.com>     2011-07-18 14:08:00 -0400
commit     d9ba5fe76d604514444b1ea0a19f38c6196a46e3 (patch)
tree       abf1db68a2b44512b18d23b52a64f0921ba56eec
parent     1765a367a3b5d5e9692949b0c5ec933eff2c0701 (diff)
[PARISC] Fix futex support
Implements futex op support and makes futex cmpxchg atomic.
Tested on 64-bit SMP kernel running on 2 x PA8700s.
[jejb: checkpatch fixes]
Signed-off-by: Carlos O'Donell <carlos@systemhalted.org>
Tested-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@kernel.org
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
-rw-r--r--   arch/parisc/include/asm/futex.h   66
1 file changed, 60 insertions(+), 6 deletions(-)
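
The patch makes both futex paths take one of a small array of spinlocks, selected by hashing the user address, via _atomic_spin_lock_irqsave()/_atomic_spin_unlock_irqrestore() from asm/atomic.h. As context before the diff, here is a minimal userspace sketch of that "hash the address into an array of locks" technique. It is not the kernel code: the lock count, the 64-byte hashing granularity, and the helper names (lock_for, cmpxchg_emulated) are illustrative assumptions.

/* Illustrative only -- a userspace model, not the kernel implementation. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NLOCKS 4        /* assumed small power of two ("a couple of CPUs") */

static pthread_mutex_t hash_locks[NLOCKS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Hash the address into the lock array: drop low (cache-line) bits, mask. */
static pthread_mutex_t *lock_for(volatile uint32_t *addr)
{
        return &hash_locks[((uintptr_t)addr / 64) & (NLOCKS - 1)];
}

/*
 * Emulated compare-and-exchange: correct as long as every writer of *addr
 * takes the lock returned by lock_for(), which is the invariant the patch
 * establishes for the futex paths.
 */
static int cmpxchg_emulated(volatile uint32_t *addr, uint32_t oldv,
                            uint32_t newv, uint32_t *out_val)
{
        pthread_mutex_t *l = lock_for(addr);
        int swapped;

        pthread_mutex_lock(l);
        *out_val = *addr;
        swapped = (*addr == oldv);
        if (swapped)
                *addr = newv;
        pthread_mutex_unlock(l);
        return swapped;
}

int main(void)
{
        uint32_t word = 0, seen;

        printf("swapped=%d seen=%u word=%u\n",
               cmpxchg_emulated(&word, 0, 7, &seen),
               (unsigned)seen, (unsigned)word);
        return 0;
}

The correctness argument is the same as in the patch: the compare and the store cannot interleave with another update as long as every writer of the word goes through the same hashed lock.
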
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 67a33cc27ef2..2388bdb32832 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -5,11 +5,14 @@
 
 #include <linux/futex.h>
 #include <linux/uaccess.h>
+#include <asm/atomic.h>
 #include <asm/errno.h>
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
+        unsigned long int flags;
+        u32 val;
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
         int oparg = (encoded_op << 8) >> 20;
@@ -18,21 +21,58 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
                 return -EFAULT;
 
         pagefault_disable();
 
+        _atomic_spin_lock_irqsave(uaddr, flags);
+
         switch (op) {
         case FUTEX_OP_SET:
+                /* *(int *)UADDR2 = OPARG; */
+                ret = get_user(oldval, uaddr);
+                if (!ret)
+                        ret = put_user(oparg, uaddr);
+                break;
         case FUTEX_OP_ADD:
+                /* *(int *)UADDR2 += OPARG; */
+                ret = get_user(oldval, uaddr);
+                if (!ret) {
+                        val = oldval + oparg;
+                        ret = put_user(val, uaddr);
+                }
+                break;
         case FUTEX_OP_OR:
+                /* *(int *)UADDR2 |= OPARG; */
+                ret = get_user(oldval, uaddr);
+                if (!ret) {
+                        val = oldval | oparg;
+                        ret = put_user(val, uaddr);
+                }
+                break;
         case FUTEX_OP_ANDN:
+                /* *(int *)UADDR2 &= ~OPARG; */
+                ret = get_user(oldval, uaddr);
+                if (!ret) {
+                        val = oldval & ~oparg;
+                        ret = put_user(val, uaddr);
+                }
+                break;
         case FUTEX_OP_XOR:
+                /* *(int *)UADDR2 ^= OPARG; */
+                ret = get_user(oldval, uaddr);
+                if (!ret) {
+                        val = oldval ^ oparg;
+                        ret = put_user(val, uaddr);
+                }
+                break;
         default:
                 ret = -ENOSYS;
         }
 
+        _atomic_spin_unlock_irqrestore(uaddr, flags);
+
         pagefault_enable();
 
         if (!ret) {
@@ -54,7 +94,9 @@ static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                               u32 oldval, u32 newval)
 {
+        int ret;
         u32 val;
+        unsigned long flags;
 
         /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
          * our gateway page, and causes no end of trouble...
@@ -65,12 +107,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
-        if (get_user(val, uaddr))
-                return -EFAULT;
-        if (val == oldval && put_user(newval, uaddr))
-                return -EFAULT;
+        /* HPPA has no cmpxchg in hardware and therefore the
+         * best we can do here is use an array of locks. The
+         * lock selected is based on a hash of the userspace
+         * address. This should scale to a couple of CPUs.
+         */
+
+        _atomic_spin_lock_irqsave(uaddr, flags);
+
+        ret = get_user(val, uaddr);
+
+        if (!ret && val == oldval)
+                ret = put_user(newval, uaddr);
+
         *uval = val;
-        return 0;
+
+        _atomic_spin_unlock_irqrestore(uaddr, flags);
+
+        return ret;
 }
 
 #endif /*__KERNEL__*/
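
A side note on the bit-twiddling at the top of futex_atomic_op_inuser(): the shifts unpack a single encoded_op word into the operation, the comparison, and two signed 12-bit arguments. The standalone decoder below mirrors those shifts; the field layout used for the example value follows the classic (op << 28) | (cmp << 24) | (oparg << 12) | cmparg encoding from linux/futex.h and should be treated as an assumption rather than something read off this header.

/* Illustrative decoder only -- not part of the patch. */
#include <stdio.h>

struct decoded_futex_op {
        int op;      /* FUTEX_OP_SET/ADD/OR/ANDN/XOR selector */
        int cmp;     /* FUTEX_OP_CMP_* selector (==, !=, <, ...) */
        int oparg;   /* signed 12-bit operand for the operation */
        int cmparg;  /* signed 12-bit operand for the final comparison */
};

static struct decoded_futex_op decode(int encoded_op)
{
        struct decoded_futex_op d;

        d.op  = (encoded_op >> 28) & 7;
        d.cmp = (encoded_op >> 24) & 15;
        /*
         * The kernel writes these as (encoded_op << 8) >> 20 and
         * (encoded_op << 20) >> 20; shifting on unsigned first avoids
         * signed overflow here while keeping the sign-extending effect
         * of the final arithmetic right shift.
         */
        d.oparg  = (int)((unsigned int)encoded_op << 8) >> 20;
        d.cmparg = (int)((unsigned int)encoded_op << 20) >> 20;
        return d;
}

int main(void)
{
        /* Assumed layout: (op << 28) | (cmp << 24) | (oparg << 12) | cmparg. */
        int encoded_op = (1 << 28) | (0 << 24) | (1 << 12) | 0;
        struct decoded_futex_op d = decode(encoded_op);

        printf("op=%d cmp=%d oparg=%d cmparg=%d\n",
               d.op, d.cmp, d.oparg, d.cmparg);
        return 0;
}

The shift-left/shift-right pairs are what sign-extend the 12-bit oparg and cmparg fields; the op and cmp selectors then drive the switch statement and the comparison at the end of the function shown in the diff.
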