Diffstat (limited to 'arch/tile/lib/atomic_32.c')
-rw-r--r--	arch/tile/lib/atomic_32.c	53
1 file changed, 18 insertions(+), 35 deletions(-)
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index be1e8acd105d..8040b42a8eea 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -18,27 +18,10 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/atomic.h>
+#include <asm/futex.h>
 #include <arch/chip.h>
 
-/* The routines in atomic_asm.S are private, so we only declare them here. */
-extern struct __get_user __atomic_cmpxchg(volatile int *p,
-					  int *lock, int o, int n);
-extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
-						  int *lock, int o, int n);
-extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-				      int *lock, u64 o, u64 n);
-
-
-/* See <asm/atomic.h> */
+/* See <asm/atomic_32.h> */
 #if ATOMIC_LOCKS_FOUND_VIA_TABLE()
 
 /*
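The deleted prototypes are not gone: the replacement comment, /* See <asm/atomic_32.h> */, points at the shared header they were moved to, where the new futex code can reach them as well. A short sketch of the relocated declarations (the header placement is inferred from the comment; the signatures are exactly the ones removed above):

    /* In <asm/atomic_32.h> (assumed): the atomic_asm.S routines, now
     * visible to both atomic_32.c and the futex support. */
    extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
    extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);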
@@ -209,7 +192,7 @@ u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
 
-static inline int *__futex_setup(__user int *v)
+static inline int *__futex_setup(int __user *v)
 {
 	/*
 	 * Issue a prefetch to the counter to bring it into cache.
@@ -217,37 +200,37 @@ static inline int *__futex_setup(__user int *v)
 	 * since it might fault; instead we do a prefetch into the L2.
 	 */
 	__insn_prefetch(v);
-	return __atomic_hashed_lock(v);
+	return __atomic_hashed_lock((int __force *)v);
 }
 
-struct __get_user futex_set(int *v, int i)
+struct __get_user futex_set(int __user *v, int i)
 {
-	return __atomic_xchg(v, __futex_setup(v), i);
+	return __atomic_xchg((int __force *)v, __futex_setup(v), i);
 }
 
-struct __get_user futex_add(int *v, int n)
+struct __get_user futex_add(int __user *v, int n)
 {
-	return __atomic_xchg_add(v, __futex_setup(v), n);
+	return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
 }
 
-struct __get_user futex_or(int *v, int n)
+struct __get_user futex_or(int __user *v, int n)
 {
-	return __atomic_or(v, __futex_setup(v), n);
+	return __atomic_or((int __force *)v, __futex_setup(v), n);
 }
 
-struct __get_user futex_andn(int *v, int n)
+struct __get_user futex_andn(int __user *v, int n)
 {
-	return __atomic_andn(v, __futex_setup(v), n);
+	return __atomic_andn((int __force *)v, __futex_setup(v), n);
 }
 
-struct __get_user futex_xor(int *v, int n)
+struct __get_user futex_xor(int __user *v, int n)
 {
-	return __atomic_xor(v, __futex_setup(v), n);
+	return __atomic_xor((int __force *)v, __futex_setup(v), n);
 }
 
-struct __get_user futex_cmpxchg(int *v, int o, int n)
+struct __get_user futex_cmpxchg(int __user *v, int o, int n)
 {
-	return __atomic_cmpxchg(v, __futex_setup(v), o, n);
+	return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
 }
 
 /*
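All seven wrappers above apply the same sparse-annotation pattern: the futex entry points now take int __user * because the operand is a userspace address, while the __atomic_* backends keep their plain int * parameters, so each call site strips the address-space marker with an explicit (int __force *) cast. A minimal sketch of the pattern, with hypothetical names (only __user and __force come from the kernel):

    #include <linux/compiler.h>	/* __user and __force annotations */

    /* Backend that operates on a raw address (stands in for the
     * __atomic_* routines above). */
    static int backend_op(int *p);

    /* Entry point that receives a userspace pointer.  Passing uaddr to
     * backend_op() directly would make a sparse build warn about an
     * address-space mismatch; the __force cast marks the conversion as
     * deliberate. */
    static int frontend_op(int __user *uaddr)
    {
            return backend_op((int __force *)uaddr);
    }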
@@ -260,7 +243,7 @@ struct __get_user futex_cmpxchg(int *v, int o, int n)
  * invoked in is the context of the "_atomic_xxx()" routines called
  * by the functions in this file.
  */
-struct __get_user __atomic_bad_address(int *addr)
+struct __get_user __atomic_bad_address(int __user *addr)
 {
 	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
 		panic("Bad address used for kernel atomic op: %p\n", addr);
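The access_ok() test is what separates the two failure modes: it is only a range check against the user address-space limit and does not fault the page in, so an address that fails it can never be a userspace futex operand. Reaching this function with such an address therefore means a kernel atomic op was handed a bad pointer, which is unrecoverable. A hypothetical, self-contained restatement of the check (VERIFY_WRITE is the pre-5.0 access_ok() calling convention):

    static void sanity_check_atomic_addr(int __user *addr)
    {
            /* Range check only; does not touch the page itself. */
            if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
                    panic("Bad address used for kernel atomic op: %p\n", addr);
            /* else: a legitimate user fault, presumably reported back as
             * -EFAULT (that path falls outside the hunk shown above) */
    }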
@@ -271,7 +254,7 @@ struct __get_user __atomic_bad_address(int *addr)
 #if CHIP_HAS_CBOX_HOME_MAP()
 static int __init noatomichash(char *str)
 {
-	printk("noatomichash is deprecated.\n");
+	pr_warning("noatomichash is deprecated.\n");
 	return 1;
 }
 __setup("noatomichash", noatomichash);
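The final hunk swaps a bare printk() for pr_warning(), which attaches an explicit KERN_WARNING log level instead of leaving the message at the default. At the time of this commit the macro was essentially the following (pr_warning() was later superseded by pr_warn() and eventually removed):

    /* Approximate contemporary definition, from <linux/kernel.h>: */
    #define pr_warning(fmt, ...) \
            printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)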