path: root/arch/tile/lib/atomic_32.c
author     Chris Metcalf <cmetcalf@tilera.com>   2010-06-25 17:04:17 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>   2010-07-06 13:41:51 -0400
commit     0707ad30d10110aebc01a5a64fb63f4b32d20b73 (patch)
tree       64d8ba73e605ac26e56808d1d77701b3f83cf8b2 /arch/tile/lib/atomic_32.c
parent     c78095bd8c77fca2619769ff8efb639fd100e373 (diff)
arch/tile: Miscellaneous cleanup changes.
This commit is primarily changes caused by reviewing "sparse" and
"checkpatch" output on our sources, so is somewhat noisy, since things
like "printk() -> pr_err()" (or whatever) throughout the codebase tend
to get tedious to read.  Rather than trying to tease apart precisely
which things changed due to which type of code review, this commit
includes various cleanups in the code:

- sparse: Add declarations in headers for globals.
- sparse: Fix __user annotations.
- sparse: Use gfp_t consistently instead of int.
- sparse: Remove functions not actually used.
- checkpatch: Clean up printk() warnings by using pr_info(), etc.;
  also avoid partial-line printks except in bootup code.
- checkpatch: Use exposed structs rather than typedefs.
- checkpatch: Change some C99 comments to C89 comments.

In addition, a couple of minor other changes are rolled into this commit:

- Add support for a "raise" instruction to cause SIGFPE, etc., to be raised.
- Remove some compat code that is unnecessary when we fully eliminate
  some of the deprecated syscalls from the generic syscall ABI.
- Update the tile_defconfig to reflect current config contents.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/tile/lib/atomic_32.c')
-rw-r--r--  arch/tile/lib/atomic_32.c   53
1 file changed, 18 insertions, 35 deletions
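
The __user annotation fixes visible in the diff below follow sparse's
address-space model: a pointer tagged __user belongs to a different
address space than a plain kernel pointer, so handing it to a routine
that expects a bare int * needs an explicit (int __force *) cast to
tell sparse the conversion is intentional.  The following is a minimal
standalone sketch of that pattern, not kernel code: the fallback macro
definitions mirror what <linux/compiler.h> provides when sparse is not
running, and raw_exchange()/futex_like_set() are invented names used
purely for illustration.

#include <stdio.h>

/*
 * Sparse-only annotations.  When sparse (__CHECKER__) is not running,
 * they expand to nothing, so this file builds with a normal compiler.
 * (address_space(1) is the older numbering for user space.)
 */
#ifdef __CHECKER__
#define __user   __attribute__((noderef, address_space(1)))
#define __force  __attribute__((force))
#else
#define __user
#define __force
#endif

/* A helper that only understands plain (kernel-space) pointers. */
static int raw_exchange(volatile int *p, int n)
{
        int old = *p;           /* illustrative only: not actually atomic */
        *p = n;
        return old;
}

/*
 * A caller handed a user-space pointer.  Without the (int __force *)
 * cast, sparse warns about mixing address spaces; adding that cast is
 * the kind of cleanup this commit makes in the futex routines.
 */
static int futex_like_set(int __user *v, int n)
{
        return raw_exchange((int __force *)v, n);
}

int main(void)
{
        int word = 0;
        /* __force also silences the kernel-to-user cast made for this demo. */
        int old = futex_like_set((__force int __user *)&word, 42);

        printf("old=%d new=%d\n", old, word);
        return 0;
}

Building this with a normal compiler just runs the demo; checking it
with sparse (for kernel sources, make C=1) after removing the __force
casts reproduces the "different address spaces" warnings that this
commit eliminates.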
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index be1e8acd105..8040b42a8ee 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -18,27 +18,10 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/atomic.h>
+#include <asm/futex.h>
 #include <arch/chip.h>
 
-/* The routines in atomic_asm.S are private, so we only declare them here. */
-extern struct __get_user __atomic_cmpxchg(volatile int *p,
-                                          int *lock, int o, int n);
-extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
-                                                  int *lock, int o, int n);
-extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-                                      int *lock, u64 o, u64 n);
-
-
-/* See <asm/atomic.h> */
+/* See <asm/atomic_32.h> */
 #if ATOMIC_LOCKS_FOUND_VIA_TABLE()
 
 /*
@@ -209,7 +192,7 @@ u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
 
-static inline int *__futex_setup(__user int *v)
+static inline int *__futex_setup(int __user *v)
 {
         /*
          * Issue a prefetch to the counter to bring it into cache.
@@ -217,37 +200,37 @@ static inline int *__futex_setup(__user int *v)
          * since it might fault; instead we do a prefetch into the L2.
          */
         __insn_prefetch(v);
-        return __atomic_hashed_lock(v);
+        return __atomic_hashed_lock((int __force *)v);
 }
 
-struct __get_user futex_set(int *v, int i)
+struct __get_user futex_set(int __user *v, int i)
 {
-        return __atomic_xchg(v, __futex_setup(v), i);
+        return __atomic_xchg((int __force *)v, __futex_setup(v), i);
 }
 
-struct __get_user futex_add(int *v, int n)
+struct __get_user futex_add(int __user *v, int n)
 {
-        return __atomic_xchg_add(v, __futex_setup(v), n);
+        return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
 }
 
-struct __get_user futex_or(int *v, int n)
+struct __get_user futex_or(int __user *v, int n)
 {
-        return __atomic_or(v, __futex_setup(v), n);
+        return __atomic_or((int __force *)v, __futex_setup(v), n);
 }
 
-struct __get_user futex_andn(int *v, int n)
+struct __get_user futex_andn(int __user *v, int n)
 {
-        return __atomic_andn(v, __futex_setup(v), n);
+        return __atomic_andn((int __force *)v, __futex_setup(v), n);
 }
 
-struct __get_user futex_xor(int *v, int n)
+struct __get_user futex_xor(int __user *v, int n)
 {
-        return __atomic_xor(v, __futex_setup(v), n);
+        return __atomic_xor((int __force *)v, __futex_setup(v), n);
 }
 
-struct __get_user futex_cmpxchg(int *v, int o, int n)
+struct __get_user futex_cmpxchg(int __user *v, int o, int n)
 {
-        return __atomic_cmpxchg(v, __futex_setup(v), o, n);
+        return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
 }
 
 /*
@@ -260,7 +243,7 @@ struct __get_user futex_cmpxchg(int *v, int o, int n)
  * invoked in is the context of the "_atomic_xxx()" routines called
  * by the functions in this file.
  */
-struct __get_user __atomic_bad_address(int *addr)
+struct __get_user __atomic_bad_address(int __user *addr)
 {
         if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
                 panic("Bad address used for kernel atomic op: %p\n", addr);
@@ -271,7 +254,7 @@ struct __get_user __atomic_bad_address(int *addr)
 #if CHIP_HAS_CBOX_HOME_MAP()
 static int __init noatomichash(char *str)
 {
-        printk("noatomichash is deprecated.\n");
+        pr_warning("noatomichash is deprecated.\n");
         return 1;
 }
 __setup("noatomichash", noatomichash);