author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/tile/lib/atomic_32.c
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'arch/tile/lib/atomic_32.c')
-rw-r--r--	arch/tile/lib/atomic_32.c	25
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 8040b42a8eea..46570211df52 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -46,14 +46,13 @@ struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE]
 #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 
 /* This page is remapped on startup to be hash-for-home. */
-int atomic_locks[PAGE_SIZE / sizeof(int) /* Only ATOMIC_HASH_SIZE is used */]
-  __attribute__((aligned(PAGE_SIZE), section(".bss.page_aligned")));
+int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
 
 #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 
 static inline int *__atomic_hashed_lock(volatile void *v)
 {
-	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec.S */
+	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
 #if ATOMIC_LOCKS_FOUND_VIA_TABLE()
 	unsigned long i =
 		(unsigned long) v & ((PAGE_SIZE-1) & -sizeof(long long));
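
Aside: the removed pair of lines open-coded a page-aligned BSS placement with a stale single-dot section name; the replacement uses the kernel's __page_aligned_bss helper. A sketch of the equivalence, assuming the include/linux/linkage.h definition of this era (the double-dot ".bss..page_aligned" spelling is my reading of that header, not something quoted in this diff):

/* Sketch: roughly what __page_aligned_bss expands to. */
#define __page_aligned_bss \
	__attribute__((__section__(".bss..page_aligned"))) \
	__attribute__((__aligned__(PAGE_SIZE)))

/* So the new declaration matches the removed one, minus the stale
 * ".bss.page_aligned" section name and the in-array comment. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;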
@@ -203,32 +202,32 @@ static inline int *__futex_setup(int __user *v)
 	return __atomic_hashed_lock((int __force *)v);
 }
 
-struct __get_user futex_set(int __user *v, int i)
+struct __get_user futex_set(u32 __user *v, int i)
 {
 	return __atomic_xchg((int __force *)v, __futex_setup(v), i);
 }
 
-struct __get_user futex_add(int __user *v, int n)
+struct __get_user futex_add(u32 __user *v, int n)
 {
 	return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
 }
 
-struct __get_user futex_or(int __user *v, int n)
+struct __get_user futex_or(u32 __user *v, int n)
 {
 	return __atomic_or((int __force *)v, __futex_setup(v), n);
 }
 
-struct __get_user futex_andn(int __user *v, int n)
+struct __get_user futex_andn(u32 __user *v, int n)
 {
 	return __atomic_andn((int __force *)v, __futex_setup(v), n);
 }
 
-struct __get_user futex_xor(int __user *v, int n)
+struct __get_user futex_xor(u32 __user *v, int n)
 {
 	return __atomic_xor((int __force *)v, __futex_setup(v), n);
 }
 
-struct __get_user futex_cmpxchg(int __user *v, int o, int n)
+struct __get_user futex_cmpxchg(u32 __user *v, int o, int n)
 {
 	return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
 }
@@ -300,7 +299,7 @@ void __init __init_atomic_per_cpu(void)
 #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 
 	/* Validate power-of-two and "bigger than cpus" assumption */
-	BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
+	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
 	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);
 
 	/*
@@ -314,17 +313,17 @@ void __init __init_atomic_per_cpu(void)
 	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);
 
 	/* The locks must all fit on one page. */
-	BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
+	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
 
 	/*
 	 * We use the page offset of the atomic value's address as
 	 * an index into atomic_locks, excluding the low 3 bits.
 	 * That should not produce more indices than ATOMIC_HASH_SIZE.
 	 */
-	BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
+	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
 
 #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 
 	/* The futex code makes this assumption, so we validate it here. */
-	BUG_ON(sizeof(atomic_t) != sizeof(int));
+	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
 }
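
These last hunks promote checks whose operands are compile-time constants from BUG_ON() (an oops at boot) to BUILD_BUG_ON() (a failed build); checks that depend on runtime values, such as nr_cpu_ids and the load address of atomic_locks, stay as BUG_ON. A minimal standalone model of the negative-array-size trick commonly used for such compile-time assertions (the kernel's real macro in include/linux/kernel.h is more elaborate):

/* If cond is nonzero the array type has negative size and compilation
 * fails; if cond is zero the whole expression is a harmless no-op. */
#define MODEL_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

#define ATOMIC_HASH_SIZE 512	/* illustrative values, not tile's config */
#define MODEL_PAGE_SIZE  4096

int main(void)
{
	/* The same invariants this patch now enforces at build time: */
	MODEL_BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE - 1)); /* power of 2 */
	MODEL_BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > MODEL_PAGE_SIZE); /* one page */
	MODEL_BUILD_BUG_ON((MODEL_PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE); /* index range */
	return 0;
}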