author     Akinobu Mita <akinobu.mita@gmail.com>    2010-10-05 11:55:29 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>      2010-10-05 12:49:35 -0400
commit     de5bbad6770882209b0ac58b0ba9259a98cfb953 (patch)
tree       da258f026fbd5342a61ce858fc2de5ed1e233932 /arch/tile
parent     2bfc96a127bc1cc94d26bfaa40159966064f9c8c (diff)
tile: replace some BUG_ON checks with BUILD_BUG_ON checks
Some BUG_ON checks can be detected at compile time rather than at runtime.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
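For readers unfamiliar with the distinction, here is a minimal userspace sketch of why such checks can move to compile time. The SKETCH_BUILD_BUG_ON macro and the EXAMPLE_HASH_SIZE constant are illustrative stand-ins, not the kernel's actual definitions: when every operand of the condition is a compile-time constant, the compiler itself rejects a bad configuration, whereas BUG_ON only fires at runtime.

#include <stdio.h>

/* Illustrative stand-in for the kernel's BUILD_BUG_ON: a non-zero constant
 * condition produces a negative array size, so the build fails on the spot. */
#define SKETCH_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

/* Hypothetical value standing in for ATOMIC_HASH_SIZE from the patch. */
#define EXAMPLE_HASH_SIZE 512

int main(void)
{
        /* Power-of-two check mirroring the one converted by this patch;
         * every operand is a compile-time constant, so no runtime test
         * (and no BUG_ON) is needed. */
        SKETCH_BUILD_BUG_ON(EXAMPLE_HASH_SIZE & (EXAMPLE_HASH_SIZE - 1));

        /* A check against nr_cpu_ids, by contrast, must remain a runtime
         * BUG_ON in the kernel, since that value is only known at boot. */

        printf("compile-time assertions held\n");
        return 0;
}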
Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/lib/atomic_32.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 8040b42a8ee..7a5cc706ab6 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -300,7 +300,7 @@ void __init __init_atomic_per_cpu(void)
 #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 
         /* Validate power-of-two and "bigger than cpus" assumption */
-        BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
+        BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
         BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);
 
         /*
@@ -314,17 +314,17 @@ void __init __init_atomic_per_cpu(void)
         BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);
 
         /* The locks must all fit on one page. */
-        BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
+        BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
 
         /*
          * We use the page offset of the atomic value's address as
          * an index into atomic_locks, excluding the low 3 bits.
          * That should not produce more indices than ATOMIC_HASH_SIZE.
          */
-        BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
+        BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
 
 #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 
         /* The futex code makes this assumption, so we validate it here. */
-        BUG_ON(sizeof(atomic_t) != sizeof(int));
+        BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
 }
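A note on the (PAGE_SIZE >> 3) check converted above: the lock index is derived from the page offset of the atomic value with the low 3 bits dropped, so it ranges over [0, PAGE_SIZE >> 3), and a table of at least that many slots is required. The sketch below illustrates that bound only; the function name and constants are assumptions for illustration, not the tile implementation.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical constants for illustration; the tile values may differ. */
#define EXAMPLE_PAGE_SIZE        4096u
#define EXAMPLE_ATOMIC_HASH_SIZE  512u

/* Page offset of the value's address, with the low 3 bits dropped, used as
 * a table index.  Its maximum is (EXAMPLE_PAGE_SIZE >> 3) - 1, which is why
 * the patch can assert (PAGE_SIZE >> 3) <= ATOMIC_HASH_SIZE at build time. */
static unsigned int example_lock_index(uintptr_t addr)
{
        return (unsigned int)((addr & (EXAMPLE_PAGE_SIZE - 1)) >> 3);
}

int main(void)
{
        unsigned int max_index = example_lock_index(EXAMPLE_PAGE_SIZE - 1);

        printf("largest possible index: %u (table size: %u)\n",
               max_index, EXAMPLE_ATOMIC_HASH_SIZE);
        return 0;
}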