aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arc/include/asm
diff options
context:
space:
mode:
authorVineet Gupta <vgupta@synopsys.com>2014-11-07 08:49:37 -0500
committerVineet Gupta <vgupta@synopsys.com>2015-04-13 05:44:57 -0400
commitde60c1a1849c57e864f02f0d921993982b1648f8 (patch)
tree869ad7203ec49c483fcc2d5a89ace26337a67dda /arch/arc/include/asm
parent0dfb8ec70fd67be02096eaf9898feb94950d6f06 (diff)
ARC: fold __builtin_constant_p() into test_bit()
This makes test_bit() more like its sibling *_bit() routines. Also add some comments about the constant @nr micro-optimization. Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch/arc/include/asm')
-rw-r--r--arch/arc/include/asm/bitops.h31
1 file changed, 18 insertions, 13 deletions
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 1a5bf07eefe2..4051e9525939 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -32,6 +32,20 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *m)
32 32
33 m += nr >> 5; 33 m += nr >> 5;
34 34
35 /*
36 * ARC ISA micro-optimization:
37 *
38 * Instructions dealing with bitpos only consider lower 5 bits (0-31)
39 * e.g (x << 33) is handled like (x << 1) by ASL instruction
40 * (mem pointer still needs adjustment to point to next word)
41 *
42 * Hence the masking to clamp @nr arg can be elided in general.
43 *
44 * However if @nr is a constant (above assumed it in a register),
45 * and greater than 31, gcc can optimize away (x << 33) to 0,
46 * as overflow, given the 32-bit ISA. Thus masking needs to be done
47 * for constant @nr, but no code is generated due to const prop.
48 */
35 if (__builtin_constant_p(nr)) 49 if (__builtin_constant_p(nr))
36 nr &= 0x1f; 50 nr &= 0x1f;
37 51
@@ -374,29 +388,20 @@ __test_and_change_bit(unsigned long nr, volatile unsigned long *m)
374 * This routine doesn't need to be atomic. 388 * This routine doesn't need to be atomic.
375 */ 389 */
376static inline int 390static inline int
377__constant_test_bit(unsigned int nr, const volatile unsigned long *addr) 391test_bit(unsigned int nr, const volatile unsigned long *addr)
378{
379 return ((1UL << (nr & 31)) &
380 (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
381}
382
383static inline int
384__test_bit(unsigned int nr, const volatile unsigned long *addr)
385{ 392{
386 unsigned long mask; 393 unsigned long mask;
387 394
388 addr += nr >> 5; 395 addr += nr >> 5;
389 396
390 /* ARC700 only considers 5 bits in bit-fiddling insn */ 397 if (__builtin_constant_p(nr))
398 nr &= 0x1f;
399
391 mask = 1 << nr; 400 mask = 1 << nr;
392 401
393 return ((mask & *addr) != 0); 402 return ((mask & *addr) != 0);
394} 403}
395 404
396#define test_bit(nr, addr) (__builtin_constant_p(nr) ? \
397 __constant_test_bit((nr), (addr)) : \
398 __test_bit((nr), (addr)))
399
400/* 405/*
401 * Count the number of zeros, starting from MSB 406 * Count the number of zeros, starting from MSB
402 * Helper for fls( ) friends 407 * Helper for fls( ) friends