author    Alexander van Heukelum <heukelum@mailshack.com>  2008-03-11 11:17:19 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-04-26 13:21:16 -0400
commit    64970b68d2b3ed32b964b0b30b1b98518fde388e (patch)
tree      7d8eb5ea3ab1a841afa0f7ae1c65e7be4a9ca690 /lib/find_next_bit.c
parent    60b6783a044a55273b637983f52965c2808a6b86 (diff)
x86, generic: optimize find_next_(zero_)bit for small constant-size bitmaps
This moves an optimization for searching constant-sized small bitmaps from x86_64-specific to generic code.

On an i386 defconfig (the x86#testing one), the size of vmlinux hardly changes with this applied. I have observed only four places where this optimization avoids a call into find_next_bit: in the functions return_unused_surplus_pages, alloc_fresh_huge_page, and adjust_pool_surplus, this patch avoids a call for a 1-bit bitmap, and in __next_cpu a call is avoided for a 32-bit bitmap. That's it.

On x86_64, 52 locations are optimized with a minimal increase in code size:

Current #testing defconfig: 146 x bsf, 27 x find_next_*bit
   text    data     bss     dec     hex filename
5392637  846592  724424 6963653  6a41c5 vmlinux

After removing the x86_64-specific optimization for find_next_*bit: 94 x bsf, 79 x find_next_*bit
   text    data     bss     dec     hex filename
5392358  846592  724424 6963374  6a40ae vmlinux

After this patch (making the optimization generic): 146 x bsf, 27 x find_next_*bit
   text    data     bss     dec     hex filename
5392396  846592  724424 6963412  6a40d4 vmlinux

[ tglx@linutronix.de: build fixes ]

Signed-off-by: Ingo Molnar <mingo@elte.hu>
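Context for the rename visible in the diff below: the generic fast path lives on the header side (include/linux/bitops.h, not shown in this diff), where find_next_bit() becomes an always-inline wrapper that handles small constant-size bitmaps itself and falls back to the renamed out-of-line __find_next_bit() otherwise. A minimal sketch of that wrapper, reconstructed here for illustration rather than quoted from the patch:

static __always_inline unsigned long
find_next_bit(const unsigned long *addr, unsigned long size,
	      unsigned long offset)
{
	unsigned long value;

	/*
	 * Constant bitmap smaller than one word: the search reduces to
	 * one masked __ffs(). A sentinel bit planted at 'size' makes
	 * __ffs() return 'size' when no bit at or above 'offset' is set.
	 */
	if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
		value = (*addr) & ((~0ul) << offset);
		value |= (1ul << size);
		return __ffs(value);
	}

	/* __ffs(0) is undefined, so a constant full word needs its own check. */
	if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
		value = (*addr) & ((~0ul) << offset);
		return (value == 0) ? BITS_PER_LONG : __ffs(value);
	}

	/* Variable or multi-word sizes: out-of-line search. */
	return __find_next_bit(addr, size, offset);
}

This is what turns, for example, the 32-bit bitmap search in __next_cpu into a short mask-and-bsf sequence instead of a function call, matching the bsf counts quoted above.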
Diffstat (limited to 'lib/find_next_bit.c')
-rw-r--r--  lib/find_next_bit.c | 25 +++++++++----------------
1 file changed, 9 insertions(+), 16 deletions(-)
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index 5820e072b890..ce94c4c92d10 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -15,17 +15,12 @@
 #include <asm/byteorder.h>
 
 #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
-#undef find_next_bit
-#undef find_next_zero_bit
-
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
+
+/*
+ * Find the next set bit in a memory region.
  */
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
-			unsigned long offset)
+unsigned long __find_next_bit(const unsigned long *addr,
+		unsigned long size, unsigned long offset)
 {
 	const unsigned long *p = addr + BITOP_WORD(offset);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -62,15 +57,14 @@ found_first:
 found_middle:
 	return result + __ffs(tmp);
 }
-
-EXPORT_SYMBOL(find_next_bit);
+EXPORT_SYMBOL(__find_next_bit);
 
 /*
  * This implementation of find_{first,next}_zero_bit was stolen from
  * Linus' asm-alpha/bitops.h.
  */
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
-			unsigned long offset)
+unsigned long __find_next_zero_bit(const unsigned long *addr,
+		unsigned long size, unsigned long offset)
 {
 	const unsigned long *p = addr + BITOP_WORD(offset);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -107,8 +101,7 @@ found_first:
 found_middle:
 	return result + ffz(tmp);
 }
-
-EXPORT_SYMBOL(find_next_zero_bit);
+EXPORT_SYMBOL(__find_next_zero_bit);
 
 #ifdef __BIG_ENDIAN
 
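For reference, the out-of-line routine being renamed above searches word at a time: jump to the word containing 'offset', mask off the bits below 'offset' in that first word, scan whole words until one is non-zero, then mask the final partial word against the size limit. A standalone userspace sketch of that logic (assumptions: __builtin_ctzl stands in for the kernel's __ffs(), and BITS_PER_LONG is derived from sizeof(long)):

#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)

static unsigned long sketch_find_next_bit(const unsigned long *addr,
					  unsigned long size,
					  unsigned long offset)
{
	const unsigned long *p = addr + BITOP_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset %= BITS_PER_LONG;
	if (offset) {
		/* First word: clear the bits below the starting offset. */
		tmp = *(p++);
		tmp &= (~0UL << offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	/* Middle: scan whole words until one contains a set bit. */
	while (size & ~(BITS_PER_LONG - 1)) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* Last word: ignore bits at and above the size limit. */
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)
		return result + size;	/* no set bit in range */
found_middle:
	return result + __builtin_ctzl(tmp);	/* __ffs() in the kernel */
}

__find_next_zero_bit follows the same structure, except that each word is complemented before testing and ffz() replaces __ffs().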