aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/lib/find.c
diff options
context:
space:
mode:
authorHeiko Carstens <heiko.carstens@de.ibm.com>2013-09-18 05:45:36 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2013-10-24 11:16:55 -0400
commit746479cdcbb131a0645e8cb4a35d6b26126e3e4c (patch)
tree7cf0fa041804197c14abe21c56bdf27c82274718 /arch/s390/lib/find.c
parent8e6a8285668b139460cc6852bfd58fdbd00c7157 (diff)
s390/bitops: use generic find bit functions / reimplement _left variant
Just like all other architectures we should use out-of-line find bit operations, since the inline variants bloat the size of the kernel image. And also like all other architectures we should only supply optimized variants of the __ffs, ffs, etc. primitives. Therefore this patch removes the inlined s390 find bit functions and uses the generic out-of-line variants instead. The optimization of the primitives follows with the next patch. With this patch also the functions find_first_bit_left() and find_next_bit_left() have been reimplemented, since logically, they are nothing else but a find_first_bit()/find_next_bit() implementation that use an inverted __fls() instead of __ffs(). Also the restriction that these functions only work on machines which support the "flogr" instruction is gone now. This reduces the size of the kernel image (defconfig, -march=z9-109) by 144,482 bytes. Alone the size of the function build_sched_domains() gets reduced from 7 KB to 3.5 KB. We also got rid of unused functions like find_first_bit_le()... Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/lib/find.c')
-rw-r--r--arch/s390/lib/find.c77
1 files changed, 77 insertions, 0 deletions
diff --git a/arch/s390/lib/find.c b/arch/s390/lib/find.c
new file mode 100644
index 000000000000..8963929b06b9
--- /dev/null
+++ b/arch/s390/lib/find.c
@@ -0,0 +1,77 @@
1/*
2 * MSB0 numbered special bitops handling.
3 *
4 * On s390x the bits are numbered:
5 * |0..............63|64............127|128...........191|192...........255|
6 * and on s390:
7 * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
8 *
9 * The reason for this bit numbering is the fact that the hardware sets bits
10 * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
11 * from the 'wrong end'.
12 */
13
14#include <linux/compiler.h>
15#include <linux/bitops.h>
16#include <linux/export.h>
17
18unsigned long find_first_bit_left(const unsigned long *addr, unsigned long size)
19{
20 const unsigned long *p = addr;
21 unsigned long result = 0;
22 unsigned long tmp;
23
24 while (size & ~(BITS_PER_LONG - 1)) {
25 if ((tmp = *(p++)))
26 goto found;
27 result += BITS_PER_LONG;
28 size -= BITS_PER_LONG;
29 }
30 if (!size)
31 return result;
32 tmp = (*p) & (~0UL << (BITS_PER_LONG - size));
33 if (!tmp) /* Are any bits set? */
34 return result + size; /* Nope. */
35found:
36 return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
37}
38EXPORT_SYMBOL(find_first_bit_left);
39
40unsigned long find_next_bit_left(const unsigned long *addr, unsigned long size,
41 unsigned long offset)
42{
43 const unsigned long *p = addr + (offset / BITS_PER_LONG);
44 unsigned long result = offset & ~(BITS_PER_LONG - 1);
45 unsigned long tmp;
46
47 if (offset >= size)
48 return size;
49 size -= result;
50 offset %= BITS_PER_LONG;
51 if (offset) {
52 tmp = *(p++);
53 tmp &= (~0UL >> offset);
54 if (size < BITS_PER_LONG)
55 goto found_first;
56 if (tmp)
57 goto found_middle;
58 size -= BITS_PER_LONG;
59 result += BITS_PER_LONG;
60 }
61 while (size & ~(BITS_PER_LONG-1)) {
62 if ((tmp = *(p++)))
63 goto found_middle;
64 result += BITS_PER_LONG;
65 size -= BITS_PER_LONG;
66 }
67 if (!size)
68 return result;
69 tmp = *p;
70found_first:
71 tmp &= (~0UL << (BITS_PER_LONG - size));
72 if (!tmp) /* Are any bits set? */
73 return result + size; /* Nope. */
74found_middle:
75 return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
76}
77EXPORT_SYMBOL(find_next_bit_left);