Diffstat (limited to 'arch/x86/include/asm/word-at-a-time.h')
 -rw-r--r--  arch/x86/include/asm/word-at-a-time.h | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..6fe6767b7124
--- /dev/null
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -0,0 +1,46 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+	return mask*0x0001020304050608ul >> 56;
+}
+
+#else /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+	long a = (0x0ff0001+mask) >> 23;
+	/* Fix the 1 for 00 case */
+	return a & mask;
+}
+
+#endif
+
+#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+
+/* Return the high bit set in the first byte that is a zero */
+static inline unsigned long has_zero(unsigned long a)
+{
+	return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
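Worked example: the 64-bit multiply trick. As the 32-bit comment shows, the mask handed to count_masked_bytes() is a bytemask with all-ones in the bytes below the first zero byte (0, 0xff, 0xffff, ...), so for n such bytes the multiply must deliver n in the top byte. The constant's low byte is 0x08 rather than 0x07 so that the borrow generated by the all-ones bytes is absorbed, which is what lets the mask go in without being ANDed with ONEBYTES first. A standalone sketch, not part of the patch, assuming an LP64 host:

#include <stdio.h>

/* Same expression as the 64-bit version in the patch: maps a
 * bytemask of n low all-ones bytes to n. */
static long count_masked_bytes(unsigned long mask)
{
	return mask*0x0001020304050608ul >> 56;
}

int main(void)
{
	unsigned long mask = 0;	/* bytemask: n low bytes all-ones */
	int n;

	for (n = 0; n <= 7; n++) {
		/* prints "0 -> 0" through "7 -> 7" */
		printf("%d -> %ld\n", n, count_masked_bytes(mask));
		mask = (mask << 8) | 0xff;
	}
	return 0;
}

For mask = 0xffff, for instance, the product is 0x020202020202f9f8, and the top byte is the answer, 2.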
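The 32-bit variant does the same job without a wide multiply. Stepping the four possible bytemasks through it, the add of 0x0ff0001 carries a 1, 1, 2 or 3 into bits 23 and up, and the final "& mask" turns the bogus 1 for the empty mask back into 0, exactly as the in-line comments say. A standalone sketch, not part of the patch:

#include <stdio.h>

/* Same expression as the 32-bit version in the patch. */
static long count_masked_bytes(long mask)
{
	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
	long a = (0x0ff0001+mask) >> 23;
	/* Fix the 1 for 00 case */
	return a & mask;
}

int main(void)
{
	long masks[] = { 0x000000, 0x0000ff, 0x00ffff, 0xffffff };
	int i;

	for (i = 0; i < 4; i++)	/* prints 0, 1, 2, 3 */
		printf("%06lx -> %ld\n", (unsigned long)masks[i],
		       count_masked_bytes(masks[i]));
	return 0;
}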
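The header comment hints that an architecture with a reliably fast bit-count instruction could beat the multiply and shift. Since a bytemask of n all-ones bytes has exactly 8*n set bits, one hypothetical alternative (an assumption illustrating the comment, not anything in the patch) is a population count shifted down by three, here via the GCC/Clang builtin:

#include <stdio.h>

/* Hypothetical popcount-based alternative, not part of the patch:
 * a bytemask of n all-ones bytes has 8*n set bits, so the count
 * shifted right by 3 equals the multiply-and-shift result. */
static long count_masked_bytes_popcount(unsigned long mask)
{
	return __builtin_popcountl(mask) >> 3;
}

int main(void)
{
	printf("%ld %ld %ld\n",
	       count_masked_bytes_popcount(0),			/* 0 */
	       count_masked_bytes_popcount(0xff),		/* 1 */
	       count_masked_bytes_popcount(0xffffffff));	/* 4 */
	return 0;
}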
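has_zero() works because subtracting 0x01 from every byte in parallel borrows through a byte only when it is zero (or sits above a zero byte), while "& ~a & 0x80-per-byte" rejects bytes that were nonzero on their own. Bits can flag spuriously above the first zero byte, but the lowest set 0x80 bit always marks the first zero, which is what the callers use. A standalone sketch of that, plus one hypothetical way a caller could turn the result into the bytemask count_masked_bytes() expects (that conversion is an assumption about the caller, not part of this header):

#include <stdio.h>

#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))

/* Same expressions as the patch (64-bit case). */
static unsigned long has_zero(unsigned long a)
{
	return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
}

static long count_masked_bytes(unsigned long mask)
{
	return mask*0x0001020304050608ul >> 56;
}

int main(void)
{
	/* "abc" followed by NUL bytes, as a little-endian word */
	unsigned long a = 0x0000000000636261ul;
	unsigned long zmask = has_zero(a);
	unsigned long bytemask;

	/* 0x8080808080000000: every zero byte flags, but the lowest
	 * set 0x80 bit marks the first zero byte, which is the one
	 * that matters. */
	printf("zmask    = %016lx\n", zmask);

	/* Hypothetical caller-side conversion (an assumption, not in
	 * this header): all-ones below the lowest set bit, then shift
	 * out the 7 sub-0x80 bits, leaving 3 all-ones bytes. */
	bytemask = ((zmask - 1) & ~zmask) >> 7;
	printf("bytemask = %016lx\n", bytemask);	/* 0x0000000000ffffff */
	printf("len      = %ld\n", count_masked_bytes(bytemask));	/* 3 */
	return 0;
}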