author     Borislav Petkov <borislav.petkov@amd.com>  2010-03-05 11:34:46 -0500
committer  H. Peter Anvin <hpa@zytor.com>             2010-04-06 18:52:11 -0400
commit     d61931d89be506372d01a90d1755f6d0a9fafe2d (patch)
tree       652c34238edcb6c558163abc3cd9d6ce7c5f91a5 /arch
parent     1527bc8b928dd1399c3d3467dd47d9ede210978a (diff)
x86: Add optimized popcnt variants
Add support for the hardware version of the Hamming weight function,
popcnt, present in CPUs which advertise it under CPUID, Function
0x0000_0001_ECX[23]. On CPUs which don't support it, we fall back to
the default software versions in lib/hweight.c.

A synthetic benchmark comparing popcnt with __sw_hweight64 showed
almost a 3x speedup on an F10h machine.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
LKML-Reference: <20100318112015.GC11152@aftab>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
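The CPUID check the patch keys off of can be reproduced from userspace. A
minimal sketch, assuming GCC's <cpuid.h> helper (its bit_POPCNT constant is
exactly ECX bit 23); this is an illustration, not part of the patch:

    #include <stdio.h>
    #include <cpuid.h>              /* GCC helper; bit_POPCNT == 1 << 23 */

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* CPUID Function 0x0000_0001: feature bits land in ECX/EDX */
            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 1;

            printf("popcnt: %s\n", (ecx & bit_POPCNT) ? "yes" : "no");
            return 0;
    }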
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/Kconfig                     |  5
-rw-r--r--  arch/x86/include/asm/alternative.h   |  9
-rw-r--r--  arch/x86/include/asm/arch_hweight.h  | 59
-rw-r--r--  arch/x86/include/asm/bitops.h        |  4
4 files changed, 73 insertions, 4 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0eacb1ffb421..89d8c54cdd37 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -238,6 +238,11 @@ config X86_32_LAZY_GS
 	def_bool y
 	depends on X86_32 && !CC_STACKPROTECTOR
 
+config ARCH_HWEIGHT_CFLAGS
+	string
+	default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
+	default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
+
 config KTIME_SCALAR
 	def_bool X86_32
 source "init/Kconfig"
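The ARCH_HWEIGHT_CFLAGS switches exist because the fallback "call
__sw_hweight32" is issued from inline asm that only declares the input and
output registers to the compiler; building the software versions with
-fcall-saved-* makes the remaining normally call-clobbered registers
callee-saved inside them (the lib/ side of the patch is hidden by the
arch-limited diffstat above). For orientation, the 32-bit fallback is the
classic parallel bit count; a sketch of its shape, with the caveat that
lib/hweight.c itself is not part of this diff:

    /* Sketch of the software fallback; treat the exact body as
     * approximate, the real code lives in lib/hweight.c. */
    unsigned int __sw_hweight32(unsigned int w)
    {
            unsigned int res = w - ((w >> 1) & 0x55555555);       /* 2-bit sums */

            res = (res & 0x33333333) + ((res >> 2) & 0x33333333); /* 4-bit sums */
            res = (res + (res >> 4)) & 0x0f0f0f0f;                /* 8-bit sums */
            return (res * 0x01010101) >> 24;  /* add bytes, keep top byte */
    }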
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index b09ec55650b3..67dae51e7fd0 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -39,9 +39,6 @@
 #define LOCK_PREFIX ""
 #endif
 
-/* This must be included *after* the definition of LOCK_PREFIX */
-#include <asm/cpufeature.h>
-
 struct alt_instr {
 	u8 *instr;		/* original instruction */
 	u8 *replacement;
@@ -96,6 +93,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
96 ".previous" 93 ".previous"
97 94
98/* 95/*
96 * This must be included *after* the definition of ALTERNATIVE due to
97 * <asm/arch_hweight.h>
98 */
99#include <asm/cpufeature.h>
100
101/*
99 * Alternative instructions for different CPU types or capabilities. 102 * Alternative instructions for different CPU types or capabilities.
100 * 103 *
101 * This allows to use optimized instructions even on generic binary 104 * This allows to use optimized instructions even on generic binary
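ALTERNATIVE() patches the popcnt opcode over the call instruction at boot
when X86_FEATURE_POPCNT is set, which is why the cpufeature.h include has to
move below its definition: <asm/arch_hweight.h> needs the macro. As a rough
userspace analogue of that pick-once-at-startup dispatch (a function pointer
standing in for apply_alternatives(); hypothetical code, not the kernel
mechanism, which rewrites instructions in place):

    #include <stdio.h>
    #include <cpuid.h>

    static unsigned int sw_hweight32(unsigned int w)
    {
            w = w - ((w >> 1) & 0x55555555);
            w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
            w = (w + (w >> 4)) & 0x0f0f0f0f;
            return (w * 0x01010101) >> 24;
    }

    static unsigned int hw_hweight32(unsigned int w)
    {
            unsigned int res;

            /* same byte encoding the patch uses: popcnt %eax, %eax */
            asm (".byte 0xf3,0x0f,0xb8,0xc0" : "=a" (res) : "a" (w));
            return res;
    }

    static unsigned int (*hweight32_fn)(unsigned int) = sw_hweight32;

    /* resolved once at startup, standing in for boot-time patching */
    __attribute__((constructor)) static void pick_hweight(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) && (ecx & bit_POPCNT))
                    hweight32_fn = hw_hweight32;
    }

    int main(void)
    {
            printf("%u\n", hweight32_fn(0xdeadbeef));  /* prints 24 */
            return 0;
    }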
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
new file mode 100644
index 000000000000..d1fc3c219ae6
--- /dev/null
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -0,0 +1,59 @@
+#ifndef _ASM_X86_HWEIGHT_H
+#define _ASM_X86_HWEIGHT_H
+
+#ifdef CONFIG_64BIT
+/* popcnt %rdi, %rax */
+#define POPCNT ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
+#define REG_IN "D"
+#define REG_OUT "a"
+#else
+/* popcnt %eax, %eax */
+#define POPCNT ".byte 0xf3,0x0f,0xb8,0xc0"
+#define REG_IN "a"
+#define REG_OUT "a"
+#endif
+
+/*
+ * __sw_hweightXX are called from within the alternatives below
+ * and callee-clobbered registers need to be taken care of. See
+ * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
+ * compiler switches.
+ */
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+	unsigned int res = 0;
+
+	asm (ALTERNATIVE("call __sw_hweight32", POPCNT, X86_FEATURE_POPCNT)
+		     : "="REG_OUT (res)
+		     : REG_IN (w));
+
+	return res;
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+	return __arch_hweight32(w & 0xffff);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+	return __arch_hweight32(w & 0xff);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+	unsigned long res = 0;
+
+#ifdef CONFIG_X86_32
+	return  __arch_hweight32((u32)w) +
+		__arch_hweight32((u32)(w >> 32));
+#else
+	asm (ALTERNATIVE("call __sw_hweight64", POPCNT, X86_FEATURE_POPCNT)
+		     : "="REG_OUT (res)
+		     : REG_IN (w));
+#endif /* CONFIG_X86_32 */
+
+	return res;
+}
+
+#endif
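Note the register choice: on 64-bit, the input constraint "D" (%rdi) and
output "a" (%rax) match the System V calling convention, so the patched-in
popcnt and the fallback "call __sw_hweight64" agree on where the argument and
result live. A userspace check of the hard-coded 64-bit encoding (assumes a
popcnt-capable x86-64 CPU; illustration only, not part of the patch):

    #include <stdio.h>

    static unsigned long hw_hweight64(unsigned long w)
    {
            unsigned long res;

            /* popcnt %rdi, %rax: input in "D" (%rdi), result in "a" (%rax) */
            asm (".byte 0xf3,0x48,0x0f,0xb8,0xc7" : "=a" (res) : "D" (w));
            return res;
    }

    int main(void)
    {
            printf("%lu\n", hw_hweight64(~0UL));    /* prints 64 */
            return 0;
    }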
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 02b47a603fc8..545776efeb16 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -444,7 +444,9 @@ static inline int fls(int x)
 
 #define ARCH_HAS_FAST_MULTIPLIER 1
 
-#include <asm-generic/bitops/hweight.h>
+#include <asm/arch_hweight.h>
+
+#include <asm-generic/bitops/const_hweight.h>
 
 #endif /* __KERNEL__ */
 
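The bitops.h hunk splits hweight in two: <asm-generic/bitops/const_hweight.h>
folds constant arguments at compile time, while the __arch_hweight*() routines
above handle runtime values through the ALTERNATIVE. A hypothetical sketch of
that dispatch (names mirror the kernel's, but the generic header spells the
constant case out with arithmetic macros rather than a builtin):

    /* illustration only; not the generic header's actual code */
    #define hweight32(w)                                                  \
            (__builtin_constant_p(w)                                      \
             ? __builtin_popcount(w)   /* folded at compile time */       \
             : __arch_hweight32(w))    /* runtime ALTERNATIVE path */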