author     David Howells <dhowells@redhat.com>      2011-12-13 09:56:54 -0500
committer  H. Peter Anvin <hpa@linux.intel.com>     2011-12-15 18:16:49 -0500
commit     ca3d30cc02f780f68771087040ce935add6ba2b7 (patch)
tree       c87ace9a026497c20a7a9b5d6f25085e56c0f3c4 /arch/x86
parent     83d99df7c4bf37176d8c7b199e3b129a51fa04c8 (diff)
x86_64, asm: Optimise fls(), ffs() and fls64()
fls(N), ffs(N) and fls64(N) can be optimised on x86_64.  Currently they use a
CMOV instruction after the BSR/BSF to set the destination register to -1 if
the value to be scanned was 0 (in which case BSR/BSF set the Z flag).

Instead, according to the AMD64 specification, we can make use of the fact
that BSR/BSF doesn't modify its output register if its input is 0.  By
preloading the output with -1 and incrementing the result, we achieve the
desired result without the need for a conditional check.

The Intel x86_64 specification, however, says that the result of BSR/BSF in
such a case is undefined.  That said, when queried, one of the Intel CPU
architects said that the behaviour on all Intel CPUs is that:

 (1) with BSRQ/BSFQ, the 64-bit destination register is written with its
     original value if the source is 0, thus, in essence, giving the effect
     we want.  And,

 (2) with BSRL/BSFL, the lower half of the 64-bit destination register is
     written with its original value if the source is 0, and the upper half
     is cleared, thus giving us the effect we want (we return a 4-byte int).

Further, it was indicated that they (Intel) are unlikely to get away with
changing the behaviour.

It might be possible to optimise the 32-bit versions of these functions, but
there's a lot more variation, and so the effective non-destructive property
of BSRL/BSFL cannot be relied on.

[ hpa: specifically, some 486 chips are known to NOT have this property. ]

I have benchmarked these functions on my Core2 Duo test machine using the
following program:

	#include <stdlib.h>
	#include <stdio.h>

	#ifndef __x86_64__
	#error
	#endif

	#define PAGE_SHIFT 12

	typedef unsigned long long __u64, u64;
	typedef unsigned int __u32, u32;

	#define noinline	__attribute__((noinline))

	static __always_inline int fls64(__u64 x)
	{
		long bitpos = -1;

		asm("bsrq %1,%0"
		    : "+r" (bitpos)
		    : "rm" (x));
		return bitpos + 1;
	}

	static inline unsigned long __fls(unsigned long word)
	{
		asm("bsr %1,%0"
		    : "=r" (word)
		    : "rm" (word));
		return word;
	}

	static __always_inline int old_fls64(__u64 x)
	{
		if (x == 0)
			return 0;
		return __fls(x) + 1;
	}

	static noinline // __attribute__((const))
	int old_get_order(unsigned long size)
	{
		int order;

		size = (size - 1) >> (PAGE_SHIFT - 1);
		order = -1;
		do {
			size >>= 1;
			order++;
		} while (size);
		return order;
	}

	static inline __attribute__((const))
	int get_order_old_fls64(unsigned long size)
	{
		int order;

		size--;
		size >>= PAGE_SHIFT;
		order = old_fls64(size);
		return order;
	}

	static inline __attribute__((const))
	int get_order(unsigned long size)
	{
		int order;

		size--;
		size >>= PAGE_SHIFT;
		order = fls64(size);
		return order;
	}

	unsigned long prevent_optimise_out;

	static noinline unsigned long test_old_get_order(void)
	{
		unsigned long n, total = 0;
		long rep, loop;

		for (rep = 1000000; rep > 0; rep--) {
			for (loop = 0; loop <= 16384; loop += 4) {
				n = 1UL << loop;
				total += old_get_order(n);
			}
		}
		return total;
	}

	static noinline unsigned long test_get_order_old_fls64(void)
	{
		unsigned long n, total = 0;
		long rep, loop;

		for (rep = 1000000; rep > 0; rep--) {
			for (loop = 0; loop <= 16384; loop += 4) {
				n = 1UL << loop;
				total += get_order_old_fls64(n);
			}
		}
		return total;
	}

	static noinline unsigned long test_get_order(void)
	{
		unsigned long n, total = 0;
		long rep, loop;

		for (rep = 1000000; rep > 0; rep--) {
			for (loop = 0; loop <= 16384; loop += 4) {
				n = 1UL << loop;
				total += get_order(n);
			}
		}
		return total;
	}

	int main(int argc, char **argv)
	{
		unsigned long total;

		switch (argc) {
		case 1:
			total = test_old_get_order();
			break;
		case 2:
			total = test_get_order_old_fls64();
			break;
		default:
			total = test_get_order();
			break;
		}
		prevent_optimise_out = total;
		return 0;
	}

This allows me to test the use of the old fls64() implementation and the new
fls64() implementation and also to contrast these to the out-of-line
loop-based implementation of get_order().  The results were:

	warthog>time ./get_order
	real    1m37.191s
	user    1m36.313s
	sys     0m0.861s
	warthog>time ./get_order x
	real    0m16.892s
	user    0m16.586s
	sys     0m0.287s
	warthog>time ./get_order x x
	real    0m7.731s
	user    0m7.727s
	sys     0m0.002s

Using the current upstream fls64() as a basis for an inlined get_order() [the
second result above] is much faster than using the current out-of-line
loop-based get_order() [the first result above].

Using my optimised inline fls64()-based get_order() [the third result above]
is even faster still.

[ hpa: changed the selection of 32 vs 64 bits to use CONFIG_X86_64 instead
  of comparing BITS_PER_LONG, updated comments, rebased manually on top of
  83d99df7c4bf x86, bitops: Move fls64.h inside __KERNEL__ ]

Signed-off-by: David Howells <dhowells@redhat.com>
Link: http://lkml.kernel.org/r/20111213145654.14362.39868.stgit@warthog.procyon.org.uk
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
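As a point of reference, here is a minimal user-space sketch (not part of the
commit; the names fls_cmov and fls_preload are illustrative) contrasting the
CMOV sequence this patch replaces with the preload-and-increment sequence it
introduces.  It preloads a plain int with -1, a simplification of the
kernel's "long tmp" form, and assumes GCC on x86_64:

	#include <stdio.h>

	/* Old approach: BSRL, then CMOVZ forces the result to -1 when the
	 * input was 0 (BSRL sets ZF in that case). */
	static inline int fls_cmov(int x)
	{
		int r;

		asm("bsrl %1,%0\n\t"
		    "cmovzl %2,%0"
		    : "=&r" (r)
		    : "rm" (x), "rm" (-1));
		return r + 1;
	}

	/* New approach: preload the destination with -1 and rely on BSRL
	 * leaving it untouched when the input is 0 (documented for AMD64,
	 * stated informally for Intel CPUs per the commit message). */
	static inline int fls_preload(int x)
	{
		int r = -1;

		asm("bsrl %1,%0"
		    : "+r" (r)
		    : "rm" (x));
		return r + 1;
	}

	int main(void)
	{
		const int probes[] = { 0, 1, 2, 0x40000000, -1 };
		unsigned int i;

		for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
			printf("fls(%#x): cmov=%d preload=%d\n",
			       (unsigned int)probes[i],
			       fls_cmov(probes[i]), fls_preload(probes[i]));
		return 0;
	}

Built with gcc -O2 on x86_64, both helpers should agree on every probe value,
including the zero input that motivates the change.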
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/bitops.h | 67
1 file changed, 62 insertions, 5 deletions
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 4a6235b053cb..b97596e2b68c 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -397,10 +397,25 @@ static inline unsigned long __fls(unsigned long word)
 static inline int ffs(int x)
 {
 	int r;
-#ifdef CONFIG_X86_CMOV
+
+#ifdef CONFIG_X86_64
+	/*
+	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
+	 * dest reg is undefined if x==0, but their CPU architect says its
+	 * value is written to set it to the same as before, except that the
+	 * top 32 bits will be cleared.
+	 *
+	 * We cannot do this on 32 bits because at the very least some
+	 * 486 CPUs did not behave this way.
+	 */
+	long tmp = -1;
+	asm("bsfl %1,%0"
+	    : "=r" (r)
+	    : "rm" (x), "0" (tmp));
+#elif defined(CONFIG_X86_CMOV)
 	asm("bsfl %1,%0\n\t"
 	    "cmovzl %2,%0"
-	    : "=r" (r) : "rm" (x), "r" (-1));
+	    : "=&r" (r) : "rm" (x), "r" (-1));
 #else
 	asm("bsfl %1,%0\n\t"
 	    "jnz 1f\n\t"
@@ -424,7 +439,22 @@ static inline int ffs(int x)
 static inline int fls(int x)
 {
 	int r;
-#ifdef CONFIG_X86_CMOV
+
+#ifdef CONFIG_X86_64
+	/*
+	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
+	 * dest reg is undefined if x==0, but their CPU architect says its
+	 * value is written to set it to the same as before, except that the
+	 * top 32 bits will be cleared.
+	 *
+	 * We cannot do this on 32 bits because at the very least some
+	 * 486 CPUs did not behave this way.
+	 */
+	long tmp = -1;
+	asm("bsrl %1,%0"
+	    : "=r" (r)
+	    : "rm" (x), "0" (tmp));
+#elif defined(CONFIG_X86_CMOV)
 	asm("bsrl %1,%0\n\t"
 	    "cmovzl %2,%0"
 	    : "=&r" (r) : "rm" (x), "rm" (-1));
@@ -437,6 +467,35 @@ static inline int fls(int x)
 	return r + 1;
 }
 
+/**
+ * fls64 - find last set bit in a 64-bit word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
+ */
+#ifdef CONFIG_X86_64
+static __always_inline int fls64(__u64 x)
+{
+	long bitpos = -1;
+	/*
+	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
+	 * dest reg is undefined if x==0, but their CPU architect says its
+	 * value is written to set it to the same as before.
+	 */
+	asm("bsrq %1,%0"
+	    : "+r" (bitpos)
+	    : "rm" (x));
+	return bitpos + 1;
+}
+#else
+#include <asm-generic/bitops/fls64.h>
+#endif
+
 #include <asm-generic/bitops/find.h>
 
 #include <asm-generic/bitops/sched.h>
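The kerneldoc above fixes the fls64() contract: 0 for a zero input, otherwise
the 1-based position of the most significant set bit, up to 64.  A small
user-space check of those boundary cases, reusing the BSRQ preload from the
new code (a sketch under the same x86_64 assumption, not part of the patch):

	#include <assert.h>

	typedef unsigned long long __u64;

	/* Same pattern as the new fls64(): preload -1, BSRQ leaves it in
	 * place for a zero input, so -1 + 1 == 0. */
	static inline int fls64_check(__u64 x)
	{
		long bitpos = -1;

		asm("bsrq %1,%0"
		    : "+r" (bitpos)
		    : "rm" (x));
		return bitpos + 1;
	}

	int main(void)
	{
		assert(fls64_check(0) == 0);
		assert(fls64_check(1) == 1);
		assert(fls64_check(0x8000000000000000ULL) == 64);
		return 0;
	}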
@@ -447,8 +506,6 @@ static inline int fls(int x)
 
 #include <asm-generic/bitops/const_hweight.h>
 
-#include <asm-generic/bitops/fls64.h>
-
 #include <asm-generic/bitops/le.h>
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>