author	Matthew Wilcox <matthew@wil.cx>	2008-02-06 04:36:14 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-06 13:41:00 -0500
commit	4e701482d1d7b90c358e2bd244bb71623f767120 (patch)
tree	a054f87e31645eba4d9e2c76396c54afbe6fa895 /include
parent	d9ae90ac4bdce769ddb27c2e24c3351a30c3daf8 (diff)
hash: add explicit u32 and u64 versions of hash
The 32-bit version is more efficient (and apparently gives better hash
results than the 64-bit version), so users who are only hashing a 32-bit
quantity can now opt to use the 32-bit version explicitly, rather than
promoting to a long.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
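For illustration only (not part of this commit), a caller that keys a small
table off a 32-bit value can now call hash_32() directly instead of widening
the key to a long for hash_long(); the helper below is a hypothetical sketch:

#include <linux/hash.h>

/* Hypothetical caller: index a 1024-bucket (2^10) table by a 32-bit key.
 * On 64-bit kernels this avoids promoting the key to unsigned long just
 * to feed it to hash_long(). */
static inline unsigned int example_bucket(u32 key)
{
	return hash_32(key, 10);	/* result fits in [0, 1023] */
}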
Diffstat (limited to 'include')
-rw-r--r--	include/linux/hash.h	42
1 files changed, 27 insertions, 15 deletions
diff --git a/include/linux/hash.h b/include/linux/hash.h
index acf17bb8e7f9..06d25c189cc5 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -1,6 +1,6 @@
 #ifndef _LINUX_HASH_H
 #define _LINUX_HASH_H
-/* Fast hashing routine for a long.
+/* Fast hashing routine for ints, longs and pointers.
    (C) 2002 William Lee Irwin III, IBM */
 
 /*
@@ -13,23 +13,30 @@
  * them can use shifts and additions instead of multiplications for
  * machines where multiplications are slow.
  */
-#if BITS_PER_LONG == 32
+
+#include <asm/types.h>
+
 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e370001UL
-#elif BITS_PER_LONG == 64
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
 /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
+#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
+
+#if BITS_PER_LONG == 32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
+#define hash_long(val, bits) hash_32(val, bits)
+#elif BITS_PER_LONG == 64
+#define hash_long(val, bits) hash_64(val, bits)
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
 #else
-#error Define GOLDEN_RATIO_PRIME for your wordsize.
+#error Wordsize not 32 or 64
 #endif
 
-static inline unsigned long hash_long(unsigned long val, unsigned int bits)
+static inline u64 hash_64(u64 val, unsigned int bits)
 {
-	unsigned long hash = val;
+	u64 hash = val;
 
-#if BITS_PER_LONG == 64
 	/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
-	unsigned long n = hash;
+	u64 n = hash;
 	n <<= 18;
 	hash -= n;
 	n <<= 33;
@@ -42,15 +49,20 @@ static inline unsigned long hash_long(unsigned long val, unsigned int bits)
 	hash += n;
 	n <<= 2;
 	hash += n;
-#else
+
+	/* High bits are more random, so use them. */
+	return hash >> (64 - bits);
+}
+
+static inline u32 hash_32(u32 val, unsigned int bits)
+{
 	/* On some cpus multiply is faster, on others gcc will do shifts */
-	hash *= GOLDEN_RATIO_PRIME;
-#endif
+	u32 hash = val * GOLDEN_RATIO_PRIME_32;
 
 	/* High bits are more random, so use them. */
-	return hash >> (BITS_PER_LONG - bits);
+	return hash >> (32 - bits);
 }
 
 static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
 {
 	return hash_long((unsigned long)ptr, bits);
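For readers without the kernel headers at hand, a minimal user-space sketch
of the same multiplicative scheme (the shift/add sequence in hash_64() above
is just an expanded multiply by GOLDEN_RATIO_PRIME_64, as its comment notes);
illustrative code only, not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_PRIME_32 0x9e370001U
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL

/* Multiply by the golden-ratio prime for the word size, then keep the
 * high `bits` bits, which are the best mixed. */
static inline uint32_t hash_32(uint32_t val, unsigned int bits)
{
	return (uint32_t)(val * GOLDEN_RATIO_PRIME_32) >> (32 - bits);
}

static inline uint64_t hash_64(uint64_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_PRIME_64) >> (64 - bits);
}

int main(void)
{
	/* Bucket a few small keys into a 2^10-entry table. */
	for (uint32_t key = 1; key <= 4; key++)
		printf("hash_32(%u, 10) = %u, hash_64(%u, 10) = %llu\n",
		       key, hash_32(key, 10),
		       key, (unsigned long long)hash_64(key, 10));
	return 0;
}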