 arch/Kconfig                          |   8
 arch/h8300/Kconfig                    |   1
 arch/h8300/include/asm/hash.h         |  53
 arch/m68k/Kconfig.cpu                 |   1
 arch/m68k/include/asm/hash.h          |  59
 arch/microblaze/Kconfig               |   1
 arch/microblaze/include/asm/hash.h    |  81
 drivers/media/usb/dvb-usb-v2/af9015.c |   2
 fs/dcache.c                           |   3
 fs/namei.c                            | 162
 include/linux/dcache.h                |  27
 include/linux/hash.h                  | 108
 include/linux/stringhash.h            |  76
 include/linux/sunrpc/svcauth.h        |  40
 lib/Kconfig.debug                     |  11
 lib/Makefile                          |   1
 lib/test_hash.c                       | 250
 17 files changed, 734 insertions(+), 150 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index b16e74e4b5af..d794384a0404 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -598,6 +598,14 @@ config HAVE_STACK_VALIDATION
	  Architecture supports the 'objtool check' host tool command, which
	  performs compile-time stack metadata validation.

+config HAVE_ARCH_HASH
+	bool
+	default n
+	help
+	  If this is set, the architecture provides an <asm/hash.h>
+	  file with platform-specific implementations of some functions
+	  in <linux/hash.h> or fs/namei.c.
+
 #
 # ABI hall of shame
 #
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index aa232de2d4bc..3ae852507e57 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -20,6 +20,7 @@ config H8300
	select HAVE_KERNEL_GZIP
	select HAVE_KERNEL_LZO
	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_HASH
	select CPU_NO_EFFICIENT_FFS

 config RWSEM_GENERIC_SPINLOCK
diff --git a/arch/h8300/include/asm/hash.h b/arch/h8300/include/asm/hash.h
new file mode 100644
index 000000000000..04cfbd2bd850
--- /dev/null
+++ b/arch/h8300/include/asm/hash.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_HASH_H
+#define _ASM_HASH_H
+
+/*
+ * The later H8SX models have a 32x32-bit multiply, but the H8/300H
+ * and H8S have only 16x16->32.  Since it's tolerably compact, this is
+ * basically an inlined version of the __mulsi3 code.  Since the inputs
+ * are not expected to be small, it's also simplified by skipping the
+ * early-out checks.
+ *
+ * (Since neither CPU has any multi-bit shift instructions, a
+ * shift-and-add version is a non-starter.)
+ *
+ * TODO: come up with an arch-specific version of the hashing in fs/namei.c,
+ * since that is heavily dependent on rotates.  Which, as mentioned, suck
+ * horribly on H8.
+ */
+
+#if defined(CONFIG_CPU_H300H) || defined(CONFIG_CPU_H8S)
+
+#define HAVE_ARCH__HASH_32 1
+
+/*
+ * Multiply by k = 0x61C88647.  Fitting this into three registers requires
+ * one extra instruction, but reducing register pressure will probably
+ * make that back and then some.
+ *
+ * GCC asm note: %e1 is the high half of operand %1, while %f1 is the
+ * low half.  So if %1 is er4, then %e1 is e4 and %f1 is r4.
+ *
+ * This has been designed to modify x in place, since that's the most
+ * common usage, but preserve k, since hash_64() makes two calls in
+ * quick succession.
+ */
+static inline u32 __attribute_const__ __hash_32(u32 x)
+{
+	u32 temp;
+
+	asm(   "mov.w	%e1,%f0"
+	"\n	mulxu.w	%f2,%0"		/* klow * xhigh */
+	"\n	mov.w	%f0,%e1"	/* The extra instruction */
+	"\n	mov.w	%f1,%f0"
+	"\n	mulxu.w	%e2,%0"		/* khigh * xlow */
+	"\n	add.w	%e1,%f0"
+	"\n	mulxu.w	%f2,%1"		/* klow * xlow */
+	"\n	add.w	%f0,%e1"
+	: "=&r" (temp), "=r" (x)
+	: "%r" (GOLDEN_RATIO_32), "1" (x));
+	return x;
+}
+
+#endif
+#endif /* _ASM_HASH_H */
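
The H8 asm above is easier to audit against a portable model: a 32x32-bit
product, reduced mod 2^32, equals the low 16x16 partial product plus both
cross products shifted up 16 bits (the high*high term falls entirely out of
the low 32 bits).  Here is a minimal userspace sketch of that identity; the
harness (main/assert loop) and the name hash32_by_halves are illustrative,
not part of the patch.

	#include <assert.h>
	#include <stdint.h>

	#define GOLDEN_RATIO_32 0x61C88647u

	/* Model of the H8 sequence: a 32x32->32 multiply built from
	 * 16x16->32 partial products, as the inlined __mulsi3 does. */
	static uint32_t hash32_by_halves(uint32_t x)
	{
		uint32_t k = GOLDEN_RATIO_32;
		uint32_t lo = (x & 0xffff) * (k & 0xffff);	/* klow * xlow */
		uint32_t cross = (x >> 16) * (k & 0xffff)	/* klow * xhigh */
			       + (x & 0xffff) * (k >> 16);	/* khigh * xlow */

		return lo + (cross << 16);	/* khigh * xhigh overflows out */
	}

	int main(void)
	{
		uint32_t x;

		for (x = 1; x < 3000000; x += 7)	/* spot check */
			assert(hash32_by_halves(x) == x * GOLDEN_RATIO_32);
		return 0;
	}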
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 8ace920ca24a..967260f2eb1c 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -41,6 +41,7 @@ config M68000
	select CPU_HAS_NO_UNALIGNED
	select GENERIC_CSUM
	select CPU_NO_EFFICIENT_FFS
+	select HAVE_ARCH_HASH
	help
	  The Freescale (was Motorola) 68000 CPU is the first generation of
	  the well known M68K family of processors.  The CPU core as well as
diff --git a/arch/m68k/include/asm/hash.h b/arch/m68k/include/asm/hash.h
new file mode 100644
index 000000000000..6407af84a994
--- /dev/null
+++ b/arch/m68k/include/asm/hash.h
@@ -0,0 +1,59 @@
+#ifndef _ASM_HASH_H
+#define _ASM_HASH_H
+
+/*
+ * If CONFIG_M68000=y (original mc68000/010), this file is #included
+ * to work around the lack of a MULU.L instruction.
+ */
+
+#define HAVE_ARCH__HASH_32 1
+/*
+ * While it would be legal to substitute a different hash operation
+ * entirely, let's keep it simple and just use an optimized multiply
+ * by GOLDEN_RATIO_32 = 0x61C88647.
+ *
+ * The best way to do that appears to be to multiply by 0x8647 with
+ * shifts and adds, and use mulu.w to multiply the high half by 0x61C8.
+ *
+ * Because the 68000 has multi-cycle shifts, this addition chain is
+ * chosen to minimise the shift distances.
+ *
+ * Despite every attempt to spoon-feed it simple operations, GCC
+ * 6.1.1 doggedly insists on doing annoying things like converting
+ * "lsl.l #2,<reg>" (12 cycles) to two adds (8+8 cycles).
+ *
+ * It also likes to notice two shifts in a row, like "a = x << 2" and
+ * "a <<= 7", and convert that to "a = x << 9".  But shifts longer
+ * than 8 bits are extra-slow on m68k, so that's a lose.
+ *
+ * Since the 68000 is a very simple in-order processor with no
+ * instruction scheduling effects on execution time, we can safely
+ * take it out of GCC's hands and write one big asm() block.
+ *
+ * Without calling overhead, this operation is 30 bytes (14 instructions
+ * plus one immediate constant) and 166 cycles.
+ *
+ * (Because %2 is fetched twice, it can't be postincrement, and thus it
+ * can't be a fully general "g" or "m".  Register is preferred, but
+ * offsettable memory or immediate will work.)
+ */
+static inline u32 __attribute_const__ __hash_32(u32 x)
+{
+	u32 a, b;
+
+	asm(   "move.l	%2,%0"	/* a = x * 0x0001 */
+	"\n	lsl.l	#2,%0"	/* a = x * 0x0004 */
+	"\n	move.l	%0,%1"
+	"\n	lsl.l	#7,%0"	/* a = x * 0x0200 */
+	"\n	add.l	%2,%0"	/* a = x * 0x0201 */
+	"\n	add.l	%0,%1"	/* b = x * 0x0205 */
+	"\n	add.l	%0,%0"	/* a = x * 0x0402 */
+	"\n	add.l	%0,%1"	/* b = x * 0x0607 */
+	"\n	lsl.l	#5,%0"	/* a = x * 0x8040 */
+	: "=&d,d" (a), "=&r,r" (b)
+	: "r,roi?" (x));	/* a+b = x*0x8647 */
+
+	return ((u16)(x*0x61c8) << 16) + a + b;
+}
+
+#endif	/* _ASM_HASH_H */
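
The addition chain in the m68k asm can likewise be checked in C by
following the per-step comments; this is an illustrative userspace
rendering (names and harness ours), not the kernel code:

	#include <assert.h>
	#include <stdint.h>

	#define GOLDEN_RATIO_32 0x61C88647u

	/* C model of the m68k asm: a + b accumulates x * 0x8647, and the
	 * 16x16 mulu.w supplies the 0x61C8 high half. */
	static uint32_t hash32_m68k_model(uint32_t x)
	{
		uint32_t a, b;

		a = x << 2;	/* a = x * 0x0004 */
		b = a;		/* b = x * 0x0004 */
		a <<= 7;	/* a = x * 0x0200 */
		a += x;		/* a = x * 0x0201 */
		b += a;		/* b = x * 0x0205 */
		a += a;		/* a = x * 0x0402 */
		b += a;		/* b = x * 0x0607 */
		a <<= 5;	/* a = x * 0x8040; a + b = x * 0x8647 */

		return ((uint32_t)(uint16_t)(x * 0x61c8) << 16) + a + b;
	}

	int main(void)
	{
		uint32_t x;

		for (x = 1; x < 3000000; x += 7)	/* spot check */
			assert(hash32_m68k_model(x) == x * GOLDEN_RATIO_32);
		return 0;
	}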
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index f17c3a4fb697..636e0720fb20 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -16,6 +16,7 @@ config MICROBLAZE
	select GENERIC_IRQ_SHOW
	select GENERIC_PCI_IOMAP
	select GENERIC_SCHED_CLOCK
+	select HAVE_ARCH_HASH
	select HAVE_ARCH_KGDB
	select HAVE_DEBUG_KMEMLEAK
	select HAVE_DMA_API_DEBUG
diff --git a/arch/microblaze/include/asm/hash.h b/arch/microblaze/include/asm/hash.h
new file mode 100644
index 000000000000..753513ae8cb0
--- /dev/null
+++ b/arch/microblaze/include/asm/hash.h
@@ -0,0 +1,81 @@
+#ifndef _ASM_HASH_H
+#define _ASM_HASH_H
+
+/*
+ * Fortunately, most people who want to run Linux on Microblaze enable
+ * both multiplier and barrel shifter, but omitting them is technically
+ * a supported configuration.
+ *
+ * With just a barrel shifter, we can implement an efficient constant
+ * multiply using shifts and adds.  GCC can find a 9-step solution, but
+ * this 6-step solution was found by Yevgen Voronenko's implementation
+ * of the Hcub algorithm at http://spiral.ece.cmu.edu/mcm/gen.html.
+ *
+ * That software is really not designed for a single multiplier this large,
+ * but if you run it enough times with different seeds, it'll find several
+ * 6-shift, 6-add sequences for computing x * 0x61C88647.  They are all
+ *	c = (x << 19) + x;
+ *	a = (x << 9) + c;
+ *	b = (x << 23) + a;
+ *	return (a<<11) + (b<<6) + (c<<3) - b;
+ * with variations on the order of the final add.
+ *
+ * Without even a shifter, it's hopeless; any hash function will suck.
+ */
+
+#if CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL == 0
+
+#define HAVE_ARCH__HASH_32 1
+
+/* Multiply by GOLDEN_RATIO_32 = 0x61C88647 */
+static inline u32 __attribute_const__ __hash_32(u32 a)
+{
+#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL
+	unsigned int b, c;
+
+	/* Phase 1: Compute three intermediate values */
+	b =  a << 23;
+	c = (a << 19) + a;
+	a = (a <<  9) + c;
+	b += a;
+
+	/* Phase 2: Compute (a << 11) + (b << 6) + (c << 3) - b */
+	a <<= 5;
+	a += b;		/* (a << 5) + b */
+	a <<= 3;
+	a += c;		/* (a << 8) + (b << 3) + c */
+	a <<= 3;
+	return a - b;	/* (a << 11) + (b << 6) + (c << 3) - b */
+#else
+	/*
+	 * "This is really going to hurt."
+	 *
+	 * Without a barrel shifter, left shifts are implemented as
+	 * repeated additions, and the best we can do is an optimal
+	 * addition-subtraction chain.  This one is not known to be
+	 * optimal, but at 37 steps, it's decent for a 31-bit multiplier.
+	 *
+	 * Question: given its size (37*4 = 148 bytes per instance),
+	 * and slowness, is this worth having inline?
+	 */
+	unsigned int b, c, d;
+
+	b = a << 4;	/* 4    */
+	c = b << 1;	/* 1  5 */
+	b += a;		/* 1  6 */
+	c += b;		/* 1  7 */
+	c <<= 3;	/* 3 10 */
+	c -= a;		/* 1 11 */
+	d = c << 7;	/* 7 18 */
+	d += b;		/* 1 19 */
+	d <<= 8;	/* 8 27 */
+	d += a;		/* 1 28 */
+	d <<= 1;	/* 1 29 */
+	d += b;		/* 1 30 */
+	d <<= 6;	/* 6 36 */
+	return d + c;	/* 1 37 total instructions */
+#endif
+}
+
+#endif /* !CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL */
+#endif /* _ASM_HASH_H */
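
The 6-shift, 6-add sequence quoted in the comment is easy to verify
against a plain multiply; a standalone sketch (harness ours):

	#include <assert.h>
	#include <stdint.h>

	#define GOLDEN_RATIO_32 0x61C88647u

	/* Direct C rendering of the Hcub-derived sequence above. */
	static uint32_t hash32_hcub(uint32_t x)
	{
		uint32_t a, b, c;

		c = (x << 19) + x;
		a = (x << 9) + c;
		b = (x << 23) + a;
		return (a << 11) + (b << 6) + (c << 3) - b;
	}

	int main(void)
	{
		uint32_t x;

		for (x = 1; x < 3000000; x += 7)	/* spot check */
			assert(hash32_hcub(x) == x * GOLDEN_RATIO_32);
		return 0;
	}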
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 95a7388e89d4..09e0f58f6bb7 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -398,6 +398,8 @@ error:
 }

 #define AF9015_EEPROM_SIZE 256
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL

 /* hash (and dump) eeprom */
 static int af9015_eeprom_hash(struct dvb_usb_device *d)
diff --git a/fs/dcache.c b/fs/dcache.c
index c622872c12c5..ad4a542e9bab 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1670,8 +1670,7 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name)
 	struct qstr q;

 	q.name = name;
-	q.len = strlen(name);
-	q.hash = full_name_hash(q.name, q.len);
+	q.hash_len = hashlen_string(name);
 	return d_alloc(parent, &q);
 }
 EXPORT_SYMBOL(d_alloc_name);
diff --git a/fs/namei.c b/fs/namei.c
index 15b124c18ed8..e7bf99d387d0 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -35,6 +35,7 @@
 #include <linux/fs_struct.h>
 #include <linux/posix_acl.h>
 #include <linux/hash.h>
+#include <linux/bitops.h>
 #include <asm/uaccess.h>

 #include "internal.h"
@@ -1797,74 +1798,144 @@ static int walk_component(struct nameidata *nd, int flags)

 #include <asm/word-at-a-time.h>

-#ifdef CONFIG_64BIT
+#ifdef HASH_MIX

-static inline unsigned int fold_hash(unsigned long hash)
-{
-	return hash_64(hash, 32);
-}
+/* Architecture provides HASH_MIX and fold_hash() in <asm/hash.h> */

+#elif defined(CONFIG_64BIT)
 /*
- * This is George Marsaglia's XORSHIFT generator.
- * It implements a maximum-period LFSR in only a few
- * instructions.  It also has the property (required
- * by hash_name()) that mix_hash(0) = 0.
+ * Register pressure in the mixing function is an issue, particularly
+ * on 32-bit x86, but almost any function requires one state value and
+ * one temporary.  Instead, use a function designed for two state values
+ * and no temporaries.
+ *
+ * This function cannot create a collision in only two iterations, so
+ * we have two iterations to achieve avalanche.  In those two iterations,
+ * we have six layers of mixing, which is enough to spread one bit's
+ * influence out to 2^6 = 64 state bits.
+ *
+ * Rotate constants are scored by considering either 64 one-bit input
+ * deltas or 64*63/2 = 2016 two-bit input deltas, and finding the
+ * probability of that delta causing a change to each of the 128 output
+ * bits, using a sample of random initial states.
+ *
+ * The Shannon entropy of the computed probabilities is then summed
+ * to produce a score.  Ideally, any input change has a 50% chance of
+ * toggling any given output bit.
+ *
+ * Mixing scores (in bits) for (12,45):
+ * Input delta: 1-bit      2-bit
+ * 1 round:     713.3    42542.6
+ * 2 rounds:   2753.7   140389.8
+ * 3 rounds:   5954.1   233458.2
+ * 4 rounds:   7862.6   256672.2
+ * Perfect:    8192     258048
+ *            (64*128) (64*63/2 * 128)
  */
-static inline unsigned long mix_hash(unsigned long hash)
+#define HASH_MIX(x, y, a)	\
+	(	x ^= (a),	\
+	y ^= x,	x = rol64(x,12),\
+	x += y,	y = rol64(y,45),\
+	y *= 9			)
+
+/*
+ * Fold two longs into one 32-bit hash value.  This must be fast, but
+ * latency isn't quite as critical, as there is a fair bit of additional
+ * work done before the hash value is used.
+ */
+static inline unsigned int fold_hash(unsigned long x, unsigned long y)
 {
-	hash ^= hash << 13;
-	hash ^= hash >> 7;
-	hash ^= hash << 17;
-	return hash;
+	y ^= x * GOLDEN_RATIO_64;
+	y *= GOLDEN_RATIO_64;
+	return y >> 32;
 }

 #else	/* 32-bit case */

-#define fold_hash(x) (x)
+/*
+ * Mixing scores (in bits) for (7,20):
+ * Input delta: 1-bit     2-bit
+ * 1 round:     330.3    9201.6
+ * 2 rounds:   1246.4   25475.4
+ * 3 rounds:   1907.1   31295.1
+ * 4 rounds:   2042.3   31718.6
+ * Perfect:    2048     31744
+ *            (32*64)  (32*31/2 * 64)
+ */
+#define HASH_MIX(x, y, a)	\
+	(	x ^= (a),	\
+	y ^= x,	x = rol32(x, 7),\
+	x += y,	y = rol32(y,20),\
+	y *= 9			)

-static inline unsigned long mix_hash(unsigned long hash)
+static inline unsigned int fold_hash(unsigned long x, unsigned long y)
 {
-	hash ^= hash << 13;
-	hash ^= hash >> 17;
-	hash ^= hash << 5;
-	return hash;
+	/* Use arch-optimized multiply if one exists */
+	return __hash_32(y ^ __hash_32(x));
 }

 #endif

-unsigned int full_name_hash(const unsigned char *name, unsigned int len)
+/*
+ * Return the hash of a string of known length.  This is carefully
+ * designed to match hash_name(), which is the more critical function.
+ * In particular, we must end by hashing a final word containing 0..7
+ * payload bytes, to match the way that hash_name() iterates until it
+ * finds the delimiter after the name.
+ */
+unsigned int full_name_hash(const char *name, unsigned int len)
 {
-	unsigned long a, hash = 0;
+	unsigned long a, x = 0, y = 0;

 	for (;;) {
+		if (!len)
+			goto done;
 		a = load_unaligned_zeropad(name);
 		if (len < sizeof(unsigned long))
 			break;
-		hash = mix_hash(hash + a);
+		HASH_MIX(x, y, a);
 		name += sizeof(unsigned long);
 		len -= sizeof(unsigned long);
-		if (!len)
-			goto done;
 	}
-	hash += a & bytemask_from_count(len);
+	x ^= a & bytemask_from_count(len);
 done:
-	return fold_hash(hash);
+	return fold_hash(x, y);
 }
 EXPORT_SYMBOL(full_name_hash);

+/* Return the "hash_len" (hash and length) of a null-terminated string */
+u64 hashlen_string(const char *name)
+{
+	unsigned long a = 0, x = 0, y = 0, adata, mask, len;
+	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+
+	len = -sizeof(unsigned long);
+	do {
+		HASH_MIX(x, y, a);
+		len += sizeof(unsigned long);
+		a = load_unaligned_zeropad(name+len);
+	} while (!has_zero(a, &adata, &constants));
+
+	adata = prep_zero_mask(a, adata, &constants);
+	mask = create_zero_mask(adata);
+	x ^= a & zero_bytemask(mask);
+
+	return hashlen_create(fold_hash(x, y), len + find_zero(mask));
+}
+EXPORT_SYMBOL(hashlen_string);
+
 /*
  * Calculate the length and hash of the path component, and
  * return the "hash_len" as the result.
  */
 static inline u64 hash_name(const char *name)
 {
-	unsigned long a, b, adata, bdata, mask, hash, len;
+	unsigned long a = 0, b, x = 0, y = 0, adata, bdata, mask, len;
 	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;

-	hash = a = 0;
 	len = -sizeof(unsigned long);
 	do {
-		hash = mix_hash(hash + a);
+		HASH_MIX(x, y, a);
 		len += sizeof(unsigned long);
 		a = load_unaligned_zeropad(name+len);
 		b = a ^ REPEAT_BYTE('/');
@@ -1872,25 +1943,40 @@ static inline u64 hash_name(const char *name)

 	adata = prep_zero_mask(a, adata, &constants);
 	bdata = prep_zero_mask(b, bdata, &constants);
-
 	mask = create_zero_mask(adata | bdata);
+	x ^= a & zero_bytemask(mask);

-	hash += a & zero_bytemask(mask);
-	len += find_zero(mask);
-	return hashlen_create(fold_hash(hash), len);
+	return hashlen_create(fold_hash(x, y), len + find_zero(mask));
 }

-#else
+#else	/* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */

-unsigned int full_name_hash(const unsigned char *name, unsigned int len)
+/* Return the hash of a string of known length */
+unsigned int full_name_hash(const char *name, unsigned int len)
 {
 	unsigned long hash = init_name_hash();
 	while (len--)
-		hash = partial_name_hash(*name++, hash);
+		hash = partial_name_hash((unsigned char)*name++, hash);
 	return end_name_hash(hash);
 }
 EXPORT_SYMBOL(full_name_hash);

+/* Return the "hash_len" (hash and length) of a null-terminated string */
+u64 hashlen_string(const char *name)
+{
+	unsigned long hash = init_name_hash();
+	unsigned long len = 0, c;
+
+	c = (unsigned char)*name;
+	do {
+		len++;
+		hash = partial_name_hash(c, hash);
+		c = (unsigned char)name[len];
+	} while (c);
+	return hashlen_create(end_name_hash(hash), len);
+}
+EXPORT_SYMBOL(hashlen_string);
+
 /*
  * We know there's a real path component here of at least
  * one character.
@@ -1934,7 +2020,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
 	int type;

 	err = may_lookup(nd);
 	if (err)
 		return err;

 	hash_len = hash_name(name);
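
For readers tracing the new scheme, the 64-bit mixing round and fold can be
written out as ordinary functions; this is a userspace paraphrase of the
macros above (rol64 spelled out by hand, sample inputs in main() ours), not
a drop-in replacement:

	#include <stdint.h>
	#include <stdio.h>

	#define GOLDEN_RATIO_64 0x61C8864680B583EBull

	static uint64_t rol64(uint64_t v, unsigned s)
	{
		return (v << s) | (v >> (64 - s));
	}

	/* One HASH_MIX round: two state words, no temporaries.
	 * Note HASH_MIX keeps the all-zero state at zero, as the
	 * hash_name() loop structure requires. */
	static void hash_mix64(uint64_t *x, uint64_t *y, uint64_t a)
	{
		*x ^= a;
		*y ^= *x; *x = rol64(*x, 12);
		*x += *y; *y = rol64(*y, 45);
		*y *= 9;
	}

	/* Fold the two state words down to a 32-bit hash. */
	static uint32_t fold_hash64(uint64_t x, uint64_t y)
	{
		y ^= x * GOLDEN_RATIO_64;
		y *= GOLDEN_RATIO_64;
		return (uint32_t)(y >> 32);
	}

	int main(void)
	{
		uint64_t x = 0, y = 0;

		hash_mix64(&x, &y, 0x646c726f776f6c6cull);	/* sample words */
		hash_mix64(&x, &y, 0x0000000000006f68ull);
		printf("%08x\n", fold_hash64(x, y));
		return 0;
	}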
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index f8506e8dd4d4..484c8792da82 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -10,6 +10,7 @@
 #include <linux/cache.h>
 #include <linux/rcupdate.h>
 #include <linux/lockref.h>
+#include <linux/stringhash.h>

 struct path;
 struct vfsmount;
@@ -52,9 +53,6 @@ struct qstr {
 };

 #define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
-#define hashlen_hash(hashlen) ((u32) (hashlen))
-#define hashlen_len(hashlen)  ((u32)((hashlen) >> 32))
-#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))

 struct dentry_stat_t {
 	long nr_dentry;
@@ -65,29 +63,6 @@ struct dentry_stat_t {
 };
 extern struct dentry_stat_t dentry_stat;

-/* Name hashing routines. Initial hash value */
-/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
-#define init_name_hash()		0
-
-/* partial hash update function. Assume roughly 4 bits per character */
-static inline unsigned long
-partial_name_hash(unsigned long c, unsigned long prevhash)
-{
-	return (prevhash + (c << 4) + (c >> 4)) * 11;
-}
-
-/*
- * Finally: cut down the number of bits to a int value (and try to avoid
- * losing bits)
- */
-static inline unsigned long end_name_hash(unsigned long hash)
-{
-	return (unsigned int) hash;
-}
-
-/* Compute the hash for a name string. */
-extern unsigned int full_name_hash(const unsigned char *, unsigned int);
-
 /*
  * Try to keep struct dentry aligned on 64 byte cachelines (this will
  * give reasonable cacheline footprint with larger lines without the
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 79c52fa81cac..ad6fa21d977b 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -3,92 +3,94 @@
 /* Fast hashing routine for ints,  longs and pointers.
    (C) 2002 Nadia Yvette Chambers, IBM */

-/*
- * Knuth recommends primes in approximately golden ratio to the maximum
- * integer representable by a machine word for multiplicative hashing.
- * Chuck Lever verified the effectiveness of this technique:
- * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
- *
- * These primes are chosen to be bit-sparse, that is operations on
- * them can use shifts and additions instead of multiplications for
- * machines where multiplications are slow.
- */
-
 #include <asm/types.h>
 #include <linux/compiler.h>

-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
-/*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
-
+/*
+ * The "GOLDEN_RATIO_PRIME" is used in fs/btrfs/btrfs_inode.h and
+ * fs/inode.c.  It's not actually prime any more (the previous primes
+ * were actively bad for hashing), but the name remains.
+ */
 #if BITS_PER_LONG == 32
-#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
 #define hash_long(val, bits) hash_32(val, bits)
 #elif BITS_PER_LONG == 64
 #define hash_long(val, bits) hash_64(val, bits)
-#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
 #else
 #error Wordsize not 32 or 64
 #endif

 /*
- * The above primes are actively bad for hashing, since they are
- * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
- * real problems. Besides, the "prime" part is pointless for the
- * multiplicative hash.
+ * This hash multiplies the input by a large odd number and takes the
+ * high bits.  Since multiplication propagates changes to the most
+ * significant end only, it is essential that the high bits of the
+ * product be used for the hash value.
+ *
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
  *
  * Although a random odd number will do, it turns out that the golden
  * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
- * properties.
+ * properties.  (See Knuth vol 3, section 6.4, exercise 9.)
  *
- * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
- * (See Knuth vol 3, section 6.4, exercise 9.)
+ * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
+ * which is very slightly easier to multiply by and makes no
+ * difference to the hash distribution.
  */
 #define GOLDEN_RATIO_32 0x61C88647
 #define GOLDEN_RATIO_64 0x61C8864680B583EBull

-static __always_inline u64 hash_64(u64 val, unsigned int bits)
-{
-	u64 hash = val;
+#ifdef CONFIG_HAVE_ARCH_HASH
+/* This header may use the GOLDEN_RATIO_xx constants */
+#include <asm/hash.h>
+#endif

-#if BITS_PER_LONG == 64
-	hash = hash * GOLDEN_RATIO_64;
-#else
-	/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
-	u64 n = hash;
-	n <<= 18;
-	hash -= n;
-	n <<= 33;
-	hash -= n;
-	n <<= 3;
-	hash += n;
-	n <<= 3;
-	hash -= n;
-	n <<= 4;
-	hash += n;
-	n <<= 2;
-	hash += n;
+/*
+ * The _generic versions exist only so lib/test_hash.c can compare
+ * the arch-optimized versions with the generic.
+ *
+ * Note that if you change these, any <asm/hash.h> that aren't updated
+ * to match need to have their HAVE_ARCH_* define values updated so the
+ * self-test will not false-positive.
+ */
+#ifndef HAVE_ARCH__HASH_32
+#define __hash_32 __hash_32_generic
 #endif
+static inline u32 __hash_32_generic(u32 val)
+{
+	return val * GOLDEN_RATIO_32;
+}

+#ifndef HAVE_ARCH_HASH_32
+#define hash_32 hash_32_generic
+#endif
+static inline u32 hash_32_generic(u32 val, unsigned int bits)
+{
 	/* High bits are more random, so use them. */
-	return hash >> (64 - bits);
+	return __hash_32(val) >> (32 - bits);
 }

-static inline u32 hash_32(u32 val, unsigned int bits)
+#ifndef HAVE_ARCH_HASH_64
+#define hash_64 hash_64_generic
+#endif
+static __always_inline u32 hash_64_generic(u64 val, unsigned int bits)
 {
-	/* On some cpus multiply is faster, on others gcc will do shifts */
-	u32 hash = val * GOLDEN_RATIO_PRIME_32;
-
-	/* High bits are more random, so use them. */
-	return hash >> (32 - bits);
+#if BITS_PER_LONG == 64
+	/* 64x64-bit multiply is efficient on all 64-bit processors */
+	return val * GOLDEN_RATIO_64 >> (64 - bits);
+#else
+	/* Hash 64 bits using only 32x32-bit multiply. */
+	return hash_32((u32)val ^ __hash_32(val >> 32), bits);
+#endif
 }

-static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
+static inline u32 hash_ptr(const void *ptr, unsigned int bits)
 {
 	return hash_long((unsigned long)ptr, bits);
 }

+/* This really should be called fold32_ptr; it does no hashing to speak of. */
 static inline u32 hash32_ptr(const void *ptr)
 {
 	unsigned long val = (unsigned long)ptr;
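
The relationships among __hash_32(), hash_32() and the 32-bit hash_64()
fallback are easiest to see side by side; a userspace model of the generic
versions above (kernel names reused for clarity, harness ours):

	#include <stdint.h>
	#include <stdio.h>

	#define GOLDEN_RATIO_32 0x61C88647u

	static uint32_t __hash_32(uint32_t val)
	{
		return val * GOLDEN_RATIO_32;
	}

	static uint32_t hash_32(uint32_t val, unsigned bits)
	{
		/* High bits of the product are the best-mixed ones. */
		return __hash_32(val) >> (32 - bits);
	}

	/* The BITS_PER_LONG == 32 branch of hash_64_generic(): fold the
	 * halves with one multiply, then hash the 32-bit result. */
	static uint32_t hash_64_on_32bit(uint64_t val, unsigned bits)
	{
		return hash_32((uint32_t)val ^ __hash_32(val >> 32), bits);
	}

	int main(void)
	{
		/* Index an 8-bit (256-entry) table by a 32- or 64-bit key. */
		printf("%u\n", hash_32(0x12345678u, 8));	/* 0..255 */
		printf("%u\n", hash_64_on_32bit(0x123456789abcdef0ull, 8));
		return 0;
	}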
diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h
new file mode 100644
index 000000000000..451771d9b9c0
--- /dev/null
+++ b/include/linux/stringhash.h
@@ -0,0 +1,76 @@
+#ifndef __LINUX_STRINGHASH_H
+#define __LINUX_STRINGHASH_H
+
+#include <linux/compiler.h>	/* For __pure */
+#include <linux/types.h>	/* For u32, u64 */
+
+/*
+ * Routines for hashing strings of bytes to a 32-bit hash value.
+ *
+ * These hash functions are NOT GUARANTEED STABLE between kernel
+ * versions, architectures, or even repeated boots of the same kernel.
+ * (E.g. they may depend on boot-time hardware detection or be
+ * deliberately randomized.)
+ *
+ * They are also not intended to be secure against collisions caused by
+ * malicious inputs; much slower hash functions are required for that.
+ *
+ * They are optimized for pathname components, meaning short strings.
+ * Even if a majority of files have longer names, the dynamic profile of
+ * pathname components skews short due to short directory names.
+ * (E.g. /usr/lib/libsesquipedalianism.so.3.141.)
+ */
+
+/*
+ * Version 1: one byte at a time.  Example of use:
+ *
+ * unsigned long hash = init_name_hash();
+ * while (*p)
+ *	hash = partial_name_hash(tolower(*p++), hash);
+ * hash = end_name_hash(hash);
+ *
+ * Although this is designed for bytes, fs/hfsplus/unicode.c
+ * abuses it to hash 16-bit values.
+ */
+
+/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
+#define init_name_hash()		0
+
+/* partial hash update function. Assume roughly 4 bits per character */
+static inline unsigned long
+partial_name_hash(unsigned long c, unsigned long prevhash)
+{
+	return (prevhash + (c << 4) + (c >> 4)) * 11;
+}
+
+/*
+ * Finally: cut down the number of bits to an int value (and try to avoid
+ * losing bits)
+ */
+static inline unsigned long end_name_hash(unsigned long hash)
+{
+	return (unsigned int)hash;
+}
+
+/*
+ * Version 2: One word (32 or 64 bits) at a time.
+ * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h>
+ * exists, which describes major Linux platforms like x86 and ARM), then
+ * this computes a different hash function much faster.
+ *
+ * If not set, this falls back to a wrapper around the preceding.
+ */
+extern unsigned int __pure full_name_hash(const char *, unsigned int);
+
+/*
+ * A hash_len is a u64 with the hash of a string in the low
+ * half and the length in the high half.
+ */
+#define hashlen_hash(hashlen) ((u32)(hashlen))
+#define hashlen_len(hashlen)  ((u32)((hashlen) >> 32))
+#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))
+
+/* Return the "hash_len" (hash and length) of a null-terminated string */
+extern u64 __pure hashlen_string(const char *name);
+
+#endif	/* __LINUX_STRINGHASH_H */
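
The version-1 usage example in the header's comment, fleshed out as a
compilable sketch (userspace copies of the primitives; the name
hash_name_ci is our illustration, not a kernel function):

	#include <ctype.h>

	/* Userspace copies of the byte-at-a-time primitives above. */
	#define init_name_hash() 0

	static unsigned long partial_name_hash(unsigned long c,
					       unsigned long prevhash)
	{
		return (prevhash + (c << 4) + (c >> 4)) * 11;
	}

	static unsigned long end_name_hash(unsigned long hash)
	{
		return (unsigned int)hash;
	}

	/* Case-insensitive name hash, per the header's usage example. */
	static unsigned int hash_name_ci(const char *p)
	{
		unsigned long hash = init_name_hash();

		while (*p)
			hash = partial_name_hash(tolower((unsigned char)*p++),
						 hash);
		return end_name_hash(hash);
	}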
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index c00f53a4ccdd..91d5a5d6f52b 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -16,6 +16,7 @@
 #include <linux/sunrpc/cache.h>
 #include <linux/sunrpc/gss_api.h>
 #include <linux/hash.h>
+#include <linux/stringhash.h>
 #include <linux/cred.h>

 struct svc_cred {
@@ -165,41 +166,18 @@ extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
 extern int unix_gid_cache_create(struct net *net);
 extern void unix_gid_cache_destroy(struct net *net);

-static inline unsigned long hash_str(char *name, int bits)
+/*
+ * The <stringhash.h> functions are good enough that we don't need to
+ * use hash_32() on them; just extracting the high bits is enough.
+ */
+static inline unsigned long hash_str(char const *name, int bits)
 {
-	unsigned long hash = 0;
-	unsigned long l = 0;
-	int len = 0;
-	unsigned char c;
-	do {
-		if (unlikely(!(c = *name++))) {
-			c = (char)len; len = -1;
-		}
-		l = (l << 8) | c;
-		len++;
-		if ((len & (BITS_PER_LONG/8-1))==0)
-			hash = hash_long(hash^l, BITS_PER_LONG);
-	} while (len);
-	return hash >> (BITS_PER_LONG - bits);
+	return hashlen_hash(hashlen_string(name)) >> (32 - bits);
 }

-static inline unsigned long hash_mem(char *buf, int length, int bits)
+static inline unsigned long hash_mem(char const *buf, int length, int bits)
 {
-	unsigned long hash = 0;
-	unsigned long l = 0;
-	int len = 0;
-	unsigned char c;
-	do {
-		if (len == length) {
-			c = (char)len; len = -1;
-		} else
-			c = *buf++;
-		l = (l << 8) | c;
-		len++;
-		if ((len & (BITS_PER_LONG/8-1))==0)
-			hash = hash_long(hash^l, BITS_PER_LONG);
-	} while (len);
-	return hash >> (BITS_PER_LONG - bits);
+	return full_name_hash(buf, length) >> (32 - bits);
 }

 #endif /* __KERNEL__ */
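
The simplified hash_str() relies on the hash_len packing from
<linux/stringhash.h>; a small model showing how the hash half is extracted
and truncated to the top bits (names and harness ours):

	#include <assert.h>
	#include <stdint.h>

	/* hash_len packing, as defined in <linux/stringhash.h> */
	#define hashlen_hash(hashlen)	  ((uint32_t)(hashlen))
	#define hashlen_len(hashlen)	  ((uint32_t)((hashlen) >> 32))
	#define hashlen_create(hash, len) ((uint64_t)(len)<<32 | (uint32_t)(hash))

	/* Model of the new hash_str(): keep the top 'bits' of the 32-bit
	 * string hash; no second hash_32() pass is needed. */
	static unsigned long hash_str_model(uint64_t hashlen, int bits)
	{
		return hashlen_hash(hashlen) >> (32 - bits);
	}

	int main(void)
	{
		uint64_t hl = hashlen_create(0xdeadbeef, 11);

		assert(hashlen_hash(hl) == 0xdeadbeef);
		assert(hashlen_len(hl) == 11);
		assert(hash_str_model(hl, 8) == 0xde);	/* top 8 bits */
		return 0;
	}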
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e707ab3e1991..77d7d034bac3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1849,6 +1849,17 @@ config TEST_RHASHTABLE

 	  If unsure, say N.

+config TEST_HASH
+	tristate "Perform selftest on hash functions"
+	default n
+	help
+	  Enable this option to test the kernel's integer (<linux/hash.h>)
+	  and string (<linux/stringhash.h>) hash functions on boot
+	  (or module load).
+
+	  This is intended to help people writing architecture-specific
+	  optimized versions.  If unsure, say N.
+
 endmenu # runtime tests

 config PROVIDE_OHCI1394_DMA_INIT
diff --git a/lib/Makefile b/lib/Makefile
index 42b69185f963..499fb354d627 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_BPF) += test_bpf.o
 obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+obj-$(CONFIG_TEST_HASH) += test_hash.o
 obj-$(CONFIG_TEST_KASAN) += test_kasan.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 obj-$(CONFIG_TEST_LKM) += test_module.o
diff --git a/lib/test_hash.c b/lib/test_hash.c
new file mode 100644
index 000000000000..c9549c8b4909
--- /dev/null
+++ b/lib/test_hash.c
@@ -0,0 +1,250 @@
+/*
+ * Test cases for <linux/hash.h> and <linux/stringhash.h>
+ * This just verifies that various ways of computing a hash
+ * produce the same thing and, for cases where a k-bit hash
+ * value is requested, is of the requested size.
+ *
+ * We fill a buffer with a 255-byte null-terminated string,
+ * and use both full_name_hash() and hashlen_string() to hash the
+ * substrings from i to j, where 0 <= i < j < 256.
+ *
+ * The returned values are used to check that __hash_32() and
+ * __hash_32_generic() compute the same thing.  Likewise hash_32()
+ * and hash_64().
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt "\n"
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/hash.h>
+#include <linux/stringhash.h>
+#include <linux/printk.h>
+
+/* 32-bit XORSHIFT generator.  Seed must not be zero. */
+static u32 __init __attribute_const__
+xorshift(u32 seed)
+{
+	seed ^= seed << 13;
+	seed ^= seed >> 17;
+	seed ^= seed << 5;
+	return seed;
+}
+
+/* Given a non-zero x, returns a non-zero byte. */
+static u8 __init __attribute_const__
+mod255(u32 x)
+{
+	x = (x & 0xffff) + (x >> 16);	/* 1 <= x <= 0x1fffe */
+	x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0x2fd */
+	x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0x100 */
+	x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0xff */
+	return x;
+}
+
+/* Fill the buffer with non-zero bytes. */
+static void __init
+fill_buf(char *buf, size_t len, u32 seed)
+{
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		seed = xorshift(seed);
+		buf[i] = mod255(seed);
+	}
+}
+
+/*
+ * Test the various integer hash functions.  h64 (or its low-order bits)
+ * is the integer to hash.  hash_or accumulates the OR of the hash values,
+ * which are later checked to see that they cover all the requested bits.
+ *
+ * Because these functions (as opposed to the string hashes) are all
+ * inline, the code being tested is actually in the module, and you can
+ * recompile and re-test the module without rebooting.
+ */
+static bool __init
+test_int_hash(unsigned long long h64, u32 hash_or[2][33])
+{
+	int k;
+	u32 h0 = (u32)h64, h1, h2;
+
+	/* Test __hash_32 */
+	hash_or[0][0] |= h1 = __hash_32(h0);
+#ifdef HAVE_ARCH__HASH_32
+	hash_or[1][0] |= h2 = __hash_32_generic(h0);
+#if HAVE_ARCH__HASH_32 == 1
+	if (h1 != h2) {
+		pr_err("__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
+			h0, h1, h2);
+		return false;
+	}
+#endif
+#endif
+
+	/* Test k = 1..32 bits */
+	for (k = 1; k <= 32; k++) {
+		u32 const m = ((u32)2 << (k-1)) - 1;	/* Low k bits set */
+
+		/* Test hash_32 */
+		hash_or[0][k] |= h1 = hash_32(h0, k);
+		if (h1 > m) {
+			pr_err("hash_32(%#x, %d) = %#x > %#x", h0, k, h1, m);
+			return false;
+		}
+#ifdef HAVE_ARCH_HASH_32
+		h2 = hash_32_generic(h0, k);
+#if HAVE_ARCH_HASH_32 == 1
+		if (h1 != h2) {
+			pr_err("hash_32(%#x, %d) = %#x != hash_32_generic() "
+				" = %#x", h0, k, h1, h2);
+			return false;
+		}
+#else
+		if (h2 > m) {
+			pr_err("hash_32_generic(%#x, %d) = %#x > %#x",
+				h0, k, h1, m);
+			return false;
+		}
+#endif
+#endif
+		/* Test hash_64 */
+		hash_or[1][k] |= h1 = hash_64(h64, k);
+		if (h1 > m) {
+			pr_err("hash_64(%#llx, %d) = %#x > %#x", h64, k, h1, m);
+			return false;
+		}
+#ifdef HAVE_ARCH_HASH_64
+		h2 = hash_64_generic(h64, k);
+#if HAVE_ARCH_HASH_64 == 1
+		if (h1 != h2) {
+			pr_err("hash_64(%#llx, %d) = %#x != hash_64_generic() "
+				"= %#x", h64, k, h1, h2);
+			return false;
+		}
+#else
+		if (h2 > m) {
+			pr_err("hash_64_generic(%#llx, %d) = %#x > %#x",
+				h64, k, h1, m);
+			return false;
+		}
+#endif
+#endif
+	}
+
+	(void)h2;	/* Suppress unused variable warning */
+	return true;
+}
+
+#define SIZE 256	/* Run time is cubic in SIZE */
+
+static int __init
+test_hash_init(void)
+{
+	char buf[SIZE+1];
+	u32 string_or = 0, hash_or[2][33] = { 0 };
+	unsigned tests = 0;
+	unsigned long long h64 = 0;
+	int i, j;
+
+	fill_buf(buf, SIZE, 1);
+
+	/* Test every possible non-empty substring in the buffer. */
+	for (j = SIZE; j > 0; --j) {
+		buf[j] = '\0';
+
+		for (i = 0; i <= j; i++) {
+			u64 hashlen = hashlen_string(buf+i);
+			u32 h0 = full_name_hash(buf+i, j-i);
+
+			/* Check that hashlen_string gets the length right */
+			if (hashlen_len(hashlen) != j-i) {
+				pr_err("hashlen_string(%d..%d) returned length"
+					" %u, expected %d",
+					i, j, hashlen_len(hashlen), j-i);
+				return -EINVAL;
+			}
+			/* Check that the hashes match */
+			if (hashlen_hash(hashlen) != h0) {
+				pr_err("hashlen_string(%d..%d) = %08x != "
+					"full_name_hash() = %08x",
+					i, j, hashlen_hash(hashlen), h0);
+				return -EINVAL;
+			}
+
+			string_or |= h0;
+			h64 = h64 << 32 | h0;	/* For use with hash_64 */
+			if (!test_int_hash(h64, hash_or))
+				return -EINVAL;
+			tests++;
+		} /* i */
+	} /* j */
+
+	/* The OR of all the hash values should cover all the bits */
+	if (~string_or) {
+		pr_err("OR of all string hash results = %#x != %#x",
+			string_or, -1u);
+		return -EINVAL;
+	}
+	if (~hash_or[0][0]) {
+		pr_err("OR of all __hash_32 results = %#x != %#x",
+			hash_or[0][0], -1u);
+		return -EINVAL;
+	}
+#ifdef HAVE_ARCH__HASH_32
+#if HAVE_ARCH__HASH_32 != 1	/* Test is pointless if results match */
+	if (~hash_or[1][0]) {
+		pr_err("OR of all __hash_32_generic results = %#x != %#x",
+			hash_or[1][0], -1u);
+		return -EINVAL;
+	}
+#endif
+#endif
+
+	/* Likewise for all the i-bit hash values */
+	for (i = 1; i <= 32; i++) {
+		u32 const m = ((u32)2 << (i-1)) - 1;	/* Low i bits set */
+
+		if (hash_or[0][i] != m) {
+			pr_err("OR of all hash_32(%d) results = %#x "
+				"(%#x expected)", i, hash_or[0][i], m);
+			return -EINVAL;
+		}
+		if (hash_or[1][i] != m) {
+			pr_err("OR of all hash_64(%d) results = %#x "
+				"(%#x expected)", i, hash_or[1][i], m);
+			return -EINVAL;
+		}
+	}
+
+	/* Issue notices about skipped tests. */
+#ifndef HAVE_ARCH__HASH_32
+	pr_info("__hash_32() has no arch implementation to test.");
+#elif HAVE_ARCH__HASH_32 != 1
+	pr_info("__hash_32() is arch-specific; not compared to generic.");
+#endif
+#ifndef HAVE_ARCH_HASH_32
+	pr_info("hash_32() has no arch implementation to test.");
+#elif HAVE_ARCH_HASH_32 != 1
+	pr_info("hash_32() is arch-specific; not compared to generic.");
+#endif
+#ifndef HAVE_ARCH_HASH_64
+	pr_info("hash_64() has no arch implementation to test.");
+#elif HAVE_ARCH_HASH_64 != 1
+	pr_info("hash_64() is arch-specific; not compared to generic.");
+#endif
+
+	pr_notice("%u tests passed.", tests);
+
+	return 0;
+}
+
+static void __exit test_hash_exit(void)
+{
+}
+
+module_init(test_hash_init);	/* Does everything */
+module_exit(test_hash_exit);	/* Does nothing */
+
+MODULE_LICENSE("GPL");
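
mod255() above reduces a non-zero 32-bit value to a non-zero byte using only
shifts and adds, exploiting 2^8 == 1 (mod 255) so each folding step preserves
the value mod 255.  A quick standalone check of the claimed invariants
(the harness and its sampling step are ours):

	#include <assert.h>
	#include <stdint.h>

	/* Userspace copy of mod255() from lib/test_hash.c above. */
	static uint8_t mod255(uint32_t x)
	{
		x = (x & 0xffff) + (x >> 16);	/* 1 <= x <= 0x1fffe */
		x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0x2fd */
		x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0x100 */
		x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0xff */
		return x;
	}

	int main(void)
	{
		uint64_t x;

		/* Sample the 32-bit range; 65537 varies both halves. */
		for (x = 1; x <= 0xffffffffull; x += 65537) {
			uint8_t r = mod255((uint32_t)x);

			assert(r >= 1 && r <= 255);	/* non-zero byte */
			assert(r % 255 == x % 255);	/* value preserved */
		}
		return 0;
	}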