| author | Hannes Frederic Sowa <hannes@stressinduktion.org> | 2014-11-04 18:23:04 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2014-11-05 22:01:21 -0500 |
| commit | e5a2c899957659cd1a9f789bc462f9c0b35f5150 | |
| tree | 4c9b8a6f89d961daf9ada9f5ee95f8b371ce3a04 /lib | |
| parent | 2c99cd914d4fed9160d98849c9dd38034616768e | |
fast_hash: avoid indirect function calls
By default the arch_fast_hash hashing function pointers are initialized
to jhash and jhash2. If a CPU with SSE4.2 is detected during boot-up,
they are updated to the CRC32-based implementations. This dispatching
scheme incurs a function pointer lookup and an indirect call for every
hashing operation.
rhashtable, as a user of arch_fast_hash, additionally stores a pointer
to the hashing function in its own structure, so every hashing operation
goes through two indirect branches.
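To make the double dispatch concrete, here is a minimal self-contained sketch; the struct and field names (hash_ops_example, table_example, hashfn) are hypothetical stand-ins for illustration, not the actual fast_hash_ops or rhashtable definitions:

```c
#include <linux/types.h>

/* Hypothetical stand-ins, for illustration only. */
struct hash_ops_example {
	u32 (*hash)(const void *data, u32 len, u32 seed);
};

struct table_example {
	u32 (*hashfn)(const void *data, u32 len, u32 seed);
	u32 seed;
};

/* Filled in at boot with either jhash or the CRC32-based variant. */
static struct hash_ops_example ops_example;

u32 arch_fast_hash_example(const void *data, u32 len, u32 seed)
{
	/* Indirect branch #2: dereference the boot-selected ops pointer. */
	return ops_example.hash(data, len, seed);
}

u32 table_hash_example(const struct table_example *tbl, const void *key, u32 len)
{
	/* Indirect branch #1: tbl->hashfn points at arch_fast_hash_example. */
	return tbl->hashfn(key, len, tbl->seed);
}
```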
Using alternative_call we can eliminate one of those indirect branches.
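As a rough illustration of the approach (this is not part of the lib/ diff below): on x86, alternative_call() emits a direct call to a default function at build time, and apply_alternatives() rewrites the call target at boot when the named CPU feature is present. The helper names (__jhash, __intel_crc4_2_hash) and register constraints in this sketch are assumptions modelled on the x86 half of this series:

```c
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <linux/types.h>

/* Assumed out-of-line helpers obeying a fixed register convention. */
u32 __jhash(void);
u32 __intel_crc4_2_hash(void);

static inline u32 arch_fast_hash_sketch(const void *data, u32 len, u32 seed)
{
	u32 hash;

	/*
	 * At build time a "call __jhash" is emitted.  If the boot CPU
	 * reports SSE4.2 (X86_FEATURE_XMM4_2), apply_alternatives()
	 * patches the call target to __intel_crc4_2_hash.  Either way
	 * the call site is a direct call, so no function pointer has to
	 * be loaded at runtime.
	 */
	alternative_call(__jhash, __intel_crc4_2_hash, X86_FEATURE_XMM4_2,
			 "=a" (hash),				/* output */
			 "D" (data), "S" (len), "d" (seed));	/* inputs */

	return hash;
}
```

The indirect branch that remains is the one rhashtable itself takes through the function pointer stored in its own structure.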
Acked-by: Daniel Borkmann <dborkman@redhat.com>
Cc: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/Makefile | 2 |
| -rw-r--r-- | lib/hash.c | 39 |

2 files changed, 1 insertion, 40 deletions
diff --git a/lib/Makefile b/lib/Makefile
index 7512dc978f18..04e53dd16070 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o hash.o rhashtable.o
+	 percpu-refcount.o percpu_ida.o rhashtable.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
diff --git a/lib/hash.c b/lib/hash.c
deleted file mode 100644
index fea973f4bd57..000000000000
--- a/lib/hash.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/* General purpose hashing library
- *
- * That's a start of a kernel hashing library, which can be extended
- * with further algorithms in future. arch_fast_hash{2,}() will
- * eventually resolve to an architecture optimized implementation.
- *
- * Copyright 2013 Francesco Fusco <ffusco@redhat.com>
- * Copyright 2013 Daniel Borkmann <dborkman@redhat.com>
- * Copyright 2013 Thomas Graf <tgraf@redhat.com>
- * Licensed under the GNU General Public License, version 2.0 (GPLv2)
- */
-
-#include <linux/jhash.h>
-#include <linux/hash.h>
-#include <linux/cache.h>
-
-static struct fast_hash_ops arch_hash_ops __read_mostly = {
-	.hash = jhash,
-	.hash2 = jhash2,
-};
-
-u32 arch_fast_hash(const void *data, u32 len, u32 seed)
-{
-	return arch_hash_ops.hash(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash);
-
-u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
-{
-	return arch_hash_ops.hash2(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash2);
-
-static int __init hashlib_init(void)
-{
-	setup_arch_fast_hash(&arch_hash_ops);
-	return 0;
-}
-early_initcall(hashlib_init);
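For context (not part of the lib/ diff above): with lib/hash.c removed, architectures without an accelerated implementation no longer go through the ops table at all. A minimal sketch of such a generic fallback header, under the assumption that arch_fast_hash and arch_fast_hash2 simply wrap jhash and jhash2:

```c
/*
 * Sketch of a generic (non-accelerated) fallback; illustration only,
 * not taken from the diff above.
 */
#ifndef __ASM_GENERIC_HASH_SKETCH_H
#define __ASM_GENERIC_HASH_SKETCH_H

#include <linux/jhash.h>

static inline u32 arch_fast_hash(const void *data, u32 len, u32 seed)
{
	return jhash(data, len, seed);		/* direct, inlinable call */
}

static inline u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
{
	return jhash2(data, len, seed);
}

#endif /* __ASM_GENERIC_HASH_SKETCH_H */
```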