about summary refs log tree commit diff stats
path: root/net/netfilter
diff options
context:
space:
mode:
authorPatrick McHardy <kaber@trash.net>2007-12-18 01:45:52 -0500
committerDavid S. Miller <davem@davemloft.net>2008-01-28 17:59:11 -0500
commit34498825cb9062192b77fa02dae672a4fe6eec70 (patch)
tree7f856aff21ae3ab1328ab9bcf3549bca8e13df35 /net/netfilter
parent7b21e09d1c17ef0296ec5a6df231a6c5c87b2fd7 (diff)
[NETFILTER]: non-power-of-two jhash optimizations
Apply Eric Dumazet's jhash optimizations where applicable. Quoting Eric:

  Thanks to jhash, hash value uses full 32 bits. Instead of returning
  hash % size (implying a divide) we return the high 32 bits of the
  (hash * size) that will give results between [0 and size-1] and the
  same hash distribution. On most cpus, a multiply is less expensive
  than a divide, by an order of magnitude.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/netfilter')
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/netfilter/nf_conntrack_expect.c8
2 files changed, 6 insertions, 4 deletions
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5920f953f782..25564073ee23 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -81,7 +81,7 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
 		  ((__force __u16)tuple->src.u.all << 16) |
 		  (__force __u16)tuple->dst.u.all);
 
-	return jhash_2words(a, b, rnd) % size;
+	return ((u64)jhash_2words(a, b, rnd) * size) >> 32;
 }
 
 static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 0efbf343eac8..e0cd9d00aa61 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -73,15 +73,17 @@ static void nf_ct_expectation_timed_out(unsigned long ul_expect)
 
 static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
 {
+	unsigned int hash;
+
 	if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
 		get_random_bytes(&nf_ct_expect_hash_rnd, 4);
 		nf_ct_expect_hash_rnd_initted = 1;
 	}
 
-	return jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
-		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
-		       (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd) %
-	       nf_ct_expect_hsize;
+	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
+		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
+		       (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd);
+	return ((u64)hash * nf_ct_expect_hsize) >> 32;
 }
 
 struct nf_conntrack_expect *