aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4
diff options
context:
space:
mode:
authorPatrick McHardy <kaber@trash.net>2007-12-18 01:45:52 -0500
committerDavid S. Miller <davem@davemloft.net>2008-01-28 17:59:11 -0500
commit34498825cb9062192b77fa02dae672a4fe6eec70 (patch)
tree7f856aff21ae3ab1328ab9bcf3549bca8e13df35 /net/ipv4
parent7b21e09d1c17ef0296ec5a6df231a6c5c87b2fd7 (diff)
[NETFILTER]: non-power-of-two jhash optimizations
Apply Eric Dumazet's jhash optimizations where applicable. Quoting Eric: "Thanks to jhash, hash value uses full 32 bits. Instead of returning hash % size (implying a divide) we return the high 32 bits of the (hash * size) that will give results between [0 and size-1] and same hash distribution. On most cpus, a multiply is less expensive than a divide, by an order of magnitude."
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c10
2 files changed, 8 insertions, 4 deletions
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index a48e26449fd5..df39ca07fb12 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -273,7 +273,7 @@ clusterip_hashfn(const struct sk_buff *skb,
 	}
 
 	/* node numbers are 1..n, not 0..n */
-	return (hashval % config->num_total_nodes) + 1;
+	return (((u64)hashval * config->num_total_nodes) >> 32) + 1;
 }
 
 static inline int
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index aec157d0ad93..e53ae1ef8f5e 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -77,10 +77,13 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
 static inline unsigned int
 hash_by_src(const struct nf_conntrack_tuple *tuple)
 {
+	unsigned int hash;
+
 	/* Original src, to ensure we map it consistently if poss. */
-	return jhash_3words((__force u32)tuple->src.u3.ip,
-			    (__force u32)tuple->src.u.all,
-			    tuple->dst.protonum, 0) % nf_nat_htable_size;
+	hash = jhash_3words((__force u32)tuple->src.u3.ip,
+			    (__force u32)tuple->src.u.all,
+			    tuple->dst.protonum, 0);
+	return ((u64)hash * nf_nat_htable_size) >> 32;
 }
 
 /* Is this tuple already taken? (not by us) */
@@ -211,7 +214,8 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple,
 	maxip = ntohl(range->max_ip);
 	j = jhash_2words((__force u32)tuple->src.u3.ip,
 			 (__force u32)tuple->dst.u3.ip, 0);
-	*var_ipp = htonl(minip + j % (maxip - minip + 1));
+	j = ((u64)j * (maxip - minip + 1)) >> 32;
+	*var_ipp = htonl(minip + j);
 }
 
 /* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,