aboutsummaryrefslogtreecommitdiffstats
path: root/net/core
diff options
context:
space:
mode:
authorHannes Frederic Sowa <hannes@stressinduktion.org>2013-10-23 14:06:00 -0400
committerDavid S. Miller <davem@davemloft.net>2013-10-25 19:03:39 -0400
commit66415cf8a1b99d101317f5aa08574b1ec8832672 (patch)
treeb67df35008f4bcec22b5f1730acaf16bae63c262 /net/core
parentf84be2bd96a108b09c8440263fa3adb3fb225fa3 (diff)
net: initialize hashrnd in flow_dissector with net_get_random_once
We can also defer the initialization of hashrnd in flow_dissector to its first use. Since net_get_random_once is irq safe now, we don't have to audit the call paths if one of these functions gets called by an interrupt handler.

Cc: David S. Miller <davem@davemloft.net>
Cc: Eric Dumazet <edumazet@google.com>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/flow_dissector.c34
1 file changed, 21 insertions(+), 13 deletions(-)
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index f8e25ac41c6c..5cac36e6ccd1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -184,6 +184,22 @@ ipv6:
 EXPORT_SYMBOL(skb_flow_dissect);
 
 static u32 hashrnd __read_mostly;
+static __always_inline void __flow_hash_secret_init(void)
+{
+	net_get_random_once(&hashrnd, sizeof(hashrnd));
+}
+
+static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
+{
+	__flow_hash_secret_init();
+	return jhash_3words(a, b, c, hashrnd);
+}
+
+static __always_inline u32 __flow_hash_1word(u32 a)
+{
+	__flow_hash_secret_init();
+	return jhash_1word(a, hashrnd);
+}
 
 /*
  * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
@@ -210,9 +226,9 @@ void __skb_get_rxhash(struct sk_buff *skb)
 		swap(keys.port16[0], keys.port16[1]);
 	}
 
-	hash = jhash_3words((__force u32)keys.dst,
-			    (__force u32)keys.src,
-			    (__force u32)keys.ports, hashrnd);
+	hash = __flow_hash_3words((__force u32)keys.dst,
+				  (__force u32)keys.src,
+				  (__force u32)keys.ports);
 	if (!hash)
 		hash = 1;
 
@@ -248,7 +264,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
 		hash = skb->sk->sk_hash;
 	else
 		hash = (__force u16) skb->protocol;
-	hash = jhash_1word(hash, hashrnd);
+	hash = __flow_hash_1word(hash);
 
 	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
 }
@@ -340,7 +356,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 		else
 			hash = (__force u16) skb->protocol ^
 			    skb->rxhash;
-		hash = jhash_1word(hash, hashrnd);
+		hash = __flow_hash_1word(hash);
 		queue_index = map->queues[
 		    ((u64)hash * map->len) >> 32];
 	}
@@ -395,11 +411,3 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 	skb_set_queue_mapping(skb, queue_index);
 	return netdev_get_tx_queue(dev, queue_index);
 }
-
-static int __init initialize_hashrnd(void)
-{
-	get_random_bytes(&hashrnd, sizeof(hashrnd));
-	return 0;
-}
-
-late_initcall_sync(initialize_hashrnd);