Diffstat (limited to 'net/core/flow_dissector.c')
 net/core/flow_dissector.c | 91 +++++++++++++++++++++++++++++++++------------------
 1 file changed, 60 insertions(+), 31 deletions(-)
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 8d7d0dd72db2..2fc5beaf5783 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -25,9 +25,35 @@ static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
 	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
 }
 
+/**
+ * skb_flow_get_ports - extract the upper layer ports and return them
+ * @skb: buffer to extract the ports from
+ * @thoff: transport header offset
+ * @ip_proto: protocol for which to get port offset
+ *
+ * The function will try to retrieve the ports at offset thoff + poff where poff
+ * is the protocol port offset returned from proto_ports_offset
+ */
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
+{
+	int poff = proto_ports_offset(ip_proto);
+
+	if (poff >= 0) {
+		__be32 *ports, _ports;
+
+		ports = skb_header_pointer(skb, thoff + poff,
+					   sizeof(_ports), &_ports);
+		if (ports)
+			return *ports;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(skb_flow_get_ports);
+
 bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
 {
-	int poff, nhoff = skb_network_offset(skb);
+	int nhoff = skb_network_offset(skb);
 	u8 ip_proto;
 	__be16 proto = skb->protocol;
 
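The new helper is self-contained: a caller only needs the transport header offset and the L4 protocol number, both of which skb_flow_dissect() produces. A minimal usage sketch (hypothetical caller; assumes @flow was already filled by skb_flow_dissect(), so thoff and ip_proto are valid):

    /* The 32-bit load at the port offset picks up the source port in the
     * first two bytes and the destination port in the last two, exactly
     * as they sit on the wire for TCP and UDP. */
    union {
            __be32 ports;
            __be16 port16[2];       /* [0] = source, [1] = destination */
    } p;

    p.ports = skb_flow_get_ports(skb, flow->thoff, flow->ip_proto);
    if (p.ports)
            pr_debug("sport=%u dport=%u\n",
                     ntohs(p.port16[0]), ntohs(p.port16[1]));

A zero return means either the protocol has no port concept (poff < 0) or the header could not be pulled from the buffer.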
@@ -40,15 +66,15 @@ again:
 		struct iphdr _iph;
 ip:
 		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
-		if (!iph)
+		if (!iph || iph->ihl < 5)
 			return false;
+		nhoff += iph->ihl * 4;
 
+		ip_proto = iph->protocol;
 		if (ip_is_fragment(iph))
 			ip_proto = 0;
-		else
-			ip_proto = iph->protocol;
+
 		iph_to_flow_copy_addrs(flow, iph);
-		nhoff += iph->ihl * 4;
 		break;
 	}
 	case __constant_htons(ETH_P_IPV6): {
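The ihl check matters because ihl is the IPv4 header length in 32-bit words, and 5 (20 bytes) is the legal minimum. Annotated excerpt of the new logic:

    /* ihl counts 32-bit words; anything below 5 is a malformed header.
     * Rejecting it here keeps the offset advance from landing the
     * "transport header" back inside the IP header itself, where the
     * later port read would pick up attacker-controlled header bytes. */
    if (!iph || iph->ihl < 5)
            return false;
    nhoff += iph->ihl * 4;      /* fixed 20 bytes plus any IP options */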
@@ -150,16 +176,7 @@ ipv6:
 	}
 
 	flow->ip_proto = ip_proto;
-	poff = proto_ports_offset(ip_proto);
-	if (poff >= 0) {
-		__be32 *ports, _ports;
-
-		ports = skb_header_pointer(skb, nhoff + poff,
-					   sizeof(_ports), &_ports);
-		if (ports)
-			flow->ports = *ports;
-	}
-
+	flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
 	flow->thoff = (u16) nhoff;
 
 	return true;
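For reference, a sketch of the structure being filled (include/net/flow_keys.h of this era; treat the exact layout as indicative rather than authoritative):

    struct flow_keys {
            /* (src, dst) must be adjacent and in this order so that
             * iph_to_flow_copy_addrs() can fill both with one memcpy() */
            __be32 src;
            __be32 dst;
            union {
                    __be32 ports;
                    __be16 port16[2];
            };
            u16 thoff;          /* transport header offset, set above */
            u8 ip_proto;
    };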
@@ -167,6 +184,22 @@ ipv6:
 EXPORT_SYMBOL(skb_flow_dissect);
 
 static u32 hashrnd __read_mostly;
+static __always_inline void __flow_hash_secret_init(void)
+{
+	net_get_random_once(&hashrnd, sizeof(hashrnd));
+}
+
+static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
+{
+	__flow_hash_secret_init();
+	return jhash_3words(a, b, c, hashrnd);
+}
+
+static __always_inline u32 __flow_hash_1word(u32 a)
+{
+	__flow_hash_secret_init();
+	return jhash_1word(a, hashrnd);
+}
 
 /*
  * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
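These helpers replace boot-time seeding (the initialize_hashrnd late_initcall removed at the bottom of this diff) with lazy seeding on first use: net_get_random_once() fills hashrnd exactly once, and the secret is drawn after the entropy pool is in better shape than at early boot. The same pattern works for any lazily seeded secret; a hypothetical example:

    static u32 example_secret __read_mostly;

    /* Seed on first use instead of at boot; net_get_random_once()
     * guarantees the fill happens exactly once across all callers. */
    static inline u32 example_hash(u32 a)
    {
            net_get_random_once(&example_secret, sizeof(example_secret));
            return jhash_1word(a, example_secret);
    }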
@@ -193,9 +226,9 @@ void __skb_get_rxhash(struct sk_buff *skb)
 		swap(keys.port16[0], keys.port16[1]);
 	}
 
-	hash = jhash_3words((__force u32)keys.dst,
-			    (__force u32)keys.src,
-			    (__force u32)keys.ports, hashrnd);
+	hash = __flow_hash_3words((__force u32)keys.dst,
+				  (__force u32)keys.src,
+				  (__force u32)keys.ports);
 	if (!hash)
 		hash = 1;
 
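Context for the swap kept in this hunk: the dissected keys are put into a canonical order before hashing, so both directions of a flow produce the same rxhash. The condition just above the hunk reads, as of this era, roughly:

    /* Canonical ordering: hash(A->B) == hash(B->A), which lets flow
     * tables match replies to the original direction. */
    if ((__force u32)keys.dst < (__force u32)keys.src) {
            swap(keys.dst, keys.src);
            swap(keys.port16[0], keys.port16[1]);
    }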
@@ -231,7 +264,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
 		hash = skb->sk->sk_hash;
 	else
 		hash = (__force u16) skb->protocol;
-	hash = jhash_1word(hash, hashrnd);
+	hash = __flow_hash_1word(hash);
 
 	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
 }
@@ -323,7 +356,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 			else
 				hash = (__force u16) skb->protocol ^
 				    skb->rxhash;
-			hash = jhash_1word(hash, hashrnd);
+			hash = __flow_hash_1word(hash);
 			queue_index = map->queues[
 			    ((u64)hash * map->len) >> 32];
 		}
@@ -362,27 +395,23 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 EXPORT_SYMBOL(__netdev_pick_tx);
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb)
+				    struct sk_buff *skb,
+				    void *accel_priv)
 {
 	int queue_index = 0;
 
 	if (dev->real_num_tx_queues != 1) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb);
+			queue_index = ops->ndo_select_queue(dev, skb,
+							    accel_priv);
 		else
 			queue_index = __netdev_pick_tx(dev, skb);
-		queue_index = dev_cap_txqueue(dev, queue_index);
+
+		if (!accel_priv)
+			queue_index = dev_cap_txqueue(dev, queue_index);
 	}
 
 	skb_set_queue_mapping(skb, queue_index);
 	return netdev_get_tx_queue(dev, queue_index);
 }
-
-static int __init initialize_hashrnd(void)
-{
-	get_random_bytes(&hashrnd, sizeof(hashrnd));
-	return 0;
-}
-
-late_initcall_sync(initialize_hashrnd);
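With the signature change, ndo_select_queue() now receives the opaque accel_priv that netdev_pick_tx() was handed; when it is non-NULL the driver is trusted to pick and bound the queue itself, which is why dev_cap_txqueue() is skipped in that case. A hypothetical driver callback matching the new prototype:

    /* Hypothetical driver hook: accel_priv, when set, identifies a
     * forwarding context (e.g. an accelerated macvlan lower device)
     * that the driver maps onto its own dedicated queue range. */
    static u16 example_select_queue(struct net_device *dev,
                                    struct sk_buff *skb, void *accel_priv)
    {
            if (accel_priv)
                    return example_accel_queue(accel_priv, skb); /* hypothetical helper */

            return __netdev_pick_tx(dev, skb);
    }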