aboutsummaryrefslogtreecommitdiffstats
path: root/net/core
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2014-09-05 19:20:26 -0400
committerDavid S. Miller <davem@davemloft.net>2014-09-05 20:47:02 -0400
commit56193d1bce2b2759cb4bdcc00cd05544894a0c90 (patch)
tree621b2ce35961049dd263c2f83aee3d9ec39ac686 /net/core
parent2c048e646212f9880e6f201771a30daa963d7f8b (diff)
net: Add function for parsing the header length out of linear ethernet frames
This patch updates some of the flow_dissector api so that it can be used to parse the length of ethernet buffers stored in fragments. Most of the changes needed were to __skb_get_poff as it needed to be updated to support sending a linear buffer instead of a skb. I have split __skb_get_poff into two functions, the first is skb_get_poff and it retains the functionality of the original __skb_get_poff. The other function is __skb_get_poff which now works much like __skb_flow_dissect in relation to skb_flow_dissect in that it provides the same functionality but works with just a data buffer and hlen instead of needing an skb. Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com> Acked-by: Alexei Starovoitov <ast@plumgrid.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/flow_dissector.c46
2 files changed, 33 insertions, 15 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index 37f8eb06fdee..fa5b7d0f77ac 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -113,7 +113,7 @@ static unsigned int pkt_type_offset(void)
 
 static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
-	return __skb_get_poff((struct sk_buff *)(unsigned long) ctx);
+	return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
 }
 
 static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 12f48ca7a0b0..8560dea58803 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -13,6 +13,7 @@
 #include <linux/if_pppox.h>
 #include <linux/ppp_defs.h>
 #include <net/flow_keys.h>
+#include <scsi/fc/fc_fcoe.h>
 
 /* copy saddr & daddr, possibly using 64bit load/store
  * Equivalent to : flow->src = iph->saddr;
@@ -117,6 +118,13 @@ ipv6:
 		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
 		nhoff += sizeof(struct ipv6hdr);
 
+		/* skip the flow label processing if skb is NULL. The
+		 * assumption here is that if there is no skb we are not
+		 * looking for flow info as much as we are length.
+		 */
+		if (!skb)
+			break;
+
 		flow_label = ip6_flowlabel(iph);
 		if (flow_label) {
 			/* Awesome, IPv6 packet has a flow label so we can
@@ -165,6 +173,9 @@ ipv6:
 				return false;
 			}
 		}
+	case htons(ETH_P_FCOE):
+		flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
+		/* fall through */
 	default:
 		return false;
 	}
@@ -316,26 +327,18 @@ u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(__skb_tx_hash);
 
-/* __skb_get_poff() returns the offset to the payload as far as it could
- * be dissected. The main user is currently BPF, so that we can dynamically
- * truncate packets without needing to push actual payload to the user
- * space and can analyze headers only, instead.
- */
-u32 __skb_get_poff(const struct sk_buff *skb)
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+		   const struct flow_keys *keys, int hlen)
 {
-	struct flow_keys keys;
-	u32 poff = 0;
+	u32 poff = keys->thoff;
 
-	if (!skb_flow_dissect(skb, &keys))
-		return 0;
-
-	poff += keys.thoff;
-	switch (keys.ip_proto) {
+	switch (keys->ip_proto) {
 	case IPPROTO_TCP: {
 		const struct tcphdr *tcph;
 		struct tcphdr _tcph;
 
-		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
+		tcph = __skb_header_pointer(skb, poff, sizeof(_tcph),
+					    data, hlen, &_tcph);
 		if (!tcph)
 			return poff;
 
@@ -369,6 +372,21 @@ u32 __skb_get_poff(const struct sk_buff *skb)
 	return poff;
 }
 
+/* skb_get_poff() returns the offset to the payload as far as it could
+ * be dissected. The main user is currently BPF, so that we can dynamically
+ * truncate packets without needing to push actual payload to the user
+ * space and can analyze headers only, instead.
+ */
+u32 skb_get_poff(const struct sk_buff *skb)
+{
+	struct flow_keys keys;
+
+	if (!skb_flow_dissect(skb, &keys))
+		return 0;
+
+	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
+}
+
 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_XPS