path: root/drivers/net/xen-netback/netback.c
author	Jason Wang <jasowang@redhat.com>	2013-03-25 16:19:58 -0400
committer	David S. Miller <davem@davemloft.net>	2013-03-26 12:44:44 -0400
commit	f9ca8f74399f9195fd8e01f67a8424a8d33efa55 (patch)
tree	8d46024f2e5c82b0c811c87c8a9924b0efdd28dd /drivers/net/xen-netback/netback.c
parent	c1aad275b0293d2b1905ec95a945422262470684 (diff)
netback: set transport header before passing it to kernel
Currently, for packets received from netback, the kernel simply resets the transport header in netif_receive_skb() before doing the header check, which pretends there is no L4 header. This is suboptimal for precise packet length estimation (introduced in 1def9238: net_sched: more precise pkt_len computation), which needs a correct L4 header for GSO packets. This patch reuses the header already probed by netback for partial-checksum packets and tries skb_flow_dissect() for the other cases; if both fail, it falls back to pretending there is no L4 header.

Cc: Eric Dumazet <edumazet@google.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
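For context, the pkt_len estimation mentioned above depends on the transport header offset being meaningful. The snippet below is an illustrative sketch only, not part of this patch: it approximates the qdisc_pkt_len_init() logic added by 1def9238 in net/core/dev.c (the helper name sketch_pkt_len_init and exact structure here are assumptions), and it only yields a sensible per-segment header length when skb_transport_header() points at real L4 data, which is what this patch arranges for skbs handed up by netback.

/*
 * Illustrative sketch (not part of this patch): approximate shape of the
 * precise pkt_len computation from commit 1def9238 (qdisc_pkt_len_init()
 * in net/core/dev.c).  For a GSO skb it adds the header bytes that are
 * replicated on every segment, which requires a valid transport header.
 */
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <net/tcp.h>
#include <net/sch_generic.h>

static void sketch_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	if (shinfo->gso_size) {
		/* mac header + network header, measured via the offsets
		 * that netback now sets up before netif_receive_skb()
		 */
		unsigned int hdr_len = skb_transport_header(skb) -
				       skb_mac_header(skb);

		/* plus the transport header itself */
		if (shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);

		/* account for the headers of the gso_segs - 1 extra segments */
		qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len;
	}
}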
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--	drivers/net/xen-netback/netback.c	12
1 file changed, 12 insertions, 0 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index aa28550fc9b6..fc8faa74b250 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -39,6 +39,7 @@
 #include <linux/udp.h>
 
 #include <net/tcp.h>
+#include <net/flow_keys.h>
 
 #include <xen/xen.h>
 #include <xen/events.h>
@@ -1184,6 +1185,7 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
 	if (th >= skb_tail_pointer(skb))
 		goto out;
 
+	skb_set_transport_header(skb, 4 * iph->ihl);
 	skb->csum_start = th - skb->head;
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
@@ -1495,6 +1497,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
 
 		skb->dev      = vif->dev;
 		skb->protocol = eth_type_trans(skb, skb->dev);
+		skb_reset_network_header(skb);
 
 		if (checksum_setup(vif, skb)) {
 			netdev_dbg(vif->dev,
@@ -1503,6 +1506,15 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
 			continue;
 		}
 
+		if (!skb_transport_header_was_set(skb)) {
+			struct flow_keys keys;
+
+			if (skb_flow_dissect(skb, &keys))
+				skb_set_transport_header(skb, keys.thoff);
+			else
+				skb_reset_transport_header(skb);
+		}
+
 		vif->dev->stats.rx_bytes += skb->len;
 		vif->dev->stats.rx_packets++;
 