path: root/drivers/net/xen-netfront.c
author    Olof Johansson <olof@lixom.net>  2013-08-11 18:33:54 -0400
committer Olof Johansson <olof@lixom.net>  2013-08-11 18:33:54 -0400
commit    4ddbed9618724d52a7a79c1e10ef5adb46fcccf7 (patch)
tree      ffe64efb333d7dcdb5b2cf43ffef97b25261702c /drivers/net/xen-netfront.c
parent    16649596d701c0f4f767bbcad7da4d6343ba8a9e (diff)
parent    fa8c5a811e0e7c3e1c49b2e58fcb4db549b5719a (diff)
Merge tag 'boards-3.12' of git://git.infradead.org/linux-mvebu into next/boards
From Jason Cooper: mvebu boards changes for v3.12
 - convert kirkwood, dove, orion5x to DT init of mv643xx_eth
 - _lots_ of board code removal :)
 - convert kirkwood, dove and orion5x to DT init of clocksource and irqchip

* tag 'boards-3.12' of git://git.infradead.org/linux-mvebu:
  ARM: plat-orion: add reg offset to DT irq driver stub
  ARM: kirkwood: remove obsolete SDIO clock gate workaround
  ARM: kirkwood: convert to DT irqchip and clocksource
  ARM: dove: convert to DT irqchip and clocksource
  ARM: orion5x: update intc device tree node to new reg layout
  ARM: kirkwood: move device tree nodes to DT irqchip and clocksource
  ARM: dove: move device tree nodes to DT irqchip and clocksource
  ARM: orion5x: remove legacy mv643xx_eth board setup
  ARM: kirkwood: remove legacy clk alias for mv643xx_eth
  ARM: kirkwood: remove redundant DT board files
  ARM: dove: remove legacy mv643xx_eth setup
  ARM: orion5x: add gigabit ethernet and mvmdio device tree nodes
  ARM: kirkwood: add gigabit ethernet and mvmdio device tree nodes
  ARM: dove: add gigabit ethernet and mvmdio device tree nodes
  + Linux 3.11-rc2

Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--   drivers/net/xen-netfront.c   31
1 file changed, 13 insertions, 18 deletions
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ff7f111fffee..36808bf25677 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -286,8 +286,7 @@ no_skb:
                         break;
                 }
 
-                __skb_fill_page_desc(skb, 0, page, 0, 0);
-                skb_shinfo(skb)->nr_frags = 1;
+                skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
                 __skb_queue_tail(&np->rx_batch, skb);
         }
 
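For context, skb_add_rx_frag() bundles the per-fragment bookkeeping that the removed lines did by hand, which is why the explicit nr_frags, data_len, len and truesize updates disappear throughout this diff. A rough paraphrase of the core helper (a sketch of its behaviour, not the exact net/core/skbuff.c source of this kernel) is:

#include <linux/skbuff.h>

/* Sketch: attach the page as fragment i and account for its bytes and
 * true allocation size in one place. */
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
                     int size, unsigned int truesize)
{
        skb_fill_page_desc(skb, i, page, off, size); /* sets frag and nr_frags */
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}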
@@ -831,7 +830,6 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
                                   struct sk_buff_head *list)
 {
         struct skb_shared_info *shinfo = skb_shinfo(skb);
-        int nr_frags = shinfo->nr_frags;
         RING_IDX cons = np->rx.rsp_cons;
         struct sk_buff *nskb;
 
@@ -840,19 +838,21 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
                         RING_GET_RESPONSE(&np->rx, ++cons);
                 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-                __skb_fill_page_desc(skb, nr_frags,
-                                     skb_frag_page(nfrag),
-                                     rx->offset, rx->status);
+                if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+                        unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-                skb->data_len += rx->status;
+                        BUG_ON(pull_to <= skb_headlen(skb));
+                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+                }
+                BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+
+                skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+                                rx->offset, rx->status, PAGE_SIZE);
 
                 skb_shinfo(nskb)->nr_frags = 0;
                 kfree_skb(nskb);
-
-                nr_frags++;
         }
 
-        shinfo->nr_frags = nr_frags;
         return cons;
 }
 
@@ -933,7 +933,8 @@ static int handle_incoming_queue(struct net_device *dev,
         while ((skb = __skb_dequeue(rxq)) != NULL) {
                 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-                __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+                if (pull_to > skb_headlen(skb))
+                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
                 /* Ethernet work: Delayed to here as it peeks the header. */
                 skb->protocol = eth_type_trans(skb, dev);
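The pull in handle_incoming_queue() becomes conditional because xennet_fill_frags() above may already have pulled the head when the fragment array filled up, so pull_to can be at or below skb_headlen(skb) by this point. A hypothetical helper (maybe_pull_to() is illustrative only, not part of the driver) capturing that guarded-pull pattern:

#include <linux/skbuff.h>

/* Illustrative only: pull frag data into the linear area just until the
 * target head length is reached; a no-op if an earlier pull already got there. */
static void maybe_pull_to(struct sk_buff *skb, unsigned int pull_to)
{
        if (pull_to > skb_headlen(skb))
                __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}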
@@ -1019,16 +1020,10 @@ err:
                 skb_shinfo(skb)->frags[0].page_offset = rx->offset;
                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
                 skb->data_len = rx->status;
+                skb->len += rx->status;
 
                 i = xennet_fill_frags(np, skb, &tmpq);
 
-                /*
-                 * Truesize is the actual allocation size, even if the
-                 * allocation is only partially used.
-                 */
-                skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
-                skb->len += skb->data_len;
-
                 if (rx->flags & XEN_NETRXF_csum_blank)
                         skb->ip_summed = CHECKSUM_PARTIAL;
                 else if (rx->flags & XEN_NETRXF_data_validated)