Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/xen-netfront.c  31
1 file changed, 13 insertions, 18 deletions
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ff7f111fffee..36808bf25677 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -286,8 +286,7 @@ no_skb:
 			break;
 		}
 
-		__skb_fill_page_desc(skb, 0, page, 0, 0);
-		skb_shinfo(skb)->nr_frags = 1;
+		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 		__skb_queue_tail(&np->rx_batch, skb);
 	}
 
@@ -831,7 +830,6 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 				  struct sk_buff_head *list)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
-	int nr_frags = shinfo->nr_frags;
 	RING_IDX cons = np->rx.rsp_cons;
 	struct sk_buff *nskb;
 
@@ -840,19 +838,21 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 			RING_GET_RESPONSE(&np->rx, ++cons);
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-		__skb_fill_page_desc(skb, nr_frags,
-				     skb_frag_page(nfrag),
-				     rx->offset, rx->status);
+		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-		skb->data_len += rx->status;
+			BUG_ON(pull_to <= skb_headlen(skb));
+			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+		}
+		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+
+		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+				rx->offset, rx->status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
 		kfree_skb(nskb);
-
-		nr_frags++;
 	}
 
-	shinfo->nr_frags = nr_frags;
 	return cons;
 }
 
@@ -933,7 +933,8 @@ static int handle_incoming_queue(struct net_device *dev,
 	while ((skb = __skb_dequeue(rxq)) != NULL) {
 		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-		__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+		if (pull_to > skb_headlen(skb))
+			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);
@@ -1019,16 +1020,10 @@ err:
 		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
 		skb->data_len = rx->status;
+		skb->len += rx->status;
 
 		i = xennet_fill_frags(np, skb, &tmpq);
 
-		/*
-		 * Truesize is the actual allocation size, even if the
-		 * allocation is only partially used.
-		 */
-		skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
-		skb->len += skb->data_len;
-
 		if (rx->flags & XEN_NETRXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
 		else if (rx->flags & XEN_NETRXF_data_validated)
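
A note on the accounting: the deleted lines that bumped skb->data_len, skb->len
and skb->truesize by hand are not lost, because skb_add_rx_frag() does that
bookkeeping itself. Calling it with PAGE_SIZE as the truesize argument makes the
old "skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags" and
"skb->len += skb->data_len" fix-ups after xennet_fill_frags() redundant. As a
rough sketch, from memory of net/core/skbuff.c around this kernel version (not
copied from this tree), the helper behaves like:

	#include <linux/skbuff.h>

	/* Sketch only -- see net/core/skbuff.c for the authoritative version. */
	void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			     int off, int size, unsigned int truesize)
	{
		skb_fill_page_desc(skb, i, page, off, size);	/* install frag i */
		skb->len += size;		/* total packet length */
		skb->data_len += size;		/* bytes held in frags */
		skb->truesize += truesize;	/* memory charged to the socket */
	}

In xennet_fill_frags(), the new MAX_SKB_FRAGS branch calls __pskb_pull_tail() to
move data into the linear area once the frag array is full, which drops consumed
frag slots before the next skb_add_rx_frag(); the BUG_ON() that follows asserts a
slot really is free. Because that pull may already have happened by the time the
skb reaches handle_incoming_queue(), the pull there is now done only when pull_to
still exceeds skb_headlen(skb).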