author     Jan Beulich <JBeulich@suse.com>        2013-07-17 03:09:37 -0400
committer  David S. Miller <davem@davemloft.net>  2013-07-17 15:51:19 -0400
commit     093b9c71b6e450e375f4646ba86faed0195ec7df (patch)
tree       1b86271f7c593a49e8dbe96da3f310d2aec9661e /drivers/net
parent     fe5c3561e6f0ac7c9546209f01351113c1b77ec8 (diff)
xen-netfront: pull on receive skb may need to happen earlier
Due to commit 3683243b ("xen-netfront: use __pskb_pull_tail to ensure
linear area is big enough on RX") xennet_fill_frags() may end up
filling MAX_SKB_FRAGS + 1 fragments in a receive skb, and only reduce
the fragment count subsequently via __pskb_pull_tail(). That's a
result of xennet_get_responses() allowing a maximum of one more slot
to be consumed (and intermediately transformed into a fragment) if the
head slot has a size less than or equal to RX_COPY_THRESHOLD.

Hence we need to adjust xennet_fill_frags() to pull earlier if we
reached the maximum fragment count - due to the described behavior of
xennet_get_responses() this guarantees that at least the first
fragment will get completely consumed, and hence the fragment count
reduced.

In order to not needlessly call __pskb_pull_tail() twice, make the
original call conditional upon the pull target not having been reached
yet, and defer the newly added one as much as possible (an alternative
would have been to always call the function right before the call to
xennet_fill_frags(), but that would imply more frequent cases of
needing to call it twice).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: stable@vger.kernel.org (3.6 onwards)
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
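The ordering guarantee the patch relies on can be modeled outside the
kernel: when the fragment array is already full, pulling into the linear
head first frees at least one slot before the next fragment is added, so
the count never exceeds the limit. Below is a minimal standalone sketch
of that flow; toy_skb, toy_pull_tail(), toy_add_frag(), and
MAX_TOY_FRAGS are illustrative stand-ins (not the kernel's struct
sk_buff, __pskb_pull_tail(), or MAX_SKB_FRAGS), and the pull is
simplified to consume the whole first fragment, which is exactly the
property the commit message derives from xennet_get_responses().

/* Minimal sketch, not kernel code: models the pull-before-fill ordering. */
#include <assert.h>
#include <stdio.h>

#define MAX_TOY_FRAGS 4			/* stand-in for MAX_SKB_FRAGS */

struct toy_skb {
	int headlen;			/* bytes in the linear area */
	int pull_to;			/* pull target, as in NETFRONT_SKB_CB(skb)->pull_to */
	int nr_frags;
	int frag_len[MAX_TOY_FRAGS];
};

/* Simplified pull: consume the entire first fragment into the head.
 * (The real __pskb_pull_tail() copies a byte count; the commit argues
 * that count is large enough to swallow fragment 0 completely.) */
static void toy_pull_tail(struct toy_skb *skb)
{
	assert(skb->nr_frags > 0);
	skb->headlen += skb->frag_len[0];
	for (int i = 1; i < skb->nr_frags; i++)
		skb->frag_len[i - 1] = skb->frag_len[i];
	skb->nr_frags--;
}

/* Append one RX fragment, pulling first if the array is already full -
 * the same ordering the patch introduces in xennet_fill_frags(). */
static void toy_add_frag(struct toy_skb *skb, int len)
{
	if (skb->nr_frags == MAX_TOY_FRAGS) {
		assert(skb->pull_to > skb->headlen);	/* mirrors the BUG_ON() */
		toy_pull_tail(skb);
	}
	assert(skb->nr_frags < MAX_TOY_FRAGS);
	skb->frag_len[skb->nr_frags++] = len;
}

int main(void)
{
	struct toy_skb skb = { .headlen = 64, .pull_to = 256 };

	/* MAX_TOY_FRAGS + 1 responses: the overflow case the patch handles. */
	for (int i = 0; i < MAX_TOY_FRAGS + 1; i++)
		toy_add_frag(&skb, 128);

	printf("headlen=%d nr_frags=%d\n", skb.headlen, skb.nr_frags);
	return 0;
}

With MAX_TOY_FRAGS + 1 fragments queued, the sketch ends with
nr_frags == MAX_TOY_FRAGS and a larger linear area, mirroring the
intended post-patch state of the receive skb.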
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/xen-netfront.c  |  31
1 file changed, 13 insertions(+), 18 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ff7f111fffee..36808bf25677 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -286,8 +286,7 @@ no_skb:
 			break;
 		}
 
-		__skb_fill_page_desc(skb, 0, page, 0, 0);
-		skb_shinfo(skb)->nr_frags = 1;
+		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 		__skb_queue_tail(&np->rx_batch, skb);
 	}
 
@@ -831,7 +830,6 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 				  struct sk_buff_head *list)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
-	int nr_frags = shinfo->nr_frags;
 	RING_IDX cons = np->rx.rsp_cons;
 	struct sk_buff *nskb;
 
@@ -840,19 +838,21 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 			RING_GET_RESPONSE(&np->rx, ++cons);
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-		__skb_fill_page_desc(skb, nr_frags,
-				     skb_frag_page(nfrag),
-				     rx->offset, rx->status);
+		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-		skb->data_len += rx->status;
+			BUG_ON(pull_to <= skb_headlen(skb));
+			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+		}
+		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+
+		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+				rx->offset, rx->status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
 		kfree_skb(nskb);
-
-		nr_frags++;
 	}
 
-	shinfo->nr_frags = nr_frags;
 	return cons;
 }
 
@@ -933,7 +933,8 @@ static int handle_incoming_queue(struct net_device *dev,
 	while ((skb = __skb_dequeue(rxq)) != NULL) {
 		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-		__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+		if (pull_to > skb_headlen(skb))
+			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);
@@ -1019,16 +1020,10 @@ err:
 		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
 		skb->data_len = rx->status;
+		skb->len += rx->status;
 
 		i = xennet_fill_frags(np, skb, &tmpq);
 
-		/*
-		 * Truesize is the actual allocation size, even if the
-		 * allocation is only partially used.
-		 */
-		skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
-		skb->len += skb->data_len;
-
 		if (rx->flags & XEN_NETRXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
 		else if (rx->flags & XEN_NETRXF_data_validated)