about summary refs log tree commit diff stats
path: root/drivers/net/xen-netfront.c
diff options
context:
space:
mode:
authorJan Beulich <JBeulich@suse.com>2013-07-17 03:09:37 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2013-08-04 04:50:53 -0400
commitf4b5b99f18cd5984120ab2334aeadc5384f260cf (patch)
tree78f4a06114de78029d6b040a90871df430eeb2f4 /drivers/net/xen-netfront.c
parent0183a8ee8c5621a84b0e4ba0b627af73f563e37c (diff)
xen-netfront: pull on receive skb may need to happen earlier
commit 093b9c71b6e450e375f4646ba86faed0195ec7df upstream. Due to commit 3683243b ("xen-netfront: use __pskb_pull_tail to ensure linear area is big enough on RX") xennet_fill_frags() may end up filling MAX_SKB_FRAGS + 1 fragments in a receive skb, and only reduce the fragment count subsequently via __pskb_pull_tail(). That's a result of xennet_get_responses() allowing a maximum of one more slot to be consumed (and intermediately transformed into a fragment) if the head slot has a size less than or equal to RX_COPY_THRESHOLD. Hence we need to adjust xennet_fill_frags() to pull earlier if we reached the maximum fragment count - due to the described behavior of xennet_get_responses() this guarantees that at least the first fragment will get completely consumed, and hence the fragment count reduced. In order to not needlessly call __pskb_pull_tail() twice, make the original call conditional upon the pull target not having been reached yet, and defer the newly added one as much as possible (an alternative would have been to always call the function right before the call to xennet_fill_frags(), but that would imply more frequent cases of needing to call it twice). Signed-off-by: Jan Beulich <jbeulich@suse.com> Acked-by: Wei Liu <wei.liu2@citrix.com> Cc: Ian Campbell <ian.campbell@citrix.com> Acked-by: Ian Campbell <ian.campbell@citrix.com> Signed-off-by: David S. Miller <davem@davemloft.net> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--drivers/net/xen-netfront.c31
1 file changed, 13 insertions, 18 deletions
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1db101415069..0c01b8e33fe1 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -276,8 +276,7 @@ no_skb:
276 break; 276 break;
277 } 277 }
278 278
279 __skb_fill_page_desc(skb, 0, page, 0, 0); 279 skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
280 skb_shinfo(skb)->nr_frags = 1;
281 __skb_queue_tail(&np->rx_batch, skb); 280 __skb_queue_tail(&np->rx_batch, skb);
282 } 281 }
283 282
@@ -822,7 +821,6 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
822 struct sk_buff_head *list) 821 struct sk_buff_head *list)
823{ 822{
824 struct skb_shared_info *shinfo = skb_shinfo(skb); 823 struct skb_shared_info *shinfo = skb_shinfo(skb);
825 int nr_frags = shinfo->nr_frags;
826 RING_IDX cons = np->rx.rsp_cons; 824 RING_IDX cons = np->rx.rsp_cons;
827 struct sk_buff *nskb; 825 struct sk_buff *nskb;
828 826
@@ -831,19 +829,21 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
831 RING_GET_RESPONSE(&np->rx, ++cons); 829 RING_GET_RESPONSE(&np->rx, ++cons);
832 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; 830 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
833 831
834 __skb_fill_page_desc(skb, nr_frags, 832 if (shinfo->nr_frags == MAX_SKB_FRAGS) {
835 skb_frag_page(nfrag), 833 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
836 rx->offset, rx->status);
837 834
838 skb->data_len += rx->status; 835 BUG_ON(pull_to <= skb_headlen(skb));
836 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
837 }
838 BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
839
840 skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
841 rx->offset, rx->status, PAGE_SIZE);
839 842
840 skb_shinfo(nskb)->nr_frags = 0; 843 skb_shinfo(nskb)->nr_frags = 0;
841 kfree_skb(nskb); 844 kfree_skb(nskb);
842
843 nr_frags++;
844 } 845 }
845 846
846 shinfo->nr_frags = nr_frags;
847 return cons; 847 return cons;
848} 848}
849 849
@@ -929,7 +929,8 @@ static int handle_incoming_queue(struct net_device *dev,
929 while ((skb = __skb_dequeue(rxq)) != NULL) { 929 while ((skb = __skb_dequeue(rxq)) != NULL) {
930 int pull_to = NETFRONT_SKB_CB(skb)->pull_to; 930 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
931 931
932 __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 932 if (pull_to > skb_headlen(skb))
933 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
933 934
934 /* Ethernet work: Delayed to here as it peeks the header. */ 935 /* Ethernet work: Delayed to here as it peeks the header. */
935 skb->protocol = eth_type_trans(skb, dev); 936 skb->protocol = eth_type_trans(skb, dev);
@@ -1015,16 +1016,10 @@ err:
1015 skb_shinfo(skb)->frags[0].page_offset = rx->offset; 1016 skb_shinfo(skb)->frags[0].page_offset = rx->offset;
1016 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); 1017 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1017 skb->data_len = rx->status; 1018 skb->data_len = rx->status;
1019 skb->len += rx->status;
1018 1020
1019 i = xennet_fill_frags(np, skb, &tmpq); 1021 i = xennet_fill_frags(np, skb, &tmpq);
1020 1022
1021 /*
1022 * Truesize is the actual allocation size, even if the
1023 * allocation is only partially used.
1024 */
1025 skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
1026 skb->len += skb->data_len;
1027
1028 if (rx->flags & XEN_NETRXF_csum_blank) 1023 if (rx->flags & XEN_NETRXF_csum_blank)
1029 skb->ip_summed = CHECKSUM_PARTIAL; 1024 skb->ip_summed = CHECKSUM_PARTIAL;
1030 else if (rx->flags & XEN_NETRXF_data_validated) 1025 else if (rx->flags & XEN_NETRXF_data_validated)