author		Paul Durrant <Paul.Durrant@citrix.com>	2014-03-28 07:39:06 -0400
committer	David S. Miller <davem@davemloft.net>	2014-03-29 18:50:34 -0400
commit		a02eb4732cf975d7fc71b6d1a71c058c9988b949 (patch)
tree		5a8d6dca5909de547b7a4d10c65329752d15d7ac /drivers/net/xen-netback/netback.c
parent		0576eddf24df716d8570ef8ca11452a9f98eaab2 (diff)
xen-netback: worst-case estimate in xenvif_rx_action is underestimating

The worst-case estimate for skb ring slot usage in xenvif_rx_action() fails to take the fragment page_offset into account. The page_offset does, however, affect the number of times the fragmentation code calls start_new_rx_buffer() (i.e. consumes another slot), and the worst-case estimate should assume that this will always return true. This patch adds the page_offset into the DIV_ROUND_UP for each frag.

Unfortunately some frontends aggressively limit the number of requests they post into the shared ring, so to avoid an estimate that is 'too' pessimal it is capped at MAX_SKB_FRAGS.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Sander Eikelenboom <linux@eikelenboom.it>
Signed-off-by: David S. Miller <davem@davemloft.net>
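[Editorial illustration, not part of the patch] As a worked example of the arithmetic: a one-page fragment that starts partway into its page straddles two pages, so the old DIV_ROUND_UP(size, PAGE_SIZE) undercounts by one slot. The standalone C sketch below demonstrates this; the PAGE_SIZE and MAX_SKB_FRAGS values are assumptions (4096 and 17 are typical for x86 kernels of this era, but both are configuration-dependent).

	#include <stdio.h>

	#define PAGE_SIZE      4096u /* assumed 4K pages */
	#define MAX_SKB_FRAGS  17u   /* assumed; config-dependent in real kernels */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* A one-page fragment starting 100 bytes into its page
		 * spans two pages, so copying it can consume two slots.
		 */
		unsigned int size = PAGE_SIZE;
		unsigned int offset = 100;

		printf("old estimate: %u slot(s)\n",
		       DIV_ROUND_UP(size, PAGE_SIZE));          /* 1: too low */
		printf("new estimate: %u slot(s)\n",
		       DIV_ROUND_UP(offset + size, PAGE_SIZE)); /* 2: safe    */

		/* The summed estimate is then capped so frontends that
		 * post few rx requests are not starved by over-pessimism.
		 */
		unsigned int max_slots_needed = 40; /* hypothetical sum */
		if (max_slots_needed > MAX_SKB_FRAGS)
			max_slots_needed = MAX_SKB_FRAGS;
		printf("capped estimate: %u\n", max_slots_needed); /* 17 */

		return 0;
	}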
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--  drivers/net/xen-netback/netback.c  21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 72314c7998fc..573f3e81e5d2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -493,9 +493,28 @@ static void xenvif_rx_action(struct xenvif *vif)
 					PAGE_SIZE);
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		unsigned int size;
+		unsigned int offset;
+
 		size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-		max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+		offset = skb_shinfo(skb)->frags[i].page_offset;
+
+		/* For a worse-case estimate we need to factor in
+		 * the fragment page offset as this will affect the
+		 * number of times xenvif_gop_frag_copy() will
+		 * call start_new_rx_buffer().
+		 */
+		max_slots_needed += DIV_ROUND_UP(offset + size,
+						 PAGE_SIZE);
 	}
+
+	/* To avoid the estimate becoming too pessimal for some
+	 * frontends that limit posted rx requests, cap the estimate
+	 * at MAX_SKB_FRAGS.
+	 */
+	if (max_slots_needed > MAX_SKB_FRAGS)
+		max_slots_needed = MAX_SKB_FRAGS;
+
+	/* We may need one more slot for GSO metadata */
 	if (skb_is_gso(skb) &&
 	    (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
 	     skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
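[Editorial illustration, not part of the patch] To put the hunk in context, here is a hedged sketch of the complete post-patch estimate as a standalone function. estimate_rx_slots() is a hypothetical name and the skb_headlen() term for the linear area is an assumption (the real calculation sits just above the hunk); the frag loop, the MAX_SKB_FRAGS cap and the extra GSO slot follow the diff above.

	/* Hypothetical helper mirroring the post-patch logic; the
	 * linear-area term is assumed, the rest follows the hunk.
	 */
	static unsigned int estimate_rx_slots(struct sk_buff *skb)
	{
		unsigned int max_slots_needed;
		int i;

		/* Linear data (assumption: the real code computes this
		 * just above the hunk shown).
		 */
		max_slots_needed = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			unsigned int size =
				skb_frag_size(&skb_shinfo(skb)->frags[i]);
			unsigned int offset =
				skb_shinfo(skb)->frags[i].page_offset;

			/* offset + size covers the worst case where the
			 * frag starts mid-page and spills into the next.
			 */
			max_slots_needed += DIV_ROUND_UP(offset + size,
							 PAGE_SIZE);
		}

		/* Cap so frontends that post few rx requests still fit */
		if (max_slots_needed > MAX_SKB_FRAGS)
			max_slots_needed = MAX_SKB_FRAGS;

		/* One extra slot for GSO metadata, as in the hunk */
		if (skb_is_gso(skb) &&
		    (skb_shinfo(skb)->gso_type &
		     (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			max_slots_needed++;

		return max_slots_needed;
	}

The cap is a deliberate trade-off: it accepts a possibly optimistic estimate for heavily fragmented skbs in exchange for not stalling on frontends that keep few requests posted in the shared ring.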