author	David S. Miller <davem@davemloft.net>	2014-03-29 18:52:04 -0400
committer	David S. Miller <davem@davemloft.net>	2014-03-29 18:52:04 -0400
commit	df69491b7d1550137507a7eb5f2fc5dce0c1e534 (patch)
tree	47e3b3c92652e36d5b98db6c15f5df4a303a9863
parent	49d8137a4039c63c834827f4bfe875e27bb9c521 (diff)
parent	1425c7a4e8d3d2eebf308bcbdc3fa3c1247686b4 (diff)
Merge branch 'xen-netback'
Paul Durrant says:

====================
xen-netback: fix rx slot estimation

Sander Eikelenboom reported an issue with ring overflow in netback in
3.14-rc3. This turns out to be because of a bug in the ring slot
estimation code. This patch series fixes the slot estimation, fixes the
BUG_ON() that was supposed to catch the issue that Sander ran into, and
also makes a small fix to start_new_rx_buffer().

v3:
 - Added a cap of MAX_SKB_FRAGS to estimate in patch #2

v2:
 - Added BUG_ON() to patch #1
 - Added more explanation to patch #3
====================

Reported-By: Sander Eikelenboom <linux@eikelenboom.it>
Tested-By: Sander Eikelenboom <linux@eikelenboom.it>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
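To see why the fragment page offset matters for the slot estimate, consider data that starts near the end of its page. The following standalone sketch is illustrative only, not part of the patch; the 4 KiB PAGE_SIZE and the sample size/offset values are assumptions. A 100-byte fragment at page offset 4000 straddles a page boundary, so an estimate based on size alone yields one slot while the copy path can start two rx buffers:

#include <assert.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical fragment: 100 bytes starting 4000 bytes into its page. */
	unsigned long size = 100, offset = 4000;

	/* The old per-fragment estimate ignores the offset and sees one page... */
	assert(DIV_ROUND_UP(size, PAGE_SIZE) == 1);

	/* ...but the data crosses a page boundary, so the copy loop can start
	 * two rx buffers. Including the offset captures that worst case.
	 */
	assert(DIV_ROUND_UP(offset + size, PAGE_SIZE) == 2);

	return 0;
}

This is exactly the gap the series closes: the old estimate could undercount slots and let the ring overflow.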
-rw-r--r--	drivers/net/xen-netback/netback.c	33

1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 438d0c09b7e6..cd0bd95ccc14 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -192,8 +192,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 * into multiple copies tend to give large frags their
 	 * own buffers as before.
 	 */
-	if ((offset + size > MAX_BUFFER_OFFSET) &&
-	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
+	BUG_ON(size > MAX_BUFFER_OFFSET);
+	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
 		return true;
 
 	return false;
@@ -482,6 +482,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 
 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
 		RING_IDX max_slots_needed;
+		RING_IDX old_req_cons;
+		RING_IDX ring_slots_used;
 		int i;
 
 		/* We need a cheap worse case estimate for the number of
@@ -493,9 +495,28 @@ static void xenvif_rx_action(struct xenvif *vif)
 				       PAGE_SIZE);
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			unsigned int size;
+			unsigned int offset;
+
 			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+			offset = skb_shinfo(skb)->frags[i].page_offset;
+
+			/* For a worse-case estimate we need to factor in
+			 * the fragment page offset as this will affect the
+			 * number of times xenvif_gop_frag_copy() will
+			 * call start_new_rx_buffer().
+			 */
+			max_slots_needed += DIV_ROUND_UP(offset + size,
+							 PAGE_SIZE);
 		}
+
+		/* To avoid the estimate becoming too pessimal for some
+		 * frontends that limit posted rx requests, cap the estimate
+		 * at MAX_SKB_FRAGS.
+		 */
+		if (max_slots_needed > MAX_SKB_FRAGS)
+			max_slots_needed = MAX_SKB_FRAGS;
+
+		/* We may need one more slot for GSO metadata */
 		if (skb_is_gso(skb) &&
 		    (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
 		     skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
@@ -511,8 +532,12 @@ static void xenvif_rx_action(struct xenvif *vif)
 			vif->rx_last_skb_slots = 0;
 
 		sco = (struct skb_cb_overlay *)skb->cb;
+
+		old_req_cons = vif->rx.req_cons;
 		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
-		BUG_ON(sco->meta_slots_used > max_slots_needed);
+		ring_slots_used = vif->rx.req_cons - old_req_cons;
+
+		BUG_ON(ring_slots_used > max_slots_needed);
 
 		__skb_queue_tail(&rxq, skb);
 	}
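The last hunk moves the overflow check from the counted meta slots to the ring slots actually consumed, measured as the change in vif->rx.req_cons across the xenvif_gop_skb() call, presumably because meta slots and ring slots need not correspond one-to-one. The snapshot-and-delta pattern it uses looks like this minimal sketch (the struct and function names here are hypothetical stand-ins, not the driver's API):

#include <assert.h>

typedef unsigned int RING_IDX;

struct rx_ring {
	RING_IDX req_cons;	/* requests consumed so far */
};

/* Hypothetical stand-in for xenvif_gop_skb(): consumes some slots. */
static void consume_slots(struct rx_ring *ring, RING_IDX n)
{
	ring->req_cons += n;
}

int main(void)
{
	struct rx_ring rx = { .req_cons = 7 };
	RING_IDX max_slots_needed = 4;

	RING_IDX old_req_cons = rx.req_cons;	/* snapshot before the call */
	consume_slots(&rx, 3);			/* the work being measured */
	RING_IDX ring_slots_used = rx.req_cons - old_req_cons;

	/* The check now bounds what the ring actually consumed. */
	assert(ring_slots_used <= max_slots_needed);
	return 0;
}

Because RING_IDX is unsigned, the subtraction still yields the correct count even if the consumer index wraps around between the snapshot and the check.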