author    David S. Miller <davem@davemloft.net>  2014-03-31 16:56:43 -0400
committer David S. Miller <davem@davemloft.net>  2014-03-31 16:56:43 -0400
commit    0b70195e0c3206103be991e196c26fcf168d0334 (patch)
tree      61902c09bfa14034a82c151f46a4616a2f013653 /drivers/net/xen-netback/netback.c
parent    ce22bb6122e849e0d3a6857d03eb69a25bfdc6e9 (diff)
parent    17e84a9253467552fb06f99c009bb0bc1d7bfd39 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/xen-netback/netback.c

A bug fix overlapped with changing how the netback SKB control block
is implemented.

Signed-off-by: David S. Miller <davem@davemloft.net>
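For reference, the "SKB control block" mentioned above is the per-skb scratch state netback keeps in skb->cb on the rx path. A minimal sketch of that pattern, reconstructed from the accessor used in the hunks below (not the exact contents of the driver headers):

struct xenvif_rx_cb {
	int meta_slots_used;	/* meta slots produced for this skb */
};

/* skb->cb is a small scratch area private to the current layer;
 * netback overlays its rx bookkeeping on it via this accessor.
 */
#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)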
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--  drivers/net/xen-netback/netback.c  |  32
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cb784fe5220c..ae34f5fc7fbc 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -191,8 +191,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 * into multiple copies tend to give large frags their
 	 * own buffers as before.
 	 */
-	if ((offset + size > MAX_BUFFER_OFFSET) &&
-	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
+	BUG_ON(size > MAX_BUFFER_OFFSET);
+	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
 		return true;
 
 	return false;
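The hunk above tightens start_new_rx_buffer(): chunks larger than MAX_BUFFER_OFFSET are ruled out up front by the BUG_ON, and any non-head chunk that would spill past the end of the current buffer gets a buffer of its own. A standalone sketch of just that predicate, assuming MAX_BUFFER_OFFSET is one 4096-byte page (the helper only mirrors the condition; it is not the driver function):

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_OFFSET 4096UL	/* assumed: one page */

/* Mirror of the merged predicate: a non-head chunk that would run past
 * the end of the current rx buffer is given a fresh buffer of its own.
 */
static bool would_start_new_rx_buffer(unsigned long offset,
				      unsigned long size, bool head)
{
	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
		return true;
	return false;
}

int main(void)
{
	/* 2048 bytes landing at offset 3072 of a non-head buffer would
	 * straddle the 4096-byte boundary, so a new buffer (and rx slot)
	 * is needed.
	 */
	printf("%d\n", would_start_new_rx_buffer(3072, 2048, false)); /* 1 */

	/* The same chunk at the start of a buffer fits and needs nothing new. */
	printf("%d\n", would_start_new_rx_buffer(0, 2048, false));    /* 0 */
	return 0;
}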
@@ -511,6 +511,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 
 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
 		RING_IDX max_slots_needed;
+		RING_IDX old_req_cons;
+		RING_IDX ring_slots_used;
 		int i;
 
 		/* We need a cheap worse case estimate for the number of
@@ -522,9 +524,28 @@ static void xenvif_rx_action(struct xenvif *vif)
 						PAGE_SIZE);
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			unsigned int size;
+			unsigned int offset;
+
 			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+			offset = skb_shinfo(skb)->frags[i].page_offset;
+
+			/* For a worse-case estimate we need to factor in
+			 * the fragment page offset as this will affect the
+			 * number of times xenvif_gop_frag_copy() will
+			 * call start_new_rx_buffer().
+			 */
+			max_slots_needed += DIV_ROUND_UP(offset + size,
+							 PAGE_SIZE);
 		}
+
+		/* To avoid the estimate becoming too pessimal for some
+		 * frontends that limit posted rx requests, cap the estimate
+		 * at MAX_SKB_FRAGS.
+		 */
+		if (max_slots_needed > MAX_SKB_FRAGS)
+			max_slots_needed = MAX_SKB_FRAGS;
+
+		/* We may need one more slot for GSO metadata */
 		if (skb_is_gso(skb) &&
 		    (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
 		     skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
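The effect of folding the page offset into the estimate is easiest to see with numbers. A standalone sketch, assuming a 4096-byte PAGE_SIZE and a local DIV_ROUND_UP (the values are illustrative only):

#include <stdio.h>

#define PAGE_SIZE 4096UL			/* assumed page size */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long size = 2000, offset = 3000;

	/* Old estimate ignored where the frag starts in its page: one slot. */
	printf("old: %lu\n", DIV_ROUND_UP(size, PAGE_SIZE));		/* 1 */

	/* The frag actually begins 3000 bytes into its page, so the copy
	 * crosses a page boundary and may consume two slots.
	 */
	printf("new: %lu\n", DIV_ROUND_UP(offset + size, PAGE_SIZE));	/* 2 */
	return 0;
}

With the old formula this frag was budgeted one slot even though xenvif_gop_frag_copy() can start a second buffer when the copy crosses the page boundary, which is exactly the kind of underestimate the stricter accounting in the final hunk guards against.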
@@ -539,8 +560,11 @@ static void xenvif_rx_action(struct xenvif *vif)
 		} else
 			vif->rx_last_skb_slots = 0;
 
+		old_req_cons = vif->rx.req_cons;
 		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
-		BUG_ON(XENVIF_RX_CB(skb)->meta_slots_used > max_slots_needed);
+		ring_slots_used = vif->rx.req_cons - old_req_cons;
+
+		BUG_ON(ring_slots_used > max_slots_needed);
 
 		__skb_queue_tail(&rxq, skb);
 	}