author     Paul Durrant <Paul.Durrant@citrix.com>    2013-12-11 05:57:15 -0500
committer  David S. Miller <davem@davemloft.net>     2013-12-12 13:35:38 -0500
commit     10574059ce0451c6572c85329c772aa15085f8eb (patch)
tree       292a2a641e2d6d3ec960c72ef28310c60549d987
parent     d55d282e6af88120ad90e93a88f70e3116dc0e3d (diff)
xen-netback: napi: fix abuse of budget
netback seems to be somewhat confused about the napi budget parameter. The
parameter is supposed to limit the number of skbs processed in each poll, but
netback has this confused with grant operations.

This patch fixes that, properly limiting the work done in each poll. Note that
this limit makes sure we do not process any more data from the shared ring
than we intend to pass back from the poll. This is important to prevent
tx_queue potentially growing without bound.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/xen-netback/netback.c  14
1 file changed, 7 insertions(+), 7 deletions(-)
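For context, the budget in question is the one handed to every NAPI poll callback: the driver may complete at most that many skbs per poll and should only call napi_complete() once it returns less than the budget. The sketch below illustrates the caller side under the new split, where xenvif_tx_build_gops() stops queuing skbs once the budget is reached and xenvif_tx_submit() simply drains whatever was queued. It is a simplified illustration, not the driver's exact poll routine, and it assumes struct xenvif embeds its NAPI context as ->napi.

/* Simplified sketch of the NAPI poll side that supplies the budget.
 * Assumption: struct xenvif embeds its NAPI context as ->napi; the real
 * driver also re-checks the shared ring for unconsumed requests before
 * completing NAPI.
 */
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif *vif = container_of(napi, struct xenvif, napi);
	int work_done;

	/* With this patch, xenvif_tx_build_gops() queues at most 'budget'
	 * skbs, so xenvif_tx_submit() can drain tx_queue completely and
	 * work_done can never exceed budget.
	 */
	work_done = xenvif_tx_action(vif, budget);

	/* Returning fewer skbs than the budget means there is no more
	 * work; tell NAPI we are done so interrupts can be re-enabled.
	 */
	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}

Bounding the loop in xenvif_tx_build_gops() by skb_queue_len(&vif->tx_queue) rather than by grant operations is what keeps tx_queue from outgrowing the budget: everything queued in one poll is also submitted in that same poll.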
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d158fc40cff2..db79e29b3d09 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1445,14 +1445,15 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 	return false;
 }
 
-static unsigned xenvif_tx_build_gops(struct xenvif *vif)
+static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 {
 	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
 	struct sk_buff *skb;
 	int ret;
 
 	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-		< MAX_PENDING_REQS)) {
+		< MAX_PENDING_REQS) &&
+	       (skb_queue_len(&vif->tx_queue) < budget)) {
 		struct xen_netif_tx_request txreq;
 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 		struct page *page;
@@ -1614,14 +1615,13 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
 }
 
 
-static int xenvif_tx_submit(struct xenvif *vif, int budget)
+static int xenvif_tx_submit(struct xenvif *vif)
 {
 	struct gnttab_copy *gop = vif->tx_copy_ops;
 	struct sk_buff *skb;
 	int work_done = 0;
 
-	while (work_done < budget &&
-	       (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
+	while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
 		struct xen_netif_tx_request *txp;
 		u16 pending_idx;
 		unsigned data_len;
@@ -1696,14 +1696,14 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
 	if (unlikely(!tx_work_todo(vif)))
 		return 0;
 
-	nr_gops = xenvif_tx_build_gops(vif);
+	nr_gops = xenvif_tx_build_gops(vif, budget);
 
 	if (nr_gops == 0)
 		return 0;
 
 	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
 
-	work_done = xenvif_tx_submit(vif, nr_gops);
+	work_done = xenvif_tx_submit(vif);
 
 	return work_done;
 }