aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/xen-netback/netback.c
diff options
context:
space:
mode:
authorZoltan Kiss <zoltan.kiss@citrix.com>2014-03-24 19:59:49 -0400
committerDavid S. Miller <davem@davemloft.net>2014-03-26 16:33:42 -0400
commit869b9b19b3affd81cee853d33c0b124797f3c387 (patch)
tree04bdb4dff188ae19ca61f02692d9a6b3c6b03274 /drivers/net/xen-netback/netback.c
parentb8b6529f0536cf0d9e35e0833732199b66bf7cbc (diff)
xen-netback: Stop using xenvif_tx_pending_slots_available
Since the early days TX stops if there aren't enough free pending slots to consume a maximum sized (slot-wise) packet. Probably the reason for that is to avoid the case when we don't have enough free pending slots in the ring to finish the packet. But if we make sure that the pending ring has the same size as the shared ring, that shouldn't really happen. The frontend can only post packets which fit into the free space of the shared ring. If it doesn't, the frontend has to stop, as it can only increase req_prod when the whole packet fits onto the ring. This patch removes that check, makes sure the two rings have the same size, and removes a check from the callback. As now we don't stop the NAPI instance on this condition, we don't have to wake it up if we free pending slots up. Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--drivers/net/xen-netback/netback.c13
1 file changed, 2 insertions, 11 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1e4628724778..5d2dd1d54173 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1167,8 +1167,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
1167 struct sk_buff *skb; 1167 struct sk_buff *skb;
1168 int ret; 1168 int ret;
1169 1169
1170 while (xenvif_tx_pending_slots_available(vif) && 1170 while (skb_queue_len(&vif->tx_queue) < budget) {
1171 (skb_queue_len(&vif->tx_queue) < budget)) {
1172 struct xen_netif_tx_request txreq; 1171 struct xen_netif_tx_request txreq;
1173 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; 1172 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1174 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; 1173 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
@@ -1508,13 +1507,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1508 wake_up(&vif->dealloc_wq); 1507 wake_up(&vif->dealloc_wq);
1509 spin_unlock_irqrestore(&vif->callback_lock, flags); 1508 spin_unlock_irqrestore(&vif->callback_lock, flags);
1510 1509
1511 if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx) &&
1512 xenvif_tx_pending_slots_available(vif)) {
1513 local_bh_disable();
1514 napi_schedule(&vif->napi);
1515 local_bh_enable();
1516 }
1517
1518 if (likely(zerocopy_success)) 1510 if (likely(zerocopy_success))
1519 vif->tx_zerocopy_success++; 1511 vif->tx_zerocopy_success++;
1520 else 1512 else
@@ -1706,8 +1698,7 @@ static inline int rx_work_todo(struct xenvif *vif)
1706static inline int tx_work_todo(struct xenvif *vif) 1698static inline int tx_work_todo(struct xenvif *vif)
1707{ 1699{
1708 1700
1709 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) && 1701 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
1710 xenvif_tx_pending_slots_available(vif))
1711 return 1; 1702 return 1;
1712 1703
1713 return 0; 1704 return 0;