aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMichael S. Tsirkin <mst@redhat.com>2019-01-17 23:20:07 -0500
committerDavid S. Miller <davem@davemloft.net>2019-01-19 19:06:52 -0500
commitdf133f3f96257ee29696c0ed8bd198ec801dc810 (patch)
tree4df0a603924d61c182d4c153f99c20e77ab96bd2
parent3e64cf7a435ed0500e3adaa8aada2272d3ae8abc (diff)
virtio_net: bulk free tx skbs
Use napi_consume_skb() to get bulk free.  Note that napi_consume_skb is
safe to call in a non-napi context as long as the napi_budget flag is
correct.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/virtio_net.c12
1 file changed, 6 insertions, 6 deletions
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 023725086046..8fadd8eaf601 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1330,7 +1330,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 	return stats.packets;
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 {
 	struct sk_buff *skb;
 	unsigned int len;
@@ -1343,7 +1343,7 @@ static void free_old_xmit_skbs(struct send_queue *sq)
 		bytes += skb->len;
 		packets++;
 
-		dev_consume_skb_any(skb);
+		napi_consume_skb(skb, in_napi);
 	}
 
 	/* Avoid overhead when no packets have been processed
@@ -1369,7 +1369,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
 		return;
 
 	if (__netif_tx_trylock(txq)) {
-		free_old_xmit_skbs(sq);
+		free_old_xmit_skbs(sq, true);
 		__netif_tx_unlock(txq);
 	}
 
@@ -1445,7 +1445,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
 
 	__netif_tx_lock(txq, raw_smp_processor_id());
-	free_old_xmit_skbs(sq);
+	free_old_xmit_skbs(sq, true);
 	__netif_tx_unlock(txq);
 
 	virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1514,7 +1514,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	bool use_napi = sq->napi.weight;
 
 	/* Free up any pending old buffers before queueing new ones. */
-	free_old_xmit_skbs(sq);
+	free_old_xmit_skbs(sq, false);
 
 	if (use_napi && kick)
 		virtqueue_enable_cb_delayed(sq->vq);
@@ -1557,7 +1557,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (!use_napi &&
 	    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 		/* More just got used, free them then recheck. */
-		free_old_xmit_skbs(sq);
+		free_old_xmit_skbs(sq, false);
 		if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 			netif_start_subqueue(dev, qnum);
 			virtqueue_disable_cb(sq->vq);