author	David Vrabel <david.vrabel@citrix.com>	2014-07-02 11:09:15 -0400
committer	David S. Miller <davem@davemloft.net>	2014-07-08 14:21:03 -0400
commit	f9feb1e6a25f9e197f9e6e6cb04bf04d2cccff93
tree	753ea80910d46b4cc41dd1fe1a9dbd592bb09991 /drivers/net
parent	f50b407653f64e76d1c9abda61d0d85cde3ca9ca
xen-netfront: call netif_carrier_off() only once when disconnecting
In xennet_disconnect_backend(), netif_carrier_off() was called once per
queue when it needs to be called only once.

The queue locking around the netif_carrier_off() call looked very odd.
I think it was supposed to synchronize any NAPI instances with the
expectation that no further NAPI instances would be scheduled because
of the carrier being off (see the check in xennet_rx_interrupt()), but
I can't easily tell whether this works correctly.

Instead, add a napi_synchronize() call after disabling the interrupts.
This is obviously correct: with no Rx interrupts, no further NAPI
instances will be scheduled.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
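For illustration, here is a minimal sketch of the teardown ordering the message describes. It is not xen-netfront code: my_dev, my_queue, my_disconnect() and my_free_rings() are made-up names, and a plain free_irq() stands in for the Xen-specific unbind_from_irqhandler(); only netif_carrier_off(), free_irq() and napi_synchronize() are real kernel APIs.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical per-queue and per-device state, for illustration only. */
struct my_queue {
	unsigned int irq;
	struct napi_struct napi;
	/* ring state would live here */
};

struct my_dev {
	struct net_device *netdev;
	unsigned int num_queues;
	struct my_queue *queues;
};

/* Placeholder for ending grant access and freeing the ring pages. */
static void my_free_rings(struct my_queue *q)
{
}

static void my_disconnect(struct my_dev *dev)
{
	unsigned int i;

	/* Mark the link down once for the device, not once per queue. */
	netif_carrier_off(dev->netdev);

	for (i = 0; i < dev->num_queues; i++) {
		struct my_queue *q = &dev->queues[i];

		/* With the interrupt gone, no new NAPI poll can be
		 * scheduled for this queue... */
		free_irq(q->irq, q);

		/* ...and napi_synchronize() waits for any poll that is
		 * already running to finish. */
		napi_synchronize(&q->napi);

		/* Only now is it safe to tear down the rings the poll
		 * routine was using. */
		my_free_rings(q);
	}
}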
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/xen-netfront.c	11
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 6a37d62de40b..055222bae6e4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1439,16 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 	unsigned int i = 0;
 	unsigned int num_queues = info->netdev->real_num_tx_queues;
 
+	netif_carrier_off(info->netdev);
+
 	for (i = 0; i < num_queues; ++i) {
 		struct netfront_queue *queue = &info->queues[i];
 
-		/* Stop old i/f to prevent errors whilst we rebuild the state. */
-		spin_lock_bh(&queue->rx_lock);
-		spin_lock_irq(&queue->tx_lock);
-		netif_carrier_off(queue->info->netdev);
-		spin_unlock_irq(&queue->tx_lock);
-		spin_unlock_bh(&queue->rx_lock);
-
 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
 			unbind_from_irqhandler(queue->tx_irq, queue);
 		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1458,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 		queue->tx_evtchn = queue->rx_evtchn = 0;
 		queue->tx_irq = queue->rx_irq = 0;
 
+		napi_synchronize(&queue->napi);
+
 		/* End access and free the pages */
 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);