Diffstat (limited to 'drivers/net/xen-netfront.c')
 drivers/net/xen-netfront.c | 134 ++++++++++++++++++++++++++++-----------
 1 file changed, 90 insertions(+), 44 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 5a7872ac3566..055222bae6e4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1287,7 +1287,7 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
 
 	if (likely(netif_carrier_ok(dev) &&
 		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
 		napi_schedule(&queue->napi);
 
 	return IRQ_HANDLED;
 }
@@ -1437,16 +1437,12 @@ static void xennet_end_access(int ref, void *page)
 static void xennet_disconnect_backend(struct netfront_info *info)
 {
 	unsigned int i = 0;
-	struct netfront_queue *queue = NULL;
 	unsigned int num_queues = info->netdev->real_num_tx_queues;
 
+	netif_carrier_off(info->netdev);
+
 	for (i = 0; i < num_queues; ++i) {
-		/* Stop old i/f to prevent errors whilst we rebuild the state. */
-		spin_lock_bh(&queue->rx_lock);
-		spin_lock_irq(&queue->tx_lock);
-		netif_carrier_off(queue->info->netdev);
-		spin_unlock_irq(&queue->tx_lock);
-		spin_unlock_bh(&queue->rx_lock);
+		struct netfront_queue *queue = &info->queues[i];
 
 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
 			unbind_from_irqhandler(queue->tx_irq, queue);
@@ -1457,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 		queue->tx_evtchn = queue->rx_evtchn = 0;
 		queue->tx_irq = queue->rx_irq = 0;
 
+		napi_synchronize(&queue->napi);
+
 		/* End access and free the pages */
 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
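The ordering in the reworked xennet_disconnect_backend() is the point of the two hunks above: carrier is turned off once for the whole device, each queue's interrupts are unbound, and only then does napi_synchronize() wait for a poll that may still be running on another CPU before the shared rings are torn down. A minimal sketch of that teardown ordering (illustrative only, not driver code; free_rings() is a hypothetical stand-in for the xennet_end_access() calls):

	static void queue_teardown(struct netfront_queue *queue)
	{
		/* 1. Stop the interrupt handler from scheduling NAPI again. */
		if (queue->tx_irq)
			unbind_from_irqhandler(queue->tx_irq, queue);

		/* 2. Wait for a poll that is already in flight to complete. */
		napi_synchronize(&queue->napi);

		/* 3. Only now is it safe to revoke the rings the poller reads. */
		free_rings(queue);
	}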
@@ -1698,8 +1696,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
 		goto exit_free_tx;
 	}
 
-	netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
-
 	return 0;
 
  exit_free_tx:
@@ -1790,6 +1786,70 @@ error:
 	return err;
 }
 
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+	unsigned int i;
+
+	rtnl_lock();
+
+	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+		struct netfront_queue *queue = &info->queues[i];
+
+		if (netif_running(info->netdev))
+			napi_disable(&queue->napi);
+		netif_napi_del(&queue->napi);
+	}
+
+	rtnl_unlock();
+
+	kfree(info->queues);
+	info->queues = NULL;
+}
+
+static int xennet_create_queues(struct netfront_info *info,
+				unsigned int num_queues)
+{
+	unsigned int i;
+	int ret;
+
+	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+			       GFP_KERNEL);
+	if (!info->queues)
+		return -ENOMEM;
+
+	rtnl_lock();
+
+	for (i = 0; i < num_queues; i++) {
+		struct netfront_queue *queue = &info->queues[i];
+
+		queue->id = i;
+		queue->info = info;
+
+		ret = xennet_init_queue(queue);
+		if (ret < 0) {
+			dev_warn(&info->netdev->dev, "only created %d queues\n",
+				 num_queues);
+			num_queues = i;
+			break;
+		}
+
+		netif_napi_add(queue->info->netdev, &queue->napi,
+			       xennet_poll, 64);
+		if (netif_running(info->netdev))
+			napi_enable(&queue->napi);
+	}
+
+	netif_set_real_num_tx_queues(info->netdev, num_queues);
+
+	rtnl_unlock();
+
+	if (num_queues == 0) {
+		dev_err(&info->netdev->dev, "no queues\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_netback(struct xenbus_device *dev,
 			   struct netfront_info *info)
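Both new helpers take the RTNL lock around their queue loop chiefly because netif_set_real_num_tx_queues() must be called under RTNL once the netdev is registered (as it is on resume/reconnect); holding it across the napi_enable()/napi_disable() calls also keeps the netif_running() checks from racing with an open or close. The pattern being centralised here is the one the old talk_to_netback() did inline:

	rtnl_lock();
	netif_set_real_num_tx_queues(info->netdev, num_queues);
	rtnl_unlock();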
@@ -1826,42 +1886,20 @@ static int talk_to_netback(struct xenbus_device *dev,
 		goto out;
 	}
 
-	/* Allocate array of queues */
-	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL);
-	if (!info->queues) {
-		err = -ENOMEM;
-		goto out;
-	}
-	rtnl_lock();
-	netif_set_real_num_tx_queues(info->netdev, num_queues);
-	rtnl_unlock();
+	if (info->queues)
+		xennet_destroy_queues(info);
+
+	err = xennet_create_queues(info, num_queues);
+	if (err < 0)
+		goto destroy_ring;
 
 	/* Create shared ring, alloc event channel -- for each queue */
 	for (i = 0; i < num_queues; ++i) {
 		queue = &info->queues[i];
-		queue->id = i;
-		queue->info = info;
-		err = xennet_init_queue(queue);
-		if (err) {
-			/* xennet_init_queue() cleans up after itself on failure,
-			 * but we still have to clean up any previously initialised
-			 * queues. If i > 0, set num_queues to i, then goto
-			 * destroy_ring, which calls xennet_disconnect_backend()
-			 * to tidy up.
-			 */
-			if (i > 0) {
-				rtnl_lock();
-				netif_set_real_num_tx_queues(info->netdev, i);
-				rtnl_unlock();
-				goto destroy_ring;
-			} else {
-				goto out;
-			}
-		}
 		err = setup_netfront(dev, queue, feature_split_evtchn);
 		if (err) {
-			/* As for xennet_init_queue(), setup_netfront() will tidy
-			 * up the current queue on error, but we need to clean up
+			/* setup_netfront() will tidy up the current
+			 * queue on error, but we need to clean up
 			 * those already allocated.
 			 */
 			if (i > 0) {
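With the helpers above, reconnecting in talk_to_netback() becomes destroy-then-create: stale queues from a previous connection are freed before a new set is built, and any failure funnels into the existing destroy_ring label (which, per the removed comment, ends up in xennet_disconnect_backend()). Condensed from the hunk above, the resulting flow is simply:

	if (info->queues)
		xennet_destroy_queues(info);

	err = xennet_create_queues(info, num_queues);
	if (err < 0)
		goto destroy_ring;

which replaces the per-iteration rtnl_lock()/netif_set_real_num_tx_queues() clean-up dance the old loop needed on partial failure.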
@@ -2005,13 +2043,15 @@ static int xennet_connect(struct net_device *dev)
 	/* By now, the queue structures have been set up */
 	for (j = 0; j < num_queues; ++j) {
 		queue = &np->queues[j];
-		spin_lock_bh(&queue->rx_lock);
-		spin_lock_irq(&queue->tx_lock);
 
 		/* Step 1: Discard all pending TX packet fragments. */
+		spin_lock_irq(&queue->tx_lock);
 		xennet_release_tx_bufs(queue);
+		spin_unlock_irq(&queue->tx_lock);
 
 		/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+		spin_lock_bh(&queue->rx_lock);
+
 		for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
 			skb_frag_t *frag;
 			const struct page *page;
@@ -2035,6 +2075,8 @@ static int xennet_connect(struct net_device *dev)
 		}
 
 		queue->rx.req_prod_pvt = requeue_idx;
+
+		spin_unlock_bh(&queue->rx_lock);
 	}
 
 	/*
@@ -2046,13 +2088,17 @@ static int xennet_connect(struct net_device *dev)
 	netif_carrier_on(np->netdev);
 	for (j = 0; j < num_queues; ++j) {
 		queue = &np->queues[j];
+
 		notify_remote_via_irq(queue->tx_irq);
 		if (queue->tx_irq != queue->rx_irq)
 			notify_remote_via_irq(queue->rx_irq);
-		xennet_tx_buf_gc(queue);
-		xennet_alloc_rx_buffers(queue);
 
+		spin_lock_irq(&queue->tx_lock);
+		xennet_tx_buf_gc(queue);
 		spin_unlock_irq(&queue->tx_lock);
+
+		spin_lock_bh(&queue->rx_lock);
+		xennet_alloc_rx_buffers(queue);
 		spin_unlock_bh(&queue->rx_lock);
 	}
 
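In the old xennet_connect() the two per-queue locks were acquired in the first loop and only released in the second; the hunks above narrow each lock to the operation that needs it, matching the context each lock is contended from: the TX ring is also touched from the hard-irq TX interrupt handler, so tx_lock is taken with interrupts disabled, while the RX ring is only touched from NAPI (softirq) context, so disabling bottom halves around rx_lock is enough. A sketch of the resulting per-queue pattern, assuming that usual irq/bh pairing for the two locks:

	/* TX ring: also used by the hard-irq TX interrupt handler. */
	spin_lock_irq(&queue->tx_lock);
	xennet_tx_buf_gc(queue);
	spin_unlock_irq(&queue->tx_lock);

	/* RX ring: only used from the NAPI poll loop (softirq context). */
	spin_lock_bh(&queue->rx_lock);
	xennet_alloc_rx_buffers(queue);
	spin_unlock_bh(&queue->rx_lock);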