Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--  drivers/net/xen-netfront.c  199
1 file changed, 96 insertions(+), 103 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 5a7872ac3566..ca82f545ec2c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -628,9 +628,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
 		xennet_count_skb_frag_slots(skb);
 	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
-		net_alert_ratelimited(
-			"xennet: skb rides the rocket: %d slots\n", slots);
-		goto drop;
+		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
+				    slots, skb->len);
+		if (skb_linearize(skb))
+			goto drop;
 	}
 
 	spin_lock_irqsave(&queue->tx_lock, flags);
@@ -1196,22 +1197,6 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
 	spin_unlock_bh(&queue->rx_lock);
 }
 
-static void xennet_uninit(struct net_device *dev)
-{
-	struct netfront_info *np = netdev_priv(dev);
-	unsigned int num_queues = dev->real_num_tx_queues;
-	struct netfront_queue *queue;
-	unsigned int i;
-
-	for (i = 0; i < num_queues; ++i) {
-		queue = &np->queues[i];
-		xennet_release_tx_bufs(queue);
-		xennet_release_rx_bufs(queue);
-		gnttab_free_grant_references(queue->gref_tx_head);
-		gnttab_free_grant_references(queue->gref_rx_head);
-	}
-}
-
 static netdev_features_t xennet_fix_features(struct net_device *dev,
 					     netdev_features_t features)
 {
@@ -1287,7 +1272,7 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
 
 	if (likely(netif_carrier_ok(dev) &&
 		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
-		napi_schedule(&queue->napi);
+		napi_schedule(&queue->napi);
 
 	return IRQ_HANDLED;
 }
@@ -1313,7 +1298,6 @@ static void xennet_poll_controller(struct net_device *dev)
 
 static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_open            = xennet_open,
-	.ndo_uninit          = xennet_uninit,
 	.ndo_stop            = xennet_close,
 	.ndo_start_xmit      = xennet_start_xmit,
 	.ndo_change_mtu      = xennet_change_mtu,
@@ -1437,16 +1421,12 @@ static void xennet_end_access(int ref, void *page)
 static void xennet_disconnect_backend(struct netfront_info *info)
 {
 	unsigned int i = 0;
-	struct netfront_queue *queue = NULL;
 	unsigned int num_queues = info->netdev->real_num_tx_queues;
 
+	netif_carrier_off(info->netdev);
+
 	for (i = 0; i < num_queues; ++i) {
-		/* Stop old i/f to prevent errors whilst we rebuild the state. */
-		spin_lock_bh(&queue->rx_lock);
-		spin_lock_irq(&queue->tx_lock);
-		netif_carrier_off(queue->info->netdev);
-		spin_unlock_irq(&queue->tx_lock);
-		spin_unlock_bh(&queue->rx_lock);
+		struct netfront_queue *queue = &info->queues[i];
 
 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
 			unbind_from_irqhandler(queue->tx_irq, queue);
@@ -1457,6 +1437,13 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 		queue->tx_evtchn = queue->rx_evtchn = 0;
 		queue->tx_irq = queue->rx_irq = 0;
 
+		napi_synchronize(&queue->napi);
+
+		xennet_release_tx_bufs(queue);
+		xennet_release_rx_bufs(queue);
+		gnttab_free_grant_references(queue->gref_tx_head);
+		gnttab_free_grant_references(queue->gref_rx_head);
+
 		/* End access and free the pages */
 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -1698,8 +1685,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
 		goto exit_free_tx;
 	}
 
-	netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
-
 	return 0;
 
  exit_free_tx:
@@ -1790,6 +1775,70 @@ error:
 	return err;
 }
 
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+	unsigned int i;
+
+	rtnl_lock();
+
+	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+		struct netfront_queue *queue = &info->queues[i];
+
+		if (netif_running(info->netdev))
+			napi_disable(&queue->napi);
+		netif_napi_del(&queue->napi);
+	}
+
+	rtnl_unlock();
+
+	kfree(info->queues);
+	info->queues = NULL;
+}
+
+static int xennet_create_queues(struct netfront_info *info,
+				unsigned int num_queues)
+{
+	unsigned int i;
+	int ret;
+
+	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+			       GFP_KERNEL);
+	if (!info->queues)
+		return -ENOMEM;
+
+	rtnl_lock();
+
+	for (i = 0; i < num_queues; i++) {
+		struct netfront_queue *queue = &info->queues[i];
+
+		queue->id = i;
+		queue->info = info;
+
+		ret = xennet_init_queue(queue);
+		if (ret < 0) {
+			dev_warn(&info->netdev->dev,
+				 "only created %d queues\n", i);
+			num_queues = i;
+			break;
+		}
+
+		netif_napi_add(queue->info->netdev, &queue->napi,
+			       xennet_poll, 64);
+		if (netif_running(info->netdev))
+			napi_enable(&queue->napi);
+	}
+
+	netif_set_real_num_tx_queues(info->netdev, num_queues);
+
+	rtnl_unlock();
+
+	if (num_queues == 0) {
+		dev_err(&info->netdev->dev, "no queues\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_netback(struct xenbus_device *dev,
 			   struct netfront_info *info)
@@ -1826,42 +1875,20 @@ static int talk_to_netback(struct xenbus_device *dev,
 		goto out;
 	}
 
-	/* Allocate array of queues */
-	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL);
-	if (!info->queues) {
-		err = -ENOMEM;
-		goto out;
-	}
-	rtnl_lock();
-	netif_set_real_num_tx_queues(info->netdev, num_queues);
-	rtnl_unlock();
+	if (info->queues)
+		xennet_destroy_queues(info);
+
+	err = xennet_create_queues(info, num_queues);
+	if (err < 0)
+		goto destroy_ring;
 
 	/* Create shared ring, alloc event channel -- for each queue */
 	for (i = 0; i < num_queues; ++i) {
 		queue = &info->queues[i];
-		queue->id = i;
-		queue->info = info;
-		err = xennet_init_queue(queue);
-		if (err) {
-			/* xennet_init_queue() cleans up after itself on failure,
-			 * but we still have to clean up any previously initialised
-			 * queues. If i > 0, set num_queues to i, then goto
-			 * destroy_ring, which calls xennet_disconnect_backend()
-			 * to tidy up.
-			 */
-			if (i > 0) {
-				rtnl_lock();
-				netif_set_real_num_tx_queues(info->netdev, i);
-				rtnl_unlock();
-				goto destroy_ring;
-			} else {
-				goto out;
-			}
-		}
 		err = setup_netfront(dev, queue, feature_split_evtchn);
 		if (err) {
-			/* As for xennet_init_queue(), setup_netfront() will tidy
-			 * up the current queue on error, but we need to clean up
+			/* setup_netfront() will tidy up the current
+			 * queue on error, but we need to clean up
 			 * those already allocated.
 			 */
 			if (i > 0) {
@@ -1963,7 +1990,7 @@ abort_transaction_no_dev_fatal:
 	info->queues = NULL;
 	rtnl_lock();
 	netif_set_real_num_tx_queues(info->netdev, 0);
-	rtnl_lock();
+	rtnl_unlock();
  out:
 	return err;
 }
@@ -1972,10 +1999,7 @@ static int xennet_connect(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	unsigned int num_queues = 0;
-	int i, requeue_idx, err;
-	struct sk_buff *skb;
-	grant_ref_t ref;
-	struct xen_netif_rx_request *req;
+	int err;
 	unsigned int feature_rx_copy;
 	unsigned int j = 0;
 	struct netfront_queue *queue = NULL;
@@ -2002,43 +2026,8 @@ static int xennet_connect(struct net_device *dev)
 	netdev_update_features(dev);
 	rtnl_unlock();
 
-	/* By now, the queue structures have been set up */
-	for (j = 0; j < num_queues; ++j) {
-		queue = &np->queues[j];
-		spin_lock_bh(&queue->rx_lock);
-		spin_lock_irq(&queue->tx_lock);
-
-		/* Step 1: Discard all pending TX packet fragments. */
-		xennet_release_tx_bufs(queue);
-
-		/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
-		for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
-			skb_frag_t *frag;
-			const struct page *page;
-			if (!queue->rx_skbs[i])
-				continue;
-
-			skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i);
-			ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
-			req = RING_GET_REQUEST(&queue->rx, requeue_idx);
-
-			frag = &skb_shinfo(skb)->frags[0];
-			page = skb_frag_page(frag);
-			gnttab_grant_foreign_access_ref(
-				ref, queue->info->xbdev->otherend_id,
-				pfn_to_mfn(page_to_pfn(page)),
-				0);
-			req->gref = ref;
-			req->id   = requeue_idx;
-
-			requeue_idx++;
-		}
-
-		queue->rx.req_prod_pvt = requeue_idx;
-	}
-
 	/*
-	 * Step 3: All public and private state should now be sane.  Get
+	 * All public and private state should now be sane.  Get
 	 * ready to start sending and receiving packets and give the driver
 	 * domain a kick because we've probably just requeued some
 	 * packets.
@@ -2046,13 +2035,17 @@ static int xennet_connect(struct net_device *dev)
 	netif_carrier_on(np->netdev);
 	for (j = 0; j < num_queues; ++j) {
 		queue = &np->queues[j];
+
 		notify_remote_via_irq(queue->tx_irq);
 		if (queue->tx_irq != queue->rx_irq)
 			notify_remote_via_irq(queue->rx_irq);
-		xennet_tx_buf_gc(queue);
-		xennet_alloc_rx_buffers(queue);
 
+		spin_lock_irq(&queue->tx_lock);
+		xennet_tx_buf_gc(queue);
 		spin_unlock_irq(&queue->tx_lock);
+
+		spin_lock_bh(&queue->rx_lock);
+		xennet_alloc_rx_buffers(queue);
 		spin_unlock_bh(&queue->rx_lock);
 	}
 