author     David S. Miller <davem@davemloft.net>  2014-08-01 01:23:58 -0400
committer  David S. Miller <davem@davemloft.net>  2014-08-01 01:23:58 -0400
commit     92b85671cd89a16500b7b027b17433f21da99be3 (patch)
tree       3c5b9020cf35f9bac5d7ffd185ca7cdf86e375e4
parent     081e83a78db9b0ae1f5eabc2dedecc865f509b98 (diff)
parent     69cb85242f4ff1cbbac5a45c05223600084760e8 (diff)
Merge branch 'xen-netfront'
David Vrabel says:

====================
xen-netfront: more multiqueue fixes

A few more xen-netfront fixes for the multiqueue support added in
3.16-rc1. It would be great if these could make it into 3.16 but I
suspect it's a little late for that now.

The second patch fixes a significant resource leak that prevents guests
from migrating more than a handful of times.

These have been tested by repeatedly migrating a guest over 250 times
(it would previously fail with this guest after only 8 iterations).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
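The leak is easiest to see in isolation. What follows is a minimal
user-space C simulation, not driver code: GRANT_TABLE_SIZE, NUM_QUEUES,
and the helper functions are invented for illustration. Before this
series, per-queue Tx/Rx buffers and grant references were freed only in
the ndo_uninit hook, which is not invoked on a backend
disconnect/reconnect cycle such as a live migration, so every cycle
leaked one allocation per queue; the fix releases them in
xennet_disconnect_backend instead.

/*
 * Illustrative simulation of the grant-reference leak; all constants
 * and names here are made up for the demo, not taken from the driver.
 */
#include <stdbool.h>
#include <stdio.h>

#define GRANT_TABLE_SIZE 32   /* hypothetical grant table capacity */
#define NUM_QUEUES       4    /* hypothetical queues per device */

static int grants_in_use;     /* stands in for the domain's grant table */

static bool alloc_queue_grants(void)
{
	if (grants_in_use + NUM_QUEUES > GRANT_TABLE_SIZE)
		return false;          /* table exhausted: connect fails */
	grants_in_use += NUM_QUEUES;
	return true;
}

static void free_queue_grants(void)
{
	grants_in_use -= NUM_QUEUES;   /* what the fix does on disconnect */
}

int main(void)
{
	bool fixed = false;            /* flip to true to model the fix */
	int cycle;

	for (cycle = 1; cycle <= 250; cycle++) {
		if (!alloc_queue_grants()) {
			printf("migration failed on cycle %d\n", cycle);
			return 1;
		}
		if (fixed)
			free_queue_grants();  /* release on disconnect */
		/* buggy path: grants leak, since ndo_uninit never runs here */
	}
	printf("all 250 cycles succeeded\n");
	return 0;
}

With fixed = false and these made-up constants the simulated guest fails
on the ninth cycle, i.e. after 8 successful migrations, mirroring the
report above; with fixed = true all 250 cycles complete.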
-rw-r--r--  drivers/net/xen-netfront.c  74
1 file changed, 10 insertions, 64 deletions
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 055222bae6e4..28204bc4f369 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1196,22 +1196,6 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
 	spin_unlock_bh(&queue->rx_lock);
 }
 
-static void xennet_uninit(struct net_device *dev)
-{
-	struct netfront_info *np = netdev_priv(dev);
-	unsigned int num_queues = dev->real_num_tx_queues;
-	struct netfront_queue *queue;
-	unsigned int i;
-
-	for (i = 0; i < num_queues; ++i) {
-		queue = &np->queues[i];
-		xennet_release_tx_bufs(queue);
-		xennet_release_rx_bufs(queue);
-		gnttab_free_grant_references(queue->gref_tx_head);
-		gnttab_free_grant_references(queue->gref_rx_head);
-	}
-}
-
 static netdev_features_t xennet_fix_features(struct net_device *dev,
 	netdev_features_t features)
 {
@@ -1313,7 +1297,6 @@ static void xennet_poll_controller(struct net_device *dev)
 
 static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_open            = xennet_open,
-	.ndo_uninit          = xennet_uninit,
 	.ndo_stop            = xennet_close,
 	.ndo_start_xmit      = xennet_start_xmit,
 	.ndo_change_mtu      = xennet_change_mtu,
@@ -1455,6 +1438,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 
 		napi_synchronize(&queue->napi);
 
+		xennet_release_tx_bufs(queue);
+		xennet_release_rx_bufs(queue);
+		gnttab_free_grant_references(queue->gref_tx_head);
+		gnttab_free_grant_references(queue->gref_rx_head);
+
 		/* End access and free the pages */
 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -1827,8 +1815,8 @@ static int xennet_create_queues(struct netfront_info *info,
 
 		ret = xennet_init_queue(queue);
 		if (ret < 0) {
-			dev_warn(&info->netdev->dev, "only created %d queues\n",
-				 num_queues);
+			dev_warn(&info->netdev->dev,
+				 "only created %d queues\n", i);
 			num_queues = i;
 			break;
 		}
@@ -2001,7 +1989,7 @@ abort_transaction_no_dev_fatal:
 	info->queues = NULL;
 	rtnl_lock();
 	netif_set_real_num_tx_queues(info->netdev, 0);
-	rtnl_lock();
+	rtnl_unlock();
 out:
 	return err;
 }
@@ -2010,10 +1998,7 @@ static int xennet_connect(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	unsigned int num_queues = 0;
-	int i, requeue_idx, err;
-	struct sk_buff *skb;
-	grant_ref_t ref;
-	struct xen_netif_rx_request *req;
+	int err;
 	unsigned int feature_rx_copy;
 	unsigned int j = 0;
 	struct netfront_queue *queue = NULL;
@@ -2040,47 +2025,8 @@ static int xennet_connect(struct net_device *dev)
 	netdev_update_features(dev);
 	rtnl_unlock();
 
-	/* By now, the queue structures have been set up */
-	for (j = 0; j < num_queues; ++j) {
-		queue = &np->queues[j];
-
-		/* Step 1: Discard all pending TX packet fragments. */
-		spin_lock_irq(&queue->tx_lock);
-		xennet_release_tx_bufs(queue);
-		spin_unlock_irq(&queue->tx_lock);
-
-		/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
-		spin_lock_bh(&queue->rx_lock);
-
-		for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
-			skb_frag_t *frag;
-			const struct page *page;
-			if (!queue->rx_skbs[i])
-				continue;
-
-			skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i);
-			ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
-			req = RING_GET_REQUEST(&queue->rx, requeue_idx);
-
-			frag = &skb_shinfo(skb)->frags[0];
-			page = skb_frag_page(frag);
-			gnttab_grant_foreign_access_ref(
-				ref, queue->info->xbdev->otherend_id,
-				pfn_to_mfn(page_to_pfn(page)),
-				0);
-			req->gref = ref;
-			req->id = requeue_idx;
-
-			requeue_idx++;
-		}
-
-		queue->rx.req_prod_pvt = requeue_idx;
-
-		spin_unlock_bh(&queue->rx_lock);
-	}
-
 	/*
-	 * Step 3: All public and private state should now be sane.  Get
+	 * All public and private state should now be sane.  Get
 	 * ready to start sending and receiving packets and give the driver
 	 * domain a kick because we've probably just requeued some
 	 * packets.