author:    David Vrabel <david.vrabel@citrix.com>    2014-07-31 12:38:23 -0400
committer: David S. Miller <davem@davemloft.net>     2014-08-01 01:23:52 -0400
commit:    a5b5dc3ce4df4f05f4d81c7d3c56a7604b242093
tree:      d8866e761fbccad617f7fe073694513f8158b2ba /drivers/net/xen-netfront.c
parent:    db8c8ab61a28d7e3eb86d247b342a853263262c3
xen-netfront: release per-queue Tx and Rx resources when disconnecting
Since netfront may reconnect to a backend with a different number of
queues, all per-queue Rx and Tx resources (skbs and grant references)
should be freed when disconnecting.
Without this fix, the Tx and Rx grant refs are not released, so netfront
exhausts them after only a few reconnections; once no free grant
references are available, netfront fails to connect.
Since all Rx buffers are freed and reallocated rather than reused, this
adds some delay to reconnection, but that delay is expected to be small
compared with the time taken by backend hotplug scripts and the like.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
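To see why exhaustion arrives "after only a few reconnections", here is a back-of-the-envelope calculation. It is illustrative only and not part of the commit: the ring sizes follow from the Xen ring macros with 4 KiB pages, while the queue count and grant-table size are assumptions, flagged as such in the comments.

	/*
	 * Illustrative arithmetic, not from the commit: each netfront queue
	 * holds roughly NET_TX_RING_SIZE + NET_RX_RING_SIZE grant references
	 * (256 + 256 with 4 KiB pages), all of which were stranded on every
	 * reconnect before this fix.
	 */
	#include <stdio.h>

	int main(void)
	{
		const unsigned ring_size  = 256;      /* NET_TX_RING_SIZE == NET_RX_RING_SIZE */
		const unsigned num_queues = 4;        /* assumption: one queue per vCPU */
		const unsigned table_size = 32 * 512; /* assumption: 32 v1 grant frames,
		                                         512 8-byte entries per 4 KiB frame */
		unsigned leaked = 2 * ring_size * num_queues;

		printf("grant refs leaked per reconnect: %u\n", leaked);
		printf("reconnects until exhaustion: ~%u\n", table_size / leaked);
		return 0;
	}

Under these assumptions a 4-queue device strands about 2048 references per reconnect and the table is gone after roughly 8 cycles, consistent with the "few reconnections" in the commit message.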
Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--	drivers/net/xen-netfront.c	68
1 file changed, 7 insertions, 61 deletions
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1cc46d00d20a..0b133a3d4312 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1196,22 +1196,6 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
 	spin_unlock_bh(&queue->rx_lock);
 }
 
-static void xennet_uninit(struct net_device *dev)
-{
-	struct netfront_info *np = netdev_priv(dev);
-	unsigned int num_queues = dev->real_num_tx_queues;
-	struct netfront_queue *queue;
-	unsigned int i;
-
-	for (i = 0; i < num_queues; ++i) {
-		queue = &np->queues[i];
-		xennet_release_tx_bufs(queue);
-		xennet_release_rx_bufs(queue);
-		gnttab_free_grant_references(queue->gref_tx_head);
-		gnttab_free_grant_references(queue->gref_rx_head);
-	}
-}
-
 static netdev_features_t xennet_fix_features(struct net_device *dev,
 	netdev_features_t features)
 {
@@ -1313,7 +1297,6 @@ static void xennet_poll_controller(struct net_device *dev)
 
 static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_open = xennet_open,
-	.ndo_uninit = xennet_uninit,
 	.ndo_stop = xennet_close,
 	.ndo_start_xmit = xennet_start_xmit,
 	.ndo_change_mtu = xennet_change_mtu,
@@ -1455,6 +1438,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 
 		napi_synchronize(&queue->napi);
 
+		xennet_release_tx_bufs(queue);
+		xennet_release_rx_bufs(queue);
+		gnttab_free_grant_references(queue->gref_tx_head);
+		gnttab_free_grant_references(queue->gref_rx_head);
+
 		/* End access and free the pages */
 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -2010,10 +1998,7 @@ static int xennet_connect(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	unsigned int num_queues = 0;
-	int i, requeue_idx, err;
-	struct sk_buff *skb;
-	grant_ref_t ref;
-	struct xen_netif_rx_request *req;
+	int err;
 	unsigned int feature_rx_copy;
 	unsigned int j = 0;
 	struct netfront_queue *queue = NULL;
@@ -2040,47 +2025,8 @@ static int xennet_connect(struct net_device *dev)
 	netdev_update_features(dev);
 	rtnl_unlock();
 
-	/* By now, the queue structures have been set up */
-	for (j = 0; j < num_queues; ++j) {
-		queue = &np->queues[j];
-
-		/* Step 1: Discard all pending TX packet fragments. */
-		spin_lock_irq(&queue->tx_lock);
-		xennet_release_tx_bufs(queue);
-		spin_unlock_irq(&queue->tx_lock);
-
-		/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
-		spin_lock_bh(&queue->rx_lock);
-
-		for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
-			skb_frag_t *frag;
-			const struct page *page;
-			if (!queue->rx_skbs[i])
-				continue;
-
-			skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i);
-			ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
-			req = RING_GET_REQUEST(&queue->rx, requeue_idx);
-
-			frag = &skb_shinfo(skb)->frags[0];
-			page = skb_frag_page(frag);
-			gnttab_grant_foreign_access_ref(
-				ref, queue->info->xbdev->otherend_id,
-				pfn_to_mfn(page_to_pfn(page)),
-				0);
-			req->gref = ref;
-			req->id = requeue_idx;
-
-			requeue_idx++;
-		}
-
-		queue->rx.req_prod_pvt = requeue_idx;
-
-		spin_unlock_bh(&queue->rx_lock);
-	}
-
 	/*
-	 * Step 3: All public and private state should now be sane.  Get
+	 * All public and private state should now be sane.  Get
 	 * ready to start sending and receiving packets and give the driver
 	 * domain a kick because we've probably just requeued some
 	 * packets.
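Taken together, the patch consolidates all per-queue teardown in xennet_disconnect_backend(). A condensed sketch of the resulting per-queue teardown order follows; the helper name teardown_queue is hypothetical, the calls and their order come from the disconnect hunk above, and the event-channel/irq teardown that surrounds them in the real driver is elided.

	/* Hypothetical helper; body mirrors the xennet_disconnect_backend() hunk. */
	static void teardown_queue(struct netfront_queue *queue)
	{
		napi_synchronize(&queue->napi);  /* ensure no poll is still in flight */

		xennet_release_tx_bufs(queue);   /* free pending Tx skbs and their grant refs */
		xennet_release_rx_bufs(queue);   /* free posted Rx skbs and their grant refs */
		gnttab_free_grant_references(queue->gref_tx_head); /* return unused Tx ref pool */
		gnttab_free_grant_references(queue->gref_rx_head); /* return unused Rx ref pool */

		/* End access and free the shared ring pages. */
		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
	}

With everything freed at disconnect, xennet_connect() no longer needs its requeue loop: the queues are rebuilt from scratch with fresh buffers and grant references, which is what makes reconnecting to a backend with a different queue count safe.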