author		Daniel Vetter <daniel.vetter@ffwll.ch>		2017-02-26 15:34:42 -0500
committer	Daniel Vetter <daniel.vetter@ffwll.ch>		2017-02-26 15:34:42 -0500
commit		8e22e1b3499a446df48c2b26667ca36c55bf864c (patch)
tree		5329f98b3eb3c95a9dcbab0fa4f9b6e62f0e788d /drivers/net/xen-netfront.c
parent		00d3c14f14d51babd8aeafd5fa734ccf04f5ca3d (diff)
parent		64a577196d66b44e37384bc5c4d78c61f59d5b2a (diff)
Merge airlied/drm-next into drm-misc-next
Backmerge the main pull request to sync up with all the newly landed
drivers. Otherwise we'll have chaos even before the 4.12 cycle starts
in earnest.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--	drivers/net/xen-netfront.c	46
1 file changed, 24 insertions(+), 22 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index a479cd99911d..1e4125a98291 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -281,6 +281,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 {
 	RING_IDX req_prod = queue->rx.req_prod_pvt;
 	int notify;
+	int err = 0;
 
 	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 		return;
@@ -295,8 +296,10 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 		struct xen_netif_rx_request *req;
 
 		skb = xennet_alloc_one_rx_buffer(queue);
-		if (!skb)
+		if (!skb) {
+			err = -ENOMEM;
 			break;
+		}
 
 		id = xennet_rxidx(req_prod);
 
@@ -320,8 +323,13 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 
 	queue->rx.req_prod_pvt = req_prod;
 
-	/* Not enough requests? Try again later. */
-	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+	/* Try again later if there are not enough requests or skb allocation
+	 * failed.
+	 * Enough requests is quantified as the sum of newly created slots and
+	 * the unconsumed slots at the backend.
+	 */
+	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
+	    unlikely(err)) {
 		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 		return;
 	}
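The three hunks above make allocation failure in xennet_alloc_rx_buffers() explicit: a new err flag records -ENOMEM from xennet_alloc_one_rx_buffer(), and the refill timer is now re-armed when an allocation failed, not only when too few slots were produced. A minimal userspace sketch of that pattern, with hypothetical names (refill_queue, alloc_buffer, schedule_retry) standing in for the driver's functions:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 16
#define SLOTS_MIN 8		/* stand-in for NET_RX_SLOTS_MIN */

struct rx_queue {
	unsigned int req_prod;	/* slots produced so far */
	unsigned int rsp_cons;	/* slots already consumed by the backend */
	void *ring[RING_SIZE];
};

/* Hypothetical allocator; may return NULL, as
 * xennet_alloc_one_rx_buffer() can under memory pressure. */
static void *alloc_buffer(void)
{
	return malloc(2048);
}

/* Stand-in for mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)). */
static void schedule_retry(struct rx_queue *q)
{
	printf("refill incomplete (prod=%u cons=%u), retry later\n",
	       q->req_prod, q->rsp_cons);
}

static void refill_queue(struct rx_queue *q)
{
	int err = 0;

	while (q->req_prod - q->rsp_cons < RING_SIZE) {
		void *buf = alloc_buffer();

		/* The patch's key change: record the failure instead of
		 * silently breaking out, so the retry check below also
		 * fires when the very first allocation fails. */
		if (!buf) {
			err = -ENOMEM;
			break;
		}
		q->ring[q->req_prod++ % RING_SIZE] = buf;
	}

	/* Retry later if too few slots were produced *or* an allocation
	 * failed -- the "|| err" clause is what the patch adds. */
	if (q->req_prod - q->rsp_cons < SLOTS_MIN || err)
		schedule_retry(q);
}

int main(void)
{
	struct rx_queue q = { 0 };
	unsigned int i;

	refill_queue(&q);
	for (i = q.rsp_cons; i < q.req_prod; i++)
		free(q.ring[i % RING_SIZE]);
	return 0;
}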
@@ -1379,6 +1387,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 	for (i = 0; i < num_queues && info->queues; ++i) {
 		struct netfront_queue *queue = &info->queues[i];
 
+		del_timer_sync(&queue->rx_refill_timer);
+
 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
 			unbind_from_irqhandler(queue->tx_irq, queue);
 		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1733,7 +1743,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
 
 		if (netif_running(info->netdev))
 			napi_disable(&queue->napi);
-		del_timer_sync(&queue->rx_refill_timer);
 		netif_napi_del(&queue->napi);
 	}
 
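These two hunks move del_timer_sync() from xennet_destroy_queues() into xennet_disconnect_backend(), so the refill timer is stopped before the queues' interrupts and rings are torn down on every disconnect path, not only on full queue destruction. The rule they enforce is general: synchronously cancel a timer before freeing the state its callback touches. A hedged userspace analogue using POSIX timers (timer_create()/timer_delete() are real APIs; the queue layout is illustrative, and unlike del_timer_sync(), timer_delete() does not wait for a running callback, so this models only the ordering; link with -lrt on older glibc):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

struct queue {
	timer_t refill_timer;
	int *state;		/* resource the timer callback touches */
};

/* Periodic callback, analogous to the rx refill timer function. */
static void refill_cb(union sigval sv)
{
	struct queue *q = sv.sival_ptr;

	(*q->state)++;	/* would be a use-after-free if state were freed first */
}

static void disconnect(struct queue *q)
{
	/* Cancel the timer *before* freeing what its callback uses --
	 * the same ordering the patch enforces by putting
	 * del_timer_sync() at the top of the disconnect loop. */
	timer_delete(q->refill_timer);
	printf("timer fired %d times before disconnect\n", *q->state);
	free(q->state);
	q->state = NULL;
}

int main(void)
{
	struct queue q = { .state = calloc(1, sizeof(int)) };
	struct sigevent sev = {
		.sigev_notify = SIGEV_THREAD,
		.sigev_notify_function = refill_cb,
		.sigev_value.sival_ptr = &q,
	};
	struct itimerspec its = {
		.it_value.tv_nsec = 10 * 1000 * 1000,	/* 10 ms */
		.it_interval.tv_nsec = 10 * 1000 * 1000,
	};

	timer_create(CLOCK_MONOTONIC, &sev, &q.refill_timer);
	timer_settime(q.refill_timer, 0, &its, NULL);
	usleep(100 * 1000);	/* let the timer fire a few times */
	disconnect(&q);
	return 0;
}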
@@ -1822,27 +1831,19 @@ static int talk_to_netback(struct xenbus_device *dev,
 	xennet_destroy_queues(info);
 
 	err = xennet_create_queues(info, &num_queues);
-	if (err < 0)
-		goto destroy_ring;
+	if (err < 0) {
+		xenbus_dev_fatal(dev, err, "creating queues");
+		kfree(info->queues);
+		info->queues = NULL;
+		goto out;
+	}
 
 	/* Create shared ring, alloc event channel -- for each queue */
 	for (i = 0; i < num_queues; ++i) {
 		queue = &info->queues[i];
 		err = setup_netfront(dev, queue, feature_split_evtchn);
-		if (err) {
-			/* setup_netfront() will tidy up the current
-			 * queue on error, but we need to clean up
-			 * those already allocated.
-			 */
-			if (i > 0) {
-				rtnl_lock();
-				netif_set_real_num_tx_queues(info->netdev, i);
-				rtnl_unlock();
-				goto destroy_ring;
-			} else {
-				goto out;
-			}
-		}
+		if (err)
+			goto destroy_ring;
 	}
 
 again:
@@ -1932,9 +1933,10 @@ abort_transaction_no_dev_fatal:
 	xenbus_transaction_end(xbt, 1);
 destroy_ring:
 	xennet_disconnect_backend(info);
-	kfree(info->queues);
-	info->queues = NULL;
+	xennet_destroy_queues(info);
 out:
+	unregister_netdev(info->netdev);
+	xennet_free_netdev(info->netdev);
 	return err;
 }
 
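The last two hunks replace the ad-hoc cleanup in talk_to_netback() with the kernel's standard layered-goto unwinding: a setup_netfront() failure now jumps to destroy_ring, which disconnects the backend, destroys the queues, and falls through to out, where the netdev itself is unregistered and freed. Each label undoes one layer, in reverse order of acquisition. A minimal sketch of the idiom, with made-up resource names (dev_setup, queues, ring):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct device {
	int *queues;
	int *ring;
};

static int dev_setup(struct device *dev)
{
	int err;

	dev->queues = calloc(4, sizeof(*dev->queues));
	if (!dev->queues) {
		err = -ENOMEM;
		goto out;		/* nothing acquired yet */
	}

	dev->ring = calloc(256, sizeof(*dev->ring));
	if (!dev->ring) {
		err = -ENOMEM;
		goto destroy_queues;	/* undo only what exists */
	}

	return 0;

	/* Unwind in reverse order of acquisition; each label frees one
	 * layer and falls through to the next, just as destroy_ring now
	 * falls through into queue teardown and then out. */
destroy_queues:
	free(dev->queues);
	dev->queues = NULL;
out:
	return err;
}

int main(void)
{
	struct device dev = { 0 };

	printf("setup: %d\n", dev_setup(&dev));
	free(dev.ring);
	free(dev.queues);
	return 0;
}

Because every failure at depth N frees exactly the N layers already built, the patch can also drop the old per-queue special-casing (the i > 0 branch with its rtnl-locked netif_set_real_num_tx_queues() call) in favor of a single goto destroy_ring.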