about summary refs log tree commit diff stats
path: root/drivers/net/hyperv/netvsc.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/hyperv/netvsc.c')
-rw-r--r--  drivers/net/hyperv/netvsc.c  52
1 file changed, 26 insertions, 26 deletions
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index aa95e81af6e5..4123d081b1c7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -93,6 +93,11 @@ static void free_netvsc_device(struct rcu_head *head)
 		= container_of(head, struct netvsc_device, rcu);
 	int i;
 
+	kfree(nvdev->extension);
+	vfree(nvdev->recv_buf);
+	vfree(nvdev->send_buf);
+	kfree(nvdev->send_section_map);
+
 	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
 		vfree(nvdev->chan_table[i].mrc.slots);
 
@@ -218,12 +223,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
 		net_device->recv_buf_gpadl_handle = 0;
 	}
 
-	if (net_device->recv_buf) {
-		/* Free up the receive buffer */
-		vfree(net_device->recv_buf);
-		net_device->recv_buf = NULL;
-	}
-
 	if (net_device->send_buf_gpadl_handle) {
 		ret = vmbus_teardown_gpadl(device->channel,
 					   net_device->send_buf_gpadl_handle);
@@ -238,12 +237,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
 		}
 		net_device->send_buf_gpadl_handle = 0;
 	}
-	if (net_device->send_buf) {
-		/* Free up the send buffer */
-		vfree(net_device->send_buf);
-		net_device->send_buf = NULL;
-	}
-	kfree(net_device->send_section_map);
 }
 
 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
@@ -580,26 +573,29 @@ void netvsc_device_remove(struct hv_device *device)
 		= rtnl_dereference(net_device_ctx->nvdev);
 	int i;
 
-	cancel_work_sync(&net_device->subchan_work);
-
 	netvsc_revoke_buf(device, net_device);
 
 	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
 
+	/* And disassociate NAPI context from device */
+	for (i = 0; i < net_device->num_chn; i++)
+		netif_napi_del(&net_device->chan_table[i].napi);
+
 	/*
 	 * At this point, no one should be accessing net_device
 	 * except in here
 	 */
 	netdev_dbg(ndev, "net device safe to remove\n");
 
+	/* older versions require that buffer be revoked before close */
+	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4)
+		netvsc_teardown_gpadl(device, net_device);
+
 	/* Now, we can close the channel safely */
 	vmbus_close(device->channel);
 
-	netvsc_teardown_gpadl(device, net_device);
-
-	/* And dissassociate NAPI context from device */
-	for (i = 0; i < net_device->num_chn; i++)
-		netif_napi_del(&net_device->chan_table[i].napi);
+	if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4)
+		netvsc_teardown_gpadl(device, net_device);
 
 	/* Release all resources */
 	free_netvsc_device_rcu(net_device);
@@ -663,14 +659,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 	queue_sends =
 		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
 
-	if (net_device->destroy && queue_sends == 0)
-		wake_up(&net_device->wait_drain);
+	if (unlikely(net_device->destroy)) {
+		if (queue_sends == 0)
+			wake_up(&net_device->wait_drain);
+	} else {
+		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
-	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
+	if (netif_tx_queue_stopped(txq) &&
 	    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
 	     queue_sends < 1)) {
-		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
+		netif_tx_wake_queue(txq);
 		ndev_ctx->eth_stats.wake_queue++;
+		}
 	}
 }
 