diff options
author | David S. Miller <davem@davemloft.net> | 2018-03-23 11:24:57 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-03-23 11:31:58 -0400 |
commit | 03fe2debbb2771fb90881e4ce8109b09cf772a5c (patch) | |
tree | fbaf8738296b2e9dcba81c6daef2d515b6c4948c /drivers/net/hyperv/netvsc.c | |
parent | 6686c459e1449a3ee5f3fd313b0a559ace7a700e (diff) | |
parent | f36b7534b83357cf52e747905de6d65b4f7c2512 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here...
For the mac80211 stuff, these were fortunately just parallel
adds. Trivially resolved.
In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call from this function was removed.
In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member "rt_mtu_locked" which needs to be copied
over here.
The mlxsw driver conflict consisted of net-next separating
the span code and definitions into separate files, whilst
a 'net' bug fix made some changes to that moved code.
The mlx5 infiniband conflict resolution was quite non-trivial,
the RDMA tree's merge commit was used as a guide here, and
here are their notes:
====================
Due to bug fixes found by the syzkaller bot and taken into the for-rc
branch after development for the 4.17 merge window had already started
being taken into the for-next branch, there were fairly non-trivial
merge issues that would need to be resolved between the for-rc branch
and the for-next branch. This merge resolves those conflicts and
provides a unified base upon which ongoing development for 4.17 can
be based.
Conflicts:
drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524
(IB/mlx5: Fix cleanup order on unload) added to for-rc and
commit b5ca15ad7e61 (IB/mlx5: Add proper representors support)
added as part of the devel cycle both needed to modify the
init/de-init functions used by mlx5. To support the new
representors, the new functions added by the cleanup patch
needed to be made non-static, and the init/de-init list
added by the representors patch needed to be modified to
match the init/de-init list changes made by the cleanup
patch.
Updates:
drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function
prototypes added by representors patch to reflect new function
names as changed by cleanup patch
drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init
stage list to match new order from cleanup patch
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/hyperv/netvsc.c')
-rw-r--r-- | drivers/net/hyperv/netvsc.c | 52 |
1 file changed, 26 insertions, 26 deletions
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index aa95e81af6e5..4123d081b1c7 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -93,6 +93,11 @@ static void free_netvsc_device(struct rcu_head *head) | |||
93 | = container_of(head, struct netvsc_device, rcu); | 93 | = container_of(head, struct netvsc_device, rcu); |
94 | int i; | 94 | int i; |
95 | 95 | ||
96 | kfree(nvdev->extension); | ||
97 | vfree(nvdev->recv_buf); | ||
98 | vfree(nvdev->send_buf); | ||
99 | kfree(nvdev->send_section_map); | ||
100 | |||
96 | for (i = 0; i < VRSS_CHANNEL_MAX; i++) | 101 | for (i = 0; i < VRSS_CHANNEL_MAX; i++) |
97 | vfree(nvdev->chan_table[i].mrc.slots); | 102 | vfree(nvdev->chan_table[i].mrc.slots); |
98 | 103 | ||
@@ -218,12 +223,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device, | |||
218 | net_device->recv_buf_gpadl_handle = 0; | 223 | net_device->recv_buf_gpadl_handle = 0; |
219 | } | 224 | } |
220 | 225 | ||
221 | if (net_device->recv_buf) { | ||
222 | /* Free up the receive buffer */ | ||
223 | vfree(net_device->recv_buf); | ||
224 | net_device->recv_buf = NULL; | ||
225 | } | ||
226 | |||
227 | if (net_device->send_buf_gpadl_handle) { | 226 | if (net_device->send_buf_gpadl_handle) { |
228 | ret = vmbus_teardown_gpadl(device->channel, | 227 | ret = vmbus_teardown_gpadl(device->channel, |
229 | net_device->send_buf_gpadl_handle); | 228 | net_device->send_buf_gpadl_handle); |
@@ -238,12 +237,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device, | |||
238 | } | 237 | } |
239 | net_device->send_buf_gpadl_handle = 0; | 238 | net_device->send_buf_gpadl_handle = 0; |
240 | } | 239 | } |
241 | if (net_device->send_buf) { | ||
242 | /* Free up the send buffer */ | ||
243 | vfree(net_device->send_buf); | ||
244 | net_device->send_buf = NULL; | ||
245 | } | ||
246 | kfree(net_device->send_section_map); | ||
247 | } | 240 | } |
248 | 241 | ||
249 | int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) | 242 | int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) |
@@ -580,26 +573,29 @@ void netvsc_device_remove(struct hv_device *device) | |||
580 | = rtnl_dereference(net_device_ctx->nvdev); | 573 | = rtnl_dereference(net_device_ctx->nvdev); |
581 | int i; | 574 | int i; |
582 | 575 | ||
583 | cancel_work_sync(&net_device->subchan_work); | ||
584 | |||
585 | netvsc_revoke_buf(device, net_device); | 576 | netvsc_revoke_buf(device, net_device); |
586 | 577 | ||
587 | RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); | 578 | RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); |
588 | 579 | ||
580 | /* And disassociate NAPI context from device */ | ||
581 | for (i = 0; i < net_device->num_chn; i++) | ||
582 | netif_napi_del(&net_device->chan_table[i].napi); | ||
583 | |||
589 | /* | 584 | /* |
590 | * At this point, no one should be accessing net_device | 585 | * At this point, no one should be accessing net_device |
591 | * except in here | 586 | * except in here |
592 | */ | 587 | */ |
593 | netdev_dbg(ndev, "net device safe to remove\n"); | 588 | netdev_dbg(ndev, "net device safe to remove\n"); |
594 | 589 | ||
590 | /* older versions require that buffer be revoked before close */ | ||
591 | if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4) | ||
592 | netvsc_teardown_gpadl(device, net_device); | ||
593 | |||
595 | /* Now, we can close the channel safely */ | 594 | /* Now, we can close the channel safely */ |
596 | vmbus_close(device->channel); | 595 | vmbus_close(device->channel); |
597 | 596 | ||
598 | netvsc_teardown_gpadl(device, net_device); | 597 | if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4) |
599 | 598 | netvsc_teardown_gpadl(device, net_device); | |
600 | /* And dissassociate NAPI context from device */ | ||
601 | for (i = 0; i < net_device->num_chn; i++) | ||
602 | netif_napi_del(&net_device->chan_table[i].napi); | ||
603 | 599 | ||
604 | /* Release all resources */ | 600 | /* Release all resources */ |
605 | free_netvsc_device_rcu(net_device); | 601 | free_netvsc_device_rcu(net_device); |
@@ -663,14 +659,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, | |||
663 | queue_sends = | 659 | queue_sends = |
664 | atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); | 660 | atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); |
665 | 661 | ||
666 | if (net_device->destroy && queue_sends == 0) | 662 | if (unlikely(net_device->destroy)) { |
667 | wake_up(&net_device->wait_drain); | 663 | if (queue_sends == 0) |
664 | wake_up(&net_device->wait_drain); | ||
665 | } else { | ||
666 | struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); | ||
668 | 667 | ||
669 | if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && | 668 | if (netif_tx_queue_stopped(txq) && |
670 | (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || | 669 | (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || |
671 | queue_sends < 1)) { | 670 | queue_sends < 1)) { |
672 | netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx)); | 671 | netif_tx_wake_queue(txq); |
673 | ndev_ctx->eth_stats.wake_queue++; | 672 | ndev_ctx->eth_stats.wake_queue++; |
673 | } | ||
674 | } | 674 | } |
675 | } | 675 | } |
676 | 676 | ||