aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/hyperv
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-03-23 11:24:57 -0400
committerDavid S. Miller <davem@davemloft.net>2018-03-23 11:31:58 -0400
commit03fe2debbb2771fb90881e4ce8109b09cf772a5c (patch)
treefbaf8738296b2e9dcba81c6daef2d515b6c4948c /drivers/net/hyperv
parent6686c459e1449a3ee5f3fd313b0a559ace7a700e (diff)
parentf36b7534b83357cf52e747905de6d65b4f7c2512 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here... For the mac80211 stuff, these were fortunately just parallel adds. Trivially resolved. In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the function phy_disable_interrupts() earlier in the file, whilst in 'net-next' the phy_error() call from this function was removed. In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the 'rt_table_id' member of rtable collided with a bug fix in 'net' that added a new struct member "rt_mtu_locked" which needs to be copied over here. The mlxsw driver conflict consisted of net-next separating the span code and definitions into separate files, whilst a 'net' bug fix made some changes to that moved code. The mlx5 infiniband conflict resolution was quite non-trivial, the RDMA tree's merge commit was used as a guide here, and here are their notes: ==================== Due to bug fixes found by the syzkaller bot and taken into the for-rc branch after development for the 4.17 merge window had already started being taken into the for-next branch, there were fairly non-trivial merge issues that would need to be resolved between the for-rc branch and the for-next branch. This merge resolves those conflicts and provides a unified base upon which ongoing development for 4.17 can be based. Conflicts: drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524 (IB/mlx5: Fix cleanup order on unload) added to for-rc and commit b5ca15ad7e61 (IB/mlx5: Add proper representors support) added as part of the devel cycle both needed to modify the init/de-init functions used by mlx5. To support the new representors, the new functions added by the cleanup patch needed to be made non-static, and the init/de-init list added by the representors patch needed to be modified to match the init/de-init list changes made by the cleanup patch. 
Updates: drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function prototypes added by representors patch to reflect new function names as changed by cleanup patch drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init stage list to match new order from cleanup patch ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/hyperv')
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc.c52
-rw-r--r--drivers/net/hyperv/netvsc_drv.c293
-rw-r--r--drivers/net/hyperv/rndis_filter.c68
4 files changed, 225 insertions, 190 deletions
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 0db3bd1ea06f..32861036c3fc 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -173,6 +173,7 @@ struct rndis_device {
173 struct list_head req_list; 173 struct list_head req_list;
174 174
175 struct work_struct mcast_work; 175 struct work_struct mcast_work;
176 u32 filter;
176 177
177 bool link_state; /* 0 - link up, 1 - link down */ 178 bool link_state; /* 0 - link up, 1 - link down */
178 179
@@ -211,7 +212,6 @@ void netvsc_channel_cb(void *context);
211int netvsc_poll(struct napi_struct *napi, int budget); 212int netvsc_poll(struct napi_struct *napi, int budget);
212 213
213void rndis_set_subchannel(struct work_struct *w); 214void rndis_set_subchannel(struct work_struct *w);
214bool rndis_filter_opened(const struct netvsc_device *nvdev);
215int rndis_filter_open(struct netvsc_device *nvdev); 215int rndis_filter_open(struct netvsc_device *nvdev);
216int rndis_filter_close(struct netvsc_device *nvdev); 216int rndis_filter_close(struct netvsc_device *nvdev);
217struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, 217struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index aa95e81af6e5..4123d081b1c7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -93,6 +93,11 @@ static void free_netvsc_device(struct rcu_head *head)
93 = container_of(head, struct netvsc_device, rcu); 93 = container_of(head, struct netvsc_device, rcu);
94 int i; 94 int i;
95 95
96 kfree(nvdev->extension);
97 vfree(nvdev->recv_buf);
98 vfree(nvdev->send_buf);
99 kfree(nvdev->send_section_map);
100
96 for (i = 0; i < VRSS_CHANNEL_MAX; i++) 101 for (i = 0; i < VRSS_CHANNEL_MAX; i++)
97 vfree(nvdev->chan_table[i].mrc.slots); 102 vfree(nvdev->chan_table[i].mrc.slots);
98 103
@@ -218,12 +223,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
218 net_device->recv_buf_gpadl_handle = 0; 223 net_device->recv_buf_gpadl_handle = 0;
219 } 224 }
220 225
221 if (net_device->recv_buf) {
222 /* Free up the receive buffer */
223 vfree(net_device->recv_buf);
224 net_device->recv_buf = NULL;
225 }
226
227 if (net_device->send_buf_gpadl_handle) { 226 if (net_device->send_buf_gpadl_handle) {
228 ret = vmbus_teardown_gpadl(device->channel, 227 ret = vmbus_teardown_gpadl(device->channel,
229 net_device->send_buf_gpadl_handle); 228 net_device->send_buf_gpadl_handle);
@@ -238,12 +237,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
238 } 237 }
239 net_device->send_buf_gpadl_handle = 0; 238 net_device->send_buf_gpadl_handle = 0;
240 } 239 }
241 if (net_device->send_buf) {
242 /* Free up the send buffer */
243 vfree(net_device->send_buf);
244 net_device->send_buf = NULL;
245 }
246 kfree(net_device->send_section_map);
247} 240}
248 241
249int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) 242int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
@@ -580,26 +573,29 @@ void netvsc_device_remove(struct hv_device *device)
580 = rtnl_dereference(net_device_ctx->nvdev); 573 = rtnl_dereference(net_device_ctx->nvdev);
581 int i; 574 int i;
582 575
583 cancel_work_sync(&net_device->subchan_work);
584
585 netvsc_revoke_buf(device, net_device); 576 netvsc_revoke_buf(device, net_device);
586 577
587 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); 578 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
588 579
580 /* And disassociate NAPI context from device */
581 for (i = 0; i < net_device->num_chn; i++)
582 netif_napi_del(&net_device->chan_table[i].napi);
583
589 /* 584 /*
590 * At this point, no one should be accessing net_device 585 * At this point, no one should be accessing net_device
591 * except in here 586 * except in here
592 */ 587 */
593 netdev_dbg(ndev, "net device safe to remove\n"); 588 netdev_dbg(ndev, "net device safe to remove\n");
594 589
590 /* older versions require that buffer be revoked before close */
591 if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4)
592 netvsc_teardown_gpadl(device, net_device);
593
595 /* Now, we can close the channel safely */ 594 /* Now, we can close the channel safely */
596 vmbus_close(device->channel); 595 vmbus_close(device->channel);
597 596
598 netvsc_teardown_gpadl(device, net_device); 597 if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4)
599 598 netvsc_teardown_gpadl(device, net_device);
600 /* And dissassociate NAPI context from device */
601 for (i = 0; i < net_device->num_chn; i++)
602 netif_napi_del(&net_device->chan_table[i].napi);
603 599
604 /* Release all resources */ 600 /* Release all resources */
605 free_netvsc_device_rcu(net_device); 601 free_netvsc_device_rcu(net_device);
@@ -663,14 +659,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
663 queue_sends = 659 queue_sends =
664 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); 660 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
665 661
666 if (net_device->destroy && queue_sends == 0) 662 if (unlikely(net_device->destroy)) {
667 wake_up(&net_device->wait_drain); 663 if (queue_sends == 0)
664 wake_up(&net_device->wait_drain);
665 } else {
666 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
668 667
669 if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && 668 if (netif_tx_queue_stopped(txq) &&
670 (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || 669 (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
671 queue_sends < 1)) { 670 queue_sends < 1)) {
672 netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx)); 671 netif_tx_wake_queue(txq);
673 ndev_ctx->eth_stats.wake_queue++; 672 ndev_ctx->eth_stats.wake_queue++;
673 }
674 } 674 }
675} 675}
676 676
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cdb78eefab67..f28c85d212ce 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -46,7 +46,10 @@
46 46
47#include "hyperv_net.h" 47#include "hyperv_net.h"
48 48
49#define RING_SIZE_MIN 64 49#define RING_SIZE_MIN 64
50#define RETRY_US_LO 5000
51#define RETRY_US_HI 10000
52#define RETRY_MAX 2000 /* >10 sec */
50 53
51#define LINKCHANGE_INT (2 * HZ) 54#define LINKCHANGE_INT (2 * HZ)
52#define VF_TAKEOVER_INT (HZ / 10) 55#define VF_TAKEOVER_INT (HZ / 10)
@@ -89,15 +92,20 @@ static void netvsc_change_rx_flags(struct net_device *net, int change)
89static void netvsc_set_rx_mode(struct net_device *net) 92static void netvsc_set_rx_mode(struct net_device *net)
90{ 93{
91 struct net_device_context *ndev_ctx = netdev_priv(net); 94 struct net_device_context *ndev_ctx = netdev_priv(net);
92 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 95 struct net_device *vf_netdev;
93 struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev); 96 struct netvsc_device *nvdev;
94 97
98 rcu_read_lock();
99 vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
95 if (vf_netdev) { 100 if (vf_netdev) {
96 dev_uc_sync(vf_netdev, net); 101 dev_uc_sync(vf_netdev, net);
97 dev_mc_sync(vf_netdev, net); 102 dev_mc_sync(vf_netdev, net);
98 } 103 }
99 104
100 rndis_filter_update(nvdev); 105 nvdev = rcu_dereference(ndev_ctx->nvdev);
106 if (nvdev)
107 rndis_filter_update(nvdev);
108 rcu_read_unlock();
101} 109}
102 110
103static int netvsc_open(struct net_device *net) 111static int netvsc_open(struct net_device *net)
@@ -118,10 +126,8 @@ static int netvsc_open(struct net_device *net)
118 } 126 }
119 127
120 rdev = nvdev->extension; 128 rdev = nvdev->extension;
121 if (!rdev->link_state) { 129 if (!rdev->link_state)
122 netif_carrier_on(net); 130 netif_carrier_on(net);
123 netif_tx_wake_all_queues(net);
124 }
125 131
126 if (vf_netdev) { 132 if (vf_netdev) {
127 /* Setting synthetic device up transparently sets 133 /* Setting synthetic device up transparently sets
@@ -137,36 +143,25 @@ static int netvsc_open(struct net_device *net)
137 return 0; 143 return 0;
138} 144}
139 145
140static int netvsc_close(struct net_device *net) 146static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
141{ 147{
142 struct net_device_context *net_device_ctx = netdev_priv(net); 148 unsigned int retry = 0;
143 struct net_device *vf_netdev 149 int i;
144 = rtnl_dereference(net_device_ctx->vf_netdev);
145 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
146 int ret = 0;
147 u32 aread, i, msec = 10, retry = 0, retry_max = 20;
148 struct vmbus_channel *chn;
149
150 netif_tx_disable(net);
151
152 /* No need to close rndis filter if it is removed already */
153 if (!nvdev)
154 goto out;
155
156 ret = rndis_filter_close(nvdev);
157 if (ret != 0) {
158 netdev_err(net, "unable to close device (ret %d).\n", ret);
159 return ret;
160 }
161 150
162 /* Ensure pending bytes in ring are read */ 151 /* Ensure pending bytes in ring are read */
163 while (true) { 152 for (;;) {
164 aread = 0; 153 u32 aread = 0;
154
165 for (i = 0; i < nvdev->num_chn; i++) { 155 for (i = 0; i < nvdev->num_chn; i++) {
166 chn = nvdev->chan_table[i].channel; 156 struct vmbus_channel *chn
157 = nvdev->chan_table[i].channel;
158
167 if (!chn) 159 if (!chn)
168 continue; 160 continue;
169 161
162 /* make sure receive not running now */
163 napi_synchronize(&nvdev->chan_table[i].napi);
164
170 aread = hv_get_bytes_to_read(&chn->inbound); 165 aread = hv_get_bytes_to_read(&chn->inbound);
171 if (aread) 166 if (aread)
172 break; 167 break;
@@ -176,22 +171,40 @@ static int netvsc_close(struct net_device *net)
176 break; 171 break;
177 } 172 }
178 173
179 retry++; 174 if (aread == 0)
180 if (retry > retry_max || aread == 0) 175 return 0;
181 break;
182 176
183 msleep(msec); 177 if (++retry > RETRY_MAX)
178 return -ETIMEDOUT;
184 179
185 if (msec < 1000) 180 usleep_range(RETRY_US_LO, RETRY_US_HI);
186 msec *= 2;
187 } 181 }
182}
188 183
189 if (aread) { 184static int netvsc_close(struct net_device *net)
190 netdev_err(net, "Ring buffer not empty after closing rndis\n"); 185{
191 ret = -ETIMEDOUT; 186 struct net_device_context *net_device_ctx = netdev_priv(net);
187 struct net_device *vf_netdev
188 = rtnl_dereference(net_device_ctx->vf_netdev);
189 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
190 int ret;
191
192 netif_tx_disable(net);
193
194 /* No need to close rndis filter if it is removed already */
195 if (!nvdev)
196 return 0;
197
198 ret = rndis_filter_close(nvdev);
199 if (ret != 0) {
200 netdev_err(net, "unable to close device (ret %d).\n", ret);
201 return ret;
192 } 202 }
193 203
194out: 204 ret = netvsc_wait_until_empty(nvdev);
205 if (ret)
206 netdev_err(net, "Ring buffer not empty after closing rndis\n");
207
195 if (vf_netdev) 208 if (vf_netdev)
196 dev_close(vf_netdev); 209 dev_close(vf_netdev);
197 210
@@ -840,16 +853,81 @@ static void netvsc_get_channels(struct net_device *net,
840 } 853 }
841} 854}
842 855
856static int netvsc_detach(struct net_device *ndev,
857 struct netvsc_device *nvdev)
858{
859 struct net_device_context *ndev_ctx = netdev_priv(ndev);
860 struct hv_device *hdev = ndev_ctx->device_ctx;
861 int ret;
862
863 /* Don't try continuing to try and setup sub channels */
864 if (cancel_work_sync(&nvdev->subchan_work))
865 nvdev->num_chn = 1;
866
867 /* If device was up (receiving) then shutdown */
868 if (netif_running(ndev)) {
869 netif_tx_disable(ndev);
870
871 ret = rndis_filter_close(nvdev);
872 if (ret) {
873 netdev_err(ndev,
874 "unable to close device (ret %d).\n", ret);
875 return ret;
876 }
877
878 ret = netvsc_wait_until_empty(nvdev);
879 if (ret) {
880 netdev_err(ndev,
881 "Ring buffer not empty after closing rndis\n");
882 return ret;
883 }
884 }
885
886 netif_device_detach(ndev);
887
888 rndis_filter_device_remove(hdev, nvdev);
889
890 return 0;
891}
892
893static int netvsc_attach(struct net_device *ndev,
894 struct netvsc_device_info *dev_info)
895{
896 struct net_device_context *ndev_ctx = netdev_priv(ndev);
897 struct hv_device *hdev = ndev_ctx->device_ctx;
898 struct netvsc_device *nvdev;
899 struct rndis_device *rdev;
900 int ret;
901
902 nvdev = rndis_filter_device_add(hdev, dev_info);
903 if (IS_ERR(nvdev))
904 return PTR_ERR(nvdev);
905
906 /* Note: enable and attach happen when sub-channels setup */
907
908 netif_carrier_off(ndev);
909
910 if (netif_running(ndev)) {
911 ret = rndis_filter_open(nvdev);
912 if (ret)
913 return ret;
914
915 rdev = nvdev->extension;
916 if (!rdev->link_state)
917 netif_carrier_on(ndev);
918 }
919
920 return 0;
921}
922
843static int netvsc_set_channels(struct net_device *net, 923static int netvsc_set_channels(struct net_device *net,
844 struct ethtool_channels *channels) 924 struct ethtool_channels *channels)
845{ 925{
846 struct net_device_context *net_device_ctx = netdev_priv(net); 926 struct net_device_context *net_device_ctx = netdev_priv(net);
847 struct hv_device *dev = net_device_ctx->device_ctx;
848 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 927 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
849 unsigned int orig, count = channels->combined_count; 928 unsigned int orig, count = channels->combined_count;
850 struct netvsc_device_info device_info; 929 struct netvsc_device_info device_info;
851 bool was_opened; 930 int ret;
852 int ret = 0;
853 931
854 /* We do not support separate count for rx, tx, or other */ 932 /* We do not support separate count for rx, tx, or other */
855 if (count == 0 || 933 if (count == 0 ||
@@ -866,9 +944,6 @@ static int netvsc_set_channels(struct net_device *net,
866 return -EINVAL; 944 return -EINVAL;
867 945
868 orig = nvdev->num_chn; 946 orig = nvdev->num_chn;
869 was_opened = rndis_filter_opened(nvdev);
870 if (was_opened)
871 rndis_filter_close(nvdev);
872 947
873 memset(&device_info, 0, sizeof(device_info)); 948 memset(&device_info, 0, sizeof(device_info));
874 device_info.num_chn = count; 949 device_info.num_chn = count;
@@ -877,28 +952,17 @@ static int netvsc_set_channels(struct net_device *net,
877 device_info.recv_sections = nvdev->recv_section_cnt; 952 device_info.recv_sections = nvdev->recv_section_cnt;
878 device_info.recv_section_size = nvdev->recv_section_size; 953 device_info.recv_section_size = nvdev->recv_section_size;
879 954
880 rndis_filter_device_remove(dev, nvdev); 955 ret = netvsc_detach(net, nvdev);
956 if (ret)
957 return ret;
881 958
882 nvdev = rndis_filter_device_add(dev, &device_info); 959 ret = netvsc_attach(net, &device_info);
883 if (IS_ERR(nvdev)) { 960 if (ret) {
884 ret = PTR_ERR(nvdev);
885 device_info.num_chn = orig; 961 device_info.num_chn = orig;
886 nvdev = rndis_filter_device_add(dev, &device_info); 962 if (netvsc_attach(net, &device_info))
887 963 netdev_err(net, "restoring channel setting failed\n");
888 if (IS_ERR(nvdev)) {
889 netdev_err(net, "restoring channel setting failed: %ld\n",
890 PTR_ERR(nvdev));
891 return ret;
892 }
893 } 964 }
894 965
895 if (was_opened)
896 rndis_filter_open(nvdev);
897
898 /* We may have missed link change notifications */
899 net_device_ctx->last_reconfig = 0;
900 schedule_delayed_work(&net_device_ctx->dwork, 0);
901
902 return ret; 966 return ret;
903} 967}
904 968
@@ -964,10 +1028,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
964 struct net_device_context *ndevctx = netdev_priv(ndev); 1028 struct net_device_context *ndevctx = netdev_priv(ndev);
965 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); 1029 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
966 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1030 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
967 struct hv_device *hdev = ndevctx->device_ctx;
968 int orig_mtu = ndev->mtu; 1031 int orig_mtu = ndev->mtu;
969 struct netvsc_device_info device_info; 1032 struct netvsc_device_info device_info;
970 bool was_opened;
971 int ret = 0; 1033 int ret = 0;
972 1034
973 if (!nvdev || nvdev->destroy) 1035 if (!nvdev || nvdev->destroy)
@@ -980,11 +1042,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
980 return ret; 1042 return ret;
981 } 1043 }
982 1044
983 netif_device_detach(ndev);
984 was_opened = rndis_filter_opened(nvdev);
985 if (was_opened)
986 rndis_filter_close(nvdev);
987
988 memset(&device_info, 0, sizeof(device_info)); 1045 memset(&device_info, 0, sizeof(device_info));
989 device_info.num_chn = nvdev->num_chn; 1046 device_info.num_chn = nvdev->num_chn;
990 device_info.send_sections = nvdev->send_section_cnt; 1047 device_info.send_sections = nvdev->send_section_cnt;
@@ -992,35 +1049,27 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
992 device_info.recv_sections = nvdev->recv_section_cnt; 1049 device_info.recv_sections = nvdev->recv_section_cnt;
993 device_info.recv_section_size = nvdev->recv_section_size; 1050 device_info.recv_section_size = nvdev->recv_section_size;
994 1051
995 rndis_filter_device_remove(hdev, nvdev); 1052 ret = netvsc_detach(ndev, nvdev);
1053 if (ret)
1054 goto rollback_vf;
996 1055
997 ndev->mtu = mtu; 1056 ndev->mtu = mtu;
998 1057
999 nvdev = rndis_filter_device_add(hdev, &device_info); 1058 ret = netvsc_attach(ndev, &device_info);
1000 if (IS_ERR(nvdev)) { 1059 if (ret)
1001 ret = PTR_ERR(nvdev); 1060 goto rollback;
1002
1003 /* Attempt rollback to original MTU */
1004 ndev->mtu = orig_mtu;
1005 nvdev = rndis_filter_device_add(hdev, &device_info);
1006
1007 if (vf_netdev)
1008 dev_set_mtu(vf_netdev, orig_mtu);
1009
1010 if (IS_ERR(nvdev)) {
1011 netdev_err(ndev, "restoring mtu failed: %ld\n",
1012 PTR_ERR(nvdev));
1013 return ret;
1014 }
1015 }
1016 1061
1017 if (was_opened) 1062 return 0;
1018 rndis_filter_open(nvdev);
1019 1063
1020 netif_device_attach(ndev); 1064rollback:
1065 /* Attempt rollback to original MTU */
1066 ndev->mtu = orig_mtu;
1021 1067
1022 /* We may have missed link change notifications */ 1068 if (netvsc_attach(ndev, &device_info))
1023 schedule_delayed_work(&ndevctx->dwork, 0); 1069 netdev_err(ndev, "restoring mtu failed\n");
1070rollback_vf:
1071 if (vf_netdev)
1072 dev_set_mtu(vf_netdev, orig_mtu);
1024 1073
1025 return ret; 1074 return ret;
1026} 1075}
@@ -1526,11 +1575,9 @@ static int netvsc_set_ringparam(struct net_device *ndev,
1526{ 1575{
1527 struct net_device_context *ndevctx = netdev_priv(ndev); 1576 struct net_device_context *ndevctx = netdev_priv(ndev);
1528 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1577 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1529 struct hv_device *hdev = ndevctx->device_ctx;
1530 struct netvsc_device_info device_info; 1578 struct netvsc_device_info device_info;
1531 struct ethtool_ringparam orig; 1579 struct ethtool_ringparam orig;
1532 u32 new_tx, new_rx; 1580 u32 new_tx, new_rx;
1533 bool was_opened;
1534 int ret = 0; 1581 int ret = 0;
1535 1582
1536 if (!nvdev || nvdev->destroy) 1583 if (!nvdev || nvdev->destroy)
@@ -1555,34 +1602,18 @@ static int netvsc_set_ringparam(struct net_device *ndev,
1555 device_info.recv_sections = new_rx; 1602 device_info.recv_sections = new_rx;
1556 device_info.recv_section_size = nvdev->recv_section_size; 1603 device_info.recv_section_size = nvdev->recv_section_size;
1557 1604
1558 netif_device_detach(ndev); 1605 ret = netvsc_detach(ndev, nvdev);
1559 was_opened = rndis_filter_opened(nvdev); 1606 if (ret)
1560 if (was_opened) 1607 return ret;
1561 rndis_filter_close(nvdev);
1562
1563 rndis_filter_device_remove(hdev, nvdev);
1564
1565 nvdev = rndis_filter_device_add(hdev, &device_info);
1566 if (IS_ERR(nvdev)) {
1567 ret = PTR_ERR(nvdev);
1568 1608
1609 ret = netvsc_attach(ndev, &device_info);
1610 if (ret) {
1569 device_info.send_sections = orig.tx_pending; 1611 device_info.send_sections = orig.tx_pending;
1570 device_info.recv_sections = orig.rx_pending; 1612 device_info.recv_sections = orig.rx_pending;
1571 nvdev = rndis_filter_device_add(hdev, &device_info);
1572 if (IS_ERR(nvdev)) {
1573 netdev_err(ndev, "restoring ringparam failed: %ld\n",
1574 PTR_ERR(nvdev));
1575 return ret;
1576 }
1577 }
1578
1579 if (was_opened)
1580 rndis_filter_open(nvdev);
1581 netif_device_attach(ndev);
1582 1613
1583 /* We may have missed link change notifications */ 1614 if (netvsc_attach(ndev, &device_info))
1584 ndevctx->last_reconfig = 0; 1615 netdev_err(ndev, "restoring ringparam failed");
1585 schedule_delayed_work(&ndevctx->dwork, 0); 1616 }
1586 1617
1587 return ret; 1618 return ret;
1588} 1619}
@@ -1846,8 +1877,12 @@ static void __netvsc_vf_setup(struct net_device *ndev,
1846 1877
1847 /* set multicast etc flags on VF */ 1878 /* set multicast etc flags on VF */
1848 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); 1879 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
1880
1881 /* sync address list from ndev to VF */
1882 netif_addr_lock_bh(ndev);
1849 dev_uc_sync(vf_netdev, ndev); 1883 dev_uc_sync(vf_netdev, ndev);
1850 dev_mc_sync(vf_netdev, ndev); 1884 dev_mc_sync(vf_netdev, ndev);
1885 netif_addr_unlock_bh(ndev);
1851 1886
1852 if (netif_running(ndev)) { 1887 if (netif_running(ndev)) {
1853 ret = dev_open(vf_netdev); 1888 ret = dev_open(vf_netdev);
@@ -2063,8 +2098,8 @@ no_net:
2063static int netvsc_remove(struct hv_device *dev) 2098static int netvsc_remove(struct hv_device *dev)
2064{ 2099{
2065 struct net_device_context *ndev_ctx; 2100 struct net_device_context *ndev_ctx;
2066 struct net_device *vf_netdev; 2101 struct net_device *vf_netdev, *net;
2067 struct net_device *net; 2102 struct netvsc_device *nvdev;
2068 2103
2069 net = hv_get_drvdata(dev); 2104 net = hv_get_drvdata(dev);
2070 if (net == NULL) { 2105 if (net == NULL) {
@@ -2074,10 +2109,14 @@ static int netvsc_remove(struct hv_device *dev)
2074 2109
2075 ndev_ctx = netdev_priv(net); 2110 ndev_ctx = netdev_priv(net);
2076 2111
2077 netif_device_detach(net);
2078
2079 cancel_delayed_work_sync(&ndev_ctx->dwork); 2112 cancel_delayed_work_sync(&ndev_ctx->dwork);
2080 2113
2114 rcu_read_lock();
2115 nvdev = rcu_dereference(ndev_ctx->nvdev);
2116
2117 if (nvdev)
2118 cancel_work_sync(&nvdev->subchan_work);
2119
2081 /* 2120 /*
2082 * Call to the vsc driver to let it know that the device is being 2121 * Call to the vsc driver to let it know that the device is being
2083 * removed. Also blocks mtu and channel changes. 2122 * removed. Also blocks mtu and channel changes.
@@ -2087,11 +2126,13 @@ static int netvsc_remove(struct hv_device *dev)
2087 if (vf_netdev) 2126 if (vf_netdev)
2088 netvsc_unregister_vf(vf_netdev); 2127 netvsc_unregister_vf(vf_netdev);
2089 2128
2129 if (nvdev)
2130 rndis_filter_device_remove(dev, nvdev);
2131
2090 unregister_netdevice(net); 2132 unregister_netdevice(net);
2091 2133
2092 rndis_filter_device_remove(dev,
2093 rtnl_dereference(ndev_ctx->nvdev));
2094 rtnl_unlock(); 2134 rtnl_unlock();
2135 rcu_read_unlock();
2095 2136
2096 hv_set_drvdata(dev, NULL); 2137 hv_set_drvdata(dev, NULL);
2097 2138
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2dc00f714482..020f8bc54386 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -267,13 +267,23 @@ static void rndis_set_link_state(struct rndis_device *rdev,
267 } 267 }
268} 268}
269 269
270static void rndis_filter_receive_response(struct rndis_device *dev, 270static void rndis_filter_receive_response(struct net_device *ndev,
271 struct rndis_message *resp) 271 struct netvsc_device *nvdev,
272 const struct rndis_message *resp)
272{ 273{
274 struct rndis_device *dev = nvdev->extension;
273 struct rndis_request *request = NULL; 275 struct rndis_request *request = NULL;
274 bool found = false; 276 bool found = false;
275 unsigned long flags; 277 unsigned long flags;
276 struct net_device *ndev = dev->ndev; 278
279 /* This should never happen, it means control message
280 * response received after device removed.
281 */
282 if (dev->state == RNDIS_DEV_UNINITIALIZED) {
283 netdev_err(ndev,
284 "got rndis message uninitialized\n");
285 return;
286 }
277 287
278 spin_lock_irqsave(&dev->request_lock, flags); 288 spin_lock_irqsave(&dev->request_lock, flags);
279 list_for_each_entry(request, &dev->req_list, list_ent) { 289 list_for_each_entry(request, &dev->req_list, list_ent) {
@@ -355,7 +365,6 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
355 365
356static int rndis_filter_receive_data(struct net_device *ndev, 366static int rndis_filter_receive_data(struct net_device *ndev,
357 struct netvsc_device *nvdev, 367 struct netvsc_device *nvdev,
358 struct rndis_device *dev,
359 struct rndis_message *msg, 368 struct rndis_message *msg,
360 struct vmbus_channel *channel, 369 struct vmbus_channel *channel,
361 void *data, u32 data_buflen) 370 void *data, u32 data_buflen)
@@ -375,7 +384,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
375 * should be the data packet size plus the trailer padding size 384 * should be the data packet size plus the trailer padding size
376 */ 385 */
377 if (unlikely(data_buflen < rndis_pkt->data_len)) { 386 if (unlikely(data_buflen < rndis_pkt->data_len)) {
378 netdev_err(dev->ndev, "rndis message buffer " 387 netdev_err(ndev, "rndis message buffer "
379 "overflow detected (got %u, min %u)" 388 "overflow detected (got %u, min %u)"
380 "...dropping this message!\n", 389 "...dropping this message!\n",
381 data_buflen, rndis_pkt->data_len); 390 data_buflen, rndis_pkt->data_len);
@@ -403,35 +412,20 @@ int rndis_filter_receive(struct net_device *ndev,
403 void *data, u32 buflen) 412 void *data, u32 buflen)
404{ 413{
405 struct net_device_context *net_device_ctx = netdev_priv(ndev); 414 struct net_device_context *net_device_ctx = netdev_priv(ndev);
406 struct rndis_device *rndis_dev = net_dev->extension;
407 struct rndis_message *rndis_msg = data; 415 struct rndis_message *rndis_msg = data;
408 416
409 /* Make sure the rndis device state is initialized */
410 if (unlikely(!rndis_dev)) {
411 netif_dbg(net_device_ctx, rx_err, ndev,
412 "got rndis message but no rndis device!\n");
413 return NVSP_STAT_FAIL;
414 }
415
416 if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) {
417 netif_dbg(net_device_ctx, rx_err, ndev,
418 "got rndis message uninitialized\n");
419 return NVSP_STAT_FAIL;
420 }
421
422 if (netif_msg_rx_status(net_device_ctx)) 417 if (netif_msg_rx_status(net_device_ctx))
423 dump_rndis_message(ndev, rndis_msg); 418 dump_rndis_message(ndev, rndis_msg);
424 419
425 switch (rndis_msg->ndis_msg_type) { 420 switch (rndis_msg->ndis_msg_type) {
426 case RNDIS_MSG_PACKET: 421 case RNDIS_MSG_PACKET:
427 return rndis_filter_receive_data(ndev, net_dev, 422 return rndis_filter_receive_data(ndev, net_dev, rndis_msg,
428 rndis_dev, rndis_msg,
429 channel, data, buflen); 423 channel, data, buflen);
430 case RNDIS_MSG_INIT_C: 424 case RNDIS_MSG_INIT_C:
431 case RNDIS_MSG_QUERY_C: 425 case RNDIS_MSG_QUERY_C:
432 case RNDIS_MSG_SET_C: 426 case RNDIS_MSG_SET_C:
433 /* completion msgs */ 427 /* completion msgs */
434 rndis_filter_receive_response(rndis_dev, rndis_msg); 428 rndis_filter_receive_response(ndev, net_dev, rndis_msg);
435 break; 429 break;
436 430
437 case RNDIS_MSG_INDICATE: 431 case RNDIS_MSG_INDICATE:
@@ -828,13 +822,15 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
828 struct rndis_set_request *set; 822 struct rndis_set_request *set;
829 int ret; 823 int ret;
830 824
825 if (dev->filter == new_filter)
826 return 0;
827
831 request = get_rndis_request(dev, RNDIS_MSG_SET, 828 request = get_rndis_request(dev, RNDIS_MSG_SET,
832 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + 829 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
833 sizeof(u32)); 830 sizeof(u32));
834 if (!request) 831 if (!request)
835 return -ENOMEM; 832 return -ENOMEM;
836 833
837
838 /* Setup the rndis set */ 834 /* Setup the rndis set */
839 set = &request->request_msg.msg.set_req; 835 set = &request->request_msg.msg.set_req;
840 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER; 836 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
@@ -845,8 +841,10 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
845 &new_filter, sizeof(u32)); 841 &new_filter, sizeof(u32));
846 842
847 ret = rndis_filter_send_request(dev, request); 843 ret = rndis_filter_send_request(dev, request);
848 if (ret == 0) 844 if (ret == 0) {
849 wait_for_completion(&request->wait_event); 845 wait_for_completion(&request->wait_event);
846 dev->filter = new_filter;
847 }
850 848
851 put_rndis_request(dev, request); 849 put_rndis_request(dev, request);
852 850
@@ -864,9 +862,9 @@ static void rndis_set_multicast(struct work_struct *w)
864 filter = NDIS_PACKET_TYPE_PROMISCUOUS; 862 filter = NDIS_PACKET_TYPE_PROMISCUOUS;
865 } else { 863 } else {
866 if (flags & IFF_ALLMULTI) 864 if (flags & IFF_ALLMULTI)
867 flags |= NDIS_PACKET_TYPE_ALL_MULTICAST; 865 filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
868 if (flags & IFF_BROADCAST) 866 if (flags & IFF_BROADCAST)
869 flags |= NDIS_PACKET_TYPE_BROADCAST; 867 filter |= NDIS_PACKET_TYPE_BROADCAST;
870 } 868 }
871 869
872 rndis_filter_set_packet_filter(rdev, filter); 870 rndis_filter_set_packet_filter(rdev, filter);
@@ -1124,6 +1122,7 @@ void rndis_set_subchannel(struct work_struct *w)
1124 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) 1122 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1125 ndev_ctx->tx_table[i] = i % nvdev->num_chn; 1123 ndev_ctx->tx_table[i] = i % nvdev->num_chn;
1126 1124
1125 netif_device_attach(ndev);
1127 rtnl_unlock(); 1126 rtnl_unlock();
1128 return; 1127 return;
1129 1128
@@ -1134,6 +1133,8 @@ failed:
1134 1133
1135 nvdev->max_chn = 1; 1134 nvdev->max_chn = 1;
1136 nvdev->num_chn = 1; 1135 nvdev->num_chn = 1;
1136
1137 netif_device_attach(ndev);
1137unlock: 1138unlock:
1138 rtnl_unlock(); 1139 rtnl_unlock();
1139} 1140}
@@ -1336,6 +1337,10 @@ out:
1336 net_device->num_chn = 1; 1337 net_device->num_chn = 1;
1337 } 1338 }
1338 1339
1340 /* No sub channels, device is ready */
1341 if (net_device->num_chn == 1)
1342 netif_device_attach(net);
1343
1339 return net_device; 1344 return net_device;
1340 1345
1341err_dev_remv: 1346err_dev_remv:
@@ -1348,16 +1353,12 @@ void rndis_filter_device_remove(struct hv_device *dev,
1348{ 1353{
1349 struct rndis_device *rndis_dev = net_dev->extension; 1354 struct rndis_device *rndis_dev = net_dev->extension;
1350 1355
1351 /* Don't try and setup sub channels if about to halt */
1352 cancel_work_sync(&net_dev->subchan_work);
1353
1354 /* Halt and release the rndis device */ 1356 /* Halt and release the rndis device */
1355 rndis_filter_halt_device(net_dev, rndis_dev); 1357 rndis_filter_halt_device(net_dev, rndis_dev);
1356 1358
1357 net_dev->extension = NULL; 1359 net_dev->extension = NULL;
1358 1360
1359 netvsc_device_remove(dev); 1361 netvsc_device_remove(dev);
1360 kfree(rndis_dev);
1361} 1362}
1362 1363
1363int rndis_filter_open(struct netvsc_device *nvdev) 1364int rndis_filter_open(struct netvsc_device *nvdev)
@@ -1375,10 +1376,3 @@ int rndis_filter_close(struct netvsc_device *nvdev)
1375 1376
1376 return rndis_filter_close_device(nvdev->extension); 1377 return rndis_filter_close_device(nvdev->extension);
1377} 1378}
1378
1379bool rndis_filter_opened(const struct netvsc_device *nvdev)
1380{
1381 const struct rndis_device *dev = nvdev->extension;
1382
1383 return dev->state == RNDIS_DEV_DATAINITIALIZED;
1384}