Diffstat (limited to 'drivers/net/hyperv')
-rw-r--r--	drivers/net/hyperv/hyperv_net.h	|  24
-rw-r--r--	drivers/net/hyperv/netvsc.c	|  19
-rw-r--r--	drivers/net/hyperv/netvsc_drv.c	| 105
3 files changed, 66 insertions, 82 deletions
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 467fb8b4d083..591af71eae56 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -644,12 +644,6 @@ struct netvsc_reconfig {
 	u32 event;
 };
 
-struct garp_wrk {
-	struct work_struct dwrk;
-	struct net_device *netdev;
-	struct netvsc_device *netvsc_dev;
-};
-
 /* The context of the netvsc device */
 struct net_device_context {
 	/* point back to our device context */
@@ -667,7 +661,6 @@ struct net_device_context {
 
 	struct work_struct work;
 	u32 msg_enable; /* debug level */
-	struct garp_wrk gwrk;
 
 	struct netvsc_stats __percpu *tx_stats;
 	struct netvsc_stats __percpu *rx_stats;
@@ -678,6 +671,15 @@ struct net_device_context {
 
 	/* the device is going away */
 	bool start_remove;
+
+	/* State to manage the associated VF interface. */
+	struct net_device *vf_netdev;
+	bool vf_inject;
+	atomic_t vf_use_cnt;
+	/* 1: allocated, serial number is valid. 0: not allocated */
+	u32 vf_alloc;
+	/* Serial number of the VF to team with */
+	u32 vf_serial;
 };
 
 /* Per netvsc device */
@@ -733,15 +735,7 @@ struct netvsc_device {
 	u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
 	u32 pkt_align; /* alignment bytes, e.g. 8 */
 
-	/* 1: allocated, serial number is valid. 0: not allocated */
-	u32 vf_alloc;
-	/* Serial number of the VF to team with */
-	u32 vf_serial;
 	atomic_t open_cnt;
-	/* State to manage the associated VF interface. */
-	bool vf_inject;
-	struct net_device *vf_netdev;
-	atomic_t vf_use_cnt;
 };
 
 static inline struct netvsc_device *
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 20e09174ff62..410fb8e81376 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void)
 	init_waitqueue_head(&net_device->wait_drain);
 	net_device->destroy = false;
 	atomic_set(&net_device->open_cnt, 0);
-	atomic_set(&net_device->vf_use_cnt, 0);
 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 
-	net_device->vf_netdev = NULL;
-	net_device->vf_inject = false;
-
 	return net_device;
 }
 
@@ -1106,16 +1102,16 @@ static void netvsc_send_table(struct hv_device *hdev,
 		nvscdev->send_table[i] = tab[i];
 }
 
-static void netvsc_send_vf(struct netvsc_device *nvdev,
+static void netvsc_send_vf(struct net_device_context *net_device_ctx,
 			   struct nvsp_message *nvmsg)
 {
-	nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
-	nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
 }
 
 static inline void netvsc_receive_inband(struct hv_device *hdev,
-					 struct netvsc_device *nvdev,
+					 struct net_device_context *net_device_ctx,
 					 struct nvsp_message *nvmsg)
 {
 	switch (nvmsg->hdr.msg_type) {
 	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
@@ -1123,7 +1119,7 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
 		break;
 
 	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
-		netvsc_send_vf(nvdev, nvmsg);
+		netvsc_send_vf(net_device_ctx, nvmsg);
 		break;
 	}
 }
@@ -1136,6 +1132,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
 				   struct vmpacket_descriptor *desc)
 {
 	struct nvsp_message *nvmsg;
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 
 	nvmsg = (struct nvsp_message *)((unsigned long)
 		desc + (desc->offset8 << 3));
@@ -1150,7 +1147,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
 		break;
 
 	case VM_PKT_DATA_INBAND:
-		netvsc_receive_inband(device, net_device, nvmsg);
+		netvsc_receive_inband(device, net_device_ctx, nvmsg);
 		break;
 
 	default:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 41bd952cc28d..3ba29fc80d05 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -658,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
 	struct sk_buff *skb;
 	struct sk_buff *vf_skb;
 	struct netvsc_stats *rx_stats;
-	struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
 	u32 bytes_recvd = packet->total_data_buflen;
 	int ret = 0;
 
 	if (!net || net->reg_state != NETREG_REGISTERED)
 		return NVSP_STAT_FAIL;
 
-	if (READ_ONCE(netvsc_dev->vf_inject)) {
-		atomic_inc(&netvsc_dev->vf_use_cnt);
-		if (!READ_ONCE(netvsc_dev->vf_inject)) {
+	if (READ_ONCE(net_device_ctx->vf_inject)) {
+		atomic_inc(&net_device_ctx->vf_use_cnt);
+		if (!READ_ONCE(net_device_ctx->vf_inject)) {
 			/*
 			 * We raced; just move on.
 			 */
-			atomic_dec(&netvsc_dev->vf_use_cnt);
+			atomic_dec(&net_device_ctx->vf_use_cnt);
 			goto vf_injection_done;
 		}
 
@@ -683,17 +682,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
		 * the host). Deliver these via the VF interface
		 * in the guest.
		 */
-		vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
-					       csum_info, *data, vlan_tci);
+		vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
+					       packet, csum_info, *data,
+					       vlan_tci);
 		if (vf_skb != NULL) {
-			++netvsc_dev->vf_netdev->stats.rx_packets;
-			netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
+			++net_device_ctx->vf_netdev->stats.rx_packets;
+			net_device_ctx->vf_netdev->stats.rx_bytes +=
+				bytes_recvd;
 			netif_receive_skb(vf_skb);
 		} else {
 			++net->stats.rx_dropped;
 			ret = NVSP_STAT_FAIL;
 		}
-		atomic_dec(&netvsc_dev->vf_use_cnt);
+		atomic_dec(&net_device_ctx->vf_use_cnt);
 		return ret;
 	}
 
@@ -1150,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev)
 	free_netdev(netdev);
 }
 
-static void netvsc_notify_peers(struct work_struct *wrk)
-{
-	struct garp_wrk *gwrk;
-
-	gwrk = container_of(wrk, struct garp_wrk, dwrk);
-
-	netdev_notify_peers(gwrk->netdev);
-
-	atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
-}
-
 static struct net_device *get_netvsc_net_device(char *mac)
 {
 	struct net_device *dev, *found = NULL;
@@ -1203,7 +1193,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
 
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = net_device_ctx->nvdev;
-	if (netvsc_dev == NULL)
+	if (!netvsc_dev || net_device_ctx->vf_netdev)
 		return NOTIFY_DONE;
 
 	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
@@ -1211,10 +1201,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
 	 * Take a reference on the module.
 	 */
 	try_module_get(THIS_MODULE);
-	netvsc_dev->vf_netdev = vf_netdev;
+	net_device_ctx->vf_netdev = vf_netdev;
 	return NOTIFY_OK;
 }
 
+static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
+{
+	net_device_ctx->vf_inject = true;
+}
+
+static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
+{
+	net_device_ctx->vf_inject = false;
+
+	/* Wait for currently active users to drain out. */
+	while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
+		udelay(50);
+}
 
 static int netvsc_vf_up(struct net_device *vf_netdev)
 {
@@ -1233,11 +1236,11 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = net_device_ctx->nvdev;
 
-	if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+	if (!netvsc_dev || !net_device_ctx->vf_netdev)
 		return NOTIFY_DONE;
 
 	netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
-	netvsc_dev->vf_inject = true;
+	netvsc_inject_enable(net_device_ctx);
 
 	/*
 	 * Open the device before switching data path.
@@ -1252,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
 
 	netif_carrier_off(ndev);
 
-	/*
-	 * Now notify peers. We are scheduling work to
-	 * notify peers; take a reference to prevent
-	 * the VF interface from vanishing.
-	 */
-	atomic_inc(&netvsc_dev->vf_use_cnt);
-	net_device_ctx->gwrk.netdev = vf_netdev;
-	net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
-	schedule_work(&net_device_ctx->gwrk.dwrk);
+	/* Now notify peers through VF device. */
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
 
 	return NOTIFY_OK;
 }
@@ -1283,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = net_device_ctx->nvdev;
 
-	if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+	if (!netvsc_dev || !net_device_ctx->vf_netdev)
 		return NOTIFY_DONE;
 
 	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
-	netvsc_dev->vf_inject = false;
-	/*
-	 * Wait for currently active users to
-	 * drain out.
-	 */
-
-	while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
-		udelay(50);
+	netvsc_inject_disable(net_device_ctx);
 	netvsc_switch_datapath(ndev, false);
 	netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
 	rndis_filter_close(netvsc_dev);
 	netif_carrier_on(ndev);
-	/*
-	 * Notify peers.
-	 */
-	atomic_inc(&netvsc_dev->vf_use_cnt);
-	net_device_ctx->gwrk.netdev = ndev;
-	net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
-	schedule_work(&net_device_ctx->gwrk.dwrk);
+
+	/* Now notify peers through netvsc device. */
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
 
 	return NOTIFY_OK;
 }
@@ -1327,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
 
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = net_device_ctx->nvdev;
-	if (netvsc_dev == NULL)
+	if (!netvsc_dev || !net_device_ctx->vf_netdev)
 		return NOTIFY_DONE;
 	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
-
-	netvsc_dev->vf_netdev = NULL;
+	netvsc_inject_disable(net_device_ctx);
+	net_device_ctx->vf_netdev = NULL;
 	module_put(THIS_MODULE);
 	return NOTIFY_OK;
 }
@@ -1377,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev,
 
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
 	INIT_WORK(&net_device_ctx->work, do_set_multicast);
-	INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
 
 	spin_lock_init(&net_device_ctx->lock);
 	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
 
+	atomic_set(&net_device_ctx->vf_use_cnt, 0);
+	net_device_ctx->vf_netdev = NULL;
+	net_device_ctx->vf_inject = false;
+
 	net->netdev_ops = &device_ops;
 
 	net->hw_features = NETVSC_HW_FEATURES;
@@ -1494,8 +1482,13 @@ static int netvsc_netdev_event(struct notifier_block *this,
 {
 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
 
-	/* Avoid Vlan, Bonding dev with same MAC registering as VF */
-	if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING))
+	/* Avoid Vlan dev with same MAC registering as VF */
+	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+		return NOTIFY_DONE;
+
+	/* Avoid Bonding master dev with same MAC registering as VF */
+	if (event_dev->priv_flags & IFF_BONDING &&
+	    event_dev->flags & IFF_MASTER)
 		return NOTIFY_DONE;
 
 	switch (event) {