aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVadim Lomovtsev <vlomovtsev@marvell.com>2019-02-20 06:02:43 -0500
committerDavid S. Miller <davem@davemloft.net>2019-02-22 14:43:44 -0500
commit2ecbe4f4a027890a5d74a5100075aa6a373bea2c (patch)
treebef3411a1e1ffb00811d7c498f0090286b80a59d
parentf6d25aca1ba3f46b76dabf6023a0dc2062dc792e (diff)
net: thunderx: replace global nicvf_rx_mode_wq work queue for all VFs to private for each of them.
Having one work queue for the receive mode configuration ndo_set_rx_mode() call shared by all VFs makes each of them wait until the set_rx_mode() call for another VF completes, if any of the close, set-receive-mode or change-flags calls has already been invoked. Potentially this could change the device state before the corresponding receive mode configuration call completes, so the call itself becomes meaningless and may corrupt data or break the configuration sequence. We don't need any delay in the NIC VF configuration sequence, so having a delayed work call with a 0 delay makes no sense. This commit implements one work queue per NIC VF for the set_rx_mode task so that they work independently, and replaces delayed_work with work_struct. Signed-off-by: Vadim Lomovtsev <vlomovtsev@marvell.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c30
2 files changed, 19 insertions, 15 deletions
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index f4d81765221e..376a96bce33f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -271,7 +271,7 @@ struct xcast_addr_list {
271}; 271};
272 272
273struct nicvf_work { 273struct nicvf_work {
274 struct delayed_work work; 274 struct work_struct work;
275 u8 mode; 275 u8 mode;
276 struct xcast_addr_list *mc; 276 struct xcast_addr_list *mc;
277}; 277};
@@ -327,6 +327,8 @@ struct nicvf {
327 struct nicvf_work rx_mode_work; 327 struct nicvf_work rx_mode_work;
328 /* spinlock to protect workqueue arguments from concurrent access */ 328 /* spinlock to protect workqueue arguments from concurrent access */
329 spinlock_t rx_mode_wq_lock; 329 spinlock_t rx_mode_wq_lock;
330 /* workqueue for handling kernel ndo_set_rx_mode() calls */
331 struct workqueue_struct *nicvf_rx_mode_wq;
330 332
331 /* PTP timestamp */ 333 /* PTP timestamp */
332 struct cavium_ptp *ptp_clock; 334 struct cavium_ptp *ptp_clock;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 88f8a8fa93cd..abf24e7dff2d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444);
68MODULE_PARM_DESC(cpi_alg, 68MODULE_PARM_DESC(cpi_alg,
69 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); 69 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
70 70
71/* workqueue for handling kernel ndo_set_rx_mode() calls */
72static struct workqueue_struct *nicvf_rx_mode_wq;
73
74static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) 71static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
75{ 72{
76 if (nic->sqs_mode) 73 if (nic->sqs_mode)
@@ -1311,6 +1308,9 @@ int nicvf_stop(struct net_device *netdev)
1311 struct nicvf_cq_poll *cq_poll = NULL; 1308 struct nicvf_cq_poll *cq_poll = NULL;
1312 union nic_mbx mbx = {}; 1309 union nic_mbx mbx = {};
1313 1310
1311 /* wait till all queued set_rx_mode tasks completes */
1312 drain_workqueue(nic->nicvf_rx_mode_wq);
1313
1314 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; 1314 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
1315 nicvf_send_msg_to_pf(nic, &mbx); 1315 nicvf_send_msg_to_pf(nic, &mbx);
1316 1316
@@ -1418,6 +1418,9 @@ int nicvf_open(struct net_device *netdev)
1418 struct nicvf_cq_poll *cq_poll = NULL; 1418 struct nicvf_cq_poll *cq_poll = NULL;
1419 union nic_mbx mbx = {}; 1419 union nic_mbx mbx = {};
1420 1420
1421 /* wait till all queued set_rx_mode tasks completes if any */
1422 drain_workqueue(nic->nicvf_rx_mode_wq);
1423
1421 netif_carrier_off(netdev); 1424 netif_carrier_off(netdev);
1422 1425
1423 err = nicvf_register_misc_interrupt(nic); 1426 err = nicvf_register_misc_interrupt(nic);
@@ -1973,7 +1976,7 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
1973static void nicvf_set_rx_mode_task(struct work_struct *work_arg) 1976static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
1974{ 1977{
1975 struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work, 1978 struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
1976 work.work); 1979 work);
1977 struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work); 1980 struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
1978 u8 mode; 1981 u8 mode;
1979 struct xcast_addr_list *mc; 1982 struct xcast_addr_list *mc;
@@ -2030,7 +2033,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
2030 kfree(nic->rx_mode_work.mc); 2033 kfree(nic->rx_mode_work.mc);
2031 nic->rx_mode_work.mc = mc_list; 2034 nic->rx_mode_work.mc = mc_list;
2032 nic->rx_mode_work.mode = mode; 2035 nic->rx_mode_work.mode = mode;
2033 queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0); 2036 queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
2034 spin_unlock(&nic->rx_mode_wq_lock); 2037 spin_unlock(&nic->rx_mode_wq_lock);
2035} 2038}
2036 2039
@@ -2187,7 +2190,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2187 2190
2188 INIT_WORK(&nic->reset_task, nicvf_reset_task); 2191 INIT_WORK(&nic->reset_task, nicvf_reset_task);
2189 2192
2190 INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); 2193 nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
2194 WQ_MEM_RECLAIM,
2195 nic->vf_id);
2196 INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
2191 spin_lock_init(&nic->rx_mode_wq_lock); 2197 spin_lock_init(&nic->rx_mode_wq_lock);
2192 2198
2193 err = register_netdev(netdev); 2199 err = register_netdev(netdev);
@@ -2228,13 +2234,15 @@ static void nicvf_remove(struct pci_dev *pdev)
2228 nic = netdev_priv(netdev); 2234 nic = netdev_priv(netdev);
2229 pnetdev = nic->pnicvf->netdev; 2235 pnetdev = nic->pnicvf->netdev;
2230 2236
2231 cancel_delayed_work_sync(&nic->rx_mode_work.work);
2232
2233 /* Check if this Qset is assigned to different VF. 2237 /* Check if this Qset is assigned to different VF.
2234 * If yes, clean primary and all secondary Qsets. 2238 * If yes, clean primary and all secondary Qsets.
2235 */ 2239 */
2236 if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED)) 2240 if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
2237 unregister_netdev(pnetdev); 2241 unregister_netdev(pnetdev);
2242 if (nic->nicvf_rx_mode_wq) {
2243 destroy_workqueue(nic->nicvf_rx_mode_wq);
2244 nic->nicvf_rx_mode_wq = NULL;
2245 }
2238 nicvf_unregister_interrupts(nic); 2246 nicvf_unregister_interrupts(nic);
2239 pci_set_drvdata(pdev, NULL); 2247 pci_set_drvdata(pdev, NULL);
2240 if (nic->drv_stats) 2248 if (nic->drv_stats)
@@ -2261,17 +2269,11 @@ static struct pci_driver nicvf_driver = {
2261static int __init nicvf_init_module(void) 2269static int __init nicvf_init_module(void)
2262{ 2270{
2263 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); 2271 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
2264 nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
2265 WQ_MEM_RECLAIM);
2266 return pci_register_driver(&nicvf_driver); 2272 return pci_register_driver(&nicvf_driver);
2267} 2273}
2268 2274
2269static void __exit nicvf_cleanup_module(void) 2275static void __exit nicvf_cleanup_module(void)
2270{ 2276{
2271 if (nicvf_rx_mode_wq) {
2272 destroy_workqueue(nicvf_rx_mode_wq);
2273 nicvf_rx_mode_wq = NULL;
2274 }
2275 pci_unregister_driver(&nicvf_driver); 2277 pci_unregister_driver(&nicvf_driver);
2276} 2278}
2277 2279