aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorYuval Mintz <Yuval.Mintz@qlogic.com>2014-03-23 12:12:24 -0400
committerDavid S. Miller <davem@davemloft.net>2014-03-25 21:07:03 -0400
commit370d4a26590fcc7510ad4a8432e4982a209f1b59 (patch)
treec6047611d7d2661fd10e0117411c4762336a3b26
parent42f8277f56cf4a9570b1f0fe10a4fec3f48c832a (diff)
bnx2x: Create workqueue for IOV related tasks
The bnx2x sriov mechanisms were done in the bnx2x slowpath workitem which runs on the bnx2x's workqueue; This workitem is also responsible for the bottom half of interrupt handling in the driver, and specifically it also receives FW notifications of ramrod completions, allowing other flows to progress. The original design of the sriov-related flows was based on the notion such flows must not sleep, since their context is the slowpath workitem. Otherwise, we might reach timeouts - those flows may wait for ramrod completion that will never arrive as the workitem will not be re-scheduled until that same flow will be over. In more recent times bnx2x started supporting features in which the VF interface can be configured by the tools accessing the PF on the hypervisor. This support created possible races on the VF-PF lock (which is taken either when the PF is handling a VF message or when the PF is doing some slowpath work on behalf of the VF) which may cause timeouts on the VF side and lags on the PF side. This patch changes the scheme - it creates a new workqueue for sriov related tasks and moves all handling currently done in the slowpath task into the new workqueue. Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com> Signed-off-by: Ariel Elior <Ariel.Elior@qlogic.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c29
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c41
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h37
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c103
5 files changed, 147 insertions, 78 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index f33fab6abb95..8e35dbaca76e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1155,10 +1155,6 @@ struct bnx2x_port {
1155 (offsetof(struct bnx2x_eth_stats, stat_name) / 4) 1155 (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
1156 1156
1157/* slow path */ 1157/* slow path */
1158
1159/* slow path work-queue */
1160extern struct workqueue_struct *bnx2x_wq;
1161
1162#define BNX2X_MAX_NUM_OF_VFS 64 1158#define BNX2X_MAX_NUM_OF_VFS 64
1163#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */ 1159#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
1164#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND) 1160#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
@@ -1416,6 +1412,12 @@ enum sp_rtnl_flag {
1416 BNX2X_SP_RTNL_GET_DRV_VERSION, 1412 BNX2X_SP_RTNL_GET_DRV_VERSION,
1417}; 1413};
1418 1414
1415enum bnx2x_iov_flag {
1416 BNX2X_IOV_HANDLE_VF_MSG,
1417 BNX2X_IOV_CONT_VFOP,
1418 BNX2X_IOV_HANDLE_FLR,
1419};
1420
1419struct bnx2x_prev_path_list { 1421struct bnx2x_prev_path_list {
1420 struct list_head list; 1422 struct list_head list;
1421 u8 bus; 1423 u8 bus;
@@ -1614,6 +1616,8 @@ struct bnx2x {
1614 int mrrs; 1616 int mrrs;
1615 1617
1616 struct delayed_work sp_task; 1618 struct delayed_work sp_task;
1619 struct delayed_work iov_task;
1620
1617 atomic_t interrupt_occurred; 1621 atomic_t interrupt_occurred;
1618 struct delayed_work sp_rtnl_task; 1622 struct delayed_work sp_rtnl_task;
1619 1623
@@ -1897,6 +1901,9 @@ struct bnx2x {
1897 /* operation indication for the sp_rtnl task */ 1901 /* operation indication for the sp_rtnl task */
1898 unsigned long sp_rtnl_state; 1902 unsigned long sp_rtnl_state;
1899 1903
1904 /* Indication of the IOV tasks */
1905 unsigned long iov_task_state;
1906
1900 /* DCBX Negotiation results */ 1907 /* DCBX Negotiation results */
1901 struct dcbx_features dcbx_local_feat; 1908 struct dcbx_features dcbx_local_feat;
1902 u32 dcbx_error; 1909 u32 dcbx_error;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index faef7b19a529..b5c7f77e8108 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -120,7 +120,8 @@ static int debug;
120module_param(debug, int, S_IRUGO); 120module_param(debug, int, S_IRUGO);
121MODULE_PARM_DESC(debug, " Default debug msglevel"); 121MODULE_PARM_DESC(debug, " Default debug msglevel");
122 122
123struct workqueue_struct *bnx2x_wq; 123static struct workqueue_struct *bnx2x_wq;
124struct workqueue_struct *bnx2x_iov_wq;
124 125
125struct bnx2x_mac_vals { 126struct bnx2x_mac_vals {
126 u32 xmac_addr; 127 u32 xmac_addr;
@@ -1857,7 +1858,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1857 return; 1858 return;
1858#endif 1859#endif
1859 /* SRIOV: reschedule any 'in_progress' operations */ 1860 /* SRIOV: reschedule any 'in_progress' operations */
1860 bnx2x_iov_sp_event(bp, cid, true); 1861 bnx2x_iov_sp_event(bp, cid);
1861 1862
1862 smp_mb__before_atomic_inc(); 1863 smp_mb__before_atomic_inc();
1863 atomic_inc(&bp->cq_spq_left); 1864 atomic_inc(&bp->cq_spq_left);
@@ -4160,7 +4161,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4160 bnx2x_handle_drv_info_req(bp); 4161 bnx2x_handle_drv_info_req(bp);
4161 4162
4162 if (val & DRV_STATUS_VF_DISABLED) 4163 if (val & DRV_STATUS_VF_DISABLED)
4163 bnx2x_vf_handle_flr_event(bp); 4164 bnx2x_schedule_iov_task(bp,
4165 BNX2X_IOV_HANDLE_FLR);
4164 4166
4165 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 4167 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4166 bnx2x_pmf_update(bp); 4168 bnx2x_pmf_update(bp);
@@ -5351,8 +5353,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5351 /* handle eq element */ 5353 /* handle eq element */
5352 switch (opcode) { 5354 switch (opcode) {
5353 case EVENT_RING_OPCODE_VF_PF_CHANNEL: 5355 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5354 DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n"); 5356 bnx2x_vf_mbx_schedule(bp,
5355 bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event); 5357 &elem->message.data.vf_pf_event);
5356 continue; 5358 continue;
5357 5359
5358 case EVENT_RING_OPCODE_STAT_QUERY: 5360 case EVENT_RING_OPCODE_STAT_QUERY:
@@ -5567,13 +5569,6 @@ static void bnx2x_sp_task(struct work_struct *work)
5567 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); 5569 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5568 } 5570 }
5569 5571
5570 /* must be called after the EQ processing (since eq leads to sriov
5571 * ramrod completion flows).
5572 * This flow may have been scheduled by the arrival of a ramrod
5573 * completion, or by the sriov code rescheduling itself.
5574 */
5575 bnx2x_iov_sp_task(bp);
5576
5577 /* afex - poll to check if VIFSET_ACK should be sent to MFW */ 5572 /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5578 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, 5573 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5579 &bp->sp_state)) { 5574 &bp->sp_state)) {
@@ -8990,6 +8985,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
8990 synchronize_irq(bp->pdev->irq); 8985 synchronize_irq(bp->pdev->irq);
8991 8986
8992 flush_workqueue(bnx2x_wq); 8987 flush_workqueue(bnx2x_wq);
8988 flush_workqueue(bnx2x_iov_wq);
8993 8989
8994 while (bnx2x_func_get_state(bp, &bp->func_obj) != 8990 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
8995 BNX2X_F_STATE_STARTED && tout--) 8991 BNX2X_F_STATE_STARTED && tout--)
@@ -11877,6 +11873,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11877 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 11873 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
11878 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 11874 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
11879 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); 11875 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
11876 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
11880 if (IS_PF(bp)) { 11877 if (IS_PF(bp)) {
11881 rc = bnx2x_get_hwinfo(bp); 11878 rc = bnx2x_get_hwinfo(bp);
11882 if (rc) 11879 if (rc)
@@ -13499,11 +13496,18 @@ static int __init bnx2x_init(void)
13499 pr_err("Cannot create workqueue\n"); 13496 pr_err("Cannot create workqueue\n");
13500 return -ENOMEM; 13497 return -ENOMEM;
13501 } 13498 }
13499 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
13500 if (!bnx2x_iov_wq) {
13501 pr_err("Cannot create iov workqueue\n");
13502 destroy_workqueue(bnx2x_wq);
13503 return -ENOMEM;
13504 }
13502 13505
13503 ret = pci_register_driver(&bnx2x_pci_driver); 13506 ret = pci_register_driver(&bnx2x_pci_driver);
13504 if (ret) { 13507 if (ret) {
13505 pr_err("Cannot register driver\n"); 13508 pr_err("Cannot register driver\n");
13506 destroy_workqueue(bnx2x_wq); 13509 destroy_workqueue(bnx2x_wq);
13510 destroy_workqueue(bnx2x_iov_wq);
13507 } 13511 }
13508 return ret; 13512 return ret;
13509} 13513}
@@ -13515,6 +13519,7 @@ static void __exit bnx2x_cleanup(void)
13515 pci_unregister_driver(&bnx2x_pci_driver); 13519 pci_unregister_driver(&bnx2x_pci_driver);
13516 13520
13517 destroy_workqueue(bnx2x_wq); 13521 destroy_workqueue(bnx2x_wq);
13522 destroy_workqueue(bnx2x_iov_wq);
13518 13523
13519 /* Free globally allocated resources */ 13524 /* Free globally allocated resources */
13520 list_for_each_safe(pos, q, &bnx2x_prev_list) { 13525 list_for_each_safe(pos, q, &bnx2x_prev_list) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 61e6f606d8a4..8e2b191234f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2042,6 +2042,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
2042 goto failed; 2042 goto failed;
2043 } 2043 }
2044 2044
2045 /* Prepare the VFs event synchronization mechanism */
2046 mutex_init(&bp->vfdb->event_mutex);
2047
2045 return 0; 2048 return 0;
2046failed: 2049failed:
2047 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); 2050 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -2469,7 +2472,7 @@ get_vf:
2469 return 0; 2472 return 0;
2470 } 2473 }
2471 /* SRIOV: reschedule any 'in_progress' operations */ 2474 /* SRIOV: reschedule any 'in_progress' operations */
2472 bnx2x_iov_sp_event(bp, cid, false); 2475 bnx2x_iov_sp_event(bp, cid);
2473 2476
2474 return 0; 2477 return 0;
2475} 2478}
@@ -2506,7 +2509,7 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
2506 } 2509 }
2507} 2510}
2508 2511
2509void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work) 2512void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid)
2510{ 2513{
2511 struct bnx2x_virtf *vf; 2514 struct bnx2x_virtf *vf;
2512 2515
@@ -2518,8 +2521,7 @@ void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
2518 if (vf) { 2521 if (vf) {
2519 /* set in_progress flag */ 2522 /* set in_progress flag */
2520 atomic_set(&vf->op_in_progress, 1); 2523 atomic_set(&vf->op_in_progress, 1);
2521 if (queue_work) 2524 bnx2x_schedule_iov_task(bp, BNX2X_IOV_CONT_VFOP);
2522 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2523 } 2525 }
2524} 2526}
2525 2527
@@ -2604,7 +2606,7 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2604 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 2606 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
2605} 2607}
2606 2608
2607void bnx2x_iov_sp_task(struct bnx2x *bp) 2609void bnx2x_iov_vfop_cont(struct bnx2x *bp)
2608{ 2610{
2609 int i; 2611 int i;
2610 2612
@@ -3875,3 +3877,32 @@ void bnx2x_iov_channel_down(struct bnx2x *bp)
3875 bnx2x_post_vf_bulletin(bp, vf_idx); 3877 bnx2x_post_vf_bulletin(bp, vf_idx);
3876 } 3878 }
3877} 3879}
3880
3881void bnx2x_iov_task(struct work_struct *work)
3882{
3883 struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
3884
3885 if (!netif_running(bp->dev))
3886 return;
3887
3888 if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
3889 &bp->iov_task_state))
3890 bnx2x_vf_handle_flr_event(bp);
3891
3892 if (test_and_clear_bit(BNX2X_IOV_CONT_VFOP,
3893 &bp->iov_task_state))
3894 bnx2x_iov_vfop_cont(bp);
3895
3896 if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
3897 &bp->iov_task_state))
3898 bnx2x_vf_mbx(bp);
3899}
3900
3901void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
3902{
3903 smp_mb__before_clear_bit();
3904 set_bit(flag, &bp->iov_task_state);
3905 smp_mb__after_clear_bit();
3906 DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3907 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
3908}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index b1dc751c6175..87f7c9743f71 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -30,6 +30,8 @@ enum sample_bulletin_result {
30 30
31#ifdef CONFIG_BNX2X_SRIOV 31#ifdef CONFIG_BNX2X_SRIOV
32 32
33extern struct workqueue_struct *bnx2x_iov_wq;
34
33/* The bnx2x device structure holds vfdb structure described below. 35/* The bnx2x device structure holds vfdb structure described below.
34 * The VF array is indexed by the relative vfid. 36 * The VF array is indexed by the relative vfid.
35 */ 37 */
@@ -346,11 +348,6 @@ struct bnx2x_vf_mbx {
346 u32 vf_addr_hi; 348 u32 vf_addr_hi;
347 349
348 struct vfpf_first_tlv first_tlv; /* saved VF request header */ 350 struct vfpf_first_tlv first_tlv; /* saved VF request header */
349
350 u8 flags;
351#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
352 * more then one pending msg
353 */
354}; 351};
355 352
356struct bnx2x_vf_sp { 353struct bnx2x_vf_sp {
@@ -427,6 +424,10 @@ struct bnx2x_vfdb {
427 /* the number of msix vectors belonging to this PF designated for VFs */ 424 /* the number of msix vectors belonging to this PF designated for VFs */
428 u16 vf_sbs_pool; 425 u16 vf_sbs_pool;
429 u16 first_vf_igu_entry; 426 u16 first_vf_igu_entry;
427
428 /* sp_rtnl synchronization */
429 struct mutex event_mutex;
430 u64 event_occur;
430}; 431};
431 432
432/* queue access */ 433/* queue access */
@@ -476,13 +477,14 @@ void bnx2x_iov_init_dq(struct bnx2x *bp);
476void bnx2x_iov_init_dmae(struct bnx2x *bp); 477void bnx2x_iov_init_dmae(struct bnx2x *bp);
477void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 478void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
478 struct bnx2x_queue_sp_obj **q_obj); 479 struct bnx2x_queue_sp_obj **q_obj);
479void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work); 480void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid);
480int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); 481int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
481void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); 482void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
482void bnx2x_iov_storm_stats_update(struct bnx2x *bp); 483void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
483void bnx2x_iov_sp_task(struct bnx2x *bp);
484/* global vf mailbox routines */ 484/* global vf mailbox routines */
485void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event); 485void bnx2x_vf_mbx(struct bnx2x *bp);
486void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
487 struct vf_pf_event_data *vfpf_event);
486void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); 488void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
487 489
488/* CORE VF API */ 490/* CORE VF API */
@@ -520,7 +522,8 @@ enum {
520 else { \ 522 else { \
521 DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n"); \ 523 DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n"); \
522 atomic_set(&vf->op_in_progress, 1); \ 524 atomic_set(&vf->op_in_progress, 1); \
523 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \ 525 bnx2x_schedule_iov_task(bp, \
526 BNX2X_IOV_CONT_VFOP); \
524 return; \ 527 return; \
525 } \ 528 } \
526 } while (0) 529 } while (0)
@@ -785,18 +788,21 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
785int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); 788int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
786void bnx2x_iov_channel_down(struct bnx2x *bp); 789void bnx2x_iov_channel_down(struct bnx2x *bp);
787 790
791void bnx2x_iov_task(struct work_struct *work);
792
793void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
794
788#else /* CONFIG_BNX2X_SRIOV */ 795#else /* CONFIG_BNX2X_SRIOV */
789 796
790static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 797static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
791 struct bnx2x_queue_sp_obj **q_obj) {} 798 struct bnx2x_queue_sp_obj **q_obj) {}
792static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, 799static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid) {}
793 bool queue_work) {}
794static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} 800static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
795static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, 801static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
796 union event_ring_elem *elem) {return 1; } 802 union event_ring_elem *elem) {return 1; }
797static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {} 803static inline void bnx2x_vf_mbx(struct bnx2x *bp) {}
798static inline void bnx2x_vf_mbx(struct bnx2x *bp, 804static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
799 struct vf_pf_event_data *vfpf_event) {} 805 struct vf_pf_event_data *vfpf_event) {}
800static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; } 806static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
801static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {} 807static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
802static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } 808static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
@@ -843,5 +849,8 @@ static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
843static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } 849static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
844static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} 850static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
845 851
852static inline void bnx2x_iov_task(struct work_struct *work) {}
853void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}
854
846#endif /* CONFIG_BNX2X_SRIOV */ 855#endif /* CONFIG_BNX2X_SRIOV */
847#endif /* bnx2x_sriov.h */ 856#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 1117ed7776b6..63c95658ba60 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -1089,9 +1089,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
1089 storm_memset_vf_mbx_ack(bp, vf->abs_vfid); 1089 storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
1090 mmiowb(); 1090 mmiowb();
1091 1091
1092 /* initiate dmae to send the response */
1093 mbx->flags &= ~VF_MSG_INPROCESS;
1094
1095 /* copy the response header including status-done field, 1092 /* copy the response header including status-done field,
1096 * must be last dmae, must be after FW is acked 1093 * must be last dmae, must be after FW is acked
1097 */ 1094 */
@@ -2059,13 +2056,10 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
2059 } 2056 }
2060} 2057}
2061 2058
2062/* handle new vf-pf message */ 2059void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
2063void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) 2060 struct vf_pf_event_data *vfpf_event)
2064{ 2061{
2065 struct bnx2x_virtf *vf;
2066 struct bnx2x_vf_mbx *mbx;
2067 u8 vf_idx; 2062 u8 vf_idx;
2068 int rc;
2069 2063
2070 DP(BNX2X_MSG_IOV, 2064 DP(BNX2X_MSG_IOV,
2071 "vf pf event received: vfid %d, address_hi %x, address lo %x", 2065 "vf pf event received: vfid %d, address_hi %x, address lo %x",
@@ -2077,50 +2071,73 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
2077 BNX2X_NR_VIRTFN(bp)) { 2071 BNX2X_NR_VIRTFN(bp)) {
2078 BNX2X_ERR("Illegal vf_id %d max allowed: %d\n", 2072 BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
2079 vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp)); 2073 vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
2080 goto mbx_done; 2074 return;
2081 } 2075 }
2076
2082 vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id); 2077 vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
2083 mbx = BP_VF_MBX(bp, vf_idx);
2084 2078
2085 /* verify an event is not currently being processed - 2079 /* Update VFDB with current message and schedule its handling */
2086 * debug failsafe only 2080 mutex_lock(&BP_VFDB(bp)->event_mutex);
2087 */ 2081 BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
2088 if (mbx->flags & VF_MSG_INPROCESS) { 2082 BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
2089 BNX2X_ERR("Previous message is still being processed, vf_id %d\n", 2083 BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
2090 vfpf_event->vf_id); 2084 mutex_unlock(&BP_VFDB(bp)->event_mutex);
2091 goto mbx_done;
2092 }
2093 vf = BP_VF(bp, vf_idx);
2094 2085
2095 /* save the VF message address */ 2086 bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
2096 mbx->vf_addr_hi = vfpf_event->msg_addr_hi; 2087}
2097 mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
2098 DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
2099 mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
2100 2088
2101 /* dmae to get the VF request */ 2089/* handle new vf-pf messages */
2102 rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid, 2090void bnx2x_vf_mbx(struct bnx2x *bp)
2103 mbx->vf_addr_hi, mbx->vf_addr_lo, 2091{
2104 sizeof(union vfpf_tlvs)/4); 2092 struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
2105 if (rc) { 2093 u64 events;
2106 BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid); 2094 u8 vf_idx;
2107 goto mbx_error; 2095 int rc;
2108 }
2109 2096
2110 /* process the VF message header */ 2097 if (!vfdb)
2111 mbx->first_tlv = mbx->msg->req.first_tlv; 2098 return;
2112 2099
2113 /* Clean response buffer to refrain from falsely seeing chains */ 2100 mutex_lock(&vfdb->event_mutex);
2114 memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs)); 2101 events = vfdb->event_occur;
2102 vfdb->event_occur = 0;
2103 mutex_unlock(&vfdb->event_mutex);
2115 2104
2116 /* dispatch the request (will prepare the response) */ 2105 for_each_vf(bp, vf_idx) {
2117 bnx2x_vf_mbx_request(bp, vf, mbx); 2106 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
2118 goto mbx_done; 2107 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2119 2108
2120mbx_error: 2109 /* Handle VFs which have pending events */
2121 bnx2x_vf_release(bp, vf, false); /* non blocking */ 2110 if (!(events & (1ULL << vf_idx)))
2122mbx_done: 2111 continue;
2123 return; 2112
2113 DP(BNX2X_MSG_IOV,
2114 "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
2115 vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
2116 mbx->first_tlv.resp_msg_offset);
2117
2118 /* dmae to get the VF request */
2119 rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
2120 vf->abs_vfid, mbx->vf_addr_hi,
2121 mbx->vf_addr_lo,
2122 sizeof(union vfpf_tlvs)/4);
2123 if (rc) {
2124 BNX2X_ERR("Failed to copy request VF %d\n",
2125 vf->abs_vfid);
2126 bnx2x_vf_release(bp, vf, false); /* non blocking */
2127 return;
2128 }
2129
2130 /* process the VF message header */
2131 mbx->first_tlv = mbx->msg->req.first_tlv;
2132
2133 /* Clean response buffer to refrain from falsely
2134 * seeing chains.
2135 */
2136 memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
2137
2138 /* dispatch the request (will prepare the response) */
2139 bnx2x_vf_mbx_request(bp, vf, mbx);
2140 }
2124} 2141}
2125 2142
2126/* propagate local bulletin board to vf */ 2143/* propagate local bulletin board to vf */