Diffstat (limited to 'drivers/net/ethernet')
 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h       |   6
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c  | 200
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 247
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 102
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c  | 154
 5 files changed, 664 insertions(+), 45 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index af34ee36dab8..5219373ecaf1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1379,6 +1379,7 @@ struct bnx2x { | |||
1379 | int mrrs; | 1379 | int mrrs; |
1380 | 1380 | ||
1381 | struct delayed_work sp_task; | 1381 | struct delayed_work sp_task; |
1382 | atomic_t interrupt_occurred; | ||
1382 | struct delayed_work sp_rtnl_task; | 1383 | struct delayed_work sp_rtnl_task; |
1383 | 1384 | ||
1384 | struct delayed_work period_task; | 1385 | struct delayed_work period_task; |
@@ -1870,6 +1871,11 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode); | |||
1870 | u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, | 1871 | u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, |
1871 | bool with_comp, u8 comp_type); | 1872 | bool with_comp, u8 comp_type); |
1872 | 1873 | ||
1874 | void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, | ||
1875 | u8 src_type, u8 dst_type); | ||
1876 | int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae); | ||
1877 | void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl); | ||
1878 | |||
1873 | u8 bnx2x_is_pcie_pending(struct pci_dev *dev); | 1879 | u8 bnx2x_is_pcie_pending(struct pci_dev *dev); |
1874 | 1880 | ||
1875 | void bnx2x_calc_fc_adv(struct bnx2x *bp); | 1881 | void bnx2x_calc_fc_adv(struct bnx2x *bp); |
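The prototypes added above expose the PF's generic DMAE-with-completion flow (previously static in bnx2x_main.c) so the new SR-IOV code can copy request/response buffers between VF and PF memory: bnx2x_prep_dmae_with_comp() points the command's completion address at a writeback word and sets comp_val, and bnx2x_issue_dmae_with_comp() posts the command and polls that word until the value appears or a retry budget runs out. Below is a self-contained toy model of that polling contract, with a thread standing in for the DMAE engine; every name and value in it is illustrative, not driver API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define EX_COMP_VAL 0x60d0d0ae  /* arbitrary completion marker, in the spirit of DMAE_COMP_VAL */

static _Atomic unsigned int wb_comp;  /* the "writeback" word the engine completes into */

/* stands in for the DMAE engine executing a posted command */
static void *ex_dmae_engine(void *arg)
{
    usleep(1000);                           /* pretend the copy takes a while */
    atomic_store(&wb_comp, EX_COMP_VAL);    /* signal completion */
    return NULL;
}

/* stands in for bnx2x_issue_dmae_with_comp(): poll the writeback word with a retry budget */
static int ex_issue_and_wait(void)
{
    int cnt = 4000;

    while (atomic_load(&wb_comp) != EX_COMP_VAL) {
        if (!cnt--) {
            fprintf(stderr, "DMAE timeout!\n");
            return -1;
        }
        usleep(50);
    }
    return 0;
}

int main(void)
{
    pthread_t engine;

    pthread_create(&engine, NULL, ex_dmae_engine, NULL);
    printf("dmae completion: %s\n", ex_issue_and_wait() ? "timeout" : "ok");
    pthread_join(engine, NULL);
    return 0;
}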
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e55de72a05ca..e6738f6984ad 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -347,6 +347,65 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) | |||
347 | #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" | 347 | #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" |
348 | #define DMAE_DP_DST_NONE "dst_addr [none]" | 348 | #define DMAE_DP_DST_NONE "dst_addr [none]" |
349 | 349 | ||
350 | void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl) | ||
351 | { | ||
352 | u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; | ||
353 | |||
354 | switch (dmae->opcode & DMAE_COMMAND_DST) { | ||
355 | case DMAE_CMD_DST_PCI: | ||
356 | if (src_type == DMAE_CMD_SRC_PCI) | ||
357 | DP(msglvl, "DMAE: opcode 0x%08x\n" | ||
358 | "src [%x:%08x], len [%d*4], dst [%x:%08x]\n" | ||
359 | "comp_addr [%x:%08x], comp_val 0x%08x\n", | ||
360 | dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, | ||
361 | dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, | ||
362 | dmae->comp_addr_hi, dmae->comp_addr_lo, | ||
363 | dmae->comp_val); | ||
364 | else | ||
365 | DP(msglvl, "DMAE: opcode 0x%08x\n" | ||
366 | "src [%08x], len [%d*4], dst [%x:%08x]\n" | ||
367 | "comp_addr [%x:%08x], comp_val 0x%08x\n", | ||
368 | dmae->opcode, dmae->src_addr_lo >> 2, | ||
369 | dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, | ||
370 | dmae->comp_addr_hi, dmae->comp_addr_lo, | ||
371 | dmae->comp_val); | ||
372 | break; | ||
373 | case DMAE_CMD_DST_GRC: | ||
374 | if (src_type == DMAE_CMD_SRC_PCI) | ||
375 | DP(msglvl, "DMAE: opcode 0x%08x\n" | ||
376 | "src [%x:%08x], len [%d*4], dst_addr [%08x]\n" | ||
377 | "comp_addr [%x:%08x], comp_val 0x%08x\n", | ||
378 | dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, | ||
379 | dmae->len, dmae->dst_addr_lo >> 2, | ||
380 | dmae->comp_addr_hi, dmae->comp_addr_lo, | ||
381 | dmae->comp_val); | ||
382 | else | ||
383 | DP(msglvl, "DMAE: opcode 0x%08x\n" | ||
384 | "src [%08x], len [%d*4], dst [%08x]\n" | ||
385 | "comp_addr [%x:%08x], comp_val 0x%08x\n", | ||
386 | dmae->opcode, dmae->src_addr_lo >> 2, | ||
387 | dmae->len, dmae->dst_addr_lo >> 2, | ||
388 | dmae->comp_addr_hi, dmae->comp_addr_lo, | ||
389 | dmae->comp_val); | ||
390 | break; | ||
391 | default: | ||
392 | if (src_type == DMAE_CMD_SRC_PCI) | ||
393 | DP(msglvl, "DMAE: opcode 0x%08x\n" | ||
394 | "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n" | ||
395 | "comp_addr [%x:%08x] comp_val 0x%08x\n", | ||
396 | dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, | ||
397 | dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, | ||
398 | dmae->comp_val); | ||
399 | else | ||
400 | DP(msglvl, "DMAE: opcode 0x%08x\n" | ||
401 | "src_addr [%08x] len [%d * 4] dst_addr [none]\n" | ||
402 | "comp_addr [%x:%08x] comp_val 0x%08x\n", | ||
403 | dmae->opcode, dmae->src_addr_lo >> 2, | ||
404 | dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, | ||
405 | dmae->comp_val); | ||
406 | break; | ||
407 | } | ||
408 | } | ||
350 | 409 | ||
351 | /* copy command into DMAE command memory and set DMAE command go */ | 410 | /* copy command into DMAE command memory and set DMAE command go */ |
352 | void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) | 411 | void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) |
@@ -397,7 +456,7 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, | |||
397 | return opcode; | 456 | return opcode; |
398 | } | 457 | } |
399 | 458 | ||
400 | static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, | 459 | void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, |
401 | struct dmae_command *dmae, | 460 | struct dmae_command *dmae, |
402 | u8 src_type, u8 dst_type) | 461 | u8 src_type, u8 dst_type) |
403 | { | 462 | { |
@@ -413,9 +472,8 @@ static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, | |||
413 | dmae->comp_val = DMAE_COMP_VAL; | 472 | dmae->comp_val = DMAE_COMP_VAL; |
414 | } | 473 | } |
415 | 474 | ||
416 | /* issue a dmae command over the init-channel and wailt for completion */ | 475 | /* issue a dmae command over the init-channel and wait for completion */ |
417 | static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, | 476 | int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) |
418 | struct dmae_command *dmae) | ||
419 | { | 477 | { |
420 | u32 *wb_comp = bnx2x_sp(bp, wb_comp); | 478 | u32 *wb_comp = bnx2x_sp(bp, wb_comp); |
421 | int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; | 479 | int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; |
@@ -1600,6 +1658,24 @@ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp) | |||
1600 | 1658 | ||
1601 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); | 1659 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); |
1602 | 1660 | ||
1661 | /* schedule the sp task and mark that interrupt occurred (runs from ISR) */ | ||
1662 | static int bnx2x_schedule_sp_task(struct bnx2x *bp) | ||
1663 | { | ||
1664 | /* Set the interrupt occurred bit for the sp-task to recognize it | ||
1665 | * must ack the interrupt and transition according to the IGU | ||
1666 | * state machine. | ||
1667 | */ | ||
1668 | atomic_set(&bp->interrupt_occurred, 1); | ||
1669 | |||
1670 | /* The sp_task must execute only after this bit | ||
1671 | * is set, otherwise we will get out of sync and miss all | ||
1672 | * further interrupts. Hence, the barrier. | ||
1673 | */ | ||
1674 | smp_wmb(); | ||
1675 | |||
1676 | /* schedule sp_task to workqueue */ | ||
1677 | return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); | ||
1678 | } | ||
1603 | 1679 | ||
1604 | void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) | 1680 | void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) |
1605 | { | 1681 | { |
@@ -1614,6 +1690,13 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) | |||
1614 | fp->index, cid, command, bp->state, | 1690 | fp->index, cid, command, bp->state, |
1615 | rr_cqe->ramrod_cqe.ramrod_type); | 1691 | rr_cqe->ramrod_cqe.ramrod_type); |
1616 | 1692 | ||
1693 | /* If cid is within VF range, replace the slowpath object with the | ||
1694 | * one corresponding to this VF | ||
1695 | */ | ||
1696 | if (cid >= BNX2X_FIRST_VF_CID && | ||
1697 | cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS) | ||
1698 | bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj); | ||
1699 | |||
1617 | switch (command) { | 1700 | switch (command) { |
1618 | case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): | 1701 | case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): |
1619 | DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid); | 1702 | DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid); |
@@ -1665,6 +1748,8 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) | |||
1665 | #else | 1748 | #else |
1666 | return; | 1749 | return; |
1667 | #endif | 1750 | #endif |
1751 | /* SRIOV: reschedule any 'in_progress' operations */ | ||
1752 | bnx2x_iov_sp_event(bp, cid, true); | ||
1668 | 1753 | ||
1669 | smp_mb__before_atomic_inc(); | 1754 | smp_mb__before_atomic_inc(); |
1670 | atomic_inc(&bp->cq_spq_left); | 1755 | atomic_inc(&bp->cq_spq_left); |
@@ -1690,8 +1775,8 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) | |||
1690 | clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); | 1775 | clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); |
1691 | smp_mb__after_clear_bit(); | 1776 | smp_mb__after_clear_bit(); |
1692 | 1777 | ||
1693 | /* schedule workqueue to send ack to MCP */ | 1778 | /* schedule the sp task as mcp ack is required */ |
1694 | queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); | 1779 | bnx2x_schedule_sp_task(bp); |
1695 | } | 1780 | } |
1696 | 1781 | ||
1697 | return; | 1782 | return; |
@@ -1751,7 +1836,11 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1751 | } | 1836 | } |
1752 | 1837 | ||
1753 | if (unlikely(status & 0x1)) { | 1838 | if (unlikely(status & 0x1)) { |
1754 | queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); | 1839 | |
1840 | /* schedule sp task to perform default status block work, ack | ||
1841 | * attentions and enable interrupts. | ||
1842 | */ | ||
1843 | bnx2x_schedule_sp_task(bp); | ||
1755 | 1844 | ||
1756 | status &= ~0x1; | 1845 | status &= ~0x1; |
1757 | if (!status) | 1846 | if (!status) |
@@ -4833,7 +4922,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) | |||
4833 | u8 echo; | 4922 | u8 echo; |
4834 | u32 cid; | 4923 | u32 cid; |
4835 | u8 opcode; | 4924 | u8 opcode; |
4836 | int spqe_cnt = 0; | 4925 | int rc, spqe_cnt = 0; |
4837 | struct bnx2x_queue_sp_obj *q_obj; | 4926 | struct bnx2x_queue_sp_obj *q_obj; |
4838 | struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; | 4927 | struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; |
4839 | struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; | 4928 | struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; |
@@ -4864,12 +4953,23 @@ static void bnx2x_eq_int(struct bnx2x *bp) | |||
4864 | 4953 | ||
4865 | elem = &bp->eq_ring[EQ_DESC(sw_cons)]; | 4954 | elem = &bp->eq_ring[EQ_DESC(sw_cons)]; |
4866 | 4955 | ||
4956 | rc = bnx2x_iov_eq_sp_event(bp, elem); | ||
4957 | if (!rc) { | ||
4958 | DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n", | ||
4959 | rc); | ||
4960 | goto next_spqe; | ||
4961 | } | ||
4867 | cid = SW_CID(elem->message.data.cfc_del_event.cid); | 4962 | cid = SW_CID(elem->message.data.cfc_del_event.cid); |
4868 | opcode = elem->message.opcode; | 4963 | opcode = elem->message.opcode; |
4869 | 4964 | ||
4870 | 4965 | ||
4871 | /* handle eq element */ | 4966 | /* handle eq element */ |
4872 | switch (opcode) { | 4967 | switch (opcode) { |
4968 | case EVENT_RING_OPCODE_VF_PF_CHANNEL: | ||
4969 | DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n"); | ||
4970 | bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event); | ||
4971 | continue; | ||
4972 | |||
4873 | case EVENT_RING_OPCODE_STAT_QUERY: | 4973 | case EVENT_RING_OPCODE_STAT_QUERY: |
4874 | DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, | 4974 | DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, |
4875 | "got statistics comp event %d\n", | 4975 | "got statistics comp event %d\n", |
@@ -5035,50 +5135,65 @@ next_spqe: | |||
5035 | static void bnx2x_sp_task(struct work_struct *work) | 5135 | static void bnx2x_sp_task(struct work_struct *work) |
5036 | { | 5136 | { |
5037 | struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); | 5137 | struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); |
5038 | u16 status; | ||
5039 | 5138 | ||
5040 | status = bnx2x_update_dsb_idx(bp); | 5139 | DP(BNX2X_MSG_SP, "sp task invoked\n"); |
5041 | /* if (status == 0) */ | ||
5042 | /* BNX2X_ERR("spurious slowpath interrupt!\n"); */ | ||
5043 | 5140 | ||
5044 | DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status); | 5141 | /* make sure the atomic interrupt_occurred has been written */ |
5142 | smp_rmb(); | ||
5143 | if (atomic_read(&bp->interrupt_occurred)) { | ||
5045 | 5144 | ||
5046 | /* HW attentions */ | 5145 | /* what work needs to be performed? */ |
5047 | if (status & BNX2X_DEF_SB_ATT_IDX) { | 5146 | u16 status = bnx2x_update_dsb_idx(bp); |
5048 | bnx2x_attn_int(bp); | ||
5049 | status &= ~BNX2X_DEF_SB_ATT_IDX; | ||
5050 | } | ||
5051 | 5147 | ||
5052 | /* SP events: STAT_QUERY and others */ | 5148 | DP(BNX2X_MSG_SP, "status %x\n", status); |
5053 | if (status & BNX2X_DEF_SB_IDX) { | 5149 | DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n"); |
5054 | struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); | 5150 | atomic_set(&bp->interrupt_occurred, 0); |
5151 | |||
5152 | /* HW attentions */ | ||
5153 | if (status & BNX2X_DEF_SB_ATT_IDX) { | ||
5154 | bnx2x_attn_int(bp); | ||
5155 | status &= ~BNX2X_DEF_SB_ATT_IDX; | ||
5156 | } | ||
5157 | |||
5158 | /* SP events: STAT_QUERY and others */ | ||
5159 | if (status & BNX2X_DEF_SB_IDX) { | ||
5160 | struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); | ||
5055 | 5161 | ||
5056 | if (FCOE_INIT(bp) && | 5162 | if (FCOE_INIT(bp) && |
5057 | (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { | 5163 | (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { |
5058 | /* | 5164 | /* Prevent local bottom-halves from running as |
5059 | * Prevent local bottom-halves from running as | 5165 | * we are going to change the local NAPI list. |
5060 | * we are going to change the local NAPI list. | 5166 | */ |
5061 | */ | 5167 | local_bh_disable(); |
5062 | local_bh_disable(); | 5168 | napi_schedule(&bnx2x_fcoe(bp, napi)); |
5063 | napi_schedule(&bnx2x_fcoe(bp, napi)); | 5169 | local_bh_enable(); |
5064 | local_bh_enable(); | 5170 | } |
5171 | |||
5172 | /* Handle EQ completions */ | ||
5173 | bnx2x_eq_int(bp); | ||
5174 | bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, | ||
5175 | le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); | ||
5176 | |||
5177 | status &= ~BNX2X_DEF_SB_IDX; | ||
5065 | } | 5178 | } |
5066 | 5179 | ||
5067 | /* Handle EQ completions */ | 5180 | /* if status is non zero then perhaps something went wrong */ |
5068 | bnx2x_eq_int(bp); | 5181 | if (unlikely(status)) |
5182 | DP(BNX2X_MSG_SP, | ||
5183 | "got an unknown interrupt! (status 0x%x)\n", status); | ||
5069 | 5184 | ||
5070 | bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, | 5185 | /* ack status block only if something was actually handled */ |
5071 | le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); | 5186 | bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, |
5187 | le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); | ||
5072 | 5188 | ||
5073 | status &= ~BNX2X_DEF_SB_IDX; | ||
5074 | } | 5189 | } |
5075 | 5190 | ||
5076 | if (unlikely(status)) | 5191 | /* must be called after the EQ processing (since eq leads to sriov |
5077 | DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n", | 5192 | * ramrod completion flows). |
5078 | status); | 5193 | * This flow may have been scheduled by the arrival of a ramrod |
5079 | 5194 | * completion, or by the sriov code rescheduling itself. | |
5080 | bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, | 5195 | */ |
5081 | le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); | 5196 | bnx2x_iov_sp_task(bp); |
5082 | 5197 | ||
5083 | /* afex - poll to check if VIFSET_ACK should be sent to MFW */ | 5198 | /* afex - poll to check if VIFSET_ACK should be sent to MFW */ |
5084 | if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, | 5199 | if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, |
@@ -5111,7 +5226,10 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | |||
5111 | rcu_read_unlock(); | 5226 | rcu_read_unlock(); |
5112 | } | 5227 | } |
5113 | 5228 | ||
5114 | queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); | 5229 | /* schedule sp task to perform default status block work, ack |
5230 | * attentions and enable interrupts. | ||
5231 | */ | ||
5232 | bnx2x_schedule_sp_task(bp); | ||
5115 | 5233 | ||
5116 | return IRQ_HANDLED; | 5234 | return IRQ_HANDLED; |
5117 | } | 5235 | } |
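The new bnx2x_schedule_sp_task()/bnx2x_sp_task() pairing above is a publish-then-consume pattern: the ISR sets bp->interrupt_occurred, issues smp_wmb(), and only then queues the delayed work, while the work function issues smp_rmb() before reading the flag so it cannot act on a stale value. A minimal user-space analogue of that ordering, using C11 atomics with a thread standing in for the workqueue; the names are illustrative and none of this is driver code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int interrupt_occurred;  /* stands in for bp->interrupt_occurred */
static atomic_int work_queued;         /* stands in for queue_delayed_work() */

static void fake_isr(void)
{
    /* publish the flag first ... */
    atomic_store_explicit(&interrupt_occurred, 1, memory_order_relaxed);
    /* ... then make it visible before the work item is queued (smp_wmb analogue) */
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&work_queued, 1, memory_order_relaxed);
}

static void *fake_sp_task(void *arg)
{
    while (!atomic_load_explicit(&work_queued, memory_order_relaxed))
        ;
    /* smp_rmb analogue: order the flag read after observing the queued work */
    atomic_thread_fence(memory_order_acquire);
    if (atomic_load_explicit(&interrupt_occurred, memory_order_relaxed))
        printf("sp task: interrupt flag observed, would ack the IGU here\n");
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, fake_sp_task, NULL);
    fake_isr();
    pthread_join(t, NULL);
    return 0;
}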
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index d833a2d418ea..9233117ea506 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -518,6 +518,16 @@ static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
518 | } | 518 | } |
519 | } | 519 | } |
520 | 520 | ||
521 | void bnx2x_iov_remove_one(struct bnx2x *bp) | ||
522 | { | ||
523 | /* if SRIOV is not enabled there's nothing to do */ | ||
524 | if (!IS_SRIOV(bp)) | ||
525 | return; | ||
526 | |||
527 | /* free vf database */ | ||
528 | __bnx2x_iov_free_vfdb(bp); | ||
529 | } | ||
530 | |||
521 | void bnx2x_iov_free_mem(struct bnx2x *bp) | 531 | void bnx2x_iov_free_mem(struct bnx2x *bp) |
522 | { | 532 | { |
523 | int i; | 533 | int i; |
@@ -692,12 +702,241 @@ int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) | |||
692 | return line + i; | 702 | return line + i; |
693 | } | 703 | } |
694 | 704 | ||
695 | void bnx2x_iov_remove_one(struct bnx2x *bp) | 705 | static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) |
696 | { | 706 | { |
697 | /* if SRIOV is not enabled there's nothing to do */ | 707 | return ((cid >= BNX2X_FIRST_VF_CID) && |
708 | ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); | ||
709 | } | ||
710 | |||
711 | static | ||
712 | void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, | ||
713 | struct bnx2x_vf_queue *vfq, | ||
714 | union event_ring_elem *elem) | ||
715 | { | ||
716 | unsigned long ramrod_flags = 0; | ||
717 | int rc = 0; | ||
718 | |||
719 | /* Always push next commands out, don't wait here */ | ||
720 | set_bit(RAMROD_CONT, &ramrod_flags); | ||
721 | |||
722 | switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { | ||
723 | case BNX2X_FILTER_MAC_PENDING: | ||
724 | rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, | ||
725 | &ramrod_flags); | ||
726 | break; | ||
727 | case BNX2X_FILTER_VLAN_PENDING: | ||
728 | rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, | ||
729 | &ramrod_flags); | ||
730 | break; | ||
731 | default: | ||
732 | BNX2X_ERR("Unsupported classification command: %d\n", | ||
733 | elem->message.data.eth_event.echo); | ||
734 | return; | ||
735 | } | ||
736 | if (rc < 0) | ||
737 | BNX2X_ERR("Failed to schedule new commands: %d\n", rc); | ||
738 | else if (rc > 0) | ||
739 | DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); | ||
740 | } | ||
741 | |||
742 | static | ||
743 | void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, | ||
744 | struct bnx2x_virtf *vf) | ||
745 | { | ||
746 | struct bnx2x_mcast_ramrod_params rparam = {NULL}; | ||
747 | int rc; | ||
748 | |||
749 | rparam.mcast_obj = &vf->mcast_obj; | ||
750 | vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); | ||
751 | |||
752 | /* If there are pending mcast commands - send them */ | ||
753 | if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { | ||
754 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); | ||
755 | if (rc < 0) | ||
756 | BNX2X_ERR("Failed to send pending mcast commands: %d\n", | ||
757 | rc); | ||
758 | } | ||
759 | } | ||
760 | |||
761 | static | ||
762 | void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, | ||
763 | struct bnx2x_virtf *vf) | ||
764 | { | ||
765 | smp_mb__before_clear_bit(); | ||
766 | clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); | ||
767 | smp_mb__after_clear_bit(); | ||
768 | } | ||
769 | |||
770 | int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) | ||
771 | { | ||
772 | struct bnx2x_virtf *vf; | ||
773 | int qidx = 0, abs_vfid; | ||
774 | u8 opcode; | ||
775 | u16 cid = 0xffff; | ||
776 | |||
777 | if (!IS_SRIOV(bp)) | ||
778 | return 1; | ||
779 | |||
780 | /* first get the cid - the only events we handle here are cfc-delete | ||
781 | * and set-mac completion | ||
782 | */ | ||
783 | opcode = elem->message.opcode; | ||
784 | |||
785 | switch (opcode) { | ||
786 | case EVENT_RING_OPCODE_CFC_DEL: | ||
787 | cid = SW_CID((__force __le32) | ||
788 | elem->message.data.cfc_del_event.cid); | ||
789 | DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); | ||
790 | break; | ||
791 | case EVENT_RING_OPCODE_CLASSIFICATION_RULES: | ||
792 | case EVENT_RING_OPCODE_MULTICAST_RULES: | ||
793 | case EVENT_RING_OPCODE_FILTERS_RULES: | ||
794 | cid = (elem->message.data.eth_event.echo & | ||
795 | BNX2X_SWCID_MASK); | ||
796 | DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); | ||
797 | break; | ||
798 | case EVENT_RING_OPCODE_VF_FLR: | ||
799 | abs_vfid = elem->message.data.vf_flr_event.vf_id; | ||
800 | DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", | ||
801 | abs_vfid); | ||
802 | goto get_vf; | ||
803 | case EVENT_RING_OPCODE_MALICIOUS_VF: | ||
804 | abs_vfid = elem->message.data.malicious_vf_event.vf_id; | ||
805 | DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n", | ||
806 | abs_vfid); | ||
807 | goto get_vf; | ||
808 | default: | ||
809 | return 1; | ||
810 | } | ||
811 | |||
812 | /* check if the cid is within the VF range */ | ||
813 | if (!bnx2x_iov_is_vf_cid(bp, cid)) { | ||
814 | DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); | ||
815 | return 1; | ||
816 | } | ||
817 | |||
818 | /* extract vf and rxq index from vf_cid - relies on the following: | ||
819 | * 1. vfid on cid reflects the true abs_vfid | ||
820 | * 2. the max number of VFs (per path) is 64 | ||
821 | */ | ||
822 | qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); | ||
823 | abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); | ||
824 | get_vf: | ||
825 | vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); | ||
826 | |||
827 | if (!vf) { | ||
828 | BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", | ||
829 | cid, abs_vfid); | ||
830 | return 0; | ||
831 | } | ||
832 | |||
833 | switch (opcode) { | ||
834 | case EVENT_RING_OPCODE_CFC_DEL: | ||
835 | DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", | ||
836 | vf->abs_vfid, qidx); | ||
837 | vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, | ||
838 | &vfq_get(vf, | ||
839 | qidx)->sp_obj, | ||
840 | BNX2X_Q_CMD_CFC_DEL); | ||
841 | break; | ||
842 | case EVENT_RING_OPCODE_CLASSIFICATION_RULES: | ||
843 | DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", | ||
844 | vf->abs_vfid, qidx); | ||
845 | bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); | ||
846 | break; | ||
847 | case EVENT_RING_OPCODE_MULTICAST_RULES: | ||
848 | DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", | ||
849 | vf->abs_vfid, qidx); | ||
850 | bnx2x_vf_handle_mcast_eqe(bp, vf); | ||
851 | break; | ||
852 | case EVENT_RING_OPCODE_FILTERS_RULES: | ||
853 | DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", | ||
854 | vf->abs_vfid, qidx); | ||
855 | bnx2x_vf_handle_filters_eqe(bp, vf); | ||
856 | break; | ||
857 | case EVENT_RING_OPCODE_VF_FLR: | ||
858 | DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n", | ||
859 | vf->abs_vfid); | ||
860 | /* Do nothing for now */ | ||
861 | break; | ||
862 | case EVENT_RING_OPCODE_MALICIOUS_VF: | ||
863 | DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n", | ||
864 | vf->abs_vfid); | ||
865 | /* Do nothing for now */ | ||
866 | break; | ||
867 | } | ||
868 | /* SRIOV: reschedule any 'in_progress' operations */ | ||
869 | bnx2x_iov_sp_event(bp, cid, false); | ||
870 | |||
871 | return 0; | ||
872 | } | ||
873 | |||
874 | static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) | ||
875 | { | ||
876 | /* extract the vf from vf_cid - relies on the following: | ||
877 | * 1. vfid on cid reflects the true abs_vfid | ||
878 | * 2. the max number of VFs (per path) is 64 | ||
879 | */ | ||
880 | int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); | ||
881 | return bnx2x_vf_by_abs_fid(bp, abs_vfid); | ||
882 | } | ||
883 | |||
884 | void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, | ||
885 | struct bnx2x_queue_sp_obj **q_obj) | ||
886 | { | ||
887 | struct bnx2x_virtf *vf; | ||
888 | |||
698 | if (!IS_SRIOV(bp)) | 889 | if (!IS_SRIOV(bp)) |
699 | return; | 890 | return; |
700 | 891 | ||
701 | /* free vf database */ | 892 | vf = bnx2x_vf_by_cid(bp, vf_cid); |
702 | __bnx2x_iov_free_vfdb(bp); | 893 | |
894 | if (vf) { | ||
895 | /* extract queue index from vf_cid - relies on the following: | ||
896 | * 1. vfid on cid reflects the true abs_vfid | ||
897 | * 2. the max number of VFs (per path) is 64 | ||
898 | */ | ||
899 | int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); | ||
900 | *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); | ||
901 | } else { | ||
902 | BNX2X_ERR("No vf matching cid %d\n", vf_cid); | ||
903 | } | ||
904 | } | ||
905 | |||
906 | void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work) | ||
907 | { | ||
908 | struct bnx2x_virtf *vf; | ||
909 | |||
910 | /* check if the cid is within the VF range */ | ||
911 | if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid)) | ||
912 | return; | ||
913 | |||
914 | vf = bnx2x_vf_by_cid(bp, vf_cid); | ||
915 | if (vf) { | ||
916 | /* set in_progress flag */ | ||
917 | atomic_set(&vf->op_in_progress, 1); | ||
918 | if (queue_work) | ||
919 | queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); | ||
920 | } | ||
921 | } | ||
922 | |||
923 | void bnx2x_iov_sp_task(struct bnx2x *bp) | ||
924 | { | ||
925 | int i; | ||
926 | |||
927 | if (!IS_SRIOV(bp)) | ||
928 | return; | ||
929 | /* Iterate over all VFs and invoke state transition for VFs with | ||
930 | * 'in-progress' slow-path operations | ||
931 | */ | ||
932 | DP(BNX2X_MSG_IOV, "searching for pending vf operations\n"); | ||
933 | for_each_vf(bp, i) { | ||
934 | struct bnx2x_virtf *vf = BP_VF(bp, i); | ||
935 | |||
936 | if (!list_empty(&vf->op_list_head) && | ||
937 | atomic_read(&vf->op_in_progress)) { | ||
938 | DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); | ||
939 | bnx2x_vfop_cur(bp, vf)->transition(bp, vf); | ||
940 | } | ||
941 | } | ||
703 | } | 942 | } |
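bnx2x_iov_eq_sp_event() and bnx2x_vf_by_cid() above rely on the same CID layout: the low BNX2X_VF_CID_WND bits of a VF CID select the queue within the VF, and the next bits carry the absolute VF id, bounded by the 64-VFs-per-path limit noted in the comments. A small self-contained illustration of that shift/mask decode; the constant values are assumptions for the example only, the real ones come from the driver headers.

#include <stdio.h>

/* illustrative values only - the driver defines the real ones in bnx2x.h */
#define EX_VF_CID_WND       4   /* queue-index window width, assumed */
#define EX_MAX_NUM_OF_VFS   64  /* per-path VF limit stated in the comments above */

static void decode_vf_cid(unsigned int cid)
{
    /* same shift/mask arithmetic as bnx2x_iov_eq_sp_event()/bnx2x_vf_by_cid() */
    unsigned int qidx     = cid & ((1 << EX_VF_CID_WND) - 1);
    unsigned int abs_vfid = (cid >> EX_VF_CID_WND) & (EX_MAX_NUM_OF_VFS - 1);

    printf("cid 0x%x -> abs_vfid %u, queue %u\n", cid, abs_vfid, qidx);
}

int main(void)
{
    /* e.g. CIDs carried by VF queue completions on the event queue */
    decode_vf_cid(0x31);    /* abs_vfid 3, queue 1 with the assumed window */
    decode_vf_cid(0x105);   /* abs_vfid 16, queue 5 */
    return 0;
}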
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 0e521b0275e0..6af7bde74ddc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -83,6 +83,84 @@ union bnx2x_vfop_params { | |||
83 | 83 | ||
84 | /* forward */ | 84 | /* forward */ |
85 | struct bnx2x_virtf; | 85 | struct bnx2x_virtf; |
86 | |||
87 | /* VFOP definitions */ | ||
88 | typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf); | ||
89 | |||
90 | /* VFOP queue filters command additional arguments */ | ||
91 | struct bnx2x_vfop_filter { | ||
92 | struct list_head link; | ||
93 | int type; | ||
94 | #define BNX2X_VFOP_FILTER_MAC 1 | ||
95 | #define BNX2X_VFOP_FILTER_VLAN 2 | ||
96 | |||
97 | bool add; | ||
98 | u8 *mac; | ||
99 | u16 vid; | ||
100 | }; | ||
101 | |||
102 | struct bnx2x_vfop_filters { | ||
103 | int add_cnt; | ||
104 | struct list_head head; | ||
105 | struct bnx2x_vfop_filter filters[]; | ||
106 | }; | ||
107 | |||
108 | /* transient list allocated, built and saved until its | ||
109 | * passed to the SP-VERBs layer. | ||
110 | */ | ||
111 | struct bnx2x_vfop_args_mcast { | ||
112 | int mc_num; | ||
113 | struct bnx2x_mcast_list_elem *mc; | ||
114 | }; | ||
115 | |||
116 | struct bnx2x_vfop_args_qctor { | ||
117 | int qid; | ||
118 | u16 sb_idx; | ||
119 | }; | ||
120 | |||
121 | struct bnx2x_vfop_args_qdtor { | ||
122 | int qid; | ||
123 | struct eth_context *cxt; | ||
124 | }; | ||
125 | |||
126 | struct bnx2x_vfop_args_defvlan { | ||
127 | int qid; | ||
128 | bool enable; | ||
129 | u16 vid; | ||
130 | u8 prio; | ||
131 | }; | ||
132 | |||
133 | struct bnx2x_vfop_args_qx { | ||
134 | int qid; | ||
135 | bool en_add; | ||
136 | }; | ||
137 | |||
138 | struct bnx2x_vfop_args_filters { | ||
139 | struct bnx2x_vfop_filters *multi_filter; | ||
140 | atomic_t *credit; /* non NULL means 'don't consume credit' */ | ||
141 | }; | ||
142 | |||
143 | union bnx2x_vfop_args { | ||
144 | struct bnx2x_vfop_args_mcast mc_list; | ||
145 | struct bnx2x_vfop_args_qctor qctor; | ||
146 | struct bnx2x_vfop_args_qdtor qdtor; | ||
147 | struct bnx2x_vfop_args_defvlan defvlan; | ||
148 | struct bnx2x_vfop_args_qx qx; | ||
149 | struct bnx2x_vfop_args_filters filters; | ||
150 | }; | ||
151 | |||
152 | struct bnx2x_vfop { | ||
153 | struct list_head link; | ||
154 | int rc; /* return code */ | ||
155 | int state; /* next state */ | ||
156 | union bnx2x_vfop_args args; /* extra arguments */ | ||
157 | union bnx2x_vfop_params *op_p; /* ramrod params */ | ||
158 | |||
159 | /* state machine callbacks */ | ||
160 | vfop_handler_t transition; | ||
161 | vfop_handler_t done; | ||
162 | }; | ||
163 | |||
86 | /* vf context */ | 164 | /* vf context */ |
87 | struct bnx2x_virtf { | 165 | struct bnx2x_virtf { |
88 | u16 cfg_flags; | 166 | u16 cfg_flags; |
@@ -281,6 +359,12 @@ struct bnx2x_vfdb { | |||
281 | u32 flrd_vfs[FLRD_VFS_DWORDS]; | 359 | u32 flrd_vfs[FLRD_VFS_DWORDS]; |
282 | }; | 360 | }; |
283 | 361 | ||
362 | /* queue access */ | ||
363 | static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index) | ||
364 | { | ||
365 | return &(vf->vfqs[index]); | ||
366 | } | ||
367 | |||
284 | static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) | 368 | static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) |
285 | { | 369 | { |
286 | return vf->igu_base_id + sb_idx; | 370 | return vf->igu_base_id + sb_idx; |
@@ -295,7 +379,22 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp); | |||
295 | int bnx2x_iov_nic_init(struct bnx2x *bp); | 379 | int bnx2x_iov_nic_init(struct bnx2x *bp); |
296 | void bnx2x_iov_init_dq(struct bnx2x *bp); | 380 | void bnx2x_iov_init_dq(struct bnx2x *bp); |
297 | void bnx2x_iov_init_dmae(struct bnx2x *bp); | 381 | void bnx2x_iov_init_dmae(struct bnx2x *bp); |
382 | void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, | ||
383 | struct bnx2x_queue_sp_obj **q_obj); | ||
384 | void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work); | ||
385 | int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); | ||
386 | void bnx2x_iov_sp_task(struct bnx2x *bp); | ||
387 | /* global vf mailbox routines */ | ||
388 | void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event); | ||
298 | void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); | 389 | void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); |
390 | static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp, | ||
391 | struct bnx2x_virtf *vf) | ||
392 | { | ||
393 | WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); | ||
394 | WARN_ON(list_empty(&vf->op_list_head)); | ||
395 | return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link); | ||
396 | } | ||
397 | |||
299 | int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); | 398 | int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); |
300 | /* VF FLR helpers */ | 399 | /* VF FLR helpers */ |
301 | int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid); | 400 | int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid); |
@@ -305,4 +404,7 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type, | |||
305 | void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, | 404 | void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, |
306 | u16 type, u16 length); | 405 | u16 type, u16 length); |
307 | void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list); | 406 | void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list); |
407 | |||
408 | bool bnx2x_tlv_supported(u16 tlvtype); | ||
409 | |||
308 | #endif /* bnx2x_sriov.h */ | 410 | #endif /* bnx2x_sriov.h */ |
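struct bnx2x_vfop defined above is a continuation-style state machine: each pending operation sits on the VF's op_list_head, carries its next state and a transition callback, and bnx2x_iov_sp_task() (added in bnx2x_sriov.c above) simply re-runs the transition of the head op whenever op_in_progress is set, typically once per ramrod completion. A stripped-down user-space sketch of that pattern follows; the list handling and names are illustrative, not the driver's.

#include <stdio.h>

struct ex_vfop;
typedef void (*ex_handler_t)(struct ex_vfop *op);

/* one pending operation: next state plus what to run on the next kick */
struct ex_vfop {
    int state;
    ex_handler_t transition;
    struct ex_vfop *next;       /* stands in for the list_head link */
};

struct ex_vf {
    struct ex_vfop *op_list_head;   /* current op is always the head */
    int op_in_progress;
};

/* example transition: walk through two states, then retire the op */
static void ex_queue_setup_transition(struct ex_vfop *op)
{
    switch (op->state++) {
    case 0: printf("send INIT ramrod, wait for completion\n"); break;
    case 1: printf("send SETUP ramrod, wait for completion\n"); break;
    default: printf("op done\n"); break;
    }
}

/* analogue of bnx2x_iov_sp_task(): kick the head op if work is pending */
static void ex_iov_sp_task(struct ex_vf *vf)
{
    if (vf->op_list_head && vf->op_in_progress)
        vf->op_list_head->transition(vf->op_list_head);
}

int main(void)
{
    struct ex_vfop op = { .state = 0, .transition = ex_queue_setup_transition };
    struct ex_vf vf = { .op_list_head = &op, .op_in_progress = 1 };

    /* each "ramrod completion" re-schedules the sp task, driving one step */
    ex_iov_sp_task(&vf);
    ex_iov_sp_task(&vf);
    ex_iov_sp_task(&vf);
    return 0;
}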
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index d1a8644200d9..6e5e0638ee81 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -79,6 +79,24 @@ void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) | |||
79 | tlv->type, tlv->length); | 79 | tlv->type, tlv->length); |
80 | } | 80 | } |
81 | 81 | ||
82 | /* test whether we support a tlv type */ | ||
83 | bool bnx2x_tlv_supported(u16 tlvtype) | ||
84 | { | ||
85 | return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; | ||
86 | } | ||
87 | |||
88 | static inline int bnx2x_pfvf_status_codes(int rc) | ||
89 | { | ||
90 | switch (rc) { | ||
91 | case 0: | ||
92 | return PFVF_STATUS_SUCCESS; | ||
93 | case -ENOMEM: | ||
94 | return PFVF_STATUS_NO_RESOURCE; | ||
95 | default: | ||
96 | return PFVF_STATUS_FAILURE; | ||
97 | } | ||
98 | } | ||
99 | |||
82 | /* General service functions */ | 100 | /* General service functions */ |
83 | static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid) | 101 | static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid) |
84 | { | 102 | { |
@@ -116,3 +134,139 @@ void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) | |||
116 | /* enable the VF access to the mailbox */ | 134 | /* enable the VF access to the mailbox */ |
117 | bnx2x_vf_enable_access(bp, abs_vfid); | 135 | bnx2x_vf_enable_access(bp, abs_vfid); |
118 | } | 136 | } |
137 | |||
138 | /* this works only on !E1h */ | ||
139 | static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf, | ||
140 | dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi, | ||
141 | u32 vf_addr_lo, u32 len32) | ||
142 | { | ||
143 | struct dmae_command dmae; | ||
144 | |||
145 | if (CHIP_IS_E1x(bp)) { | ||
146 | BNX2X_ERR("Chip revision does not support VFs\n"); | ||
147 | return DMAE_NOT_RDY; | ||
148 | } | ||
149 | |||
150 | if (!bp->dmae_ready) { | ||
151 | BNX2X_ERR("DMAE is not ready, can not copy\n"); | ||
152 | return DMAE_NOT_RDY; | ||
153 | } | ||
154 | |||
155 | /* set opcode and fixed command fields */ | ||
156 | bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI); | ||
157 | |||
158 | if (from_vf) { | ||
159 | dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) | | ||
160 | (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) | | ||
161 | (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT); | ||
162 | |||
163 | dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT); | ||
164 | |||
165 | dmae.src_addr_lo = vf_addr_lo; | ||
166 | dmae.src_addr_hi = vf_addr_hi; | ||
167 | dmae.dst_addr_lo = U64_LO(pf_addr); | ||
168 | dmae.dst_addr_hi = U64_HI(pf_addr); | ||
169 | } else { | ||
170 | dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) | | ||
171 | (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) | | ||
172 | (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT); | ||
173 | |||
174 | dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT); | ||
175 | |||
176 | dmae.src_addr_lo = U64_LO(pf_addr); | ||
177 | dmae.src_addr_hi = U64_HI(pf_addr); | ||
178 | dmae.dst_addr_lo = vf_addr_lo; | ||
179 | dmae.dst_addr_hi = vf_addr_hi; | ||
180 | } | ||
181 | dmae.len = len32; | ||
182 | bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE); | ||
183 | |||
184 | /* issue the command and wait for completion */ | ||
185 | return bnx2x_issue_dmae_with_comp(bp, &dmae); | ||
186 | } | ||
187 | |||
188 | /* dispatch request */ | ||
189 | static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, | ||
190 | struct bnx2x_vf_mbx *mbx) | ||
191 | { | ||
192 | int i; | ||
193 | |||
194 | /* check if tlv type is known */ | ||
195 | if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) { | ||
196 | /* switch on the opcode */ | ||
197 | switch (mbx->first_tlv.tl.type) { | ||
198 | } | ||
199 | } else { | ||
200 | /* unknown TLV - this may belong to a VF driver from the future | ||
201 | * - a version written after this PF driver was written, which | ||
202 | * supports features unknown as of yet. Too bad since we don't | ||
203 | * support them. Or this may be because someone wrote a crappy | ||
204 | * VF driver and is sending garbage over the channel. | ||
205 | */ | ||
206 | BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n", | ||
207 | mbx->first_tlv.tl.type, mbx->first_tlv.tl.length); | ||
208 | for (i = 0; i < 20; i++) | ||
209 | DP_CONT(BNX2X_MSG_IOV, "%x ", | ||
210 | mbx->msg->req.tlv_buf_size.tlv_buffer[i]); | ||
211 | } | ||
212 | } | ||
213 | |||
214 | /* handle new vf-pf message */ | ||
215 | void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) | ||
216 | { | ||
217 | struct bnx2x_virtf *vf; | ||
218 | struct bnx2x_vf_mbx *mbx; | ||
219 | u8 vf_idx; | ||
220 | int rc; | ||
221 | |||
222 | DP(BNX2X_MSG_IOV, | ||
223 | "vf pf event received: vfid %d, address_hi %x, address lo %x", | ||
224 | vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo); | ||
225 | /* Sanity checks - consider removing later */ | ||
226 | |||
227 | /* check if the vf_id is valid */ | ||
228 | if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf > | ||
229 | BNX2X_NR_VIRTFN(bp)) { | ||
230 | BNX2X_ERR("Illegal vf_id %d max allowed: %d\n", | ||
231 | vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp)); | ||
232 | goto mbx_done; | ||
233 | } | ||
234 | vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id); | ||
235 | mbx = BP_VF_MBX(bp, vf_idx); | ||
236 | |||
237 | /* verify an event is not currently being processed - | ||
238 | * debug failsafe only | ||
239 | */ | ||
240 | if (mbx->flags & VF_MSG_INPROCESS) { | ||
241 | BNX2X_ERR("Previous message is still being processed, vf_id %d\n", | ||
242 | vfpf_event->vf_id); | ||
243 | goto mbx_done; | ||
244 | } | ||
245 | vf = BP_VF(bp, vf_idx); | ||
246 | |||
247 | /* save the VF message address */ | ||
248 | mbx->vf_addr_hi = vfpf_event->msg_addr_hi; | ||
249 | mbx->vf_addr_lo = vfpf_event->msg_addr_lo; | ||
250 | DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", | ||
251 | mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); | ||
252 | |||
253 | /* dmae to get the VF request */ | ||
254 | rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid, | ||
255 | mbx->vf_addr_hi, mbx->vf_addr_lo, | ||
256 | sizeof(union vfpf_tlvs)/4); | ||
257 | if (rc) { | ||
258 | BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid); | ||
259 | goto mbx_error; | ||
260 | } | ||
261 | |||
262 | /* process the VF message header */ | ||
263 | mbx->first_tlv = mbx->msg->req.first_tlv; | ||
264 | |||
265 | /* dispatch the request (will prepare the response) */ | ||
266 | bnx2x_vf_mbx_request(bp, vf, mbx); | ||
267 | goto mbx_done; | ||
268 | |||
269 | mbx_error: | ||
270 | mbx_done: | ||
271 | return; | ||
272 | } | ||
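bnx2x_tlv_supported() and bnx2x_pfvf_status_codes() above stay deliberately small: the first bounds-checks a request type against the open (CHANNEL_TLV_NONE, CHANNEL_TLV_MAX) interval, the second folds an internal error code into one of the three status values a VF understands. The dispatch switch in bnx2x_vf_mbx_request() is still empty at this point; the per-request handlers arrive later in the series. A self-contained sketch of how the two helpers compose in a dispatcher; the enum values and names are stand-ins, not the real channel definitions.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* stand-in values; the real ones come from the VF-PF channel header */
enum { EX_TLV_NONE, EX_TLV_ACQUIRE, EX_TLV_INIT, EX_TLV_MAX };
enum { EX_STATUS_SUCCESS, EX_STATUS_FAILURE, EX_STATUS_NO_RESOURCE };

/* same open-interval check as bnx2x_tlv_supported() */
static bool ex_tlv_supported(unsigned int type)
{
    return EX_TLV_NONE < type && type < EX_TLV_MAX;
}

/* same rc folding as bnx2x_pfvf_status_codes() */
static int ex_pfvf_status(int rc)
{
    switch (rc) {
    case 0:        return EX_STATUS_SUCCESS;
    case -ENOMEM:  return EX_STATUS_NO_RESOURCE;
    default:       return EX_STATUS_FAILURE;
    }
}

/* toy dispatcher: validate the type, "handle" it, report a channel status */
static int ex_handle_request(unsigned int type)
{
    int rc;

    if (!ex_tlv_supported(type))
        return EX_STATUS_FAILURE;   /* unknown or garbage request */

    rc = (type == EX_TLV_INIT) ? -ENOMEM : 0;   /* pretend INIT ran out of memory */
    return ex_pfvf_status(rc);
}

int main(void)
{
    printf("ACQUIRE -> status %d\n", ex_handle_request(EX_TLV_ACQUIRE));
    printf("INIT    -> status %d\n", ex_handle_request(EX_TLV_INIT));
    printf("bogus   -> status %d\n", ex_handle_request(42));
    return 0;
}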