author		Ariel Elior <ariele@broadcom.com>	2013-01-01 00:22:33 -0500
committer	David S. Miller <davem@davemloft.net>	2013-01-02 04:45:06 -0500
commit		fd1fc79dd6deb88ebf38ae9673190da999b3209f (patch)
tree		0e9ff0dda51c3f200b7d40609532f16c8d0f2a90 /drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
parent		b56e9670ffa4de1a3cf0ca2f89ff5e2e0c31a1f7 (diff)
bnx2x: Infrastructure for VF <-> PF request on PF side
Support the interrupt from the device which indicates that a VF has
placed a request on the VF <-> PF channel.
The PF driver issues a DMAE to retrieve the request from VM memory
(the Guest Physical Address of the request is contained in the
interrupt; the PF driver uses this GPA in the DMAE request, and the
IOMMU translates it to the correct physical address).
The arriving request is examined to identify the sending VF, and the
PF driver allocates a work item to handle the VF operation (vfop).
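
In short, the PF side reads the sending VF's identity and the guest physical
address of its request out of the channel event, DMAEs the request out of VM
memory (with the IOMMU translating the GPA), and queues a work item to process
it; in the diff below this is the EVENT_RING_OPCODE_VF_PF_CHANNEL case that
calls bnx2x_vf_mbx(). The stand-alone C sketch below only models that sequence:
every type and helper in it (vf_pf_event, vf_mbx_request, dmae_copy_from_guest,
pf_handle_vf_pf_event) is hypothetical, and a plain memcpy stands in for the
real DMAE transaction.

/* Hypothetical, self-contained sketch of the PF-side VF -> PF channel flow.
 * None of these types or helpers exist in the bnx2x driver; they only model
 * the sequence described in the commit message.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vf_pf_event {		/* data carried by the channel interrupt */
	uint8_t  vf_id;		/* which VF placed the request */
	uint32_t msg_addr_hi;	/* guest physical address of the request */
	uint32_t msg_addr_lo;
};

struct vf_mbx_request {		/* request as placed in VM memory */
	uint16_t opcode;
	uint16_t resp_offset;
};

/* Stand-in for the DMAE engine: in hardware the GPA is handed to the DMAE
 * and the IOMMU translates it to a host physical address.  Here the "guest
 * memory" is just a buffer we memcpy from.
 */
static void dmae_copy_from_guest(void *dst, const void *guest_mem,
				 uint64_t gpa, size_t len)
{
	memcpy(dst, (const uint8_t *)guest_mem + gpa, len);
}

/* Model of the PF handler: pull the request out of VM memory, recognize the
 * sending VF and "queue" a work item (here just a printf).
 */
static void pf_handle_vf_pf_event(const struct vf_pf_event *ev,
				  const void *guest_mem)
{
	uint64_t gpa = ((uint64_t)ev->msg_addr_hi << 32) | ev->msg_addr_lo;
	struct vf_mbx_request req;

	dmae_copy_from_guest(&req, guest_mem, gpa, sizeof(req));
	printf("VF %u sent opcode 0x%x -> queue vfop\n", ev->vf_id, req.opcode);
}

int main(void)
{
	uint8_t guest_mem[64] = { 0 };
	struct vf_mbx_request req_in = { .opcode = 0x11, .resp_offset = 0 };
	struct vf_pf_event ev = { .vf_id = 3, .msg_addr_hi = 0, .msg_addr_lo = 16 };

	/* the VF places a request at GPA 16; the PF retrieves and handles it */
	memcpy(&guest_mem[16], &req_in, sizeof(req_in));
	pf_handle_vf_pf_event(&ev, guest_mem);
	return 0;
}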
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c')
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c	200
1 file changed, 159 insertions(+), 41 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e55de72a05ca..e6738f6984ad 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -347,6 +347,65 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
 #define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
 #define DMAE_DP_DST_NONE	"dst_addr [none]"
 
+void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+{
+	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+
+	switch (dmae->opcode & DMAE_COMMAND_DST) {
+	case DMAE_CMD_DST_PCI:
+		if (src_type == DMAE_CMD_SRC_PCI)
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+			   dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		else
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
+			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_lo >> 2,
+			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+			   dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		break;
+	case DMAE_CMD_DST_GRC:
+		if (src_type == DMAE_CMD_SRC_PCI)
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+			   dmae->len, dmae->dst_addr_lo >> 2,
+			   dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		else
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src [%08x], len [%d*4], dst [%08x]\n"
+			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_lo >> 2,
+			   dmae->len, dmae->dst_addr_lo >> 2,
+			   dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		break;
+	default:
+		if (src_type == DMAE_CMD_SRC_PCI)
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
+			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		else
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
+			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_lo >> 2,
+			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		break;
+	}
+}
 
 /* copy command into DMAE command memory and set DMAE command go */
 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@ -397,7 +456,7 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 	return opcode;
 }
 
-static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
 				      struct dmae_command *dmae,
 				      u8 src_type, u8 dst_type)
 {
@@ -413,9 +472,8 @@ static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
 	dmae->comp_val = DMAE_COMP_VAL;
 }
 
-/* issue a dmae command over the init-channel and wailt for completion */
-static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
-				      struct dmae_command *dmae)
+/* issue a dmae command over the init-channel and wait for completion */
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
 {
 	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
 	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
@@ -1600,6 +1658,24 @@ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
 
 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
 
+/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
+static int bnx2x_schedule_sp_task(struct bnx2x *bp)
+{
+	/* Set the interrupt occurred bit for the sp-task to recognize it
+	 * must ack the interrupt and transition according to the IGU
+	 * state machine.
+	 */
+	atomic_set(&bp->interrupt_occurred, 1);
+
+	/* The sp_task must execute only after this bit
+	 * is set, otherwise we will get out of sync and miss all
+	 * further interrupts. Hence, the barrier.
+	 */
+	smp_wmb();
+
+	/* schedule sp_task to workqueue */
+	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+}
 
 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 {
@@ -1614,6 +1690,13 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 	   fp->index, cid, command, bp->state,
 	   rr_cqe->ramrod_cqe.ramrod_type);
 
+	/* If cid is within VF range, replace the slowpath object with the
+	 * one corresponding to this VF
+	 */
+	if (cid >= BNX2X_FIRST_VF_CID &&
+	    cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
+		bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
+
 	switch (command) {
 	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
 		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
@@ -1665,6 +1748,8 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 #else
 		return;
 #endif
+	/* SRIOV: reschedule any 'in_progress' operations */
+	bnx2x_iov_sp_event(bp, cid, true);
 
 	smp_mb__before_atomic_inc();
 	atomic_inc(&bp->cq_spq_left);
@@ -1690,8 +1775,8 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
 		smp_mb__after_clear_bit();
 
-		/* schedule workqueue to send ack to MCP */
-		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+		/* schedule the sp task as mcp ack is required */
+		bnx2x_schedule_sp_task(bp);
 	}
 
 	return;
@@ -1751,7 +1836,11 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 	}
 
 	if (unlikely(status & 0x1)) {
-		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+		/* schedule sp task to perform default status block work, ack
+		 * attentions and enable interrupts.
+		 */
+		bnx2x_schedule_sp_task(bp);
 
 		status &= ~0x1;
 		if (!status)
@@ -4833,7 +4922,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 	u8 echo;
 	u32 cid;
 	u8 opcode;
-	int spqe_cnt = 0;
+	int rc, spqe_cnt = 0;
 	struct bnx2x_queue_sp_obj *q_obj;
 	struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
 	struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
@@ -4864,12 +4953,23 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 
 		elem = &bp->eq_ring[EQ_DESC(sw_cons)];
 
+		rc = bnx2x_iov_eq_sp_event(bp, elem);
+		if (!rc) {
+			DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
+			   rc);
+			goto next_spqe;
+		}
 		cid = SW_CID(elem->message.data.cfc_del_event.cid);
 		opcode = elem->message.opcode;
 
 
 		/* handle eq element */
 		switch (opcode) {
+		case EVENT_RING_OPCODE_VF_PF_CHANNEL:
+			DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
+			bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
+			continue;
+
 		case EVENT_RING_OPCODE_STAT_QUERY:
 			DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
 			   "got statistics comp event %d\n",
@@ -5035,50 +5135,65 @@ next_spqe:
 static void bnx2x_sp_task(struct work_struct *work)
 {
 	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
-	u16 status;
 
-	status = bnx2x_update_dsb_idx(bp);
-/* if (status == 0) */
-/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
+	DP(BNX2X_MSG_SP, "sp task invoked\n");
 
-	DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status);
+	/* make sure the atomic interupt_occurred has been written */
+	smp_rmb();
+	if (atomic_read(&bp->interrupt_occurred)) {
 
-	/* HW attentions */
-	if (status & BNX2X_DEF_SB_ATT_IDX) {
-		bnx2x_attn_int(bp);
-		status &= ~BNX2X_DEF_SB_ATT_IDX;
-	}
+		/* what work needs to be performed? */
+		u16 status = bnx2x_update_dsb_idx(bp);
 
-	/* SP events: STAT_QUERY and others */
-	if (status & BNX2X_DEF_SB_IDX) {
-		struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+		DP(BNX2X_MSG_SP, "status %x\n", status);
+		DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
+		atomic_set(&bp->interrupt_occurred, 0);
+
+		/* HW attentions */
+		if (status & BNX2X_DEF_SB_ATT_IDX) {
+			bnx2x_attn_int(bp);
+			status &= ~BNX2X_DEF_SB_ATT_IDX;
+		}
+
+		/* SP events: STAT_QUERY and others */
+		if (status & BNX2X_DEF_SB_IDX) {
+			struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
 
 			if (FCOE_INIT(bp) &&
 			    (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
-			/*
-			 * Prevent local bottom-halves from running as
-			 * we are going to change the local NAPI list.
-			 */
-			local_bh_disable();
-			napi_schedule(&bnx2x_fcoe(bp, napi));
-			local_bh_enable();
+				/* Prevent local bottom-halves from running as
+				 * we are going to change the local NAPI list.
+				 */
+				local_bh_disable();
+				napi_schedule(&bnx2x_fcoe(bp, napi));
+				local_bh_enable();
+			}
+
+			/* Handle EQ completions */
+			bnx2x_eq_int(bp);
+			bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+				     le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+
+			status &= ~BNX2X_DEF_SB_IDX;
 		}
 
-		/* Handle EQ completions */
-		bnx2x_eq_int(bp);
+		/* if status is non zero then perhaps something went wrong */
+		if (unlikely(status))
+			DP(BNX2X_MSG_SP,
+			   "got an unknown interrupt! (status 0x%x)\n", status);
 
-		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
-			     le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+		/* ack status block only if something was actually handled */
+		bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+			     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
 
-		status &= ~BNX2X_DEF_SB_IDX;
 	}
 
-	if (unlikely(status))
-		DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n",
-		   status);
-
-	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
-		     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+	/* must be called after the EQ processing (since eq leads to sriov
+	 * ramrod completion flows).
+	 * This flow may have been scheduled by the arrival of a ramrod
+	 * completion, or by the sriov code rescheduling itself.
+	 */
+	bnx2x_iov_sp_task(bp);
 
 	/* afex - poll to check if VIFSET_ACK should be sent to MFW */
 	if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
@@ -5111,7 +5226,10 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 		rcu_read_unlock();
 	}
 
-	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+	/* schedule sp task to perform default status block work, ack
+	 * attentions and enable interrupts.
+	 */
+	bnx2x_schedule_sp_task(bp);
 
 	return IRQ_HANDLED;
 }
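
The bnx2x_schedule_sp_task()/bnx2x_sp_task() pair added above follows a
publish/consume pattern: the ISR side sets bp->interrupt_occurred and issues
smp_wmb() before queueing the work, and the work side issues smp_rmb() before
testing the flag, so observing the flag guarantees the earlier state is
visible. The following is a rough userspace analogue of that ordering, using
C11 release/acquire atomics in place of the kernel's barrier and workqueue
primitives; the names and the spin-wait are illustrative only (the real
sp_task simply skips the work when the flag is clear).

/* Userspace analogue of the interrupt_occurred handshake; not bnx2x code. */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static atomic_int interrupt_occurred;
static int dsb_status;			/* stands in for the default status block */

static void *sp_task(void *arg)
{
	(void)arg;
	/* acquire pairs with the release in the "ISR": if the flag is seen,
	 * the status written before it is seen as well.
	 */
	while (!atomic_load_explicit(&interrupt_occurred, memory_order_acquire))
		;			/* demo only; the real sp_task does not spin */
	atomic_store_explicit(&interrupt_occurred, 0, memory_order_relaxed);
	printf("sp task: handling status 0x%x\n", dsb_status);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, sp_task, NULL);

	/* "ISR": publish the work, then make it visible before scheduling */
	dsb_status = 0x1;
	atomic_store_explicit(&interrupt_occurred, 1, memory_order_release);

	pthread_join(worker, NULL);
	return 0;
}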