author		Ariel Elior <ariele@broadcom.com>	2013-01-01 00:22:42 -0500
committer	David S. Miller <davem@davemloft.net>	2013-01-02 04:45:07 -0500
commit		d16132cef8a72ff7563aff95f03bf505779fe3e6 (patch)
tree		9d730327b75d8a14a5a7d5af070c90660870539d /drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
parent		f1929b016c2161c364e45c21788aaae938ae557c (diff)
bnx2x: Support VF FLR
The FLR indication arrives as an attention from the management processor. Upon VF FLR, all FLRed functions in the indication have already been released by the firmware, so what remains is to free the resources allocated to those VFs and to clean any remainders from the device (FLR final cleanup).

Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
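As a rough orientation before reading the patch itself, the following is a minimal, self-contained C sketch of that flow: read the FLR'd-VF bitmap published by the management processor, mark those VFs as reset, run the per-VF cleanup (queues, final HW cleanup, resource release), and then acknowledge every requested bit. It is illustrative only; every name in it (handle_flr_event, vf_release_queues, mcp_vf_disabled, drv_ack_vf_disabled, MAX_VFS) is a hypothetical stand-in, not a bnx2x API, and the real asynchronous vfop state machines and SHMEM2 accessors appear in the diff below.

/*
 * Illustrative sketch only -- NOT the driver code. Types and the
 * shared-memory layout are simplified stand-ins for the real bnx2x
 * structures.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_VFS        64
#define FLR_BITMAP_U32 (MAX_VFS / 32)

enum vf_state { VF_FREE, VF_ACTIVE, VF_RESET };

struct vf {
	int abs_vfid;
	enum vf_state state;
	bool flr_pending;
};

/* Stand-ins for the MCP shared-memory words; the real driver accesses
 * mcp_vf_disabled[] and drv_ack_vf_disabled[] through SHMEM2 helpers. */
static uint32_t mcp_vf_disabled[FLR_BITMAP_U32];
static uint32_t drv_ack_vf_disabled[FLR_BITMAP_U32];

/* Per-VF cleanup steps, reduced to log lines for illustration. */
static void vf_release_queues(struct vf *vf)
{
	printf("VF[%d] per-queue FLR: clear vlans, clear macs, terminate\n",
	       vf->abs_vfid);
}

static void vf_final_cleanup_hw(struct vf *vf)
{
	printf("VF[%d] final cleanup: DQ usage counter, FW cleanup, TX flush\n",
	       vf->abs_vfid);
}

static void vf_free_resources(struct vf *vf)
{
	printf("VF[%d] free resources, re-enable mailbox\n", vf->abs_vfid);
	vf->state = VF_FREE;
}

/* Handle an FLR attention: mark every FLRed VF, clean each one up, then
 * acknowledge all requested bits back to the management processor. */
static void handle_flr_event(struct vf *vfs, int nr_vfs)
{
	int i;

	for (i = 0; i < nr_vfs; i++) {
		struct vf *vf = &vfs[i];
		uint32_t bit = 1u << (vf->abs_vfid & 31);

		if (mcp_vf_disabled[vf->abs_vfid / 32] & bit) {
			/* the firmware has already released the function */
			vf->state = VF_RESET;
			vf->flr_pending = true;
		}
	}

	for (i = 0; i < nr_vfs; i++) {
		struct vf *vf = &vfs[i];

		if (!vf->flr_pending)
			continue;
		vf_release_queues(vf);
		vf_final_cleanup_hw(vf);
		vf_free_resources(vf);
		vf->flr_pending = false;
	}

	/* Ack every requested bit, even for VFs that were never opened;
	 * otherwise the MCP raises the attention again immediately. */
	for (i = 0; i < FLR_BITMAP_U32; i++)
		drv_ack_vf_disabled[i] = mcp_vf_disabled[i];
	/* ...a "VF disabled done" command would go to the MCP here, after
	 * which the ack words are cleared again... */
	for (i = 0; i < FLR_BITMAP_U32; i++)
		drv_ack_vf_disabled[i] = 0;
}

int main(void)
{
	struct vf vfs[2] = { { .abs_vfid = 0 }, { .abs_vfid = 1 } };

	mcp_vf_disabled[0] = 0x2;	/* pretend VF 1 was FLRed */
	handle_flr_event(vfs, 2);
	return 0;
}

The driver version below performs the same sequence, but asynchronously through chained vfop commands, per VF and per queue.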
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c')
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c	294
1 file changed, 294 insertions, 0 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 71d0976b14d8..c96ce5bbf446 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -138,6 +138,17 @@ enum bnx2x_vfop_mcast_state {
 	BNX2X_VFOP_MCAST_ADD,
 	BNX2X_VFOP_MCAST_CHK_DONE
 };
+enum bnx2x_vfop_qflr_state {
+	BNX2X_VFOP_QFLR_CLR_VLAN,
+	BNX2X_VFOP_QFLR_CLR_MAC,
+	BNX2X_VFOP_QFLR_TERMINATE,
+	BNX2X_VFOP_QFLR_DONE
+};
+
+enum bnx2x_vfop_flr_state {
+	BNX2X_VFOP_FLR_QUEUES,
+	BNX2X_VFOP_FLR_HW
+};
 
 enum bnx2x_vfop_close_state {
 	BNX2X_VFOP_CLOSE_QUEUES,
@@ -973,6 +984,94 @@ int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
 	return -ENOMEM;
 }
 
+/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
+static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+	int qid = vfop->args.qx.qid;
+	enum bnx2x_vfop_qflr_state state = vfop->state;
+	struct bnx2x_queue_state_params *qstate;
+	struct bnx2x_vfop_cmd cmd;
+
+	bnx2x_vfop_reset_wq(vf);
+
+	if (vfop->rc < 0)
+		goto op_err;
+
+	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);
+
+	cmd.done = bnx2x_vfop_qflr;
+	cmd.block = false;
+
+	switch (state) {
+	case BNX2X_VFOP_QFLR_CLR_VLAN:
+		/* vlan-clear-all: driver-only, don't consume credit */
+		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
+		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+		if (vfop->rc)
+			goto op_err;
+		return;
+
+	case BNX2X_VFOP_QFLR_CLR_MAC:
+		/* mac-clear-all: driver only consume credit */
+		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
+		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+		DP(BNX2X_MSG_IOV,
+		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
+		   vf->abs_vfid, vfop->rc);
+		if (vfop->rc)
+			goto op_err;
+		return;
+
+	case BNX2X_VFOP_QFLR_TERMINATE:
+		qstate = &vfop->op_p->qctor.qstate;
+		memset(qstate, 0, sizeof(*qstate));
+		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+		vfop->state = BNX2X_VFOP_QFLR_DONE;
+
+		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
+		   vf->abs_vfid, qstate->q_obj->state);
+
+		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
+			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
+			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
+			vfop->rc = bnx2x_queue_state_change(bp, qstate);
+			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
+		} else {
+			goto op_done;
+		}
+
+op_err:
+	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
+		  vf->abs_vfid, qid, vfop->rc);
+op_done:
+	case BNX2X_VFOP_QFLR_DONE:
+		bnx2x_vfop_end(bp, vf, vfop);
+		return;
+	default:
+		bnx2x_vfop_default(state);
+	}
+op_pending:
+	return;
+}
+
+static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
+			       struct bnx2x_virtf *vf,
+			       struct bnx2x_vfop_cmd *cmd,
+			       int qid)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+	if (vfop) {
+		vfop->args.qx.qid = qid;
+		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
+				 bnx2x_vfop_qflr, cmd->done);
+		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
+					     cmd->block);
+	}
+	return -ENOMEM;
+}
+
 /* VFOP multi-casts */
 static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
@@ -1430,6 +1529,201 @@ static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	vf->state = VF_FREE;
 }
 
+static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+	/* DQ usage counter */
+	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
+					"DQ VF usage counter timed out",
+					poll_cnt);
+	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+	/* FW cleanup command - poll for the results */
+	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
+				   poll_cnt))
+		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
+
+	/* verify TX hw is flushed */
+	bnx2x_tx_hw_flushed(bp, poll_cnt);
+}
+
+static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
+	enum bnx2x_vfop_flr_state state = vfop->state;
+	struct bnx2x_vfop_cmd cmd = {
+		.done = bnx2x_vfop_flr,
+		.block = false,
+	};
+
+	if (vfop->rc < 0)
+		goto op_err;
+
+	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+	switch (state) {
+	case BNX2X_VFOP_FLR_QUEUES:
+		/* the cleanup operations are valid if and only if the VF
+		 * was first acquired.
+		 */
+		if (++(qx->qid) < vf_rxq_count(vf)) {
+			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
+						       qx->qid);
+			if (vfop->rc)
+				goto op_err;
+			return;
+		}
+		/* remove multicasts */
+		vfop->state = BNX2X_VFOP_FLR_HW;
+		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
+						0, true);
+		if (vfop->rc)
+			goto op_err;
+		return;
+	case BNX2X_VFOP_FLR_HW:
+
+		/* dispatch final cleanup and wait for HW queues to flush */
+		bnx2x_vf_flr_clnup_hw(bp, vf);
+
+		/* release VF resources */
+		bnx2x_vf_free_resc(bp, vf);
+
+		/* re-open the mailbox */
+		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+
+		goto op_done;
+	default:
+		bnx2x_vfop_default(state);
+	}
+op_err:
+	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
+op_done:
+	vf->flr_clnup_stage = VF_FLR_ACK;
+	bnx2x_vfop_end(bp, vf, vfop);
+	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+}
+
+static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
+			      struct bnx2x_virtf *vf,
+			      vfop_handler_t done)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	if (vfop) {
+		vfop->args.qx.qid = -1; /* loop */
+		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
+				 bnx2x_vfop_flr, done);
+		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
+	}
+	return -ENOMEM;
+}
+
+static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
+{
+	int i = prev_vf ? prev_vf->index + 1 : 0;
+	struct bnx2x_virtf *vf;
+
+	/* find next VF to cleanup */
+next_vf_to_clean:
+	for (;
+	     i < BNX2X_NR_VIRTFN(bp) &&
+	     (bnx2x_vf(bp, i, state) != VF_RESET ||
+	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
+	     i++)
+		;
+
+	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. num of vfs: %d\n", i,
+	   BNX2X_NR_VIRTFN(bp));
+
+	if (i < BNX2X_NR_VIRTFN(bp)) {
+		vf = BP_VF(bp, i);
+
+		/* lock the vf pf channel */
+		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+
+		/* invoke the VF FLR SM */
+		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
+			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
+				  vf->abs_vfid);
+
+			/* mark the VF to be ACKED and continue */
+			vf->flr_clnup_stage = VF_FLR_ACK;
+			goto next_vf_to_clean;
+		}
+		return;
+	}
+
+	/* we are done, update vf records */
+	for_each_vf(bp, i) {
+		vf = BP_VF(bp, i);
+
+		if (vf->flr_clnup_stage != VF_FLR_ACK)
+			continue;
+
+		vf->flr_clnup_stage = VF_FLR_EPILOG;
+	}
+
+	/* Acknowledge the handled VFs.
+	 * We acknowledge all the VFs for which an FLR was requested, even
+	 * if among them there are some we never opened, since the MCP
+	 * will interrupt us again immediately if we only ack some of the
+	 * bits, resulting in an endless loop. This can happen, for example,
+	 * in KVM where an 'all ones' FLR request is sometimes given by the
+	 * hypervisor.
+	 */
+	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
+	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+	for (i = 0; i < FLRD_VFS_DWORDS; i++)
+		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
+			  bp->vfdb->flrd_vfs[i]);
+
+	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
+
+	/* clear the acked bits - better yet if the MCP implemented
+	 * write to clear semantics
+	 */
+	for (i = 0; i < FLRD_VFS_DWORDS; i++)
+		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
+}
+
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
+{
+	int i;
+
+	/* Read FLR'd VFs */
+	for (i = 0; i < FLRD_VFS_DWORDS; i++)
+		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
+
+	DP(BNX2X_MSG_MCP,
+	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
+	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+
+	for_each_vf(bp, i) {
+		struct bnx2x_virtf *vf = BP_VF(bp, i);
+		u32 reset = 0;
+
+		if (vf->abs_vfid < 32)
+			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
+		else
+			reset = bp->vfdb->flrd_vfs[1] &
+				(1 << (vf->abs_vfid - 32));
+
+		if (reset) {
+			/* set as reset and ready for cleanup */
+			vf->state = VF_RESET;
+			vf->flr_clnup_stage = VF_FLR_CLN;
+
+			DP(BNX2X_MSG_IOV,
+			   "Initiating Final cleanup for VF %d\n",
+			   vf->abs_vfid);
+		}
+	}
+
+	/* do the FLR cleanup for all marked VFs */
+	bnx2x_vf_flr_clnup(bp, NULL);
+}
+
 /* IOV global initialization routines */
 void bnx2x_iov_init_dq(struct bnx2x *bp)
 {