path: root/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
author	Ariel Elior <ariele@broadcom.com>	2013-01-01 00:22:34 -0500
committer	David S. Miller <davem@davemloft.net>	2013-01-02 04:45:06 -0500
commit	8ca5e17e58c953b9a9dbd4974c554b25c6d70b1a (patch)
tree	bfb964684236920270f74b165c69b55eebd90def	/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
parent	fd1fc79dd6deb88ebf38ae9673190da999b3209f (diff)
bnx2x: Support PF driver handling of a VF acquire request

When a VF is probed by the VF driver, the VF driver sends an 'acquire'
request over the VF <-> PF channel for the resources it needs to operate
(interrupts, queues, etc). The PF driver ratifies the request and
allocates the resources, responds with the maximum values it will allow
the VF to acquire, or fails the request entirely if there is a problem.

Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c')
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c	199
1 file changed, 199 insertions(+), 0 deletions(-)
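For orientation before the diff: the sketch below shows, under stated assumptions, how the PF side might act on an incoming resource request. bnx2x_vf_acquire(), bnx2x_vf_max_queue_cnt(), struct vf_pf_resc_request and the vf_sb_count()/vf_mac_rules_cnt()/vf_vlan_rules_cnt() accessors are taken from this patch; the handler function itself is hypothetical, since the VF <-> PF mailbox plumbing is not part of this diff.

/* Hypothetical PF-side sketch - not part of this patch. Assumes the
 * request has already been copied out of the VF's mailbox into 'req'.
 */
static int bnx2x_sketch_handle_acquire(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct vf_pf_resc_request *req)
{
	int rc;

	/* try to ratify the request and commit the resources to the VF */
	rc = bnx2x_vf_acquire(bp, vf, req);
	if (rc == -ENOMEM) {
		/* request exceeds the per-VF maxima: report the largest
		 * values the PF will allow so the VF can retry with them
		 */
		req->num_rxqs = bnx2x_vf_max_queue_cnt(bp, vf);
		req->num_txqs = bnx2x_vf_max_queue_cnt(bp, vf);
		req->num_sbs = vf_sb_count(vf);
		req->num_mac_filters = vf_mac_rules_cnt(vf);
		req->num_vlan_filters = vf_vlan_rules_cnt(vf);
	}
	return rc;	/* 0, -ENOMEM (retry with maxima), or -EINVAL */
}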
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 9233117ea506..de42f665c1fa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -593,6 +593,63 @@ alloc_mem_err:
 	return -ENOMEM;
 }
 
+static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			   struct bnx2x_vf_queue *q)
+{
+	u8 cl_id = vfq_cl_id(vf, q);
+	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+	unsigned long q_type = 0;
+
+	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+	/* Queue State object */
+	bnx2x_init_queue_obj(bp, &q->sp_obj,
+			     cl_id, &q->cid, 1, func_id,
+			     bnx2x_vf_sp(bp, vf, q_data),
+			     bnx2x_vf_sp_map(bp, vf, q_data),
+			     q_type);
+
+	DP(BNX2X_MSG_IOV,
+	   "initialized vf %d's queue object. func id set to %d\n",
+	   vf->abs_vfid, q->sp_obj.func_id);
+
+	/* mac/vlan objects are per queue, but only those
+	 * that belong to the leading queue are initialized
+	 */
+	if (vfq_is_leading(q)) {
+		/* mac */
+		bnx2x_init_mac_obj(bp, &q->mac_obj,
+				   cl_id, q->cid, func_id,
+				   bnx2x_vf_sp(bp, vf, mac_rdata),
+				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
+				   BNX2X_FILTER_MAC_PENDING,
+				   &vf->filter_state,
+				   BNX2X_OBJ_TYPE_RX_TX,
+				   &bp->macs_pool);
+		/* vlan */
+		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+				    cl_id, q->cid, func_id,
+				    bnx2x_vf_sp(bp, vf, vlan_rdata),
+				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+				    BNX2X_FILTER_VLAN_PENDING,
+				    &vf->filter_state,
+				    BNX2X_OBJ_TYPE_RX_TX,
+				    &bp->vlans_pool);
+
+		/* mcast */
+		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+				     q->cid, func_id, func_id,
+				     bnx2x_vf_sp(bp, vf, mcast_rdata),
+				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+				     BNX2X_FILTER_MCAST_PENDING,
+				     &vf->filter_state,
+				     BNX2X_OBJ_TYPE_RX_TX);
+
+		vf->leading_rss = cl_id;
+	}
+}
+
 /* called by bnx2x_nic_load */
 int bnx2x_iov_nic_init(struct bnx2x *bp)
 {
@@ -940,3 +997,145 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
 		}
 	}
 }
+
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
+		     BNX2X_VF_MAX_QUEUES);
+}
+
+static
+int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			    struct vf_pf_resc_request *req_resc)
+{
+	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+
+	return ((req_resc->num_rxqs <= rxq_cnt) &&
+		(req_resc->num_txqs <= txq_cnt) &&
+		(req_resc->num_sbs <= vf_sb_count(vf)) &&
+		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
+		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
+}
+
+/* CORE VF API */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		     struct vf_pf_resc_request *resc)
+{
+	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
+			  BNX2X_CIDS_PER_VF;
+
+	union cdu_context *base_cxt = (union cdu_context *)
+		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+		(base_vf_cid & (ILT_PAGE_CIDS-1));
+	int i;
+
+	/* if state is 'acquired' the VF was not released or FLR'd, in
+	 * this case the returned resources match the already acquired
+	 * resources. Verify that the requested numbers do not exceed
+	 * the already acquired numbers.
+	 */
+	if (vf->state == VF_ACQUIRED) {
+		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
+		   vf->abs_vfid);
+
+		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
+				  vf->abs_vfid);
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	/* Otherwise vf state must be 'free' or 'reset' */
+	if (vf->state != VF_FREE && vf->state != VF_RESET) {
+		BNX2X_ERR("VF[%d] Cannot acquire a VF with state %d\n",
+			  vf->abs_vfid, vf->state);
+		return -EINVAL;
+	}
+
+	/* static allocation:
+	 * the global maximum numbers are fixed per VF. Fail the request
+	 * if the requested numbers exceed these globals
+	 */
+	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+		DP(BNX2X_MSG_IOV,
+		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
+		/* set the max resource in the vf */
+		return -ENOMEM;
+	}
+
+	/* Set resources counters - 0 request means max available */
+	vf_sb_count(vf) = resc->num_sbs;
+	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+	if (resc->num_mac_filters)
+		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
+	if (resc->num_vlan_filters)
+		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
+
+	DP(BNX2X_MSG_IOV,
+	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
+	   vf_sb_count(vf), vf_rxq_count(vf),
+	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
+	   vf_vlan_rules_cnt(vf));
+
+	/* Initialize the queues */
+	if (!vf->vfqs) {
+		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
+		return -EINVAL;
+	}
+
+	for_each_vfq(vf, i) {
+		struct bnx2x_vf_queue *q = vfq_get(vf, i);
+
+		if (!q) {
+			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
+			return -EINVAL;
+		}
+
+		q->index = i;
+		q->cxt = &((base_cxt + i)->eth);
+		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
+
+		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
+		   vf->abs_vfid, i, q->index, q->cid, q->cxt);
+
+		/* init SP objects */
+		bnx2x_vfq_init(bp, vf, q);
+	}
+	vf->state = VF_ACQUIRED;
+	return 0;
+}
+
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			      enum channel_tlvs tlv)
+{
+	/* lock the channel */
+	mutex_lock(&vf->op_mutex);
+
+	/* record the locking op */
+	vf->op_current = tlv;
+
+	/* log the lock */
+	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
+	   vf->abs_vfid, tlv);
+}
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				enum channel_tlvs expected_tlv)
+{
+	WARN(expected_tlv != vf->op_current,
+	     "lock mismatch: expected %d found %d", expected_tlv,
+	     vf->op_current);
+
+	/* unlock the channel */
+	mutex_unlock(&vf->op_mutex);
+
+	/* log the unlock */
+	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
+	   vf->abs_vfid, vf->op_current);
+
+	/* clear the locking op */
+	vf->op_current = CHANNEL_TLV_NONE;
+}
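
Finally, a caller-side sketch of the locking discipline that bnx2x_lock_vf_pf_channel()/bnx2x_unlock_vf_pf_channel() establish: an operation takes the channel with the TLV that identifies it and must release it with the same TLV, otherwise the WARN() fires. The wrapper function and its use of CHANNEL_TLV_ACQUIRE are illustrative assumptions; only the two helpers come from this patch.

/* Illustrative wrapper, not part of this patch. Assumes
 * CHANNEL_TLV_ACQUIRE is the enum channel_tlvs value that identifies
 * the acquire operation on this VF's channel.
 */
static int bnx2x_sketch_vf_channel_op(struct bnx2x *bp,
				      struct bnx2x_virtf *vf)
{
	int rc;

	/* serialize against other ops on this VF and record the owner TLV */
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_ACQUIRE);

	rc = 0;		/* ... do the work for this operation here ... */

	/* unlocking with a different TLV than the one that took the lock
	 * trips the WARN() in bnx2x_unlock_vf_pf_channel()
	 */
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_ACQUIRE);
	return rc;
}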