Diffstat (limited to 'drivers/infiniband/hw/ocrdma/ocrdma_hw.c')
-rw-r--r--	drivers/infiniband/hw/ocrdma/ocrdma_hw.c	312
1 file changed, 271 insertions(+), 41 deletions(-)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 638bff1ffc6c..0c9e95909a64 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -734,6 +734,9 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
 		break;
 	}
 
+	if (type < OCRDMA_MAX_ASYNC_ERRORS)
+		atomic_inc(&dev->async_err_stats[type]);
+
 	if (qp_event) {
 		if (qp->ibqp.event_handler)
 			qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
@@ -831,20 +834,20 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
 	return 0;
 }
 
-static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
-				       struct ocrdma_cq *cq)
+static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+				struct ocrdma_cq *cq, bool sq)
 {
-	unsigned long flags;
 	struct ocrdma_qp *qp;
-	bool buddy_cq_found = false;
-	/* Go through list of QPs in error state which are using this CQ
-	 * and invoke its callback handler to trigger CQE processing for
-	 * error/flushed CQE. It is rare to find more than few entries in
-	 * this list as most consumers stops after getting error CQE.
-	 * List is traversed only once when a matching buddy cq found for a QP.
-	 */
-	spin_lock_irqsave(&dev->flush_q_lock, flags);
-	list_for_each_entry(qp, &cq->sq_head, sq_entry) {
+	struct list_head *cur;
+	struct ocrdma_cq *bcq = NULL;
+	struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head);
+
+	list_for_each(cur, head) {
+		if (sq)
+			qp = list_entry(cur, struct ocrdma_qp, sq_entry);
+		else
+			qp = list_entry(cur, struct ocrdma_qp, rq_entry);
+
 		if (qp->srq)
 			continue;
 		/* if wq and rq share the same cq, than comp_handler
@@ -856,19 +859,41 @@ static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
 		 * if completion came on rq, sq's cq is buddy cq.
 		 */
 		if (qp->sq_cq == cq)
-			cq = qp->rq_cq;
+			bcq = qp->rq_cq;
 		else
-			cq = qp->sq_cq;
-		buddy_cq_found = true;
-		break;
+			bcq = qp->sq_cq;
+		return bcq;
 	}
+	return NULL;
+}
+
+static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+				       struct ocrdma_cq *cq)
+{
+	unsigned long flags;
+	struct ocrdma_cq *bcq = NULL;
+
+	/* Go through list of QPs in error state which are using this CQ
+	 * and invoke its callback handler to trigger CQE processing for
+	 * error/flushed CQE. It is rare to find more than few entries in
+	 * this list as most consumers stops after getting error CQE.
+	 * List is traversed only once when a matching buddy cq found for a QP.
+	 */
+	spin_lock_irqsave(&dev->flush_q_lock, flags);
+	/* Check if buddy CQ is present.
+	 * true - Check for SQ CQ
+	 * false - Check for RQ CQ
+	 */
+	bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
+	if (bcq == NULL)
+		bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
 	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
-	if (buddy_cq_found == false)
-		return;
-	if (cq->ibcq.comp_handler) {
-		spin_lock_irqsave(&cq->comp_handler_lock, flags);
-		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+
+	/* if there is valid buddy cq, look for its completion handler */
+	if (bcq && bcq->ibcq.comp_handler) {
+		spin_lock_irqsave(&bcq->comp_handler_lock, flags);
+		(*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
+		spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
 	}
 }
 
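The refactor above splits the list walk into a helper that returns the buddy CQ (checking the SQ list first, then the RQ list) and dispatches the completion handler on the returned bcq instead of overwriting the cq argument in place. The helper also trades list_for_each_entry() for the raw list_for_each() + list_entry() pair, because the embedded member to walk (sq_entry vs rq_entry) is now chosen at run time by the sq flag, which the type-safe macro cannot express. A minimal userspace sketch of that pattern, with reduced structs and a hypothetical first_qp() helper that is not driver code:

#include <stddef.h>
#include <stdio.h>

/* Reduced re-implementation of the <linux/list.h> pieces used above;
 * for illustration only. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

struct qp {
	int id;
	struct list_head sq_entry;	/* node on a CQ's sq_head list */
	struct list_head rq_entry;	/* node on a CQ's rq_head list */
};

/* The member is picked per call, which is exactly why the patch uses
 * list_for_each() + list_entry() rather than list_for_each_entry(),
 * whose member argument is fixed at the macro site. */
static struct qp *first_qp(struct list_head *head, int sq)
{
	struct list_head *cur;

	list_for_each(cur, head)
		return sq ? list_entry(cur, struct qp, sq_entry)
			  : list_entry(cur, struct qp, rq_entry);
	return NULL;
}

int main(void)
{
	struct qp a = { .id = 7 };
	struct list_head head = { &a.sq_entry, &a.sq_entry };

	a.sq_entry.next = &head;
	a.sq_entry.prev = &head;
	printf("qp id: %d\n", first_qp(&head, 1)->id);
	return 0;
}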
@@ -935,6 +960,7 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
 
 	} while (budget);
 
+	eq->aic_obj.eq_intr_cnt++;
 	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
 	return IRQ_HANDLED;
 }
@@ -1050,6 +1076,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
 	attr->max_pd =
 	    (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
 	    OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
+	attr->max_dpp_pds =
+	    (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
 	attr->max_qp =
 	    (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
 	    OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
@@ -1396,6 +1425,122 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
 	return status;
 }
 
+
+static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
+{
+	int status = -ENOMEM;
+	size_t pd_bitmap_size;
+	struct ocrdma_alloc_pd_range *cmd;
+	struct ocrdma_alloc_pd_range_rsp *rsp;
+
+	/* Pre allocate the DPP PDs */
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+	cmd->pd_count = dev->attr.max_dpp_pds;
+	cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+
+	if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
+		dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
+				OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
+		dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
+				OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+		dev->pd_mgr->max_dpp_pd = rsp->pd_count;
+		pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+		dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
+						     GFP_KERNEL);
+	}
+	kfree(cmd);
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+	if (rsp->pd_count) {
+		dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
+				OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+		dev->pd_mgr->max_normal_pd = rsp->pd_count;
+		pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+		dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
+						      GFP_KERNEL);
+	}
+
+	if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
+		/* Enable PD resource manager */
+		dev->pd_mgr->pd_prealloc_valid = true;
+	} else {
+		return -ENOMEM;
+	}
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
+{
+	struct ocrdma_dealloc_pd_range *cmd;
+
+	/* return normal PDs to firmware */
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
+	if (!cmd)
+		goto mbx_err;
+
+	if (dev->pd_mgr->max_normal_pd) {
+		cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
+		cmd->pd_count = dev->pd_mgr->max_normal_pd;
+		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	}
+
+	if (dev->pd_mgr->max_dpp_pd) {
+		kfree(cmd);
+		/* return DPP PDs to firmware */
+		cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
+					  sizeof(*cmd));
+		if (!cmd)
+			goto mbx_err;
+
+		cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
+		cmd->pd_count = dev->pd_mgr->max_dpp_pd;
+		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	}
+mbx_err:
+	kfree(cmd);
+}
+
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
+{
+	int status;
+
+	dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
+			      GFP_KERNEL);
+	if (!dev->pd_mgr) {
+		pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
+		return;
+	}
+	status = ocrdma_mbx_alloc_pd_range(dev);
+	if (status) {
+		pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
+		       __func__, dev->id);
+	}
+}
+
+static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
+{
+	ocrdma_mbx_dealloc_pd_range(dev);
+	kfree(dev->pd_mgr->pd_norm_bitmap);
+	kfree(dev->pd_mgr->pd_dpp_bitmap);
+	kfree(dev->pd_mgr);
+}
+
 static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
 			       int *num_pages, int *page_size)
 {
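The two PD ranges reserved above are tracked with plain unsigned long bitmaps sized as BITS_TO_LONGS(count) * sizeof(long), one bit per PD; the alloc/free paths that actually consume pd_norm_bitmap and pd_dpp_bitmap live outside this file. A rough userspace sketch of how such a bitmap hands out IDs relative to a range start (the helper name and layout are illustrative, not the driver's):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Hypothetical allocator over a preallocated PD range: find the first
 * clear bit, set it, and return start + offset as the PD id. */
static int pd_range_alloc(unsigned long *bm, unsigned int count,
			  unsigned int start)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		unsigned long mask = 1UL << (i % BITS_PER_LONG);

		if (!(bm[i / BITS_PER_LONG] & mask)) {
			bm[i / BITS_PER_LONG] |= mask;
			return (int)(start + i);
		}
	}
	return -1;			/* range exhausted */
}

int main(void)
{
	unsigned int count = 70;	/* spans two longs on LP64 */
	unsigned long *bm = calloc(BITS_TO_LONGS(count), sizeof(long));

	printf("pd id: %d\n", pd_range_alloc(bm, count, 512));
	printf("pd id: %d\n", pd_range_alloc(bm, count, 512));
	free(bm);
	return 0;
}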
@@ -1896,8 +2041,9 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
 {
 	bool found;
 	unsigned long flags;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
-	spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
+	spin_lock_irqsave(&dev->flush_q_lock, flags);
 	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
 	if (!found)
 		list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
@@ -1906,7 +2052,7 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
 		if (!found)
 			list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
 	}
-	spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
+	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 }
 
 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
@@ -1972,7 +2118,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
 	int status;
 	u32 len, hw_pages, hw_page_size;
 	dma_addr_t pa;
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	u32 max_wqe_allocated;
 	u32 max_sges = attrs->cap.max_send_sge;
@@ -2027,7 +2174,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
 	int status;
 	u32 len, hw_pages, hw_page_size;
 	dma_addr_t pa = 0;
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
 
@@ -2086,7 +2234,8 @@ static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
 static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
 					struct ocrdma_qp *qp)
 {
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	dma_addr_t pa = 0;
 	int ird_page_size = dev->attr.ird_page_size;
@@ -2157,8 +2306,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
 {
 	int status = -ENOMEM;
 	u32 flags = 0;
-	struct ocrdma_dev *dev = qp->dev;
 	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	struct ocrdma_cq *cq;
 	struct ocrdma_create_qp_req *cmd;
@@ -2281,11 +2430,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 	union ib_gid sgid, zgid;
 	u32 vlan_id;
 	u8 mac_addr[6];
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
 	if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
 		return -EINVAL;
-	if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
-		ocrdma_init_service_level(qp->dev);
+	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
+		ocrdma_init_service_level(dev);
 	cmd->params.tclass_sq_psn |=
 	    (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
 	cmd->params.rnt_rc_sl_fl |=
@@ -2296,7 +2446,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 	cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
 	memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
 	       sizeof(cmd->params.dgid));
-	status = ocrdma_query_gid(&qp->dev->ibdev, 1,
+	status = ocrdma_query_gid(&dev->ibdev, 1,
 			ah_attr->grh.sgid_index, &sgid);
 	if (status)
 		return status;
@@ -2307,7 +2457,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 
 	qp->sgid_idx = ah_attr->grh.sgid_index;
 	memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
-	ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
+	status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
+	if (status)
+		return status;
 	cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
 				(mac_addr[2] << 16) | (mac_addr[3] << 24);
 	/* convert them to LE format. */
@@ -2320,7 +2472,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 				vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
 		cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
 		cmd->params.rnt_rc_sl_fl |=
-			(qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
+			(dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
 	}
 	return 0;
 }
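Two behavioral points in the av_params hunks above: ocrdma_resolve_dmac() failures now propagate to the caller instead of being ignored, and the service level is read through the looked-up dev rather than the dropped qp->dev back-pointer. The MAC packing itself is unchanged; a standalone sketch of the byte layout written into the two command words (illustrative helper, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Pack a 6-byte MAC the way the command fields above are filled:
 * bytes 0..3 into dmac_b0_to_b3 (b0 in the low byte), bytes 4..5 into
 * the low half of vlan_dmac_b4_to_b5. */
static void pack_dmac(const uint8_t mac[6], uint32_t *b0_to_b3,
		      uint32_t *b4_to_b5)
{
	*b0_to_b3 = mac[0] | (mac[1] << 8) | (mac[2] << 16) |
		    ((uint32_t)mac[3] << 24);
	*b4_to_b5 = mac[4] | (mac[5] << 8);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x90, 0xfa, 0x12, 0x34, 0x56 };
	uint32_t lo, hi;

	pack_dmac(mac, &lo, &hi);
	printf("dmac_b0_to_b3=0x%08x vlan_dmac_b4_to_b5=0x%08x\n", lo, hi);
	return 0;
}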
@@ -2330,6 +2482,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
 			       struct ib_qp_attr *attrs, int attr_mask)
 {
 	int status = 0;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2347,12 +2500,12 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
 		return status;
 	} else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
 		/* set the default mac address for UD, GSI QPs */
-		cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
-			(qp->dev->nic_info.mac_addr[1] << 8) |
-			(qp->dev->nic_info.mac_addr[2] << 16) |
-			(qp->dev->nic_info.mac_addr[3] << 24);
-		cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
-			(qp->dev->nic_info.mac_addr[5] << 8);
+		cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
+			(dev->nic_info.mac_addr[1] << 8) |
+			(dev->nic_info.mac_addr[2] << 16) |
+			(dev->nic_info.mac_addr[3] << 24);
+		cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
+			(dev->nic_info.mac_addr[5] << 8);
 	}
 	if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
 	    attrs->en_sqd_async_notify) {
@@ -2409,7 +2562,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
 		cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
 	}
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-		if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
+		if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
 			status = -EINVAL;
 			goto pmtu_err;
 		}
@@ -2417,7 +2570,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
 		cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
 	}
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-		if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
+		if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
 			status = -EINVAL;
 			goto pmtu_err;
 		}
@@ -2870,6 +3023,82 @@ done:
 	return status;
 }
 
+static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+				 int num)
+{
+	int i, status = -ENOMEM;
+	struct ocrdma_modify_eqd_req *cmd;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
+	if (!cmd)
+		return status;
+
+	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
+			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+	cmd->cmd.num_eq = num;
+	for (i = 0; i < num; i++) {
+		cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
+		cmd->cmd.set_eqd[i].phase = 0;
+		cmd->cmd.set_eqd[i].delay_multiplier =
+				(eq[i].aic_obj.prev_eqd * 65) / 100;
+	}
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+			     int num)
+{
+	int num_eqs, i = 0;
+
+	if (num > 8) {
+		while (num) {
+			num_eqs = min(num, 8);
+			ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
+			i += num_eqs;
+			num -= num_eqs;
+		}
+	} else {
+		ocrdma_mbx_modify_eqd(dev, eq, num);
+	}
+	return 0;
+}
+
+void ocrdma_eqd_set_task(struct work_struct *work)
+{
+	struct ocrdma_dev *dev =
+		container_of(work, struct ocrdma_dev, eqd_work.work);
+	struct ocrdma_eq *eq = 0;
+	int i, num = 0, status = -EINVAL;
+	u64 eq_intr;
+
+	for (i = 0; i < dev->eq_cnt; i++) {
+		eq = &dev->eq_tbl[i];
+		if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
+			eq_intr = eq->aic_obj.eq_intr_cnt -
+				  eq->aic_obj.prev_eq_intr_cnt;
+			if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
+			    (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
+				eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
+				num++;
+			} else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
+				   (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
+				eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
+				num++;
+			}
+		}
+		eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
+	}
+
+	if (num)
+		status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
+	schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
+}
+
 int ocrdma_init_hw(struct ocrdma_dev *dev)
 {
 	int status;
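The periodic work item above implements a simple two-point hysteresis: once a second it compares each EQ's interrupt delta against high/low thresholds and snaps prev_eqd between its minimum and maximum, then re-arms itself; the mailbox command programs each changed EQ with delay_multiplier = prev_eqd * 65 / 100. The decision step, distilled into a pure function (the EQ_* constants exist in the driver headers, but the values below are placeholders, not the real ones):

/* Placeholder values; the driver's real constants live in its headers. */
enum { EQ_AIC_MIN_EQD = 0, EQ_AIC_MAX_EQD = 20 };
enum { EQ_INTR_PER_SEC_THRSH_LOW = 100, EQ_INTR_PER_SEC_THRSH_HI = 100000 };

/* One step of the hysteresis: only flip when sitting at the opposite
 * extreme, otherwise hold the current delay. */
static int next_eqd(unsigned long intr_per_sec, int prev_eqd)
{
	if (intr_per_sec > EQ_INTR_PER_SEC_THRSH_HI &&
	    prev_eqd == EQ_AIC_MIN_EQD)
		return EQ_AIC_MAX_EQD;	/* interrupt storm: coalesce hard */
	if (intr_per_sec < EQ_INTR_PER_SEC_THRSH_LOW &&
	    prev_eqd == EQ_AIC_MAX_EQD)
		return EQ_AIC_MIN_EQD;	/* mostly idle: favor latency */
	return prev_eqd;
}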
@@ -2915,6 +3144,7 @@ qpeq_err:
 
 void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
 {
+	ocrdma_free_pd_pool(dev);
 	ocrdma_mbx_delete_ah_tbl(dev);
 
 	/* cleanup the eqs */