Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_rdma.c')
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed_rdma.c	243
1 file changed, 212 insertions, 31 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index a411f9c702a1..7873d6dfd91f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -134,28 +134,40 @@ static bool qed_bmap_is_empty(struct qed_bmap *bmap)
 	return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
 }
 
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
 {
 	/* First sb id for RoCE is after all the l2 sb */
 	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
 }
 
-static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
-			  struct qed_ptt *p_ptt,
-			  struct qed_rdma_start_in_params *params)
+int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_rdma_info *p_rdma_info;
-	u32 num_cons, num_tasks;
-	int rc = -ENOMEM;
 
-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
-
-	/* Allocate a struct with current pf rdma info */
 	p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
 	if (!p_rdma_info)
-		return rc;
+		return -ENOMEM;
+
+	spin_lock_init(&p_rdma_info->lock);
 
 	p_hwfn->p_rdma_info = p_rdma_info;
+	return 0;
+}
+
+void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
+{
+	kfree(p_hwfn->p_rdma_info);
+	p_hwfn->p_rdma_info = NULL;
+}
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
+{
+	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+	u32 num_cons, num_tasks;
+	int rc = -ENOMEM;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
 	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
 		p_rdma_info->proto = PROTOCOLID_IWARP;
 	else
@@ -183,7 +195,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 	/* Allocate a struct with device params and fill it */
 	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
 	if (!p_rdma_info->dev)
-		goto free_rdma_info;
+		return rc;
 
 	/* Allocate a struct with port params and fill it */
 	p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
@@ -228,7 +240,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 				 num_cons, "Toggle");
 	if (rc) {
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
-			   "Failed to allocate toogle bits, rc = %d\n", rc);
+			   "Failed to allocate toggle bits, rc = %d\n", rc);
 		goto free_cq_map;
 	}
 
@@ -259,15 +271,29 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 		goto free_cid_map;
 	}
 
+	/* Allocate bitmap for srqs */
+	p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn);
+	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
+				 p_rdma_info->num_srqs, "SRQ");
+	if (rc) {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "Failed to allocate srq bitmap, rc = %d\n", rc);
+		goto free_real_cid_map;
+	}
+
 	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
 		rc = qed_iwarp_alloc(p_hwfn);
 
 	if (rc)
-		goto free_cid_map;
+		goto free_srq_map;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
 	return 0;
 
+free_srq_map:
+	kfree(p_rdma_info->srq_map.bitmap);
+free_real_cid_map:
+	kfree(p_rdma_info->real_cid_map.bitmap);
 free_cid_map:
 	kfree(p_rdma_info->cid_map.bitmap);
 free_tid_map:
@@ -284,8 +310,6 @@ free_rdma_port:
 	kfree(p_rdma_info->port);
 free_rdma_dev:
 	kfree(p_rdma_info->dev);
-free_rdma_info:
-	kfree(p_rdma_info);
 
 	return rc;
 }
@@ -351,11 +375,11 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
 	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
 	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
+	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
+	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
 
 	kfree(p_rdma_info->port);
 	kfree(p_rdma_info->dev);
-
-	kfree(p_rdma_info);
 }
 
 static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
@@ -431,6 +455,12 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
 	if (cdev->rdma_max_sge)
 		dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
 
+	dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE;
+	if (p_hwfn->cdev->rdma_max_srq_sge) {
+		dev->max_srq_sge = min_t(u32,
+					 p_hwfn->cdev->rdma_max_srq_sge,
+					 dev->max_srq_sge);
+	}
 	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
 
 	dev->max_inline = (cdev->rdma_max_inline) ?
@@ -474,6 +504,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
 	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
 	dev->max_pkey = QED_RDMA_MAX_P_KEY;
 
+	dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
+	dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
 	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
 					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
 	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
@@ -655,8 +687,6 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
 
-	spin_lock_init(&p_hwfn->p_rdma_info->lock);
-
 	qed_rdma_init_devinfo(p_hwfn, params);
 	qed_rdma_init_port(p_hwfn);
 	qed_rdma_init_events(p_hwfn, params);
@@ -682,7 +712,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
 	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
 }
 
-int qed_rdma_stop(void *rdma_cxt)
+static int qed_rdma_stop(void *rdma_cxt)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 	struct rdma_close_func_ramrod_data *p_ramrod;
@@ -703,7 +733,7 @@ int qed_rdma_stop(void *rdma_cxt)
 	/* Disable RoCE search */
 	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
 	p_hwfn->b_rdma_enabled_in_prs = false;
-
+	p_hwfn->p_rdma_info->active = 0;
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
 
 	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
@@ -1212,7 +1242,8 @@ qed_rdma_create_qp(void *rdma_cxt,
 	u8 max_stats_queues;
 	int rc;
 
-	if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
+	if (!rdma_cxt || !in_params || !out_params ||
+	    !p_hwfn->p_rdma_info->active) {
 		DP_ERR(p_hwfn->cdev,
 		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
 		       rdma_cxt, in_params, out_params);
@@ -1484,15 +1515,13 @@ qed_rdma_register_tid(void *rdma_cxt,
 	case QED_RDMA_TID_FMR:
 		tid_type = RDMA_TID_FMR;
 		break;
-	case QED_RDMA_TID_MW_TYPE1:
-		tid_type = RDMA_TID_MW_TYPE1;
-		break;
-	case QED_RDMA_TID_MW_TYPE2A:
-		tid_type = RDMA_TID_MW_TYPE2A;
+	case QED_RDMA_TID_MW:
+		tid_type = RDMA_TID_MW;
 		break;
 	default:
 		rc = -EINVAL;
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return rc;
 	}
 	SET_FIELD(p_ramrod->flags1,
@@ -1520,7 +1549,6 @@ qed_rdma_register_tid(void *rdma_cxt,
 			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
 		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
 			       params->dif_error_addr);
-		DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
 	}
 
 	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
@@ -1628,12 +1656,161 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
 	return QED_LEADING_HWFN(cdev);
 }
 
+static int qed_rdma_modify_srq(void *rdma_cxt,
+			       struct qed_rdma_modify_srq_in_params *in_params)
+{
+	struct rdma_srq_modify_ramrod_data *p_ramrod;
+	struct qed_sp_init_data init_data = {};
+	struct qed_hwfn *p_hwfn = rdma_cxt;
+	struct qed_spq_entry *p_ent;
+	u16 opaque_fid;
+	int rc;
+
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 RDMA_RAMROD_MODIFY_SRQ,
+				 p_hwfn->p_rdma_info->proto, &init_data);
+	if (rc)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.rdma_modify_srq;
+	p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
+	opaque_fid = p_hwfn->hw_info.opaque_fid;
+	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
+	p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit);
+
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+	if (rc)
+		return rc;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x",
+		   in_params->srq_id);
+
+	return rc;
+}
+
+static int
+qed_rdma_destroy_srq(void *rdma_cxt,
+		     struct qed_rdma_destroy_srq_in_params *in_params)
+{
+	struct rdma_srq_destroy_ramrod_data *p_ramrod;
+	struct qed_sp_init_data init_data = {};
+	struct qed_hwfn *p_hwfn = rdma_cxt;
+	struct qed_spq_entry *p_ent;
+	struct qed_bmap *bmap;
+	u16 opaque_fid;
+	int rc;
+
+	opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+	init_data.opaque_fid = opaque_fid;
+	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 RDMA_RAMROD_DESTROY_SRQ,
+				 p_hwfn->p_rdma_info->proto, &init_data);
+	if (rc)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
+	p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
+	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
+
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+	if (rc)
+		return rc;
+
+	bmap = &p_hwfn->p_rdma_info->srq_map;
+
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x",
+		   in_params->srq_id);
+
+	return rc;
+}
+
+static int
+qed_rdma_create_srq(void *rdma_cxt,
+		    struct qed_rdma_create_srq_in_params *in_params,
+		    struct qed_rdma_create_srq_out_params *out_params)
+{
+	struct rdma_srq_create_ramrod_data *p_ramrod;
+	struct qed_sp_init_data init_data = {};
+	struct qed_hwfn *p_hwfn = rdma_cxt;
+	enum qed_cxt_elem_type elem_type;
+	struct qed_spq_entry *p_ent;
+	u16 opaque_fid, srq_id;
+	struct qed_bmap *bmap;
+	u32 returned_id;
+	int rc;
+
+	bmap = &p_hwfn->p_rdma_info->srq_map;
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+	if (rc) {
+		DP_NOTICE(p_hwfn, "failed to allocate srq id\n");
+		return rc;
+	}
+
+	elem_type = QED_ELEM_SRQ;
+	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
+	if (rc)
+		goto err;
+	/* returned id is no greater than u16 */
+	srq_id = (u16)returned_id;
+	opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+	opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.opaque_fid = opaque_fid;
+	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 RDMA_RAMROD_CREATE_SRQ,
+				 p_hwfn->p_rdma_info->proto, &init_data);
+	if (rc)
+		goto err;
+
+	p_ramrod = &p_ent->ramrod.rdma_create_srq;
+	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
+	p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
+	p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
+	p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
+	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
+	p_ramrod->page_size = cpu_to_le16(in_params->page_size);
+	DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
+
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+	if (rc)
+		goto err;
+
+	out_params->srq_id = srq_id;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "SRQ created Id = %x\n", out_params->srq_id);
+
+	return rc;
+
+err:
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	qed_bmap_release_id(p_hwfn, bmap, returned_id);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+	return rc;
+}
+
 bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
 {
 	bool result;
 
-	/* if rdma info has not been allocated, naturally there are no qps */
-	if (!p_hwfn->p_rdma_info)
+	/* if rdma wasn't activated yet, naturally there are no qps */
+	if (!p_hwfn->p_rdma_info->active)
 		return false;
 
 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
@@ -1679,7 +1856,7 @@ static int qed_rdma_start(void *rdma_cxt,
 	if (!p_ptt)
 		goto err;
 
-	rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
+	rc = qed_rdma_alloc(p_hwfn);
 	if (rc)
 		goto err1;
 
@@ -1688,6 +1865,7 @@ static int qed_rdma_start(void *rdma_cxt,
 		goto err2;
 
 	qed_ptt_release(p_hwfn, p_ptt);
+	p_hwfn->p_rdma_info->active = 1;
 
 	return rc;
 
@@ -1773,6 +1951,9 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
 	.rdma_free_tid = &qed_rdma_free_tid,
 	.rdma_register_tid = &qed_rdma_register_tid,
 	.rdma_deregister_tid = &qed_rdma_deregister_tid,
+	.rdma_create_srq = &qed_rdma_create_srq,
+	.rdma_modify_srq = &qed_rdma_modify_srq,
+	.rdma_destroy_srq = &qed_rdma_destroy_srq,
 	.ll2_acquire_connection = &qed_ll2_acquire_connection,
 	.ll2_establish_connection = &qed_ll2_establish_connection,
 	.ll2_terminate_connection = &qed_ll2_terminate_connection,
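
For reference, a minimal sketch (not part of this commit) of how an upper-layer driver such as qedr might exercise the three SRQ callbacks exported through qed_rdma_ops above. The callback names and parameter fields are taken from the diff; the helper name, the header include, and the surrounding setup (how the ops pointer, rdma_cxt, SRQ PBL and producer-pair buffer are obtained) are assumptions made for illustration only.

/* Illustrative sketch only -- not part of the commit. Assumes the caller
 * already obtained the qed_rdma_ops table and the per-function RDMA context
 * (rdma_cxt), as the qedr driver does via qed_rdma_if.h, and that create_in
 * describes an already-allocated SRQ PBL and producer pair.
 */
#include <linux/qed/qed_rdma_if.h>

static int example_srq_round_trip(const struct qed_rdma_ops *ops, void *rdma_cxt,
				  struct qed_rdma_create_srq_in_params *create_in)
{
	struct qed_rdma_create_srq_out_params create_out = {};
	struct qed_rdma_modify_srq_in_params modify_in = {};
	struct qed_rdma_destroy_srq_in_params destroy_in = {};
	int rc;

	/* Reserves an id from srq_map and posts RDMA_RAMROD_CREATE_SRQ */
	rc = ops->rdma_create_srq(rdma_cxt, create_in, &create_out);
	if (rc)
		return rc;

	/* Arm a WQE limit on the new SRQ (RDMA_RAMROD_MODIFY_SRQ) */
	modify_in.srq_id = create_out.srq_id;
	modify_in.wqe_limit = 1;	/* example value */
	rc = ops->rdma_modify_srq(rdma_cxt, &modify_in);

	/* Tear the SRQ down again; releases the id back to srq_map */
	destroy_in.srq_id = create_out.srq_id;
	ops->rdma_destroy_srq(rdma_cxt, &destroy_in);

	return rc;
}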
