author     Mitesh Ahuja <mitesh.ahuja@emulex.com>   2014-12-18 03:43:06 -0500
committer  Roland Dreier <roland@purestorage.com>   2015-02-18 11:31:04 -0500
commit     d2b8f7b1f87948f5b4198d9ca52733eb9ff9e4be (patch)
tree       a6ef53f88fec84ec4aa9a9dc9b7018bdb95a7a60 /drivers/infiniband
parent     b4dbe8d52d08e5ed60c9d01efbcd7b8694cf4b9f (diff)
RDMA/ocrdma: remove reference of ocrdma_dev out of ocrdma_qp structure
Use the get_ocrdma_dev(ocrdma_qp->ibqp.device) helper to access the ocrdma device pointer rather than the copy cached in struct ocrdma_qp.

Signed-off-by: Mitesh Ahuja <mitesh.ahuja@emulex.com>
Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
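The helper the patch switches to is the driver's usual container_of()-based accessor: struct ocrdma_dev embeds its struct ib_device, so the device can be recovered from the generic ib_qp (or ib_pd) pointer instead of being cached in every struct ocrdma_qp. A minimal sketch of that pattern, assuming the field names visible in the diff below (ocrdma_dev.ibdev, ocrdma_qp.ibqp); the real helper lives in ocrdma.h, and ocrdma_qp_dev() here is only a hypothetical illustration:

/* Sketch only -- the actual definition is in drivers/infiniband/hw/ocrdma/ocrdma.h. */
static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct ocrdma_dev, ibdev);
}

/* Hypothetical helper (for illustration) showing how callers now derive the
 * device on demand instead of reading the removed qp->dev field.
 */
static inline struct ocrdma_dev *ocrdma_qp_dev(struct ocrdma_qp *qp)
{
        return get_ocrdma_dev(qp->ibqp.device);
}

With the device derivable on demand, the cached qp->dev field becomes redundant, which is why the diff deletes it from struct ocrdma_qp and rewrites every qp->dev user.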
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h         1
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c     44
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  18
3 files changed, 34 insertions(+), 29 deletions(-)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index b9fee0ed32d2..4dcec05e3b7f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -370,7 +370,6 @@ struct ocrdma_srq {
 
 struct ocrdma_qp {
         struct ib_qp ibqp;
-        struct ocrdma_dev *dev;
 
         u8 __iomem *sq_db;
         struct ocrdma_qp_hwq_info sq;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 189ebc7c20af..0c9e95909a64 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -2041,8 +2041,9 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
 {
         bool found;
         unsigned long flags;
+        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
-        spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
+        spin_lock_irqsave(&dev->flush_q_lock, flags);
         found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
         if (!found)
                 list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
@@ -2051,7 +2052,7 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
                 if (!found)
                         list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
         }
-        spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
+        spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 }
 
 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
@@ -2117,7 +2118,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
         int status;
         u32 len, hw_pages, hw_page_size;
         dma_addr_t pa;
-        struct ocrdma_dev *dev = qp->dev;
+        struct ocrdma_pd *pd = qp->pd;
+        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
         struct pci_dev *pdev = dev->nic_info.pdev;
         u32 max_wqe_allocated;
         u32 max_sges = attrs->cap.max_send_sge;
@@ -2172,7 +2174,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
         int status;
         u32 len, hw_pages, hw_page_size;
         dma_addr_t pa = 0;
-        struct ocrdma_dev *dev = qp->dev;
+        struct ocrdma_pd *pd = qp->pd;
+        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
         struct pci_dev *pdev = dev->nic_info.pdev;
         u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
 
@@ -2231,7 +2234,8 @@ static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
 static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
                                         struct ocrdma_qp *qp)
 {
-        struct ocrdma_dev *dev = qp->dev;
+        struct ocrdma_pd *pd = qp->pd;
+        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
         struct pci_dev *pdev = dev->nic_info.pdev;
         dma_addr_t pa = 0;
         int ird_page_size = dev->attr.ird_page_size;
@@ -2302,8 +2306,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
 {
         int status = -ENOMEM;
         u32 flags = 0;
-        struct ocrdma_dev *dev = qp->dev;
         struct ocrdma_pd *pd = qp->pd;
+        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
         struct pci_dev *pdev = dev->nic_info.pdev;
         struct ocrdma_cq *cq;
         struct ocrdma_create_qp_req *cmd;
@@ -2426,11 +2430,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
         union ib_gid sgid, zgid;
         u32 vlan_id;
         u8 mac_addr[6];
+        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
         if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
                 return -EINVAL;
-        if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
-                ocrdma_init_service_level(qp->dev);
+        if (atomic_cmpxchg(&dev->update_sl, 1, 0))
+                ocrdma_init_service_level(dev);
         cmd->params.tclass_sq_psn |=
                 (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
         cmd->params.rnt_rc_sl_fl |=
@@ -2441,7 +2446,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
                 cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
         memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
                sizeof(cmd->params.dgid));
-        status = ocrdma_query_gid(&qp->dev->ibdev, 1,
+        status = ocrdma_query_gid(&dev->ibdev, 1,
                         ah_attr->grh.sgid_index, &sgid);
         if (status)
                 return status;
@@ -2452,7 +2457,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 
         qp->sgid_idx = ah_attr->grh.sgid_index;
         memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
-        status = ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
+        status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
         if (status)
                 return status;
         cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
@@ -2467,7 +2472,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
                         vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
                 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
                 cmd->params.rnt_rc_sl_fl |=
-                        (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
+                        (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
         }
         return 0;
 }
@@ -2477,6 +2482,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                             struct ib_qp_attr *attrs, int attr_mask)
 {
         int status = 0;
+        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
         if (attr_mask & IB_QP_PKEY_INDEX) {
                 cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2494,12 +2500,12 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                         return status;
         } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
                 /* set the default mac address for UD, GSI QPs */
-                cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
-                        (qp->dev->nic_info.mac_addr[1] << 8) |
-                        (qp->dev->nic_info.mac_addr[2] << 16) |
-                        (qp->dev->nic_info.mac_addr[3] << 24);
-                cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
-                        (qp->dev->nic_info.mac_addr[5] << 8);
+                cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
+                        (dev->nic_info.mac_addr[1] << 8) |
+                        (dev->nic_info.mac_addr[2] << 16) |
+                        (dev->nic_info.mac_addr[3] << 24);
+                cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
+                        (dev->nic_info.mac_addr[5] << 8);
         }
         if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
             attrs->en_sqd_async_notify) {
@@ -2556,7 +2562,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                 cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
         }
         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-                if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
+                if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
                         status = -EINVAL;
                         goto pmtu_err;
                 }
@@ -2564,7 +2570,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                 cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
         }
         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-                if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
+                if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
                         status = -EINVAL;
                         goto pmtu_err;
                 }
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 14a5fafd3e3a..4d5f581af49a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1219,8 +1219,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
         int status = 0;
         u64 usr_db;
         struct ocrdma_create_qp_uresp uresp;
-        struct ocrdma_dev *dev = qp->dev;
         struct ocrdma_pd *pd = qp->pd;
+        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 
         memset(&uresp, 0, sizeof(uresp));
         usr_db = dev->nic_info.unmapped_db +
@@ -1359,7 +1359,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
                 status = -ENOMEM;
                 goto gen_err;
         }
-        qp->dev = dev;
         ocrdma_set_qp_init_params(qp, pd, attrs);
         if (udata == NULL)
                 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
@@ -1418,7 +1417,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         enum ib_qp_state old_qps;
 
         qp = get_ocrdma_qp(ibqp);
-        dev = qp->dev;
+        dev = get_ocrdma_dev(ibqp->device);
         if (attr_mask & IB_QP_STATE)
                 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
         /* if new and previous states are same hw doesn't need to
@@ -1441,7 +1440,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         enum ib_qp_state old_qps, new_qps;
 
         qp = get_ocrdma_qp(ibqp);
-        dev = qp->dev;
+        dev = get_ocrdma_dev(ibqp->device);
 
         /* syncronize with multiple context trying to change, retrive qps */
         mutex_lock(&dev->dev_lock);
@@ -1508,7 +1507,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
         u32 qp_state;
         struct ocrdma_qp_params params;
         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
-        struct ocrdma_dev *dev = qp->dev;
+        struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
 
         memset(&params, 0, sizeof(params));
         mutex_lock(&dev->dev_lock);
@@ -1704,7 +1703,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
 {
         int found = false;
         unsigned long flags;
-        struct ocrdma_dev *dev = qp->dev;
+        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
         /* sync with any active CQ poll */
 
         spin_lock_irqsave(&dev->flush_q_lock, flags);
@@ -1729,7 +1728,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
         unsigned long flags;
 
         qp = get_ocrdma_qp(ibqp);
-        dev = qp->dev;
+        dev = get_ocrdma_dev(ibqp->device);
 
         attrs.qp_state = IB_QPS_ERR;
         pd = qp->pd;
@@ -2114,11 +2113,12 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
         u64 fbo;
         struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
         struct ocrdma_mr *mr;
+        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
         u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
 
         wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
 
-        if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
+        if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
                 return -EINVAL;
 
         hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
@@ -2146,7 +2146,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
         fast_reg->size_sge =
                 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
         mr = (struct ocrdma_mr *) (unsigned long)
-                qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
+                dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
         build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
         return 0;
 }