author     Selvin Xavier <selvin.xavier@emulex.com>   2014-02-04 01:27:03 -0500
committer  Roland Dreier <roland@purestorage.com>     2014-04-03 11:30:04 -0400
commit     cf5788ade718a2cc654170ff11c7d6f6f1ecbdcc (patch)
tree       d6169037de92b79da03959f0ead1484df81dc42b
parent     9d1878a369b23f48a5ca5bcbd89abb0e569c58cc (diff)
RDMA/ocrdma: Use non-zero tag in SRQ posting
As part of SRQ receive buffer posting, we populate a non-zero tag that
will be returned in SRQ receive completions.

Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r--   drivers/infiniband/hw/ocrdma/ocrdma_verbs.c   28
1 file changed, 18 insertions(+), 10 deletions(-)
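To make the commit message concrete: the patch shifts the SRQ buffer-tag space so that 0 is never a valid tag (ocrdma_srq_get_idx now returns indx + 1, and the completion paths toggle the free bit at wqe_idx - 1). The standalone sketch below models that scheme; the names (demo_srq, demo_alloc_tag, demo_complete), the fixed-size bitmap, and the table layout are illustrative assumptions, not the driver's actual data structures.

/*
 * Illustrative sketch only -- not the ocrdma driver code. It mirrors
 * the patch's idea: the allocator hands out 1-based tags so that a
 * tag of 0 seen in a completion can be treated as invalid, while the
 * free bitmap is still indexed 0-based via (tag - 1).
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_WQE 32

struct demo_srq {
	uint32_t free_bitmap;                 /* bit i set => slot i free */
	uint64_t wr_id_tbl[DEMO_MAX_WQE + 1]; /* indexed by 1-based tag   */
};

/* Posting side: find a free slot, return a *non-zero* tag (idx + 1). */
static uint32_t demo_alloc_tag(struct demo_srq *srq, uint64_t wr_id)
{
	for (uint32_t idx = 0; idx < DEMO_MAX_WQE; idx++) {
		if (srq->free_bitmap & (1u << idx)) {
			srq->free_bitmap &= ~(1u << idx); /* mark in use */
			srq->wr_id_tbl[idx + 1] = wr_id;
			return idx + 1;           /* "use from index 1" */
		}
	}
	return 0; /* no free slot */
}

/* Completion side: a zero tag now unambiguously means "never posted". */
static uint64_t demo_complete(struct demo_srq *srq, uint32_t tag)
{
	assert(tag >= 1 && tag <= DEMO_MAX_WQE); /* patch BUG()s on tag < 1 */
	srq->free_bitmap |= 1u << (tag - 1);     /* free bit at tag - 1     */
	return srq->wr_id_tbl[tag];
}

int main(void)
{
	struct demo_srq srq = { .free_bitmap = ~0u };
	uint32_t tag = demo_alloc_tag(&srq, 0xabcdULL);

	printf("tag=%u wr_id=%#llx\n", tag,
	       (unsigned long long)demo_complete(&srq, tag));
	return 0;
}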
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 8cc00d2a06af..2b56c4288680 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1538,7 +1538,7 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
 	int discard_cnt = 0;
 	u32 cur_getp, stop_getp;
 	struct ocrdma_cqe *cqe;
-	u32 qpn = 0;
+	u32 qpn = 0, wqe_idx = 0;
 
 	spin_lock_irqsave(&cq->cq_lock, cq_flags);
 
@@ -1567,24 +1567,29 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
 		if (qpn == 0 || qpn != qp->id)
 			goto skip_cqe;
 
-		/* mark cqe discarded so that it is not picked up later
-		 * in the poll_cq().
-		 */
-		discard_cnt += 1;
-		cqe->cmn.qpn = 0;
 		if (is_cqe_for_sq(cqe)) {
 			ocrdma_hwq_inc_tail(&qp->sq);
 		} else {
 			if (qp->srq) {
+				wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
+					OCRDMA_CQE_BUFTAG_SHIFT) &
+					qp->srq->rq.max_wqe_idx;
+				if (wqe_idx < 1)
+					BUG();
 				spin_lock_irqsave(&qp->srq->q_lock, flags);
 				ocrdma_hwq_inc_tail(&qp->srq->rq);
-				ocrdma_srq_toggle_bit(qp->srq, cur_getp);
+				ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
 				spin_unlock_irqrestore(&qp->srq->q_lock, flags);
 
 			} else {
 				ocrdma_hwq_inc_tail(&qp->rq);
 			}
 		}
+		/* mark cqe discarded so that it is not picked up later
+		 * in the poll_cq().
+		 */
+		discard_cnt += 1;
+		cqe->cmn.qpn = 0;
 skip_cqe:
 		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
 	} while (cur_getp != stop_getp);
@@ -2238,7 +2243,7 @@ static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
 
 	if (row == srq->bit_fields_len)
 		BUG();
-	return indx;
+	return indx + 1; /* Use from index 1 */
 }
 
 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
@@ -2575,10 +2580,13 @@ static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
 
 	srq = get_ocrdma_srq(qp->ibqp.srq);
 	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
 		OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
+	if (wqe_idx < 1)
+		BUG();
+
 	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
 	spin_lock_irqsave(&srq->q_lock, flags);
-	ocrdma_srq_toggle_bit(srq, wqe_idx);
+	ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
 	spin_unlock_irqrestore(&srq->q_lock, flags);
 	ocrdma_hwq_inc_tail(&srq->rq);
 }
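Both completion paths in the patch recover the tag from the CQE with the same shift-and-mask on buftag_qpn. The sketch below shows that packing and unpacking in isolation; the bit layout (tag above bit 16, QPN in the low 16 bits) and the DEMO_* constants are assumptions made for illustration, not values taken from ocrdma_sli.h.

/*
 * Hedged illustration of the buftag_qpn encode/decode pattern used by
 * the completion paths above. Field layout and helper names are
 * assumptions for this sketch only.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CQE_BUFTAG_SHIFT 16
#define DEMO_CQE_QPN_MASK     0xffffu
#define DEMO_MAX_WQE_IDX      0x3fu   /* e.g. SRQ depth 64 -> mask 0x3f */

static uint32_t demo_pack_buftag_qpn(uint32_t tag, uint32_t qpn)
{
	return (tag << DEMO_CQE_BUFTAG_SHIFT) | (qpn & DEMO_CQE_QPN_MASK);
}

static uint32_t demo_unpack_tag(uint32_t buftag_qpn)
{
	/* mirrors: (le32_to_cpu(cqe->rq.buftag_qpn) >>
	 *           OCRDMA_CQE_BUFTAG_SHIFT) & max_wqe_idx */
	return (buftag_qpn >> DEMO_CQE_BUFTAG_SHIFT) & DEMO_MAX_WQE_IDX;
}

int main(void)
{
	uint32_t word = demo_pack_buftag_qpn(5 /* 1-based tag */, 0x12);

	printf("tag=%u\n", demo_unpack_tag(word)); /* prints tag=5 */
	return 0;
}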