author    Roland Dreier <roland@eddore.topspincom.com>  2005-08-19 13:59:31 -0400
committer Roland Dreier <rolandd@cisco.com>             2005-08-26 23:37:37 -0400
commit    ec34a922d243c3401a694450734e9effb2bafbfe
tree      7d79ed1848d1b63665d7565274c1d2b56d09df9d /drivers/infiniband/hw/mthca/mthca_cq.c
parent    d20a40192868082eff6fec729b311cb8463b4a21
[PATCH] IB/mthca: Add SRQ implementation
Add mthca support for shared receive queues (SRQs), including userspace SRQs.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
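The userspace half of the feature is reached through the standard libibverbs SRQ verbs, which this driver now backs. As a rough, hypothetical sketch (not part of this patch; the helper name, queue depth, and buffer size are invented), creating an SRQ and posting a receive buffer to it looks like this:

	#include <stdint.h>
	#include <infiniband/verbs.h>

	/* Hypothetical helper: create an SRQ on an existing PD and post one
	 * receive buffer to it.  "buf" must lie inside the registered MR. */
	static struct ibv_srq *example_srq_setup(struct ibv_pd *pd,
						 struct ibv_mr *mr, void *buf)
	{
		struct ibv_srq_init_attr srq_attr = {
			.attr = {
				.max_wr  = 128,	/* WQEs shared by all attached QPs */
				.max_sge = 1,
			},
		};
		struct ibv_srq *srq = ibv_create_srq(pd, &srq_attr);
		if (!srq)
			return NULL;

		struct ibv_sge sge = {
			.addr   = (uintptr_t) buf,
			.length = 4096,
			.lkey   = mr->lkey,
		};
		struct ibv_recv_wr wr = {
			.wr_id   = 0x1234,	/* returned in the completion */
			.sg_list = &sge,
			.num_sge = 1,
		};
		struct ibv_recv_wr *bad_wr;

		if (ibv_post_srq_recv(srq, &wr, &bad_wr)) {
			ibv_destroy_srq(srq);
			return NULL;
		}
		return srq;
	}

A QP attaches to the SRQ by setting the srq field of struct ibv_qp_init_attr when it is created; receive completions for any attached QP then return wr_id values posted to the shared queue, which is what the CQ-polling changes below handle.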
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_cq.c')
 drivers/infiniband/hw/mthca/mthca_cq.c | 32 ++++++++++++++++++++------------
 1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 5dee908c2f34..5ece609c2ee0 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -224,7 +224,8 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
 	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
 
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
+void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+		    struct mthca_srq *srq)
 {
 	struct mthca_cq *cq;
 	struct mthca_cqe *cqe;
@@ -265,8 +266,11 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
 	 */
 	while (prod_index > cq->cons_index) {
 		cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
-		if (cqe->my_qpn == cpu_to_be32(qpn))
+		if (cqe->my_qpn == cpu_to_be32(qpn)) {
+			if (srq)
+				mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
 			++nfreed;
+		}
 		else if (nfreed)
 			memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
 				       cq->ibcq.cqe),
@@ -455,23 +459,27 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
 			     >> wq->wqe_shift);
 		entry->wr_id = (*cur_qp)->wrid[wqe_index +
 					       (*cur_qp)->rq.max];
+	} else if ((*cur_qp)->ibqp.srq) {
+		struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
+		u32 wqe = be32_to_cpu(cqe->wqe);
+		wq = NULL;
+		wqe_index = wqe >> srq->wqe_shift;
+		entry->wr_id = srq->wrid[wqe_index];
+		mthca_free_srq_wqe(srq, wqe);
 	} else {
 		wq = &(*cur_qp)->rq;
 		wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
 		entry->wr_id = (*cur_qp)->wrid[wqe_index];
 	}
 
-	if (wq->last_comp < wqe_index)
-		wq->tail += wqe_index - wq->last_comp;
-	else
-		wq->tail += wqe_index + wq->max - wq->last_comp;
-
-	wq->last_comp = wqe_index;
+	if (wq) {
+		if (wq->last_comp < wqe_index)
+			wq->tail += wqe_index - wq->last_comp;
+		else
+			wq->tail += wqe_index + wq->max - wq->last_comp;
 
-	if (0)
-		mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n",
-			  is_send ? "Send" : "Receive",
-			  (*cur_qp)->qpn, wqe_index, wq->max);
+		wq->last_comp = wqe_index;
+	}
 
 	if (is_error) {
 		err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
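For SRQ completions, the new else-if branch leaves wq NULL: WQEs on a shared queue are not consumed in ring order, so there is no per-QP receive tail to advance, and the entry is instead handed straight back to the SRQ free list via mthca_free_srq_wqe(). The tail arithmetic now guarded by if (wq) advances a ring of wq->max entries by the modular distance from the previous completion. A standalone toy sketch of that arithmetic (made-up values, not driver code):

	#include <stdio.h>

	/* Toy work queue mirroring just the fields the tail logic touches. */
	struct toy_wq {
		unsigned tail;		/* running count of reclaimed WQEs */
		unsigned last_comp;	/* index of last completed WQE */
		unsigned max;		/* ring size */
	};

	static void advance_tail(struct toy_wq *wq, unsigned wqe_index)
	{
		if (wq->last_comp < wqe_index)
			wq->tail += wqe_index - wq->last_comp;		 /* no wrap */
		else
			wq->tail += wqe_index + wq->max - wq->last_comp; /* wrapped */

		wq->last_comp = wqe_index;
	}

	int main(void)
	{
		struct toy_wq wq = { .tail = 0, .last_comp = 250, .max = 256 };

		advance_tail(&wq, 3);	/* completion wrapped past index 255 */
		printf("tail advanced by %u\n", wq.tail);	/* prints 9 */
		return 0;
	}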