aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMike Marciniszyn <mike.marciniszyn@intel.com>2012-06-27 18:33:05 -0400
committerRoland Dreier <roland@purestorage.com>2012-07-08 21:05:19 -0400
commit354dff1bd8ccd41b6e8421226d586d35e7fb8920 (patch)
treedc228fa9509f66c288f76063dc1fb3e5a6493dbb
parent6887a4131da3adaab011613776d865f4bcfb5678 (diff)
IB/qib: Fix UC MR refs for immediate operations
An MR reference leak exists when handling UC RDMA writes with immediate data because we manipulate the reference counts as if the operation had been a send.

This patch moves the last_imm label so that the RDMA write operations with immediate data converge at the cq building code. The copy/mr deref code is now done correctly prior to the branch to last_imm.

Reviewed-by: Edward Mascarenhas <edward.mascarenhas@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index ce7387ff5d91..70b4cb710f9a 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -403,7 +403,6 @@ send_last:
 		if (unlikely(wc.byte_len > qp->r_len))
 			goto rewind;
 		wc.opcode = IB_WC_RECV;
-last_imm:
 		qib_copy_sge(&qp->r_sge, data, tlen, 0);
 		while (qp->s_rdma_read_sge.num_sge) {
 			atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
@@ -411,6 +410,7 @@ last_imm:
 			qp->s_rdma_read_sge.sge =
 				*qp->s_rdma_read_sge.sg_list++;
 		}
+last_imm:
 		wc.wr_id = qp->r_wr_id;
 		wc.status = IB_WC_SUCCESS;
 		wc.qp = &qp->ibqp;
@@ -509,6 +509,12 @@ rdma_last_imm:
 		}
 		wc.byte_len = qp->r_len;
 		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+		qib_copy_sge(&qp->r_sge, data, tlen, 1);
+		while (qp->r_sge.num_sge) {
+			atomic_dec(&qp->r_sge.sge.mr->refcount);
+			if (--qp->r_sge.num_sge)
+				qp->r_sge.sge = *qp->r_sge.sg_list++;
+		}
 		goto last_imm;
 
 	case OP(RDMA_WRITE_LAST):