aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBart Van Assche <bart.vanassche@sandisk.com>2017-02-14 13:56:36 -0500
committerDoug Ledford <dledford@redhat.com>2017-02-19 09:51:55 -0500
commit9294000d6d895ad609f3cc4aff98c9c6175b466f (patch)
tree0ffc9f78e77a74c1ce03b69090e5a78cc0765064
parentf039f44fc331a7c6f828dfed97d5df0588602fd8 (diff)
IB/srp: Drain the send queue before destroying a QP
A quote from the IB spec: However, if the Consumer does not wait for the Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment leakage may occur. Therefore, it is good programming practice to tear down a QP that is associated with an SRQ by using the following process: * Put the QP in the Error State; * wait for the Affiliated Asynchronous Last WQE Reached Event; * either: * drain the CQ by invoking the Poll CQ verb and either wait for CQ to be empty or the number of Poll CQ operations has exceeded CQ capacity size; or * post another WR that completes on the same CQ and wait for this WR to return as a WC; * and then invoke a Destroy QP or Reset QP. Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com> Cc: Christoph Hellwig <hch@lst.de> Cc: Israel Rukshin <israelr@mellanox.com> Cc: Max Gurtovoy <maxg@mellanox.com> Cc: Laurence Oberman <loberman@redhat.com> Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c19
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index e8225cc8b938..c7d97097d55c 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -466,9 +466,13 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
466 * completion handler can access the queue pair while it is 466 * completion handler can access the queue pair while it is
467 * being destroyed. 467 * being destroyed.
468 */ 468 */
469static void srp_destroy_qp(struct ib_qp *qp) 469static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
470{ 470{
471 ib_drain_rq(qp); 471 spin_lock_irq(&ch->lock);
472 ib_process_cq_direct(ch->send_cq, -1);
473 spin_unlock_irq(&ch->lock);
474
475 ib_drain_qp(qp);
472 ib_destroy_qp(qp); 476 ib_destroy_qp(qp);
473} 477}
474 478
@@ -542,7 +546,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
542 } 546 }
543 547
544 if (ch->qp) 548 if (ch->qp)
545 srp_destroy_qp(ch->qp); 549 srp_destroy_qp(ch, ch->qp);
546 if (ch->recv_cq) 550 if (ch->recv_cq)
547 ib_free_cq(ch->recv_cq); 551 ib_free_cq(ch->recv_cq);
548 if (ch->send_cq) 552 if (ch->send_cq)
@@ -566,7 +570,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
566 return 0; 570 return 0;
567 571
568err_qp: 572err_qp:
569 srp_destroy_qp(qp); 573 srp_destroy_qp(ch, qp);
570 574
571err_send_cq: 575err_send_cq:
572 ib_free_cq(send_cq); 576 ib_free_cq(send_cq);
@@ -609,7 +613,7 @@ static void srp_free_ch_ib(struct srp_target_port *target,
609 ib_destroy_fmr_pool(ch->fmr_pool); 613 ib_destroy_fmr_pool(ch->fmr_pool);
610 } 614 }
611 615
612 srp_destroy_qp(ch->qp); 616 srp_destroy_qp(ch, ch->qp);
613 ib_free_cq(ch->send_cq); 617 ib_free_cq(ch->send_cq);
614 ib_free_cq(ch->recv_cq); 618 ib_free_cq(ch->recv_cq);
615 619
@@ -1822,6 +1826,11 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1822 return iu; 1826 return iu;
1823} 1827}
1824 1828
1829/*
1830 * Note: if this function is called from inside ib_drain_sq() then it will
1831 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1832 * with status IB_WC_SUCCESS then that's a bug.
1833 */
1825static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc) 1834static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1826{ 1835{
1827 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); 1836 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);