about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorSteve Wise <swise@opengridcomputing.com>2016-02-17 11:15:42 -0500
committerDoug Ledford <dledford@redhat.com>2016-02-29 17:10:27 -0500
commit 561392d42d42c0fefad179a07b6dd1e6e261a572 (patch)
tree aee9bf9384f62f84b95e25ce480745acff32af38
parent 086dc6e359d11fd29d0f2041cdc0bb76a5d807d8 (diff)
IB/srp: Use ib_drain_rq()
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 40
1 file changed, 4 insertions(+), 36 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 03022f6420d7..b6bf20496021 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -446,49 +446,17 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
446 dev->max_pages_per_mr); 446 dev->max_pages_per_mr);
447} 447}
448 448
449static void srp_drain_done(struct ib_cq *cq, struct ib_wc *wc)
450{
451 struct srp_rdma_ch *ch = cq->cq_context;
452
453 complete(&ch->done);
454}
455
456static struct ib_cqe srp_drain_cqe = {
457 .done = srp_drain_done,
458};
459
460/** 449/**
461 * srp_destroy_qp() - destroy an RDMA queue pair 450 * srp_destroy_qp() - destroy an RDMA queue pair
462 * @ch: SRP RDMA channel. 451 * @ch: SRP RDMA channel.
463 * 452 *
464 * Change a queue pair into the error state and wait until all receive 453 * Drain the qp before destroying it. This avoids that the receive
465 * completions have been processed before destroying it. This avoids that 454 * completion handler can access the queue pair while it is
466 * the receive completion handler can access the queue pair while it is
467 * being destroyed. 455 * being destroyed.
468 */ 456 */
469static void srp_destroy_qp(struct srp_rdma_ch *ch) 457static void srp_destroy_qp(struct srp_rdma_ch *ch)
470{ 458{
471 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 459 ib_drain_rq(ch->qp);
472 static struct ib_recv_wr wr = { 0 };
473 struct ib_recv_wr *bad_wr;
474 int ret;
475
476 wr.wr_cqe = &srp_drain_cqe;
477 /* Destroying a QP and reusing ch->done is only safe if not connected */
478 WARN_ON_ONCE(ch->connected);
479
480 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
481 WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
482 if (ret)
483 goto out;
484
485 init_completion(&ch->done);
486 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
487 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
488 if (ret == 0)
489 wait_for_completion(&ch->done);
490
491out:
492 ib_destroy_qp(ch->qp); 460 ib_destroy_qp(ch->qp);
493} 461}
494 462
@@ -508,7 +476,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
508 if (!init_attr) 476 if (!init_attr)
509 return -ENOMEM; 477 return -ENOMEM;
510 478
511 /* queue_size + 1 for ib_drain_qp */ 479 /* queue_size + 1 for ib_drain_rq() */
512 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1, 480 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
513 ch->comp_vector, IB_POLL_SOFTIRQ); 481 ch->comp_vector, IB_POLL_SOFTIRQ);
514 if (IS_ERR(recv_cq)) { 482 if (IS_ERR(recv_cq)) {