author		Roland Dreier <rolandd@cisco.com>	2007-05-23 18:16:08 -0400
committer	Roland Dreier <rolandd@cisco.com>	2007-05-23 18:16:08 -0400
commit		02d89b87081f516ad3993637f9b75db0d9786554
tree		c7d26293d06c88e4831851ac6362b3b7e3fa69de
parent		8aee74c8ee875448cc6d1cf995c9469eb60ae515
IB/mlx4: Don't allocate RQ doorbell if using SRQ
If a QP is attached to a shared receive queue (SRQ), then it doesn't
have a receive queue (RQ). So don't allocate an RQ doorbell (or map a
doorbell from userspace for userspace QPs) for that QP.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
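The fix applies the same guard at every RQ-doorbell site in qp.c: allocate (or map), zero, and free the doorbell record only when the QP actually owns a receive queue. A minimal standalone sketch of that pattern follows; struct qp_ctx, setup_rq_doorbell() and teardown_rq_doorbell() are hypothetical stand-ins for the mlx4_ib_qp state and the mlx4_ib_db_alloc()/mlx4_ib_db_free() call sites touched by the patch, not the driver's actual code.

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the mlx4 QP state and doorbell helpers. */
struct rq_doorbell { unsigned int *rec; };

struct qp_ctx {
	bool has_srq;              /* QP posts receives to a shared receive queue */
	struct rq_doorbell db;     /* RQ doorbell record, unused when has_srq */
};

/* Only a QP that owns its receive queue needs an RQ doorbell record. */
static int setup_rq_doorbell(struct qp_ctx *qp)
{
	if (qp->has_srq)           /* no RQ, so nothing to allocate */
		return 0;

	qp->db.rec = calloc(1, sizeof(*qp->db.rec));
	return qp->db.rec ? 0 : -1;
}

/* Teardown mirrors setup: free only what was actually allocated. */
static void teardown_rq_doorbell(struct qp_ctx *qp)
{
	if (!qp->has_srq) {
		free(qp->db.rec);
		qp->db.rec = NULL;
	}
}

The error paths need the same condition, which is why the err_wrid and err_db labels in the diff below also test !init_attr->srq before undoing the doorbell setup.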
-rw-r--r--	drivers/infiniband/hw/mlx4/qp.c	37
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a824bc5f79fd..88a994d8a133 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -319,20 +319,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 
-		err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
-					  ucmd.db_addr, &qp->db);
-		if (err)
-			goto err_mtt;
+		if (!init_attr->srq) {
+			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
+						  ucmd.db_addr, &qp->db);
+			if (err)
+				goto err_mtt;
+		}
 	} else {
 		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
 		if (err)
 			goto err;
 
-		err = mlx4_ib_db_alloc(dev, &qp->db, 0);
-		if (err)
-			goto err;
+		if (!init_attr->srq) {
+			err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+			if (err)
+				goto err;
 
-		*qp->db.db = 0;
+			*qp->db.db = 0;
+		}
 
 		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
 			err = -ENOMEM;
@@ -386,7 +390,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	return 0;
 
 err_wrid:
-	if (pd->uobject)
+	if (pd->uobject && !init_attr->srq)
 		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
 	else {
 		kfree(qp->sq.wrid);
@@ -403,7 +407,7 @@ err_buf:
 	mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
 
 err_db:
-	if (!pd->uobject)
+	if (!pd->uobject && !init_attr->srq)
 		mlx4_ib_db_free(dev, &qp->db);
 
 err:
@@ -481,14 +485,16 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
 	if (is_user) {
-		mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
-				      &qp->db);
+		if (!qp->ibqp.srq)
+			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
+					      &qp->db);
 		ib_umem_release(qp->umem);
 	} else {
 		kfree(qp->sq.wrid);
 		kfree(qp->rq.wrid);
 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
-		mlx4_ib_db_free(dev, &qp->db);
+		if (!qp->ibqp.srq)
+			mlx4_ib_db_free(dev, &qp->db);
 	}
 }
 
@@ -852,7 +858,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (ibqp->srq)
 		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
 
-	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+	if (!ibqp->srq && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
 		context->db_rec_addr = cpu_to_be64(qp->db.dma);
 
 	if (cur_state == IB_QPS_INIT &&
@@ -919,7 +925,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		qp->rq.tail = 0;
 		qp->sq.head = 0;
 		qp->sq.tail = 0;
-		*qp->db.db = 0;
+		if (!ibqp->srq)
+			*qp->db.db = 0;
 	}
 
 out: