author:    Eli Cohen <eli@dev.mellanox.co.il>       2014-01-14 10:45:21 -0500
committer: Roland Dreier <roland@purestorage.com>   2014-01-23 02:23:53 -0500
commit:    9e9c47d07d447e09a66ee528c3ebad9ba359af6a
tree:      b3bd156928143fe3260c58c550854d1cb716d656
parent:    05bdb2ab6b09f2306f0afe0f60f4b9abffa7aba4
IB/mlx5: Allow creation of QPs with zero-length work queues
The current code attempts to call ib_umem_get() even when the length is
zero, which causes a failure. Since the spec allows zero-length work
queues, change the code so we don't call ib_umem_get() in those cases.
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
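
For context, a hypothetical userspace sketch of the case this fix
unblocks: a consumer asking for a QP whose send and receive work queues
are both zero-length (for example, because every receive is posted to an
SRQ rather than to the QP itself). The helper name and the pd/cq/srq
setup are illustration-only assumptions, not part of the patch; before
this change, such a request would presumably end in the failing
ib_umem_get() call described above.

    #include <stdio.h>
    #include <infiniband/verbs.h>

    /* Hypothetical helper (illustration only): create an RC QP with
     * zero-length work queues.  pd, cq and srq are assumed to have been
     * set up by the caller via ibv_alloc_pd(), ibv_create_cq() and
     * ibv_create_srq(). */
    static struct ibv_qp *create_zero_wq_qp(struct ibv_pd *pd,
                                            struct ibv_cq *cq,
                                            struct ibv_srq *srq)
    {
            struct ibv_qp_init_attr attr = {
                    .send_cq = cq,
                    .recv_cq = cq,
                    .srq     = srq,          /* receives land on the SRQ */
                    .qp_type = IBV_QPT_RC,
                    .cap = {
                            .max_send_wr = 0,  /* zero-length send queue */
                            .max_recv_wr = 0,  /* zero-length recv queue */
                    },
            };
            struct ibv_qp *qp = ibv_create_qp(pd, &attr);

            if (!qp)
                    fprintf(stderr, "ibv_create_qp failed\n");
            return qp;
    }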
 drivers/infiniband/hw/mlx5/qp.c | 49 +++++++++++++++++++++++++++++--------------------
 1 file changed, 29 insertions(+), 20 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 87b7fb176f22..70dd77085db6 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -523,12 +523,12 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 {
 	struct mlx5_ib_ucontext *context;
 	struct mlx5_ib_create_qp ucmd;
-	int page_shift;
+	int page_shift = 0;
 	int uar_index;
 	int npages;
-	u32 offset;
+	u32 offset = 0;
 	int uuarn;
-	int ncont;
+	int ncont = 0;
 	int err;
 
 	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
@@ -564,23 +564,29 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	if (err)
 		goto err_uuar;
 
-	qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
-			       qp->buf_size, 0, 0);
-	if (IS_ERR(qp->umem)) {
-		mlx5_ib_dbg(dev, "umem_get failed\n");
-		err = PTR_ERR(qp->umem);
-		goto err_uuar;
+	if (ucmd.buf_addr && qp->buf_size) {
+		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
+				       qp->buf_size, 0, 0);
+		if (IS_ERR(qp->umem)) {
+			mlx5_ib_dbg(dev, "umem_get failed\n");
+			err = PTR_ERR(qp->umem);
+			goto err_uuar;
+		}
+	} else {
+		qp->umem = NULL;
 	}
 
-	mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
-			   &ncont, NULL);
-	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
-	if (err) {
-		mlx5_ib_warn(dev, "bad offset\n");
-		goto err_umem;
+	if (qp->umem) {
+		mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
+				   &ncont, NULL);
+		err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
+		if (err) {
+			mlx5_ib_warn(dev, "bad offset\n");
+			goto err_umem;
+		}
+		mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
+			    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
 	}
-	mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
-		    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
 
 	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
 	*in = mlx5_vzalloc(*inlen);
@@ -588,7 +594,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		err = -ENOMEM;
 		goto err_umem;
 	}
-	mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
+	if (qp->umem)
+		mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
 	(*in)->ctx.log_pg_sz_remote_qpn =
 		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
 	(*in)->ctx.params2 = cpu_to_be32(offset << 6);
@@ -619,7 +626,8 @@ err_free:
 	mlx5_vfree(*in);
 
 err_umem:
-	ib_umem_release(qp->umem);
+	if (qp->umem)
+		ib_umem_release(qp->umem);
 
 err_uuar:
 	free_uuar(&context->uuari, uuarn);
@@ -632,7 +640,8 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
 
 	context = to_mucontext(pd->uobject->context);
 	mlx5_ib_db_unmap_user(context, &qp->db);
-	ib_umem_release(qp->umem);
+	if (qp->umem)
+		ib_umem_release(qp->umem);
 	free_uuar(&context->uuari, qp->uuarn);
 }
 
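
Distilled, the patched create_user_qp() path reads as below. This is a
condensed paraphrase of the hunks above with error handling trimmed, not
the literal kernel code:

    /* Condensed paraphrase of the patched flow (error paths trimmed). */
    qp->umem = NULL;
    if (ucmd.buf_addr && qp->buf_size)      /* only pin a real buffer */
            qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
                                   qp->buf_size, 0, 0);

    if (qp->umem) {                         /* page accounting needs a buffer */
            mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages,
                               &page_shift, &ncont, NULL);
            err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
    }

    /* page_shift, ncont and offset are zero-initialized at the top of
     * the function, so with no umem the PAS array sized from ncont is
     * empty, offset stays 0, and the ib_umem_release() calls on the
     * error and destroy paths are skipped by the new NULL checks. */

This is why the first hunk adds zero initializers to page_shift, offset
and ncont: on the zero-length path those locals are consumed without
ever being filled in.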