author     Roland Dreier <rolandd@cisco.com>    2007-03-04 19:15:11 -0500
committer  Roland Dreier <rolandd@cisco.com>    2007-05-08 21:00:37 -0400
commit     f7c6a7b5d59980b076abbf2ceeb8735591290285 (patch)
tree       29c35b47052bba87f031a4744d8ad12ff5187149 /drivers/infiniband/hw/cxgb3/iwch_provider.c
parent     36f021b579d195cdc5fa6f3e2bab198b4bf70643 (diff)
IB/uverbs: Export ib_umem_get()/ib_umem_release() to modules
Export ib_umem_get()/ib_umem_release() and put low-level drivers in
control of when to call ib_umem_get() to pin and DMA map userspace
memory, rather than always calling it in ib_uverbs_reg_mr() before
calling the low-level driver's reg_user_mr method.
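To make the new calling convention concrete, here is a minimal sketch
of the pattern a low-level driver's reg_user_mr method follows after
this change. The drv_* names are hypothetical placeholders invented
for illustration; the cxgb3 conversion in the diff below shows a real
instance of the same pattern.

#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical driver MR structure; real drivers embed struct ib_mr. */
struct drv_mr {
	struct ib_mr	ibmr;
	struct ib_umem *umem;
};

static struct ib_mr *drv_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				     u64 virt, int acc, struct ib_udata *udata)
{
	struct drv_mr *mhp;
	int err;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	/* The driver, not ib_uverbs, now pins and DMA maps the region. */
	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	/* ... walk mhp->umem->chunk_list and program the HCA here ... */

	return &mhp->ibmr;
}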
Also move these functions to be in the ib_core module instead of
ib_uverbs, so that driver modules using them do not depend on
ib_uverbs.
This has a number of advantages:
- It is better design from the standpoint of making generic code a
library that can be used or overridden by device-specific code as
the details of specific devices dictate.
- Drivers that do not need to pin userspace memory regions do not
need to take the performance hit of calling ib_umem_get(). For
example, although I have not tried to implement it in this patch,
the ipath driver should be able to avoid pinning memory and just
use copy_{to,from}_user() to access userspace memory regions.
- Buffers that need special mapping treatment can be identified by
the low-level driver. For example, it may be possible to solve
some Altix-specific memory ordering issues with mthca CQs in
userspace by mapping CQ buffers with extra flags.
- Drivers that need to pin and DMA map userspace memory for things
other than memory regions can use ib_umem_get() directly, instead
of hacks using extra parameters to their reg_phys_mr method. For
example, the mlx4 driver that is pending being merged needs to pin
and DMA map QP and CQ buffers, but it does not need to create a
memory key for these buffers. So the cleanest solution is for mlx4
to call ib_umem_get() in the create_qp and create_cq methods, as
sketched after this list.
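As a rough illustration of that last point, a driver wanting a pinned,
DMA-mapped userspace buffer with no memory key could call
ib_umem_get() directly from its CQ creation path. This is only a
sketch: drv_create_cq_umem(), buf_addr, and buf_size are invented
names, and the pending mlx4 code may look different.

/* Illustrative only: pin a userspace CQ buffer without creating a
 * memory key.  No reg_user_mr call and no lkey/rkey are involved. */
static int drv_create_cq_umem(struct ib_ucontext *context, u64 buf_addr,
			      size_t buf_size, struct ib_umem **umem)
{
	*umem = ib_umem_get(context, buf_addr, buf_size,
			    IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	/* ... hand the pinned, DMA-mapped chunks to the HCA ... */
	return 0;
}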
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/cxgb3/iwch_provider.c')
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 28
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index a891493fd34..e7c2c394803 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -47,6 +47,7 @@
 #include <rdma/iw_cm.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_smi.h>
+#include <rdma/ib_umem.h>
 #include <rdma/ib_user_verbs.h>
 
 #include "cxio_hal.h"
@@ -443,6 +444,8 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
 	remove_handle(rhp, &rhp->mmidr, mmid);
 	if (mhp->kva)
 		kfree((void *) (unsigned long) mhp->kva);
+	if (mhp->umem)
+		ib_umem_release(mhp->umem);
 	PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp);
 	kfree(mhp);
 	return 0;
@@ -577,8 +580,8 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 }
 
 
-static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
-				      int acc, struct ib_udata *udata)
+static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+				      u64 virt, int acc, struct ib_udata *udata)
 {
 	__be64 *pages;
 	int shift, n, len;
@@ -591,7 +594,6 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
 	struct iwch_reg_user_mr_resp uresp;
 
 	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
-	shift = ffs(region->page_size) - 1;
 
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
@@ -599,8 +601,17 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
 
+	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+	if (IS_ERR(mhp->umem)) {
+		err = PTR_ERR(mhp->umem);
+		kfree(mhp);
+		return ERR_PTR(err);
+	}
+
+	shift = ffs(mhp->umem->page_size) - 1;
+
 	n = 0;
-	list_for_each_entry(chunk, &region->chunk_list, list)
+	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
 		n += chunk->nents;
 
 	pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
@@ -611,13 +622,13 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
 
 	i = n = 0;
 
-	list_for_each_entry(chunk, &region->chunk_list, list)
+	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
 		for (j = 0; j < chunk->nmap; ++j) {
 			len = sg_dma_len(&chunk->page_list[j]) >> shift;
 			for (k = 0; k < len; ++k) {
 				pages[i++] = cpu_to_be64(sg_dma_address(
 					&chunk->page_list[j]) +
-					region->page_size * k);
+					mhp->umem->page_size * k);
 			}
 		}
 
@@ -625,9 +636,9 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.zbva = 0;
 	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
-	mhp->attr.va_fbo = region->virt_base;
+	mhp->attr.va_fbo = virt;
 	mhp->attr.page_size = shift - 12;
-	mhp->attr.len = (u32) region->length;
+	mhp->attr.len = (u32) length;
 	mhp->attr.pbl_size = i;
 	err = iwch_register_mem(rhp, php, mhp, shift, pages);
 	kfree(pages);
@@ -650,6 +661,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
 	return &mhp->ibmr;
 
 err:
+	ib_umem_release(mhp->umem);
 	kfree(mhp);
 	return ERR_PTR(err);
 }