author		Roland Dreier <rolandd@cisco.com>	2005-07-07 20:57:19 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-07 21:23:49 -0400
commit		24d4281be0598d2d4ab9a2ffb1b78f5af0ffaddf (patch)
tree		cc7810ff27bc207e264ea183a150600cd8db7bd8 /drivers/infiniband/hw/mthca/mthca_provider.c
parent		99264c1ee2ce908f95c075cce97698758a793b58 (diff)
[PATCH] IB uverbs: add mthca user MR support
Add support for userspace memory regions (MRs) to mthca.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_provider.c')
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_provider.c	82
1 file changed, 82 insertions(+), 0 deletions(-)
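For context (not part of the patch): the sketch below shows how a userspace consumer would exercise the path this commit enables, assuming a recent libibverbs and at least one usable HCA. ibv_reg_mr() is the verbs call that reaches the driver's new reg_user_mr method through the uverbs layer; the buffer size and access flags here are arbitrary illustration values.

/*
 * Illustrative userspace sketch only -- assumes libibverbs and a
 * working HCA; not part of this commit.
 */
#include <stdio.h>
#include <stdlib.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list = ibv_get_device_list(NULL);
	struct ibv_context *ctx;
	struct ibv_pd *pd;
	struct ibv_mr *mr;
	size_t len = 64 * 4096;		/* arbitrary region size */
	void *buf;

	if (!dev_list || !dev_list[0])
		return 1;

	ctx = ibv_open_device(dev_list[0]);
	if (!ctx)
		return 1;

	pd  = ibv_alloc_pd(ctx);
	buf = malloc(len);
	if (!pd || !buf)
		return 1;

	/*
	 * ibv_reg_mr() asks the kernel to pin the pages of buf; on an
	 * mthca device their DMA addresses are written into the MTT by
	 * the mthca_reg_user_mr() path added in this patch.
	 */
	mr = ibv_reg_mr(pd, buf, len, IBV_ACCESS_LOCAL_WRITE);
	if (!mr) {
		perror("ibv_reg_mr");
		return 1;
	}

	printf("registered %zu bytes: lkey 0x%x rkey 0x%x\n",
	       len, mr->lkey, mr->rkey);

	ibv_dereg_mr(mr);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(dev_list);
	free(buf);
	return 0;
}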
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 318356c19abe..bbdfcbe6bade 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -654,6 +654,87 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
 	return &mr->ibmr;
 }
 
+static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
+				       int acc, struct ib_udata *udata)
+{
+	struct mthca_dev *dev = to_mdev(pd->device);
+	struct ib_umem_chunk *chunk;
+	struct mthca_mr *mr;
+	u64 *pages;
+	int shift, n, len;
+	int i, j, k;
+	int err = 0;
+
+	shift = ffs(region->page_size) - 1;
+
+	mr = kmalloc(sizeof *mr, GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
+
+	n = 0;
+	list_for_each_entry(chunk, &region->chunk_list, list)
+		n += chunk->nents;
+
+	mr->mtt = mthca_alloc_mtt(dev, n);
+	if (IS_ERR(mr->mtt)) {
+		err = PTR_ERR(mr->mtt);
+		goto err;
+	}
+
+	pages = (u64 *) __get_free_page(GFP_KERNEL);
+	if (!pages) {
+		err = -ENOMEM;
+		goto err_mtt;
+	}
+
+	i = n = 0;
+
+	list_for_each_entry(chunk, &region->chunk_list, list)
+		for (j = 0; j < chunk->nmap; ++j) {
+			len = sg_dma_len(&chunk->page_list[j]) >> shift;
+			for (k = 0; k < len; ++k) {
+				pages[i++] = sg_dma_address(&chunk->page_list[j]) +
+					region->page_size * k;
+				/*
+				 * Be friendly to WRITE_MTT command
+				 * and leave two empty slots for the
+				 * index and reserved fields of the
+				 * mailbox.
+				 */
+				if (i == PAGE_SIZE / sizeof (u64) - 2) {
+					err = mthca_write_mtt(dev, mr->mtt,
+							      n, pages, i);
+					if (err)
+						goto mtt_done;
+					n += i;
+					i = 0;
+				}
+			}
+		}
+
+	if (i)
+		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
+mtt_done:
+	free_page((unsigned long) pages);
+	if (err)
+		goto err_mtt;
+
+	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, region->virt_base,
+			     region->length, convert_access(acc), mr);
+
+	if (err)
+		goto err_mtt;
+
+	return &mr->ibmr;
+
+err_mtt:
+	mthca_free_mtt(dev, mr->mtt);
+
+err:
+	kfree(mr);
+	return ERR_PTR(err);
+}
+
 static int mthca_dereg_mr(struct ib_mr *mr)
 {
 	struct mthca_mr *mmr = to_mmr(mr);
@@ -804,6 +885,7 @@ int mthca_register_device(struct mthca_dev *dev)
 	dev->ib_dev.poll_cq = mthca_poll_cq;
 	dev->ib_dev.get_dma_mr = mthca_get_dma_mr;
 	dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr;
+	dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
 	dev->ib_dev.dereg_mr = mthca_dereg_mr;
 
 	if (dev->mthca_flags & MTHCA_FLAG_FMR) {