author		Tom Tucker <tom@opengridcomputing.com>	2008-10-06 15:45:18 -0400
committer	Tom Tucker <tom@opengridcomputing.com>	2008-10-06 15:45:18 -0400
commit		64be8608c163bd480cf5ec4b34366f11e0f3c87f (patch)
tree		e0a2499259c799d1ac97935107a25c4cefd7cb0c
parent		0d3ebb9ae9f9c887518fd4c81a68084111d154d7 (diff)
svcrdma: Add FRMR get/put services
Add services for allocating, freeing, and unmapping Fast Reg MRs. These
services will be used by the transport connection setup, send, and receive
routines.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
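
For context, here is a minimal sketch of how a send or receive routine might
consume the new get/put pair once later patches wire it up. The caller and the
posting helper are hypothetical, not part of this patch; only
svc_rdma_get_frmr() and svc_rdma_put_frmr() come from the code below:

	/* Illustration only: svc_rdma_example_post() is a hypothetical helper
	 * standing in for the fast-reg WR posting added by later patches.
	 * svc_rdma_get_frmr() hands back a cached MR from sc_frmr_q or
	 * allocates a fresh one; svc_rdma_put_frmr() unmaps anything still
	 * DMA-mapped through the MR and returns it to the queue.
	 */
	static int example_fastreg_io(struct svcxprt_rdma *xprt)
	{
		struct svc_rdma_fastreg_mr *frmr;
		int ret;

		frmr = svc_rdma_get_frmr(xprt);
		if (IS_ERR(frmr))	/* rdma_alloc_frmr() returns ERR_PTR */
			return PTR_ERR(frmr);

		/* ... DMA-map pages into frmr->page_list, set page_list_len,
		 * map_len and direction, then post the fast-reg WR ... */
		ret = svc_rdma_example_post(xprt, frmr);

		svc_rdma_put_frmr(xprt, frmr);	/* recycle onto sc_frmr_q */
		return ret;
	}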
-rw-r--r--	include/linux/sunrpc/svc_rdma.h          |   3
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_transport.c | 116
2 files changed, 114 insertions(+), 5 deletions(-)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 49e458d9894..34252683671 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -214,6 +214,9 @@ extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
 extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
 extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
 extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
+extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
+extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
+			      struct svc_rdma_fastreg_mr *);
 extern void svc_sq_reap(struct svcxprt_rdma *);
 extern void svc_rq_reap(struct svcxprt_rdma *);
 extern struct svc_xprt_class svc_rdma_class;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 900cb69728c..f0b5c5f2f62 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -100,6 +100,7 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 	ctxt->xprt = xprt;
 	INIT_LIST_HEAD(&ctxt->dto_q);
 	ctxt->count = 0;
+	ctxt->frmr = NULL;
 	atomic_inc(&xprt->sc_ctxt_used);
 	return ctxt;
 }
@@ -109,11 +110,19 @@ static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
 	struct svcxprt_rdma *xprt = ctxt->xprt;
 	int i;
 	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
-		atomic_dec(&xprt->sc_dma_used);
-		ib_dma_unmap_single(xprt->sc_cm_id->device,
-				    ctxt->sge[i].addr,
-				    ctxt->sge[i].length,
-				    ctxt->direction);
+		/*
+		 * Unmap the DMA addr in the SGE if the lkey matches
+		 * the sc_dma_lkey, otherwise, ignore it since it is
+		 * an FRMR lkey and will be unmapped later when the
+		 * last WR that uses it completes.
+		 */
+		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
+			atomic_dec(&xprt->sc_dma_used);
+			ib_dma_unmap_single(xprt->sc_cm_id->device,
+					    ctxt->sge[i].addr,
+					    ctxt->sge[i].length,
+					    ctxt->direction);
+		}
 	}
 }
 
@@ -150,6 +159,7 @@ struct svc_rdma_req_map *svc_rdma_get_req_map(void)
 		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
 	}
 	map->count = 0;
+	map->frmr = NULL;
 	return map;
 }
 
@@ -425,10 +435,12 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
 	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
 	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
 	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
+	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
 	init_waitqueue_head(&cma_xprt->sc_send_wait);
 
 	spin_lock_init(&cma_xprt->sc_lock);
 	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
+	spin_lock_init(&cma_xprt->sc_frmr_q_lock);
 
 	cma_xprt->sc_ord = svcrdma_ord;
 
@@ -686,6 +698,97 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 	return ERR_PTR(ret);
 }
 
+static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
+{
+	struct ib_mr *mr;
+	struct ib_fast_reg_page_list *pl;
+	struct svc_rdma_fastreg_mr *frmr;
+
+	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
+	if (!frmr)
+		goto err;
+
+	mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
+	if (!mr)
+		goto err_free_frmr;
+
+	pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
+					 RPCSVC_MAXPAGES);
+	if (!pl)
+		goto err_free_mr;
+
+	frmr->mr = mr;
+	frmr->page_list = pl;
+	INIT_LIST_HEAD(&frmr->frmr_list);
+	return frmr;
+
+ err_free_mr:
+	ib_dereg_mr(mr);
+ err_free_frmr:
+	kfree(frmr);
+ err:
+	return ERR_PTR(-ENOMEM);
+}
+
+static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
+{
+	struct svc_rdma_fastreg_mr *frmr;
+
+	while (!list_empty(&xprt->sc_frmr_q)) {
+		frmr = list_entry(xprt->sc_frmr_q.next,
+				  struct svc_rdma_fastreg_mr, frmr_list);
+		list_del_init(&frmr->frmr_list);
+		ib_dereg_mr(frmr->mr);
+		ib_free_fast_reg_page_list(frmr->page_list);
+		kfree(frmr);
+	}
+}
+
+struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
+{
+	struct svc_rdma_fastreg_mr *frmr = NULL;
+
+	spin_lock_bh(&rdma->sc_frmr_q_lock);
+	if (!list_empty(&rdma->sc_frmr_q)) {
+		frmr = list_entry(rdma->sc_frmr_q.next,
+				  struct svc_rdma_fastreg_mr, frmr_list);
+		list_del_init(&frmr->frmr_list);
+		frmr->map_len = 0;
+		frmr->page_list_len = 0;
+	}
+	spin_unlock_bh(&rdma->sc_frmr_q_lock);
+	if (frmr)
+		return frmr;
+
+	return rdma_alloc_frmr(rdma);
+}
+
+static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
+			   struct svc_rdma_fastreg_mr *frmr)
+{
+	int page_no;
+	for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
+		dma_addr_t addr = frmr->page_list->page_list[page_no];
+		if (ib_dma_mapping_error(frmr->mr->device, addr))
+			continue;
+		atomic_dec(&xprt->sc_dma_used);
+		ib_dma_unmap_single(frmr->mr->device, addr, PAGE_SIZE,
+				    frmr->direction);
+	}
+}
+
+void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
+		       struct svc_rdma_fastreg_mr *frmr)
+{
+	if (frmr) {
+		frmr_unmap_dma(rdma, frmr);
+		spin_lock_bh(&rdma->sc_frmr_q_lock);
+		BUG_ON(!list_empty(&frmr->frmr_list));
+		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
+		spin_unlock_bh(&rdma->sc_frmr_q_lock);
+	}
+}
+
 /*
  * This is the xpo_recvfrom function for listening endpoints. Its
  * purpose is to accept incoming connections. The CMA callback handler
@@ -961,6 +1064,9 @@ static void __svc_rdma_free(struct work_struct *work)
 	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
 	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);
 
+	/* De-allocate fastreg mr */
+	rdma_dealloc_frmr_q(rdma);
+
 	/* Destroy the QP if present (not a listener) */
 	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
 		ib_destroy_qp(rdma->sc_qp);
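
A note on the queue discipline above: svc_rdma_get_frmr() removes entries with
list_del_init(), which leaves the node self-linked, so the
BUG_ON(!list_empty(&frmr->frmr_list)) in svc_rdma_put_frmr() catches a double
put. A distilled sketch of that invariant, with generic names, for
illustration only:

	/* list_del_init() re-initializes the node, so "is this MR on
	 * sc_frmr_q?" can be answered from the node itself. */
	LIST_HEAD(q);
	struct svc_rdma_fastreg_mr mr = {
		.frmr_list = LIST_HEAD_INIT(mr.frmr_list),
	};

	list_add(&mr.frmr_list, &q);		/* put: linked into q */
	BUG_ON(list_empty(&mr.frmr_list));	/* non-empty while queued */
	list_del_init(&mr.frmr_list);		/* get: self-linked again */
	BUG_ON(!list_empty(&mr.frmr_list));	/* safe to put once more */

The bottom-half-safe spin_lock_bh() variant is presumably chosen because these
paths can run from completion (bottom-half) context.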
