aboutsummaryrefslogtreecommitdiffstats
path: root/net/sunrpc
diff options
context:
space:
mode:
authorChuck Lever <chuck.lever@oracle.com>2015-03-30 14:35:26 -0400
committerAnna Schumaker <Anna.Schumaker@Netapp.com>2015-03-31 09:52:53 -0400
commit3968cb58501bf526eed1441f4ef237028aa9cd2d (patch)
tree177449f429780c5965567ff045e1224d9ca6ecc1 /net/sunrpc
parent4561f347d49c645fd81d1f47b0fb460e8a6e4587 (diff)
xprtrdma: Add "open" memreg op
The open op determines the size of various transport data structures based on device capabilities and memory registration mode.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Devesh Sharma <Devesh.Sharma@Emulex.Com>
Tested-by: Meghana Cheripady <Meghana.Cheripady@Emulex.Com>
Tested-by: Veeresh U. Kokatnur <veereshuk@chelsio.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c8
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c48
-rw-r--r--net/sunrpc/xprtrdma/physical_ops.c8
-rw-r--r--net/sunrpc/xprtrdma/verbs.c49
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h3
5 files changed, 70 insertions, 46 deletions
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index e9ca5944ac1e..e8a9837f8d63 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -20,6 +20,13 @@
20/* Maximum scatter/gather per FMR */ 20/* Maximum scatter/gather per FMR */
21#define RPCRDMA_MAX_FMR_SGES (64) 21#define RPCRDMA_MAX_FMR_SGES (64)
22 22
23static int
24fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
25 struct rpcrdma_create_data_internal *cdata)
26{
27 return 0;
28}
29
23/* FMR mode conveys up to 64 pages of payload per chunk segment. 30/* FMR mode conveys up to 64 pages of payload per chunk segment.
24 */ 31 */
25static size_t 32static size_t
@@ -188,6 +195,7 @@ fmr_op_destroy(struct rpcrdma_buffer *buf)
188const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = { 195const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
189 .ro_map = fmr_op_map, 196 .ro_map = fmr_op_map,
190 .ro_unmap = fmr_op_unmap, 197 .ro_unmap = fmr_op_unmap,
198 .ro_open = fmr_op_open,
191 .ro_maxpages = fmr_op_maxpages, 199 .ro_maxpages = fmr_op_maxpages,
192 .ro_init = fmr_op_init, 200 .ro_init = fmr_op_init,
193 .ro_reset = fmr_op_reset, 201 .ro_reset = fmr_op_reset,
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 121e400d0565..e17d54d473a7 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -58,6 +58,53 @@ __frwr_release(struct rpcrdma_mw *r)
58 ib_free_fast_reg_page_list(r->r.frmr.fr_pgl); 58 ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
59} 59}
60 60
61static int
62frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
63 struct rpcrdma_create_data_internal *cdata)
64{
65 struct ib_device_attr *devattr = &ia->ri_devattr;
66 int depth, delta;
67
68 ia->ri_max_frmr_depth =
69 min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
70 devattr->max_fast_reg_page_list_len);
71 dprintk("RPC: %s: device's max FR page list len = %u\n",
72 __func__, ia->ri_max_frmr_depth);
73
74 /* Add room for frmr register and invalidate WRs.
75 * 1. FRMR reg WR for head
76 * 2. FRMR invalidate WR for head
77 * 3. N FRMR reg WRs for pagelist
78 * 4. N FRMR invalidate WRs for pagelist
79 * 5. FRMR reg WR for tail
80 * 6. FRMR invalidate WR for tail
81 * 7. The RDMA_SEND WR
82 */
83 depth = 7;
84
85 /* Calculate N if the device max FRMR depth is smaller than
86 * RPCRDMA_MAX_DATA_SEGS.
87 */
88 if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
89 delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
90 do {
91 depth += 2; /* FRMR reg + invalidate */
92 delta -= ia->ri_max_frmr_depth;
93 } while (delta > 0);
94 }
95
96 ep->rep_attr.cap.max_send_wr *= depth;
97 if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
98 cdata->max_requests = devattr->max_qp_wr / depth;
99 if (!cdata->max_requests)
100 return -EINVAL;
101 ep->rep_attr.cap.max_send_wr = cdata->max_requests *
102 depth;
103 }
104
105 return 0;
106}
107
61/* FRWR mode conveys a list of pages per chunk segment. The 108/* FRWR mode conveys a list of pages per chunk segment. The
62 * maximum length of that list is the FRWR page list depth. 109 * maximum length of that list is the FRWR page list depth.
63 */ 110 */
@@ -276,6 +323,7 @@ frwr_op_destroy(struct rpcrdma_buffer *buf)
276const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = { 323const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
277 .ro_map = frwr_op_map, 324 .ro_map = frwr_op_map,
278 .ro_unmap = frwr_op_unmap, 325 .ro_unmap = frwr_op_unmap,
326 .ro_open = frwr_op_open,
279 .ro_maxpages = frwr_op_maxpages, 327 .ro_maxpages = frwr_op_maxpages,
280 .ro_init = frwr_op_init, 328 .ro_init = frwr_op_init,
281 .ro_reset = frwr_op_reset, 329 .ro_reset = frwr_op_reset,
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
index eb39011e3129..0ba130bed1fc 100644
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -19,6 +19,13 @@
19# define RPCDBG_FACILITY RPCDBG_TRANS 19# define RPCDBG_FACILITY RPCDBG_TRANS
20#endif 20#endif
21 21
22static int
23physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
24 struct rpcrdma_create_data_internal *cdata)
25{
26 return 0;
27}
28
22/* PHYSICAL memory registration conveys one page per chunk segment. 29/* PHYSICAL memory registration conveys one page per chunk segment.
23 */ 30 */
24static size_t 31static size_t
@@ -72,6 +79,7 @@ physical_op_destroy(struct rpcrdma_buffer *buf)
72const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = { 79const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
73 .ro_map = physical_op_map, 80 .ro_map = physical_op_map,
74 .ro_unmap = physical_op_unmap, 81 .ro_unmap = physical_op_unmap,
82 .ro_open = physical_op_open,
75 .ro_maxpages = physical_op_maxpages, 83 .ro_maxpages = physical_op_maxpages,
76 .ro_init = physical_op_init, 84 .ro_init = physical_op_init,
77 .ro_reset = physical_op_reset, 85 .ro_reset = physical_op_reset,
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index a7fb31441069..b697b3ed6273 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -622,11 +622,6 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
622 dprintk("RPC: %s: FRMR registration " 622 dprintk("RPC: %s: FRMR registration "
623 "not supported by HCA\n", __func__); 623 "not supported by HCA\n", __func__);
624 memreg = RPCRDMA_MTHCAFMR; 624 memreg = RPCRDMA_MTHCAFMR;
625 } else {
626 /* Mind the ia limit on FRMR page list depth */
627 ia->ri_max_frmr_depth = min_t(unsigned int,
628 RPCRDMA_MAX_DATA_SEGS,
629 devattr->max_fast_reg_page_list_len);
630 } 625 }
631 } 626 }
632 if (memreg == RPCRDMA_MTHCAFMR) { 627 if (memreg == RPCRDMA_MTHCAFMR) {
@@ -741,49 +736,11 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
741 736
742 ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall; 737 ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
743 ep->rep_attr.qp_context = ep; 738 ep->rep_attr.qp_context = ep;
744 /* send_cq and recv_cq initialized below */
745 ep->rep_attr.srq = NULL; 739 ep->rep_attr.srq = NULL;
746 ep->rep_attr.cap.max_send_wr = cdata->max_requests; 740 ep->rep_attr.cap.max_send_wr = cdata->max_requests;
747 switch (ia->ri_memreg_strategy) { 741 rc = ia->ri_ops->ro_open(ia, ep, cdata);
748 case RPCRDMA_FRMR: { 742 if (rc)
749 int depth = 7; 743 return rc;
750
751 /* Add room for frmr register and invalidate WRs.
752 * 1. FRMR reg WR for head
753 * 2. FRMR invalidate WR for head
754 * 3. N FRMR reg WRs for pagelist
755 * 4. N FRMR invalidate WRs for pagelist
756 * 5. FRMR reg WR for tail
757 * 6. FRMR invalidate WR for tail
758 * 7. The RDMA_SEND WR
759 */
760
761 /* Calculate N if the device max FRMR depth is smaller than
762 * RPCRDMA_MAX_DATA_SEGS.
763 */
764 if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
765 int delta = RPCRDMA_MAX_DATA_SEGS -
766 ia->ri_max_frmr_depth;
767
768 do {
769 depth += 2; /* FRMR reg + invalidate */
770 delta -= ia->ri_max_frmr_depth;
771 } while (delta > 0);
772
773 }
774 ep->rep_attr.cap.max_send_wr *= depth;
775 if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
776 cdata->max_requests = devattr->max_qp_wr / depth;
777 if (!cdata->max_requests)
778 return -EINVAL;
779 ep->rep_attr.cap.max_send_wr = cdata->max_requests *
780 depth;
781 }
782 break;
783 }
784 default:
785 break;
786 }
787 ep->rep_attr.cap.max_recv_wr = cdata->max_requests; 744 ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
788 ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2); 745 ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
789 ep->rep_attr.cap.max_recv_sge = 1; 746 ep->rep_attr.cap.max_recv_sge = 1;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index b95e223d3d69..9036fb4174d5 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -340,6 +340,9 @@ struct rpcrdma_memreg_ops {
340 struct rpcrdma_mr_seg *, int, bool); 340 struct rpcrdma_mr_seg *, int, bool);
341 int (*ro_unmap)(struct rpcrdma_xprt *, 341 int (*ro_unmap)(struct rpcrdma_xprt *,
342 struct rpcrdma_mr_seg *); 342 struct rpcrdma_mr_seg *);
343 int (*ro_open)(struct rpcrdma_ia *,
344 struct rpcrdma_ep *,
345 struct rpcrdma_create_data_internal *);
343 size_t (*ro_maxpages)(struct rpcrdma_xprt *); 346 size_t (*ro_maxpages)(struct rpcrdma_xprt *);
344 int (*ro_init)(struct rpcrdma_xprt *); 347 int (*ro_init)(struct rpcrdma_xprt *);
345 void (*ro_reset)(struct rpcrdma_xprt *); 348 void (*ro_reset)(struct rpcrdma_xprt *);