aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChuck Lever <chuck.lever@oracle.com>2015-03-30 14:34:30 -0400
committerAnna Schumaker <Anna.Schumaker@Netapp.com>2015-03-31 09:52:52 -0400
commit1c9351ee0e346ec1b3c700a4bc8f881923e1808e (patch)
tree2ed88d5862cab405742db7e97efe6532c4e1e73f
parenta0ce85f595c22d28bf03c3fae8545b3077b7be1b (diff)
xprtrdma: Add a "max_payload" op for each memreg mode
The max_payload computation is generalized to ensure that the payload maximum is the lesser of RPC_MAX_DATA_SEGS and the number of data segments that can be transmitted in an inline buffer.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-by: Devesh Sharma <Devesh.Sharma@Emulex.Com>
Tested-by: Meghana Cheripady <Meghana.Cheripady@Emulex.Com>
Tested-by: Veeresh U. Kokatnur <veereshuk@chelsio.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c13
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c13
-rw-r--r--net/sunrpc/xprtrdma/physical_ops.c10
-rw-r--r--net/sunrpc/xprtrdma/transport.c5
-rw-r--r--net/sunrpc/xprtrdma/verbs.c49
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h5
6 files changed, 59 insertions, 36 deletions
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index ffb7d9358480..eec266055b28 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -17,6 +17,19 @@
17# define RPCDBG_FACILITY RPCDBG_TRANS 17# define RPCDBG_FACILITY RPCDBG_TRANS
18#endif 18#endif
19 19
20/* Maximum scatter/gather per FMR */
21#define RPCRDMA_MAX_FMR_SGES (64)
22
23/* FMR mode conveys up to 64 pages of payload per chunk segment.
24 */
25static size_t
26fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
27{
28 return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
29 rpcrdma_max_segments(r_xprt) * RPCRDMA_MAX_FMR_SGES);
30}
31
20const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = { 32const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
33 .ro_maxpages = fmr_op_maxpages,
21 .ro_displayname = "fmr", 34 .ro_displayname = "fmr",
22}; 35};
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 79173f98e09a..73a5ac898efc 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -17,6 +17,19 @@
17# define RPCDBG_FACILITY RPCDBG_TRANS 17# define RPCDBG_FACILITY RPCDBG_TRANS
18#endif 18#endif
19 19
20/* FRWR mode conveys a list of pages per chunk segment. The
21 * maximum length of that list is the FRWR page list depth.
22 */
23static size_t
24frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
25{
26 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
27
28 return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
29 rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
30}
31
20const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = { 32const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
33 .ro_maxpages = frwr_op_maxpages,
21 .ro_displayname = "frwr", 34 .ro_displayname = "frwr",
22}; 35};
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
index b0922accabcf..28ade1943a57 100644
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -19,6 +19,16 @@
19# define RPCDBG_FACILITY RPCDBG_TRANS 19# define RPCDBG_FACILITY RPCDBG_TRANS
20#endif 20#endif
21 21
22/* PHYSICAL memory registration conveys one page per chunk segment.
23 */
24static size_t
25physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
26{
27 return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
28 rpcrdma_max_segments(r_xprt));
29}
30
22const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = { 31const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
32 .ro_maxpages = physical_op_maxpages,
23 .ro_displayname = "physical", 33 .ro_displayname = "physical",
24}; 34};
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 97f656292feb..da71a24641e3 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -406,7 +406,10 @@ xprt_setup_rdma(struct xprt_create *args)
406 xprt_rdma_connect_worker); 406 xprt_rdma_connect_worker);
407 407
408 xprt_rdma_format_addresses(xprt); 408 xprt_rdma_format_addresses(xprt);
409 xprt->max_payload = rpcrdma_max_payload(new_xprt); 409 xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
410 if (xprt->max_payload == 0)
411 goto out4;
412 xprt->max_payload <<= PAGE_SHIFT;
410 dprintk("RPC: %s: transport data payload maximum: %zu bytes\n", 413 dprintk("RPC: %s: transport data payload maximum: %zu bytes\n",
411 __func__, xprt->max_payload); 414 __func__, xprt->max_payload);
412 415
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index c3319e12551c..da55cda30568 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -2212,43 +2212,24 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
2212 return rc; 2212 return rc;
2213} 2213}
2214 2214
2215/* Physical mapping means one Read/Write list entry per-page. 2215/* How many chunk list items fit within our inline buffers?
2216 * All list entries must fit within an inline buffer
2217 *
2218 * NB: The server must return a Write list for NFS READ,
2219 * which has the same constraint. Factor in the inline
2220 * rsize as well.
2221 */ 2216 */
2222static size_t 2217unsigned int
2223rpcrdma_physical_max_payload(struct rpcrdma_xprt *r_xprt) 2218rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
2224{ 2219{
2225 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; 2220 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
2226 unsigned int inline_size, pages; 2221 int bytes, segments;
2227
2228 inline_size = min_t(unsigned int,
2229 cdata->inline_wsize, cdata->inline_rsize);
2230 inline_size -= RPCRDMA_HDRLEN_MIN;
2231 pages = inline_size / sizeof(struct rpcrdma_segment);
2232 return pages << PAGE_SHIFT;
2233}
2234 2222
2235static size_t 2223 bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
2236rpcrdma_mr_max_payload(struct rpcrdma_xprt *r_xprt) 2224 bytes -= RPCRDMA_HDRLEN_MIN;
2237{ 2225 if (bytes < sizeof(struct rpcrdma_segment) * 2) {
2238 return RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT; 2226 pr_warn("RPC: %s: inline threshold too small\n",
2239} 2227 __func__);
2240 2228 return 0;
2241size_t
2242rpcrdma_max_payload(struct rpcrdma_xprt *r_xprt)
2243{
2244 size_t result;
2245
2246 switch (r_xprt->rx_ia.ri_memreg_strategy) {
2247 case RPCRDMA_ALLPHYSICAL:
2248 result = rpcrdma_physical_max_payload(r_xprt);
2249 break;
2250 default:
2251 result = rpcrdma_mr_max_payload(r_xprt);
2252 } 2229 }
2253 return result; 2230
2231 segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
2232 dprintk("RPC: %s: max chunk list size = %d segments\n",
2233 __func__, segments);
2234 return segments;
2254} 2235}
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index ef3cf4aeecd6..59e627e96a0b 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -334,7 +334,9 @@ struct rpcrdma_stats {
334/* 334/*
335 * Per-registration mode operations 335 * Per-registration mode operations
336 */ 336 */
337struct rpcrdma_xprt;
337struct rpcrdma_memreg_ops { 338struct rpcrdma_memreg_ops {
339 size_t (*ro_maxpages)(struct rpcrdma_xprt *);
338 const char *ro_displayname; 340 const char *ro_displayname;
339}; 341};
340 342
@@ -411,6 +413,8 @@ struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
411void rpcrdma_free_regbuf(struct rpcrdma_ia *, 413void rpcrdma_free_regbuf(struct rpcrdma_ia *,
412 struct rpcrdma_regbuf *); 414 struct rpcrdma_regbuf *);
413 415
416unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *);
417
414/* 418/*
415 * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c 419 * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
416 */ 420 */
@@ -422,7 +426,6 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
422 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c 426 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
423 */ 427 */
424int rpcrdma_marshal_req(struct rpc_rqst *); 428int rpcrdma_marshal_req(struct rpc_rqst *);
425size_t rpcrdma_max_payload(struct rpcrdma_xprt *);
426 429
427/* Temporary NFS request map cache. Created in svc_rdma.c */ 430/* Temporary NFS request map cache. Created in svc_rdma.c */
428extern struct kmem_cache *svc_rdma_map_cachep; 431extern struct kmem_cache *svc_rdma_map_cachep;