-rw-r--r--  net/sunrpc/xprtrdma/fmr_ops.c      | 13
-rw-r--r--  net/sunrpc/xprtrdma/frwr_ops.c     | 13
-rw-r--r--  net/sunrpc/xprtrdma/physical_ops.c | 10
-rw-r--r--  net/sunrpc/xprtrdma/transport.c    |  5
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c        | 49
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h    |  5

6 files changed, 59 insertions, 36 deletions
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index ffb7d9358480..eec266055b28 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -17,6 +17,19 @@
 # define RPCDBG_FACILITY RPCDBG_TRANS
 #endif
 
+/* Maximum scatter/gather per FMR */
+#define RPCRDMA_MAX_FMR_SGES (64)
+
+/* FMR mode conveys up to 64 pages of payload per chunk segment.
+ */
+static size_t
+fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
+{
+	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
+		     rpcrdma_max_segments(r_xprt) * RPCRDMA_MAX_FMR_SGES);
+}
+
 const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
+	.ro_maxpages = fmr_op_maxpages,
 	.ro_displayname = "fmr",
 };
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 79173f98e09a..73a5ac898efc 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -17,6 +17,19 @@
 # define RPCDBG_FACILITY RPCDBG_TRANS
 #endif
 
+/* FRWR mode conveys a list of pages per chunk segment. The
+ * maximum length of that list is the FRWR page list depth.
+ */
+static size_t
+frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
+{
+	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+
+	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
+		     rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
+}
+
 const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
+	.ro_maxpages = frwr_op_maxpages,
 	.ro_displayname = "frwr",
 };
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
index b0922accabcf..28ade1943a57 100644
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -19,6 +19,16 @@
 # define RPCDBG_FACILITY RPCDBG_TRANS
 #endif
 
+/* PHYSICAL memory registration conveys one page per chunk segment.
+ */
+static size_t
+physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
+{
+	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
+		     rpcrdma_max_segments(r_xprt));
+}
+
 const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
+	.ro_maxpages = physical_op_maxpages,
 	.ro_displayname = "physical",
 };
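The three ro_maxpages callbacks above differ only in how many pages each chunk segment can carry: one page for PHYSICAL, up to RPCRDMA_MAX_FMR_SGES for FMR, and up to the device's FRMR page list depth for FRWR. The standalone sketch below is not part of the patch; it plugs in hypothetical numbers (a cap of 64 for RPCRDMA_MAX_DATA_SEGS, 32 for the rpcrdma_max_segments() result, an FRMR depth of 256) just to show how the min_t() cap behaves. The real values depend on the device and the inline buffer sizes.

#include <stdio.h>

/* Hypothetical inputs -- stand-ins, not values taken from this patch. */
#define MAX_DATA_SEGS	64	/* stand-in for RPCRDMA_MAX_DATA_SEGS */
#define MAX_SEGMENTS	32	/* stand-in for rpcrdma_max_segments() */
#define FMR_SGES	64	/* RPCRDMA_MAX_FMR_SGES, per fmr_ops.c above */
#define FRMR_DEPTH	256	/* stand-in for ia->ri_max_frmr_depth */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* PHYSICAL: one page per chunk segment */
	printf("physical: %u pages\n", min_u(MAX_DATA_SEGS, MAX_SEGMENTS));
	/* FMR: up to 64 pages per chunk segment */
	printf("fmr:      %u pages\n", min_u(MAX_DATA_SEGS, MAX_SEGMENTS * FMR_SGES));
	/* FRWR: up to the FRMR page list depth per chunk segment */
	printf("frwr:     %u pages\n", min_u(MAX_DATA_SEGS, MAX_SEGMENTS * FRMR_DEPTH));
	return 0;
}

With these inputs, FMR and FRWR both hit the RPCRDMA_MAX_DATA_SEGS cap at 64 pages, while PHYSICAL is limited to 32 by the inline chunk list itself.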
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 97f656292feb..da71a24641e3 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -406,7 +406,10 @@ xprt_setup_rdma(struct xprt_create *args)
 			   xprt_rdma_connect_worker);
 
 	xprt_rdma_format_addresses(xprt);
-	xprt->max_payload = rpcrdma_max_payload(new_xprt);
+	xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
+	if (xprt->max_payload == 0)
+		goto out4;
+	xprt->max_payload <<= PAGE_SHIFT;
 	dprintk("RPC: %s: transport data payload maximum: %zu bytes\n",
 		__func__, xprt->max_payload);
 
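For context on the transport.c hunk: ro_maxpages() returns a page count, so xprt_setup_rdma() now shifts it left by PAGE_SHIFT to get the byte limit advertised in xprt->max_payload, and fails the setup if the callback reports zero. A minimal sketch of that conversion, assuming 4 KiB pages and a hypothetical 64-page result:

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed: 4 KiB pages */

int main(void)
{
	size_t maxpages = 64;	/* hypothetical ro_maxpages() result */
	size_t max_payload;

	if (maxpages == 0) {
		/* mirrors the "goto out4" failure path above */
		fprintf(stderr, "registration mode reports no capacity\n");
		return 1;
	}
	max_payload = maxpages << PAGE_SHIFT;	/* pages -> bytes */
	printf("max_payload = %zu bytes\n", max_payload);
	return 0;
}

With those numbers the transport would advertise a 256 KiB maximum payload.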
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index c3319e12551c..da55cda30568 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -2212,43 +2212,24 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
 	return rc;
 }
 
-/* Physical mapping means one Read/Write list entry per-page.
- * All list entries must fit within an inline buffer
- *
- * NB: The server must return a Write list for NFS READ,
- * which has the same constraint. Factor in the inline
- * rsize as well.
+/* How many chunk list items fit within our inline buffers?
  */
-static size_t
-rpcrdma_physical_max_payload(struct rpcrdma_xprt *r_xprt)
+unsigned int
+rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
-	unsigned int inline_size, pages;
-
-	inline_size = min_t(unsigned int,
-			    cdata->inline_wsize, cdata->inline_rsize);
-	inline_size -= RPCRDMA_HDRLEN_MIN;
-	pages = inline_size / sizeof(struct rpcrdma_segment);
-	return pages << PAGE_SHIFT;
-}
+	int bytes, segments;
 
-static size_t
-rpcrdma_mr_max_payload(struct rpcrdma_xprt *r_xprt)
-{
-	return RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;
-}
-
-size_t
-rpcrdma_max_payload(struct rpcrdma_xprt *r_xprt)
-{
-	size_t result;
-
-	switch (r_xprt->rx_ia.ri_memreg_strategy) {
-	case RPCRDMA_ALLPHYSICAL:
-		result = rpcrdma_physical_max_payload(r_xprt);
-		break;
-	default:
-		result = rpcrdma_mr_max_payload(r_xprt);
+	bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
+	bytes -= RPCRDMA_HDRLEN_MIN;
+	if (bytes < sizeof(struct rpcrdma_segment) * 2) {
+		pr_warn("RPC: %s: inline threshold too small\n",
+			__func__);
+		return 0;
 	}
-	return result;
+
+	segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
+	dprintk("RPC: %s: max chunk list size = %d segments\n",
+		__func__, segments);
+	return segments;
 }
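The new rpcrdma_max_segments() in verbs.c measures how many XDR chunk segments fit in the smaller of the two inline buffers, then rounds that count down to a power of two with fls(). The sketch below reproduces that arithmetic with assumed constants (a 1024-byte inline threshold, a 28-byte minimal RPC/RDMA header, a 16-byte chunk segment), none of which are defined by this patch.

#include <stdio.h>

/* Assumed constants, for illustration only. */
#define INLINE_THRESHOLD	1024	/* min(inline_wsize, inline_rsize) */
#define HDRLEN_MIN		28	/* stand-in for RPCRDMA_HDRLEN_MIN */
#define SEGMENT_SIZE		16	/* stand-in for sizeof(struct rpcrdma_segment) */

/* Largest power of two <= n, like 1 << (fls(n) - 1) for n >= 1. */
static unsigned int round_down_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned int bytes = INLINE_THRESHOLD - HDRLEN_MIN;
	unsigned int segments;

	if (bytes < SEGMENT_SIZE * 2) {
		fprintf(stderr, "inline threshold too small\n");
		return 1;
	}
	/* (1024 - 28) / 16 = 62, rounded down to 32 segments */
	segments = round_down_pow2(bytes / SEGMENT_SIZE);
	printf("max chunk list size = %u segments\n", segments);
	return 0;
}

That segment count is what the per-mode ro_maxpages callbacks multiply by their pages-per-segment factor.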
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index ef3cf4aeecd6..59e627e96a0b 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -334,7 +334,9 @@ struct rpcrdma_stats {
 /*
  * Per-registration mode operations
  */
+struct rpcrdma_xprt;
 struct rpcrdma_memreg_ops {
+	size_t		(*ro_maxpages)(struct rpcrdma_xprt *);
 	const char	*ro_displayname;
 };
 
@@ -411,6 +413,8 @@ struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
 void rpcrdma_free_regbuf(struct rpcrdma_ia *,
 			 struct rpcrdma_regbuf *);
 
+unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *);
+
 /*
  * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
  */
@@ -422,7 +426,6 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
  * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  */
 int rpcrdma_marshal_req(struct rpc_rqst *);
-size_t rpcrdma_max_payload(struct rpcrdma_xprt *);
 
 /* Temporary NFS request map cache. Created in svc_rdma.c */
 extern struct kmem_cache *svc_rdma_map_cachep;
