author		Chuck Lever <chuck.lever@oracle.com>	2015-01-21 11:04:16 -0500
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2015-01-30 10:47:49 -0500
commit		85275c874eaeb92fb2a78a1d4ebb1ff4b0f7b732 (patch)
tree		a9816da15586ef6b23196992dd777e3656fb9f6b /net/sunrpc
parent		0ca77dc372110cbed4dbac5e867ffdc60ebccf6a (diff)
xprtrdma: Allocate RPC/RDMA send buffer separately from struct rpcrdma_req
The rl_base field is currently the buffer where each RPC/RDMA call header is built.

The inline threshold is an agreed-on size limit for RDMA SEND operations that pass between client and server. The sum of the RPC/RDMA header size and the RPC header size must be less than or equal to this threshold.

Increasing the r/wsize maximum will require MAX_SEGS to grow significantly, but the inline threshold size won't change (both sides agree on it). The server's inline threshold doesn't change.

Since an RPC/RDMA header can never be larger than the inline threshold, make all RPC/RDMA header buffers the size of the inline threshold.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
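Throughout the diff, the header buffer is reached through rdmab_*() helpers rather than the old rl_base array. Those helpers come from the parent commit (0ca77dc372110c); the sketch below shows their presumed shape, inferred from the call sites in this patch rather than quoted from the tree.

/* Sketch only: the regbuf type and accessors this patch relies on,
 * as introduced around the parent commit (0ca77dc372110c); layout
 * inferred from how the code below uses rg_size, rg_base, and the
 * rdmab_*() helpers.
 */
struct rpcrdma_regbuf {
	size_t		rg_size;	/* usable buffer size */
	struct ib_sge	rg_iov;		/* DMA address, length, lkey */
	__be32		rg_base[0];	/* the buffer itself */
};

static inline u64
rdmab_addr(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.addr;
}

static inline u32
rdmab_lkey(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.lkey;
}

static inline struct rpcrdma_msg *
rdmab_to_msg(struct rpcrdma_regbuf *rb)
{
	return (struct rpcrdma_msg *)rb->rg_base;
}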
Diffstat (limited to 'net/sunrpc')
-rw-r--r--	net/sunrpc/xprtrdma/rpc_rdma.c	11
-rw-r--r--	net/sunrpc/xprtrdma/transport.c	9
-rw-r--r--	net/sunrpc/xprtrdma/verbs.c	22
-rw-r--r--	net/sunrpc/xprtrdma/xprt_rdma.h	6
4 files changed, 19 insertions, 29 deletions
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 8a6bdbd3e936..c1d4a093b8f1 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -294,7 +294,7 @@ ssize_t
 rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
 {
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
-	struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)req->rl_base;
+	struct rpcrdma_msg *headerp = rdmab_to_msg(req->rl_rdmabuf);
 
 	if (req->rl_rtype != rpcrdma_noch)
 		result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
@@ -406,8 +406,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	base = rqst->rq_svec[0].iov_base;
 	rpclen = rqst->rq_svec[0].iov_len;
 
-	/* build RDMA header in private area at front */
-	headerp = (struct rpcrdma_msg *) req->rl_base;
+	headerp = rdmab_to_msg(req->rl_rdmabuf);
 	/* don't byte-swap XID, it's already done in request */
 	headerp->rm_xid = rqst->rq_xid;
 	headerp->rm_vers = rpcrdma_version;
@@ -528,7 +527,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
 		" headerp 0x%p base 0x%p lkey 0x%x\n",
 		__func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
-		headerp, base, req->rl_iov.lkey);
+		headerp, base, rdmab_lkey(req->rl_rdmabuf));
 
 	/*
 	 * initialize send_iov's - normally only two: rdma chunk header and
@@ -537,9 +536,9 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * header and any write data. In all non-rdma cases, any following
 	 * data has been copied into the RPC header buffer.
 	 */
-	req->rl_send_iov[0].addr = req->rl_iov.addr;
+	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
 	req->rl_send_iov[0].length = hdrlen;
-	req->rl_send_iov[0].lkey = req->rl_iov.lkey;
+	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
 
 	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
 	req->rl_send_iov[1].length = rpclen;
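After marshaling, rl_send_iov[0] covers the RPC/RDMA header in rl_rdmabuf and rl_send_iov[1] covers the RPC call message in rl_sendbuf, so the whole call still goes out as a single RDMA SEND. As a hedged illustration (not code from this patch), posting such a two-element gather list with the kernel verbs API looks roughly like:

/* Illustration only: one SEND carrying the RPC/RDMA header and the
 * RPC message as two scatter/gather elements.
 */
struct ib_send_wr send_wr, *bad_wr;

memset(&send_wr, 0, sizeof(send_wr));
send_wr.opcode = IB_WR_SEND;
send_wr.sg_list = req->rl_send_iov;	/* [0] = header, [1] = RPC msg */
send_wr.num_sge = 2;			/* more when a pad SGE is needed */
send_wr.send_flags = IB_SEND_SIGNALED;

if (ib_post_send(qp, &send_wr, &bad_wr))
	/* handle post failure */ ;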
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index a9d566227e7e..2c2fabe99d84 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -470,6 +470,8 @@ xprt_rdma_allocate(struct rpc_task *task, size_t size)
 	if (req == NULL)
 		return NULL;
 
+	if (req->rl_rdmabuf == NULL)
+		goto out_rdmabuf;
 	if (req->rl_sendbuf == NULL)
 		goto out_sendbuf;
 	if (size > req->rl_sendbuf->rg_size)
@@ -480,6 +482,13 @@ out:
 	req->rl_connect_cookie = 0;	/* our reserved value */
 	return req->rl_sendbuf->rg_base;
 
+out_rdmabuf:
+	min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
+	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags);
+	if (IS_ERR(rb))
+		goto out_fail;
+	req->rl_rdmabuf = rb;
+
 out_sendbuf:
 	/* XDR encoding and RPC/RDMA marshaling of this request has not
 	 * yet occurred. Thus a lower bound is needed to prevent buffer
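The labels above fall through: a request missing its header buffer takes out_rdmabuf, allocates a buffer sized to the inline write threshold, and continues into out_sendbuf. A minimal sketch of this allocate-on-first-use pattern, with hypothetical names (my_req, alloc_buf, free_buf) standing in for the real ones:

/* Hypothetical sketch of fallthrough allocation; names are
 * illustrative, not kernel API.
 */
void *buf_alloc(struct my_req *req, size_t size, gfp_t flags)
{
	void *nb;

	if (req->hdr_buf == NULL)
		goto out_hdrbuf;
	if (req->snd_buf == NULL || size > req->snd_size)
		goto out_sndbuf;
	return req->snd_buf;

out_hdrbuf:
	/* header buffer is fixed at the inline threshold */
	req->hdr_buf = alloc_buf(INLINE_THRESHOLD, flags);
	if (req->hdr_buf == NULL)
		return NULL;
	/* fall through: the send buffer may need (re)allocating too */
out_sndbuf:
	nb = alloc_buf(size, flags);
	if (nb == NULL)
		return NULL;
	free_buf(req->snd_buf);		/* tolerates NULL */
	req->snd_buf = nb;
	req->snd_size = size;
	return req->snd_buf;
}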
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 40894403db81..c81749b9a0de 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1078,30 +1078,14 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 static struct rpcrdma_req *
 rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
 {
-	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
-	size_t wlen = cdata->inline_wsize;
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct rpcrdma_req *req;
-	int rc;
 
-	rc = -ENOMEM;
-	req = kmalloc(sizeof(*req) + wlen, GFP_KERNEL);
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (req == NULL)
-		goto out;
-	memset(req, 0, sizeof(*req));
-
-	rc = rpcrdma_register_internal(ia, req->rl_base, wlen,
-				       &req->rl_handle, &req->rl_iov);
-	if (rc)
-		goto out_free;
+		return ERR_PTR(-ENOMEM);
 
 	req->rl_buffer = &r_xprt->rx_buf;
 	return req;
-
-out_free:
-	kfree(req);
-out:
-	return ERR_PTR(rc);
 }
 
 static struct rpcrdma_rep *
@@ -1333,7 +1317,7 @@ rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 		return;
 
 	rpcrdma_free_regbuf(ia, req->rl_sendbuf);
-	rpcrdma_deregister_internal(ia, req->rl_handle, &req->rl_iov);
+	rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
 	kfree(req);
 }
 
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index aa82f8d1c5b4..84ad863fe637 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -268,12 +268,10 @@ struct rpcrdma_req {
 	enum rpcrdma_chunktype	rl_rtype, rl_wtype;
 	struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
 	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
-	struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];/* chunk segments */
 	struct ib_sge	rl_send_iov[4];	/* for active requests */
+	struct rpcrdma_regbuf	*rl_rdmabuf;
 	struct rpcrdma_regbuf *rl_sendbuf;
-	struct ib_sge	rl_iov;		/* for posting */
-	struct ib_mr	*rl_handle;	/* handle for mem in rl_iov */
-	char		rl_base[MAX_RPCRDMAHDR]; /* start of actual buffer */
+	struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
 };
 
 static inline struct rpcrdma_req *
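With rl_rdmabuf in place, both per-request buffers are managed by the same regbuf allocator. The signatures implied by the call sites in transport.c and verbs.c above:

/* Inferred from the call sites in this diff; the definitions live
 * with the parent commit (0ca77dc372110c).
 */
struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia,
					    size_t size, gfp_t flags);
void rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb);

rpcrdma_free_regbuf() is presumably tolerant of a NULL regbuf, since rpcrdma_destroy_req() calls it unconditionally on buffers that are only allocated on first use.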