about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChuck Lever <chuck.lever@oracle.com>2018-02-28 15:31:05 -0500
committerAnna Schumaker <Anna.Schumaker@Netapp.com>2018-04-10 16:06:22 -0400
commit2dd4a012d9e73c423a8c48d7e0f2e427caecce3d (patch)
treeb222986a57b5f2c4e3240600103611655abda10d
parentf2877623082b720c1424b163cf905fff8eed4126 (diff)
xprtrdma: Move creation of rl_rdmabuf to rpcrdma_create_req
Refactor: Both rpcrdma_create_req call sites have to allocate the buffer where the transport header is built, so just move that allocation into rpcrdma_create_req. This buffer is a fixed size. There's no needed information available in call_allocate that is not also available when the transport is created. The original purpose for allocating these buffers on demand was to reduce the possibility that an allocation failure during transport creation will hork the mount operation during low memory scenarios. Some relief for this rare possibility is coming up in the next few patches. Signed-off-by: Chuck Lever <chuck.lever@oracle.com> Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c7
-rw-r--r--net/sunrpc/xprtrdma/transport.c25
-rw-r--r--net/sunrpc/xprtrdma/verbs.c14
3 files changed, 12 insertions, 34 deletions
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index ed1a4a3065ee..47ebac949769 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -44,13 +44,6 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
44 if (IS_ERR(req)) 44 if (IS_ERR(req))
45 return PTR_ERR(req); 45 return PTR_ERR(req);
46 46
47 rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
48 DMA_TO_DEVICE, GFP_KERNEL);
49 if (IS_ERR(rb))
50 goto out_fail;
51 req->rl_rdmabuf = rb;
52 xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
53
54 size = r_xprt->rx_data.inline_rsize; 47 size = r_xprt->rx_data.inline_rsize;
55 rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL); 48 rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
56 if (IS_ERR(rb)) 49 if (IS_ERR(rb))
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 7e39faa90c41..67e438612c18 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -537,29 +537,6 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
537 } 537 }
538} 538}
539 539
540/* Allocate a fixed-size buffer in which to construct and send the
541 * RPC-over-RDMA header for this request.
542 */
543static bool
544rpcrdma_get_rdmabuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
545 gfp_t flags)
546{
547 size_t size = RPCRDMA_HDRBUF_SIZE;
548 struct rpcrdma_regbuf *rb;
549
550 if (req->rl_rdmabuf)
551 return true;
552
553 rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, flags);
554 if (IS_ERR(rb))
555 return false;
556
557 r_xprt->rx_stats.hardway_register_count += size;
558 req->rl_rdmabuf = rb;
559 xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
560 return true;
561}
562
563static bool 540static bool
564rpcrdma_get_sendbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, 541rpcrdma_get_sendbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
565 size_t size, gfp_t flags) 542 size_t size, gfp_t flags)
@@ -641,8 +618,6 @@ xprt_rdma_allocate(struct rpc_task *task)
641 if (RPC_IS_SWAPPER(task)) 618 if (RPC_IS_SWAPPER(task))
642 flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN; 619 flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
643 620
644 if (!rpcrdma_get_rdmabuf(r_xprt, req, flags))
645 goto out_fail;
646 if (!rpcrdma_get_sendbuf(r_xprt, req, rqst->rq_callsize, flags)) 621 if (!rpcrdma_get_sendbuf(r_xprt, req, rqst->rq_callsize, flags))
647 goto out_fail; 622 goto out_fail;
648 if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags)) 623 if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 626fd3074186..6a7a5a277e75 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1068,17 +1068,27 @@ struct rpcrdma_req *
1068rpcrdma_create_req(struct rpcrdma_xprt *r_xprt) 1068rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
1069{ 1069{
1070 struct rpcrdma_buffer *buffer = &r_xprt->rx_buf; 1070 struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
1071 struct rpcrdma_regbuf *rb;
1071 struct rpcrdma_req *req; 1072 struct rpcrdma_req *req;
1072 1073
1073 req = kzalloc(sizeof(*req), GFP_KERNEL); 1074 req = kzalloc(sizeof(*req), GFP_KERNEL);
1074 if (req == NULL) 1075 if (req == NULL)
1075 return ERR_PTR(-ENOMEM); 1076 return ERR_PTR(-ENOMEM);
1076 1077
1078 rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
1079 DMA_TO_DEVICE, GFP_KERNEL);
1080 if (IS_ERR(rb)) {
1081 kfree(req);
1082 return ERR_PTR(-ENOMEM);
1083 }
1084 req->rl_rdmabuf = rb;
1085 xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
1086 req->rl_buffer = buffer;
1087 INIT_LIST_HEAD(&req->rl_registered);
1088
1077 spin_lock(&buffer->rb_reqslock); 1089 spin_lock(&buffer->rb_reqslock);
1078 list_add(&req->rl_all, &buffer->rb_allreqs); 1090 list_add(&req->rl_all, &buffer->rb_allreqs);
1079 spin_unlock(&buffer->rb_reqslock); 1091 spin_unlock(&buffer->rb_reqslock);
1080 req->rl_buffer = &r_xprt->rx_buf;
1081 INIT_LIST_HEAD(&req->rl_registered);
1082 return req; 1092 return req;
1083} 1093}
1084 1094