about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    Chuck Lever <chuck.lever@oracle.com>  2015-01-21 11:04:33 -0500
committer Anna Schumaker <Anna.Schumaker@Netapp.com>  2015-01-30 10:47:49 -0500
commit    c05fbb5a593571961fdb4ba06a2bff49aed9dcee (patch)
tree      349a1603314fb158cd60e4855f7b7cbab512a965
parent    6b1184cd4fb086a826f658b02d9d9912dd0dde08 (diff)
xprtrdma: Allocate zero pad separately from rpcrdma_buffer
Use the new rpcrdma_alloc_regbuf() API to shrink the amount of
contiguous memory needed for a buffer pool by moving the zero
pad buffer into a regbuf.

This is for consistency with the other uses of internally
registered memory.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c4
-rw-r--r--net/sunrpc/xprtrdma/verbs.c29
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h3
3 files changed, 13 insertions, 23 deletions
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 02efcaa1bbac..7e9acd9361c5 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -549,9 +549,9 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	if (padlen) {
 		struct rpcrdma_ep *ep = &r_xprt->rx_ep;
 
-		req->rl_send_iov[2].addr = ep->rep_pad.addr;
+		req->rl_send_iov[2].addr = rdmab_addr(ep->rep_padbuf);
 		req->rl_send_iov[2].length = padlen;
-		req->rl_send_iov[2].lkey = ep->rep_pad.lkey;
+		req->rl_send_iov[2].lkey = rdmab_lkey(ep->rep_padbuf);
 
 		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
 		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index f58521dd88e2..8a05f45d1a11 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -794,6 +794,14 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 	ep->rep_attr.qp_type = IB_QPT_RC;
 	ep->rep_attr.port_num = ~0;
 
+	if (cdata->padding) {
+		ep->rep_padbuf = rpcrdma_alloc_regbuf(ia, cdata->padding,
+						      GFP_KERNEL);
+		if (IS_ERR(ep->rep_padbuf))
+			return PTR_ERR(ep->rep_padbuf);
+	} else
+		ep->rep_padbuf = NULL;
+
 	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
 		"iovs: send %d recv %d\n",
 		__func__,
@@ -876,6 +884,7 @@ out2:
 	dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
 		__func__, err);
 out1:
+	rpcrdma_free_regbuf(ia, ep->rep_padbuf);
 	return rc;
 }
 
@@ -902,11 +911,7 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 		ia->ri_id->qp = NULL;
 	}
 
-	/* padding - could be done in rpcrdma_buffer_destroy... */
-	if (ep->rep_pad_mr) {
-		rpcrdma_deregister_internal(ia, ep->rep_pad_mr, &ep->rep_pad);
-		ep->rep_pad_mr = NULL;
-	}
+	rpcrdma_free_regbuf(ia, ep->rep_padbuf);
 
 	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
 	rc = ib_destroy_cq(ep->rep_attr.recv_cq);
@@ -1220,12 +1225,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 	 * 1. arrays for send and recv pointers
 	 * 2. arrays of struct rpcrdma_req to fill in pointers
 	 * 3. array of struct rpcrdma_rep for replies
-	 * 4. padding, if any
 	 * Send/recv buffers in req/rep need to be registered
 	 */
 	len = buf->rb_max_requests *
 		(sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));
-	len += cdata->padding;
 
 	p = kzalloc(len, GFP_KERNEL);
 	if (p == NULL) {
@@ -1241,18 +1244,6 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 	buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
 	p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];
 
-	/*
-	 * Register the zeroed pad buffer, if any.
-	 */
-	if (cdata->padding) {
-		struct rpcrdma_ep *ep = &r_xprt->rx_ep;
-		rc = rpcrdma_register_internal(ia, p, cdata->padding,
-					       &ep->rep_pad_mr, &ep->rep_pad);
-		if (rc)
-			goto out;
-	}
-	p += cdata->padding;
-
 	INIT_LIST_HEAD(&buf->rb_mws);
 	INIT_LIST_HEAD(&buf->rb_all);
 	switch (ia->ri_memreg_strategy) {
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 2b69316dfd11..5630353ed240 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -88,8 +88,7 @@ struct rpcrdma_ep {
 	int			rep_connected;
 	struct ib_qp_init_attr	rep_attr;
 	wait_queue_head_t	rep_connect_wait;
-	struct ib_sge		rep_pad;	/* holds zeroed pad */
-	struct ib_mr		*rep_pad_mr;	/* holds zeroed pad */
+	struct rpcrdma_regbuf	*rep_padbuf;
 	struct rdma_conn_param	rep_remote_cma;
 	struct sockaddr_storage	rep_remote_addr;
 	struct delayed_work	rep_connect_worker;