author	Chuck Lever <chuck.lever@oracle.com>	2017-04-09 13:05:44 -0400
committer	J. Bruce Fields <bfields@redhat.com>	2017-04-25 17:25:54 -0400
commit	6e6092ca305ad785c605d7e313727aad96c228a5 (patch)
tree	51741ddf7ed22ce3a922c356dc54041b62026994
parent	17f5f7f506aaca985b95df7ef7fc2ff49c36a8e9 (diff)
svcrdma: Add svc_rdma_map_reply_hdr()
Introduce a helper to DMA-map a reply's transport header before
sending it. This will in part replace the map vector cache.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
-rw-r--r--	include/linux/sunrpc/svc_rdma.h             |  3
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_backchannel.c  | 36
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_sendto.c       | 61
3 files changed, 62 insertions(+), 38 deletions(-)
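
As a quick orientation before the diff, here is a minimal sketch of how a send path uses the new helper, distilled from the two callers converted below (the backchannel sendto path and svc_rdma_send_error). The names rdma_resp and len stand in for the caller's already-encoded transport header buffer and its length; recv repost and full error handling are elided.

	/* Sketch only, not a verbatim excerpt of the patch. The caller is
	 * assumed to hold a struct svcxprt_rdma *rdma and to have encoded
	 * the transport header into rdma_resp (len bytes).
	 */
	struct svc_rdma_op_ctxt *ctxt;
	int ret;

	ctxt = svc_rdma_get_context(rdma);

	/* DMA-map the page backing the transport header; this fills
	 * sge[0] and counts the mapping against the transport.
	 */
	ret = svc_rdma_map_reply_hdr(rdma, ctxt, rdma_resp, len);
	if (ret < 0)
		goto out_err;

	/* The header occupies a single SGE; no rkey to invalidate. */
	ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0);
	if (ret)
		goto out_unmap;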
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 287db5c179d8..002a46d1faa1 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -228,6 +228,9 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
 /* svc_rdma_sendto.c */
 extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
 			    struct svc_rdma_req_map *, bool);
+extern int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma,
+				  struct svc_rdma_op_ctxt *ctxt,
+				  __be32 *rdma_resp, unsigned int len);
 extern int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma,
 				 struct svc_rdma_op_ctxt *ctxt,
 				 int num_sge, u32 inv_rkey);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index f12f39c189c3..0305b33d482f 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -101,50 +101,36 @@ out_notfound:
 static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
 			      struct rpc_rqst *rqst)
 {
-	struct xdr_buf *sndbuf = &rqst->rq_snd_buf;
 	struct svc_rdma_op_ctxt *ctxt;
-	struct svc_rdma_req_map *vec;
 	int ret;
 
-	vec = svc_rdma_get_req_map(rdma);
-	ret = svc_rdma_map_xdr(rdma, sndbuf, vec, false);
-	if (ret)
+	ctxt = svc_rdma_get_context(rdma);
+
+	/* rpcrdma_bc_send_request builds the transport header and
+	 * the backchannel RPC message in the same buffer. Thus only
+	 * one SGE is needed to send both.
+	 */
+	ret = svc_rdma_map_reply_hdr(rdma, ctxt, rqst->rq_buffer,
+				     rqst->rq_snd_buf.len);
+	if (ret < 0)
 		goto out_err;
 
 	ret = svc_rdma_repost_recv(rdma, GFP_NOIO);
 	if (ret)
 		goto out_err;
 
-	ctxt = svc_rdma_get_context(rdma);
-	ctxt->pages[0] = virt_to_page(rqst->rq_buffer);
-	ctxt->count = 1;
-
-	ctxt->direction = DMA_TO_DEVICE;
-	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
-	ctxt->sge[0].length = sndbuf->len;
-	ctxt->sge[0].addr =
-	    ib_dma_map_page(rdma->sc_cm_id->device, ctxt->pages[0], 0,
-			    sndbuf->len, DMA_TO_DEVICE);
-	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) {
-		ret = -EIO;
-		goto out_unmap;
-	}
-	svc_rdma_count_mappings(rdma, ctxt);
-
 	ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0);
-	if (ret) {
-		ret = -EIO;
+	if (ret)
 		goto out_unmap;
-	}
 
 out_err:
-	svc_rdma_put_req_map(rdma, vec);
 	dprintk("svcrdma: %s returns %d\n", __func__, ret);
 	return ret;
 
 out_unmap:
 	svc_rdma_unmap_dma(ctxt);
 	svc_rdma_put_context(ctxt, 1);
+	ret = -EIO;
 	goto out_err;
 }
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index f90b40d0932f..a7dc71daa776 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -217,6 +217,49 @@ static u32 svc_rdma_get_inv_rkey(struct rpcrdma_msg *rdma_argp,
 	return 0;
 }
 
+static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
+				 struct svc_rdma_op_ctxt *ctxt,
+				 unsigned int sge_no,
+				 struct page *page,
+				 unsigned int offset,
+				 unsigned int len)
+{
+	struct ib_device *dev = rdma->sc_cm_id->device;
+	dma_addr_t dma_addr;
+
+	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
+	if (ib_dma_mapping_error(dev, dma_addr))
+		return -EIO;
+
+	ctxt->sge[sge_no].addr = dma_addr;
+	ctxt->sge[sge_no].length = len;
+	ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
+	svc_rdma_count_mappings(rdma, ctxt);
+	return 0;
+}
+
+/**
+ * svc_rdma_map_reply_hdr - DMA map the transport header buffer
+ * @rdma: controlling transport
+ * @ctxt: op_ctxt for the Send WR
+ * @rdma_resp: buffer containing transport header
+ * @len: length of transport header
+ *
+ * Returns:
+ *	%0 if the header is DMA mapped,
+ *	%-EIO if DMA mapping failed.
+ */
+int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma,
+			   struct svc_rdma_op_ctxt *ctxt,
+			   __be32 *rdma_resp,
+			   unsigned int len)
+{
+	ctxt->direction = DMA_TO_DEVICE;
+	ctxt->pages[0] = virt_to_page(rdma_resp);
+	ctxt->count = 1;
+	return svc_rdma_dma_map_page(rdma, ctxt, 0, ctxt->pages[0], 0, len);
+}
+
 /* Assumptions:
  * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
  */
@@ -699,22 +742,14 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	err = ERR_VERS;
 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
+	/* Map transport header; no RPC message payload */
 	ctxt = svc_rdma_get_context(xprt);
-	ctxt->direction = DMA_TO_DEVICE;
-	ctxt->count = 1;
-	ctxt->pages[0] = p;
-
-	/* Prepare SGE for local address */
-	ctxt->sge[0].lkey = xprt->sc_pd->local_dma_lkey;
-	ctxt->sge[0].length = length;
-	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
-					    p, 0, length, DMA_TO_DEVICE);
-	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
-		dprintk("svcrdma: Error mapping buffer for protocol error\n");
-		svc_rdma_put_context(ctxt, 1);
+	ret = svc_rdma_map_reply_hdr(xprt, ctxt, &rmsgp->rm_xid, length);
+	if (ret) {
+		dprintk("svcrdma: Error %d mapping send for protocol error\n",
+			ret);
 		return;
 	}
-	svc_rdma_count_mappings(xprt, ctxt);
 
 	ret = svc_rdma_post_send_wr(xprt, ctxt, 1, 0);
 	if (ret) {