summaryrefslogtreecommitdiffstats
path: root/net/sunrpc
diff options
context:
space:
mode:
authorChuck Lever <chuck.lever@oracle.com>2015-12-16 17:22:55 -0500
committerAnna Schumaker <Anna.Schumaker@Netapp.com>2015-12-18 15:34:33 -0500
commit7c7a5390dc6c8d89fc368424b69a4eef8e43f411 (patch)
tree4e45049e1737d1fc5fec1b9aca813eb92fcc0ca9 /net/sunrpc
parentc9918ff56dfb175ce427140c641280d0b4522dbe (diff)
xprtrdma: Add ro_unmap_sync method for FMR
FMR's ro_unmap method is already synchronous because ib_unmap_fmr() is a synchronous verb. However, some improvements can be made here: 1. Gather all the MRs for the RPC request onto a list, and invoke ib_unmap_fmr() once with that list. This reduces the number of doorbells when there is more than one MR to invalidate. 2. Perform the DMA unmap _after_ the MRs are unmapped, not before. This is critical after invalidating a Write chunk. Signed-off-by: Chuck Lever <chuck.lever@oracle.com> Tested-by: Devesh Sharma <devesh.sharma@avagotech.com> Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c64
1 file changed, 64 insertions, 0 deletions
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index f1e8dafbd507..c14f3a4bff68 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -179,6 +179,69 @@ out_maperr:
179 return rc; 179 return rc;
180} 180}
181 181
182static void
183__fmr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
184{
185 struct ib_device *device = r_xprt->rx_ia.ri_device;
186 struct rpcrdma_mw *mw = seg->rl_mw;
187 int nsegs = seg->mr_nsegs;
188
189 seg->rl_mw = NULL;
190
191 while (nsegs--)
192 rpcrdma_unmap_one(device, seg++);
193
194 rpcrdma_put_mw(r_xprt, mw);
195}
196
197/* Invalidate all memory regions that were registered for "req".
198 *
199 * Sleeps until it is safe for the host CPU to access the
200 * previously mapped memory regions.
201 */
202static void
203fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
204{
205 struct rpcrdma_mr_seg *seg;
206 unsigned int i, nchunks;
207 struct rpcrdma_mw *mw;
208 LIST_HEAD(unmap_list);
209 int rc;
210
211 dprintk("RPC: %s: req %p\n", __func__, req);
212
213 /* ORDER: Invalidate all of the req's MRs first
214 *
215 * ib_unmap_fmr() is slow, so use a single call instead
216 * of one call per mapped MR.
217 */
218 for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
219 seg = &req->rl_segments[i];
220 mw = seg->rl_mw;
221
222 list_add(&mw->r.fmr.fmr->list, &unmap_list);
223
224 i += seg->mr_nsegs;
225 }
226 rc = ib_unmap_fmr(&unmap_list);
227 if (rc)
228 pr_warn("%s: ib_unmap_fmr failed (%i)\n", __func__, rc);
229
230 /* ORDER: Now DMA unmap all of the req's MRs, and return
231 * them to the free MW list.
232 */
233 for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
234 seg = &req->rl_segments[i];
235
236 __fmr_dma_unmap(r_xprt, seg);
237
238 i += seg->mr_nsegs;
239 seg->mr_nsegs = 0;
240 }
241
242 req->rl_nchunks = 0;
243}
244
182/* Use the ib_unmap_fmr() verb to prevent further remote 245/* Use the ib_unmap_fmr() verb to prevent further remote
183 * access via RDMA READ or RDMA WRITE. 246 * access via RDMA READ or RDMA WRITE.
184 */ 247 */
@@ -231,6 +294,7 @@ fmr_op_destroy(struct rpcrdma_buffer *buf)
231 294
232const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = { 295const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
233 .ro_map = fmr_op_map, 296 .ro_map = fmr_op_map,
297 .ro_unmap_sync = fmr_op_unmap_sync,
234 .ro_unmap = fmr_op_unmap, 298 .ro_unmap = fmr_op_unmap,
235 .ro_open = fmr_op_open, 299 .ro_open = fmr_op_open,
236 .ro_maxpages = fmr_op_maxpages, 300 .ro_maxpages = fmr_op_maxpages,