about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorChuck Lever <chuck.lever@oracle.com>2016-01-07 14:49:20 -0500
committerDoug Ledford <dledford@redhat.com>2016-01-19 15:30:48 -0500
commit2fe81b239dbb00d0a2fd8858ac9dd4ef4a8841ee (patch)
tree82da79b378d1347b6810a7ff2ac1a0040b0c2ef4 /net
parentcc886c9ff1607eda04062bdcec963e2f8e6a3eb1 (diff)
svcrdma: Improve allocation of struct svc_rdma_req_map
To ensure this allocation cannot fail and will not sleep, pre-allocate the req_map structures per-connection. Signed-off-by: Chuck Lever <chuck.lever@oracle.com> Acked-by: Bruce Fields <bfields@fieldses.org> Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'net')
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c6
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c85
2 files changed, 78 insertions, 13 deletions
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 969a1ab75fc3..9a097f95e10b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -591,7 +591,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
591 /* Build an req vec for the XDR */ 591 /* Build an req vec for the XDR */
592 ctxt = svc_rdma_get_context(rdma); 592 ctxt = svc_rdma_get_context(rdma);
593 ctxt->direction = DMA_TO_DEVICE; 593 ctxt->direction = DMA_TO_DEVICE;
594 vec = svc_rdma_get_req_map(); 594 vec = svc_rdma_get_req_map(rdma);
595 ret = map_xdr(rdma, &rqstp->rq_res, vec); 595 ret = map_xdr(rdma, &rqstp->rq_res, vec);
596 if (ret) 596 if (ret)
597 goto err0; 597 goto err0;
@@ -630,14 +630,14 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
630 630
631 ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec, 631 ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
632 inline_bytes); 632 inline_bytes);
633 svc_rdma_put_req_map(vec); 633 svc_rdma_put_req_map(rdma, vec);
634 dprintk("svcrdma: send_reply returns %d\n", ret); 634 dprintk("svcrdma: send_reply returns %d\n", ret);
635 return ret; 635 return ret;
636 636
637 err1: 637 err1:
638 put_page(res_page); 638 put_page(res_page);
639 err0: 639 err0:
640 svc_rdma_put_req_map(vec); 640 svc_rdma_put_req_map(rdma, vec);
641 svc_rdma_put_context(ctxt, 0); 641 svc_rdma_put_context(ctxt, 0);
642 return ret; 642 return ret;
643} 643}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 9801115f6e59..0b9e17ea777e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -273,23 +273,83 @@ static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt)
273 } 273 }
274} 274}
275 275
276/* 276static struct svc_rdma_req_map *alloc_req_map(gfp_t flags)
277 * Temporary NFS req mappings are shared across all transport
278 * instances. These are short lived and should be bounded by the number
279 * of concurrent server threads * depth of the SQ.
280 */
281struct svc_rdma_req_map *svc_rdma_get_req_map(void)
282{ 277{
283 struct svc_rdma_req_map *map; 278 struct svc_rdma_req_map *map;
284 map = kmem_cache_alloc(svc_rdma_map_cachep, 279
285 GFP_KERNEL | __GFP_NOFAIL); 280 map = kmalloc(sizeof(*map), flags);
281 if (map)
282 INIT_LIST_HEAD(&map->free);
283 return map;
284}
285
286static bool svc_rdma_prealloc_maps(struct svcxprt_rdma *xprt)
287{
288 int i;
289
290 /* One for each receive buffer on this connection. */
291 i = xprt->sc_max_requests;
292
293 while (i--) {
294 struct svc_rdma_req_map *map;
295
296 map = alloc_req_map(GFP_KERNEL);
297 if (!map) {
298 dprintk("svcrdma: No memory for request map\n");
299 return false;
300 }
301 list_add(&map->free, &xprt->sc_maps);
302 }
303 return true;
304}
305
306struct svc_rdma_req_map *svc_rdma_get_req_map(struct svcxprt_rdma *xprt)
307{
308 struct svc_rdma_req_map *map = NULL;
309
310 spin_lock(&xprt->sc_map_lock);
311 if (list_empty(&xprt->sc_maps))
312 goto out_empty;
313
314 map = list_first_entry(&xprt->sc_maps,
315 struct svc_rdma_req_map, free);
316 list_del_init(&map->free);
317 spin_unlock(&xprt->sc_map_lock);
318
319out:
286 map->count = 0; 320 map->count = 0;
287 return map; 321 return map;
322
323out_empty:
324 spin_unlock(&xprt->sc_map_lock);
325
326 /* Pre-allocation amount was incorrect */
327 map = alloc_req_map(GFP_NOIO);
328 if (map)
329 goto out;
330
331 WARN_ONCE(1, "svcrdma: empty request map list?\n");
332 return NULL;
288} 333}
289 334
290void svc_rdma_put_req_map(struct svc_rdma_req_map *map) 335void svc_rdma_put_req_map(struct svcxprt_rdma *xprt,
336 struct svc_rdma_req_map *map)
291{ 337{
292 kmem_cache_free(svc_rdma_map_cachep, map); 338 spin_lock(&xprt->sc_map_lock);
339 list_add(&map->free, &xprt->sc_maps);
340 spin_unlock(&xprt->sc_map_lock);
341}
342
343static void svc_rdma_destroy_maps(struct svcxprt_rdma *xprt)
344{
345 while (!list_empty(&xprt->sc_maps)) {
346 struct svc_rdma_req_map *map;
347
348 map = list_first_entry(&xprt->sc_maps,
349 struct svc_rdma_req_map, free);
350 list_del(&map->free);
351 kfree(map);
352 }
293} 353}
294 354
295/* ib_cq event handler */ 355/* ib_cq event handler */
@@ -593,12 +653,14 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
593 INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q); 653 INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
594 INIT_LIST_HEAD(&cma_xprt->sc_frmr_q); 654 INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
595 INIT_LIST_HEAD(&cma_xprt->sc_ctxts); 655 INIT_LIST_HEAD(&cma_xprt->sc_ctxts);
656 INIT_LIST_HEAD(&cma_xprt->sc_maps);
596 init_waitqueue_head(&cma_xprt->sc_send_wait); 657 init_waitqueue_head(&cma_xprt->sc_send_wait);
597 658
598 spin_lock_init(&cma_xprt->sc_lock); 659 spin_lock_init(&cma_xprt->sc_lock);
599 spin_lock_init(&cma_xprt->sc_rq_dto_lock); 660 spin_lock_init(&cma_xprt->sc_rq_dto_lock);
600 spin_lock_init(&cma_xprt->sc_frmr_q_lock); 661 spin_lock_init(&cma_xprt->sc_frmr_q_lock);
601 spin_lock_init(&cma_xprt->sc_ctxt_lock); 662 spin_lock_init(&cma_xprt->sc_ctxt_lock);
663 spin_lock_init(&cma_xprt->sc_map_lock);
602 664
603 if (listener) 665 if (listener)
604 set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags); 666 set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
@@ -988,6 +1050,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
988 1050
989 if (!svc_rdma_prealloc_ctxts(newxprt)) 1051 if (!svc_rdma_prealloc_ctxts(newxprt))
990 goto errout; 1052 goto errout;
1053 if (!svc_rdma_prealloc_maps(newxprt))
1054 goto errout;
991 1055
992 /* 1056 /*
993 * Limit ORD based on client limit, local device limit, and 1057 * Limit ORD based on client limit, local device limit, and
@@ -1259,6 +1323,7 @@ static void __svc_rdma_free(struct work_struct *work)
1259 1323
1260 rdma_dealloc_frmr_q(rdma); 1324 rdma_dealloc_frmr_q(rdma);
1261 svc_rdma_destroy_ctxts(rdma); 1325 svc_rdma_destroy_ctxts(rdma);
1326 svc_rdma_destroy_maps(rdma);
1262 1327
1263 /* Destroy the QP if present (not a listener) */ 1328 /* Destroy the QP if present (not a listener) */
1264 if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) 1329 if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))