author    Chuck Lever <chuck.lever@oracle.com>          2014-07-29 17:24:36 -0400
committer Anna Schumaker <Anna.Schumaker@Netapp.com>    2014-07-31 16:22:55 -0400
commit    c2922c0235aac1c787fa81e24d7d7e93c2202275
tree      c7e0e93d36583f3cdfec4b0b7c543f18ada59891 /net/sunrpc
parent    3111d72c7ced444b1034f6e365e0e02444c68aa8
xprtrdma: Properly handle exhaustion of the rb_mws list
If the rb_mws list is exhausted, clean up and return NULL so that
call_allocate() will delay and try again.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--   net/sunrpc/xprtrdma/verbs.c   103
1 file changed, 71 insertions(+), 32 deletions(-)
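As background for the diff below, here is a small standalone C sketch (not kernel code) of the pattern this patch introduces: take a fixed number of entries from a shared free list, and if the list runs dry, undo the partial allocation and return NULL so the caller can back off and retry, which is what the commit message says call_allocate() does. All names in the sketch (struct mw, struct pool, req_get_mws, MAX_SEGS, POOL_SIZE) are illustrative stand-ins, not the real rpcrdma structures.

/* Standalone sketch of "clean up and return NULL on free-list exhaustion,
 * let the caller delay and retry".  Compiles as ordinary userspace C.
 */
#include <stdio.h>
#include <unistd.h>

#define MAX_SEGS  8             /* stand-in for RPCRDMA_MAX_SEGS */
#define POOL_SIZE 20            /* total MWs seeded onto the free list */

struct mw {                     /* stand-in for struct rpcrdma_mw */
        struct mw *next;
};

struct pool {                   /* stand-in for rpcrdma_buffer's rb_mws list */
        struct mw *free_list;
};

struct req {                    /* stand-in for struct rpcrdma_req */
        struct mw *segs[MAX_SEGS];
};

static struct mw *pool_take(struct pool *p)
{
        struct mw *m = p->free_list;

        if (m)
                p->free_list = m->next;
        return m;
}

/* Tolerates a NULL entry, so a failed get can be undone safely. */
static void pool_put(struct pool *p, struct mw **m)
{
        if (*m) {
                (*m)->next = p->free_list;
                p->free_list = *m;
                *m = NULL;
        }
}

/* Grab MAX_SEGS entries; on exhaustion put everything back and return NULL. */
static struct req *req_get_mws(struct req *r, struct pool *p)
{
        int i;

        for (i = 0; i < MAX_SEGS; i++) {
                r->segs[i] = pool_take(p);
                if (!r->segs[i])
                        goto out_undo;
        }
        return r;                       /* success */

out_undo:
        while (i--)                     /* roll back the partial allocation */
                pool_put(p, &r->segs[i]);
        return NULL;
}

int main(void)
{
        struct mw mws[POOL_SIZE];
        struct mw *taken[POOL_SIZE];
        struct pool p = { NULL };
        struct req r = { { NULL } };
        int i, ntaken = 0, tries = 0;

        for (i = 0; i < POOL_SIZE; i++) {       /* seed the free list */
                mws[i].next = p.free_list;
                p.free_list = &mws[i];
        }

        /* Drain the pool so that only MAX_SEGS - 1 entries remain free. */
        while (ntaken < POOL_SIZE - (MAX_SEGS - 1))
                taken[ntaken++] = pool_take(&p);

        /* Caller behaviour the commit message relies on: on NULL, back off
         * and try again; in the kernel the RPC client does the back-off. */
        while (!req_get_mws(&r, &p)) {
                printf("free list exhausted, retrying (attempt %d)\n", ++tries);
                usleep(1000);                   /* stand-in for the delay */
                if (ntaken)                     /* pretend a request completed */
                        pool_put(&p, &taken[--ntaken]);
        }
        printf("acquired %d MWs after %d failed attempt(s)\n", MAX_SEGS, tries);
        return 0;
}

The point of the pattern is that a partially filled request never escapes: either all MAX_SEGS entries are taken, or every entry is returned to the pool and the failure is reported at the allocation boundary, which lets the caller simply delay and re-enter the allocation step.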
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 0ad7d10f13a7..017f0abb2a86 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1256,6 +1256,67 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 	kfree(buf->rb_pool);
 }
 
+/* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving
+ * some req segments uninitialized.
+ */
+static void
+rpcrdma_buffer_put_mr(struct rpcrdma_mw **mw, struct rpcrdma_buffer *buf)
+{
+	if (*mw) {
+		list_add_tail(&(*mw)->mw_list, &buf->rb_mws);
+		*mw = NULL;
+	}
+}
+
+/* Cycle mw's back in reverse order, and "spin" them.
+ * This delays and scrambles reuse as much as possible.
+ */
+static void
+rpcrdma_buffer_put_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
+{
+	struct rpcrdma_mr_seg *seg = req->rl_segments;
+	struct rpcrdma_mr_seg *seg1 = seg;
+	int i;
+
+	for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++)
+		rpcrdma_buffer_put_mr(&seg->mr_chunk.rl_mw, buf);
+	rpcrdma_buffer_put_mr(&seg1->mr_chunk.rl_mw, buf);
+}
+
+static void
+rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
+{
+	buf->rb_send_bufs[--buf->rb_send_index] = req;
+	req->rl_niovs = 0;
+	if (req->rl_reply) {
+		buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply;
+		req->rl_reply->rr_func = NULL;
+		req->rl_reply = NULL;
+	}
+}
+
+static struct rpcrdma_req *
+rpcrdma_buffer_get_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
+{
+	struct rpcrdma_mw *r;
+	int i;
+
+	i = RPCRDMA_MAX_SEGS - 1;
+	while (!list_empty(&buf->rb_mws)) {
+		r = list_entry(buf->rb_mws.next,
+			       struct rpcrdma_mw, mw_list);
+		list_del(&r->mw_list);
+		req->rl_segments[i].mr_chunk.rl_mw = r;
+		if (unlikely(i-- == 0))
+			return req;	/* Success */
+	}
+
+	/* Not enough entries on rb_mws for this req */
+	rpcrdma_buffer_put_sendbuf(req, buf);
+	rpcrdma_buffer_put_mrs(req, buf);
+	return NULL;
+}
+
 /*
  * Get a set of request/reply buffers.
  *
@@ -1268,10 +1329,9 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 struct rpcrdma_req *
 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 {
+	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
 	struct rpcrdma_req *req;
 	unsigned long flags;
-	int i;
-	struct rpcrdma_mw *r;
 
 	spin_lock_irqsave(&buffers->rb_lock, flags);
 	if (buffers->rb_send_index == buffers->rb_max_requests) {
@@ -1291,14 +1351,13 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
 	}
 	buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;
-	if (!list_empty(&buffers->rb_mws)) {
-		i = RPCRDMA_MAX_SEGS - 1;
-		do {
-			r = list_entry(buffers->rb_mws.next,
-					struct rpcrdma_mw, mw_list);
-			list_del(&r->mw_list);
-			req->rl_segments[i].mr_chunk.rl_mw = r;
-		} while (--i >= 0);
+	switch (ia->ri_memreg_strategy) {
+	case RPCRDMA_FRMR:
+	case RPCRDMA_MTHCAFMR:
+		req = rpcrdma_buffer_get_mrs(req, buffers);
+		break;
+	default:
+		break;
 	}
 	spin_unlock_irqrestore(&buffers->rb_lock, flags);
 	return req;
@@ -1313,34 +1372,14 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
 {
 	struct rpcrdma_buffer *buffers = req->rl_buffer;
 	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
-	int i;
 	unsigned long flags;
 
 	spin_lock_irqsave(&buffers->rb_lock, flags);
-	buffers->rb_send_bufs[--buffers->rb_send_index] = req;
-	req->rl_niovs = 0;
-	if (req->rl_reply) {
-		buffers->rb_recv_bufs[--buffers->rb_recv_index] = req->rl_reply;
-		req->rl_reply->rr_func = NULL;
-		req->rl_reply = NULL;
-	}
+	rpcrdma_buffer_put_sendbuf(req, buffers);
 	switch (ia->ri_memreg_strategy) {
 	case RPCRDMA_FRMR:
 	case RPCRDMA_MTHCAFMR:
-		/*
-		 * Cycle mw's back in reverse order, and "spin" them.
-		 * This delays and scrambles reuse as much as possible.
-		 */
-		i = 1;
-		do {
-			struct rpcrdma_mw **mw;
-			mw = &req->rl_segments[i].mr_chunk.rl_mw;
-			list_add_tail(&(*mw)->mw_list, &buffers->rb_mws);
-			*mw = NULL;
-		} while (++i < RPCRDMA_MAX_SEGS);
-		list_add_tail(&req->rl_segments[0].mr_chunk.rl_mw->mw_list,
-				&buffers->rb_mws);
-		req->rl_segments[0].mr_chunk.rl_mw = NULL;
+		rpcrdma_buffer_put_mrs(req, buffers);
 		break;
 	default:
 		break;