author    Chuck Lever <chuck.lever@oracle.com>	2014-07-29 17:24:54 -0400
committer Anna Schumaker <Anna.Schumaker@Netapp.com>	2014-07-31 16:22:55 -0400
commit    ddb6bebcc64678fcf73eb9e21f80c6dacfa093a7 (patch)
tree      29a5e7d2d7b1883a5e5cc78d372b6009fb410196 /net
parent    9f9d802a28a107937ecda4ff78de2ab5cedd439d (diff)
xprtrdma: Reset FRMRs after a flushed LOCAL_INV Work Request
When a LOCAL_INV Work Request is flushed, it leaves an FRMR in the VALID
state. This FRMR can be returned by rpcrdma_buffer_get(), and must be
knocked down in rpcrdma_register_frmr_external() before it can be re-used.

Instead, capture these in rpcrdma_buffer_get(), and reset them.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
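To make the mechanism concrete, below is a small self-contained userspace
model of the sorting step this patch adds under rpcrdma_buffer_get():
usable FRMRs are handed out to the request, while stale ones are diverted
to a separate list for a LOCAL_INV retry. This is an illustrative sketch
only, not kernel code; struct mw, sort_pool(), and the singly linked list
are stand-ins invented for the example, whereas the real implementation
(rpcrdma_buffer_get_frmrs() in the diff below) walks rb_mws with the
kernel's list_head primitives under rb_lock.

/* Hypothetical userspace model of stale-FRMR sorting; not kernel code. */
#include <stdio.h>

enum frmr_state { FRMR_IS_INVALID, FRMR_IS_VALID, FRMR_IS_STALE };

struct mw {
	enum frmr_state state;
	struct mw *next;
};

/* Pop every MW off the pool; usable ones are collected for the caller,
 * stale ones are diverted to *stale so a fresh LOCAL_INV can be posted. */
static struct mw *sort_pool(struct mw *pool, struct mw **stale)
{
	struct mw *usable = NULL;

	while (pool) {
		struct mw *r = pool;

		pool = r->next;
		if (r->state == FRMR_IS_STALE) {
			r->next = *stale;	/* queue for retry */
			*stale = r;
			continue;
		}
		r->next = usable;		/* hand out to the request */
		usable = r;
	}
	return usable;
}

int main(void)
{
	struct mw mws[4] = {
		{ FRMR_IS_VALID, &mws[1] },
		{ FRMR_IS_STALE, &mws[2] },
		{ FRMR_IS_VALID, &mws[3] },
		{ FRMR_IS_STALE, NULL },
	};
	struct mw *stale = NULL, *usable, *r;
	int n;

	usable = sort_pool(&mws[0], &stale);
	for (n = 0, r = usable; r; r = r->next)
		n++;
	printf("usable FRMRs: %d\n", n);		/* prints 2 */
	for (n = 0, r = stale; r; r = r->next)
		n++;
	printf("stale FRMRs queued for retry: %d\n", n);	/* prints 2 */
	return 0;
}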
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/xprtrdma/verbs.c | 94
1 file changed, 92 insertions(+), 2 deletions(-)
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 3a6376a77fcc..ca55acf42365 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1358,8 +1358,91 @@ rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
 	}
 }
 
+/* rpcrdma_unmap_one() was already done by rpcrdma_deregister_frmr_external().
+ * Redo only the ib_post_send().
+ */
+static void
+rpcrdma_retry_local_inv(struct rpcrdma_mw *r, struct rpcrdma_ia *ia)
+{
+	struct rpcrdma_xprt *r_xprt =
+				container_of(ia, struct rpcrdma_xprt, rx_ia);
+	struct ib_send_wr invalidate_wr, *bad_wr;
+	int rc;
+
+	dprintk("RPC: %s: FRMR %p is stale\n", __func__, r);
+
+	/* When this FRMR is re-inserted into rb_mws, it is no longer stale */
+	r->r.frmr.fr_state = FRMR_IS_VALID;
+
+	memset(&invalidate_wr, 0, sizeof(invalidate_wr));
+	invalidate_wr.wr_id = (unsigned long)(void *)r;
+	invalidate_wr.opcode = IB_WR_LOCAL_INV;
+	invalidate_wr.send_flags = IB_SEND_SIGNALED;
+	invalidate_wr.ex.invalidate_rkey = r->r.frmr.fr_mr->rkey;
+	DECR_CQCOUNT(&r_xprt->rx_ep);
+
+	dprintk("RPC: %s: frmr %p invalidating rkey %08x\n",
+		__func__, r, r->r.frmr.fr_mr->rkey);
+
+	read_lock(&ia->ri_qplock);
+	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
+	read_unlock(&ia->ri_qplock);
+	if (rc) {
+		/* Force rpcrdma_buffer_get() to retry */
+		r->r.frmr.fr_state = FRMR_IS_STALE;
+		dprintk("RPC: %s: ib_post_send failed, %i\n",
+			__func__, rc);
+	}
+}
+
+static void
+rpcrdma_retry_flushed_linv(struct list_head *stale,
+			   struct rpcrdma_buffer *buf)
+{
+	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
+	struct list_head *pos;
+	struct rpcrdma_mw *r;
+	unsigned long flags;
+
+	list_for_each(pos, stale) {
+		r = list_entry(pos, struct rpcrdma_mw, mw_list);
+		rpcrdma_retry_local_inv(r, ia);
+	}
+
+	spin_lock_irqsave(&buf->rb_lock, flags);
+	list_splice_tail(stale, &buf->rb_mws);
+	spin_unlock_irqrestore(&buf->rb_lock, flags);
+}
+
 static struct rpcrdma_req *
-rpcrdma_buffer_get_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
+rpcrdma_buffer_get_frmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf,
+			 struct list_head *stale)
+{
+	struct rpcrdma_mw *r;
+	int i;
+
+	i = RPCRDMA_MAX_SEGS - 1;
+	while (!list_empty(&buf->rb_mws)) {
+		r = list_entry(buf->rb_mws.next,
+			       struct rpcrdma_mw, mw_list);
+		list_del(&r->mw_list);
+		if (r->r.frmr.fr_state == FRMR_IS_STALE) {
+			list_add(&r->mw_list, stale);
+			continue;
+		}
+		req->rl_segments[i].mr_chunk.rl_mw = r;
+		if (unlikely(i-- == 0))
+			return req;	/* Success */
+	}
+
+	/* Not enough entries on rb_mws for this req */
+	rpcrdma_buffer_put_sendbuf(req, buf);
+	rpcrdma_buffer_put_mrs(req, buf);
+	return NULL;
+}
+
+static struct rpcrdma_req *
+rpcrdma_buffer_get_fmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
 {
 	struct rpcrdma_mw *r;
 	int i;
@@ -1393,6 +1476,7 @@ struct rpcrdma_req *
 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 {
 	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
+	struct list_head stale;
 	struct rpcrdma_req *req;
 	unsigned long flags;
 
@@ -1414,15 +1498,21 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
 	}
 	buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;
+
+	INIT_LIST_HEAD(&stale);
 	switch (ia->ri_memreg_strategy) {
 	case RPCRDMA_FRMR:
+		req = rpcrdma_buffer_get_frmrs(req, buffers, &stale);
+		break;
 	case RPCRDMA_MTHCAFMR:
-		req = rpcrdma_buffer_get_mrs(req, buffers);
+		req = rpcrdma_buffer_get_fmrs(req, buffers);
 		break;
 	default:
 		break;
 	}
 	spin_unlock_irqrestore(&buffers->rb_lock, flags);
+	if (!list_empty(&stale))
+		rpcrdma_retry_flushed_linv(&stale, buffers);
 	return req;
 }
 
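As a footnote on the retry path above: rpcrdma_retry_local_inv()
optimistically marks the FRMR valid before re-posting the LOCAL_INV, and
reverts it to FRMR_IS_STALE only if ib_post_send() fails, so a later
rpcrdma_buffer_get() will pick it up and retry. Below is a minimal
standalone model of just that state transition; post_local_inv() is a
made-up stub standing in for ib_post_send(), and nothing here touches a
real RDMA API.

/* Hypothetical model of the retry state transition; not kernel code. */
#include <stdio.h>

enum frmr_state { FRMR_IS_INVALID, FRMR_IS_VALID, FRMR_IS_STALE };

/* Stub standing in for ib_post_send(); nonzero means the post failed. */
static int post_local_inv(int simulate_failure)
{
	return simulate_failure ? -1 : 0;
}

/* Mirrors rpcrdma_retry_local_inv(): mark the FRMR valid up front (it is
 * about to go back on rb_mws), and revert it to stale only if the
 * LOCAL_INV post fails, so the next buffer_get pass retries it. */
static void retry_local_inv(enum frmr_state *state, int simulate_failure)
{
	*state = FRMR_IS_VALID;
	if (post_local_inv(simulate_failure))
		*state = FRMR_IS_STALE;
}

int main(void)
{
	enum frmr_state s;

	s = FRMR_IS_STALE;
	retry_local_inv(&s, 0);
	printf("post succeeded -> %s\n",
	       s == FRMR_IS_VALID ? "VALID" : "STALE");

	s = FRMR_IS_STALE;
	retry_local_inv(&s, 1);
	printf("post failed    -> %s\n",
	       s == FRMR_IS_VALID ? "VALID" : "STALE");
	return 0;
}

Setting VALID before posting matches the patch's comment that an FRMR
re-inserted into rb_mws must no longer read as stale; the failure branch
undoes it so the work request is retried rather than lost.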