diff options
author | Chuck Lever <chuck.lever@oracle.com> | 2016-11-29 10:52:57 -0500 |
---|---|---|
committer | Anna Schumaker <Anna.Schumaker@Netapp.com> | 2016-11-29 16:45:44 -0500 |
commit | a100fda1a2e1fa6c52373b9c7985a0bd3459bf4c (patch) | |
tree | e4c49a3f6a390938405da40e9906271d78930246 | |
parent | 48016dce46ad504a378849490bfb99c98be5cfaa (diff) |
xprtrdma: Refactor FRMR invalidation
Clean up: After some recent updates, clarifications can be made to
the FRMR invalidation logic.
- Both the remote and local invalidation case mark the frmr INVALID,
so make that a common path.
- Manage the WR list more "tastefully" by replacing the conditional
that discriminates between the list head and ->next pointers.
- Use mw->mw_handle in all cases, since that has the same value as
f->fr_mr->rkey, and is already in cache.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
-rw-r--r-- | net/sunrpc/xprtrdma/frwr_ops.c | 57 |
1 file changed, 21 insertions(+), 36 deletions(-)
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index e99bf6180136..900dc4024d2c 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c | |||
@@ -457,26 +457,6 @@ out_senderr: | |||
457 | return -ENOTCONN; | 457 | return -ENOTCONN; |
458 | } | 458 | } |
459 | 459 | ||
460 | static struct ib_send_wr * | ||
461 | __frwr_prepare_linv_wr(struct rpcrdma_mw *mw) | ||
462 | { | ||
463 | struct rpcrdma_frmr *f = &mw->frmr; | ||
464 | struct ib_send_wr *invalidate_wr; | ||
465 | |||
466 | dprintk("RPC: %s: invalidating frmr %p\n", __func__, f); | ||
467 | |||
468 | f->fr_state = FRMR_IS_INVALID; | ||
469 | invalidate_wr = &f->fr_invwr; | ||
470 | |||
471 | memset(invalidate_wr, 0, sizeof(*invalidate_wr)); | ||
472 | f->fr_cqe.done = frwr_wc_localinv; | ||
473 | invalidate_wr->wr_cqe = &f->fr_cqe; | ||
474 | invalidate_wr->opcode = IB_WR_LOCAL_INV; | ||
475 | invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey; | ||
476 | |||
477 | return invalidate_wr; | ||
478 | } | ||
479 | |||
480 | /* Invalidate all memory regions that were registered for "req". | 460 | /* Invalidate all memory regions that were registered for "req". |
481 | * | 461 | * |
482 | * Sleeps until it is safe for the host CPU to access the | 462 | * Sleeps until it is safe for the host CPU to access the |
@@ -487,7 +467,7 @@ __frwr_prepare_linv_wr(struct rpcrdma_mw *mw) | |||
487 | static void | 467 | static void |
488 | frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) | 468 | frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) |
489 | { | 469 | { |
490 | struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr; | 470 | struct ib_send_wr *first, **prev, *last, *bad_wr; |
491 | struct rpcrdma_rep *rep = req->rl_reply; | 471 | struct rpcrdma_rep *rep = req->rl_reply; |
492 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | 472 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
493 | struct rpcrdma_mw *mw, *tmp; | 473 | struct rpcrdma_mw *mw, *tmp; |
@@ -503,23 +483,28 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) | |||
503 | */ | 483 | */ |
504 | f = NULL; | 484 | f = NULL; |
505 | count = 0; | 485 | count = 0; |
506 | invalidate_wrs = pos = prev = NULL; | 486 | prev = &first; |
507 | list_for_each_entry(mw, &req->rl_registered, mw_list) { | 487 | list_for_each_entry(mw, &req->rl_registered, mw_list) { |
488 | mw->frmr.fr_state = FRMR_IS_INVALID; | ||
489 | |||
508 | if ((rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) && | 490 | if ((rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) && |
509 | (mw->mw_handle == rep->rr_inv_rkey)) { | 491 | (mw->mw_handle == rep->rr_inv_rkey)) |
510 | mw->frmr.fr_state = FRMR_IS_INVALID; | ||
511 | continue; | 492 | continue; |
512 | } | ||
513 | 493 | ||
514 | pos = __frwr_prepare_linv_wr(mw); | 494 | f = &mw->frmr; |
495 | dprintk("RPC: %s: invalidating frmr %p\n", | ||
496 | __func__, f); | ||
497 | |||
498 | f->fr_cqe.done = frwr_wc_localinv; | ||
499 | last = &f->fr_invwr; | ||
500 | memset(last, 0, sizeof(*last)); | ||
501 | last->wr_cqe = &f->fr_cqe; | ||
502 | last->opcode = IB_WR_LOCAL_INV; | ||
503 | last->ex.invalidate_rkey = mw->mw_handle; | ||
515 | count++; | 504 | count++; |
516 | 505 | ||
517 | if (!invalidate_wrs) | 506 | *prev = last; |
518 | invalidate_wrs = pos; | 507 | prev = &last->next; |
519 | else | ||
520 | prev->next = pos; | ||
521 | prev = pos; | ||
522 | f = &mw->frmr; | ||
523 | } | 508 | } |
524 | if (!f) | 509 | if (!f) |
525 | goto unmap; | 510 | goto unmap; |
@@ -528,7 +513,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) | |||
528 | * last WR in the chain completes, all WRs in the chain | 513 | * last WR in the chain completes, all WRs in the chain |
529 | * are complete. | 514 | * are complete. |
530 | */ | 515 | */ |
531 | f->fr_invwr.send_flags = IB_SEND_SIGNALED; | 516 | last->send_flags = IB_SEND_SIGNALED; |
532 | f->fr_cqe.done = frwr_wc_localinv_wake; | 517 | f->fr_cqe.done = frwr_wc_localinv_wake; |
533 | reinit_completion(&f->fr_linv_done); | 518 | reinit_completion(&f->fr_linv_done); |
534 | 519 | ||
@@ -543,7 +528,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) | |||
543 | * unless ri_id->qp is a valid pointer. | 528 | * unless ri_id->qp is a valid pointer. |
544 | */ | 529 | */ |
545 | r_xprt->rx_stats.local_inv_needed++; | 530 | r_xprt->rx_stats.local_inv_needed++; |
546 | rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr); | 531 | rc = ib_post_send(ia->ri_id->qp, first, &bad_wr); |
547 | if (rc) | 532 | if (rc) |
548 | goto reset_mrs; | 533 | goto reset_mrs; |
549 | 534 | ||
@@ -554,7 +539,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) | |||
554 | */ | 539 | */ |
555 | unmap: | 540 | unmap: |
556 | list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) { | 541 | list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) { |
557 | dprintk("RPC: %s: unmapping frmr %p\n", | 542 | dprintk("RPC: %s: DMA unmapping frmr %p\n", |
558 | __func__, &mw->frmr); | 543 | __func__, &mw->frmr); |
559 | list_del_init(&mw->mw_list); | 544 | list_del_init(&mw->mw_list); |
560 | ib_dma_unmap_sg(ia->ri_device, | 545 | ib_dma_unmap_sg(ia->ri_device, |
@@ -572,7 +557,7 @@ reset_mrs: | |||
572 | */ | 557 | */ |
573 | list_for_each_entry(mw, &req->rl_registered, mw_list) { | 558 | list_for_each_entry(mw, &req->rl_registered, mw_list) { |
574 | f = &mw->frmr; | 559 | f = &mw->frmr; |
575 | if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) { | 560 | if (mw->mw_handle == bad_wr->ex.invalidate_rkey) { |
576 | __frwr_reset_mr(ia, mw); | 561 | __frwr_reset_mr(ia, mw); |
577 | bad_wr = bad_wr->next; | 562 | bad_wr = bad_wr->next; |
578 | } | 563 | } |