 net/sunrpc/xprtrdma/rpc_rdma.c  |  34 +-
 net/sunrpc/xprtrdma/transport.c |   9 +-
 net/sunrpc/xprtrdma/verbs.c     | 165 +--
 net/sunrpc/xprtrdma/xprt_rdma.h |   2 -
 4 files changed, 7 insertions(+), 203 deletions(-)
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 02b29419e372..46b51727227b 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -199,7 +199,6 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
        return 0;
 
        do {
-               /* bind/register the memory, then build chunk from result. */
                int n = rpcrdma_register_external(seg, nsegs,
                                        cur_wchunk != NULL, r_xprt);
                if (n <= 0)
@@ -698,16 +697,6 @@ rpcrdma_conn_func(struct rpcrdma_ep *ep)
 }
 
 /*
- * This function is called when memory window unbind which we are waiting
- * for completes. Just use rr_func (zeroed by upcall) to signal completion.
- */
-static void
-rpcrdma_unbind_func(struct rpcrdma_rep *rep)
-{
-       wake_up(&rep->rr_unbind);
-}
-
-/*
  * Called as a tasklet to do req/reply match and complete a request
  * Errors must result in the RPC task either being awakened, or
  * allowed to timeout, to discover the errors at that time.
@@ -721,7 +710,7 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
        struct rpc_xprt *xprt = rep->rr_xprt;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        __be32 *iptr;
-       int i, rdmalen, status;
+       int rdmalen, status;
 
        /* Check status. If bad, signal disconnect and return rep to pool */
        if (rep->rr_len == ~0U) {
@@ -850,27 +839,6 @@ badheader:
                break;
        }
 
-       /* If using mw bind, start the deregister process now. */
-       /* (Note: if mr_free(), cannot perform it here, in tasklet context) */
-       if (req->rl_nchunks) switch (r_xprt->rx_ia.ri_memreg_strategy) {
-       case RPCRDMA_MEMWINDOWS:
-               for (i = 0; req->rl_nchunks-- > 1;)
-                       i += rpcrdma_deregister_external(
-                               &req->rl_segments[i], r_xprt, NULL);
-               /* Optionally wait (not here) for unbinds to complete */
-               rep->rr_func = rpcrdma_unbind_func;
-               (void) rpcrdma_deregister_external(&req->rl_segments[i],
-                       r_xprt, rep);
-               break;
-       case RPCRDMA_MEMWINDOWS_ASYNC:
-               for (i = 0; req->rl_nchunks--;)
-                       i += rpcrdma_deregister_external(&req->rl_segments[i],
-                               r_xprt, NULL);
-               break;
-       default:
-               break;
-       }
-
        dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
                __func__, xprt, rqst, status);
        xprt_complete_rqst(rqst->rq_task, status);
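
An aside on the loop idiom in the block deleted above: rpcrdma_deregister_external() returns the number of segments the chunk covered, so i strides forward one chunk per call while rl_nchunks counts down. A minimal illustration with hypothetical chunk sizes (the sizes are invented, not taken from the source):

        /* Three chunks covering 2, 1 and 4 segments: each call
         * consumes one chunk and returns its segment count, so i
         * steps 0 -> 2 -> 3 -> 7 while rl_nchunks runs 3 -> 0.
         */
        for (i = 0; req->rl_nchunks--;)
                i += rpcrdma_deregister_external(&req->rl_segments[i],
                                                 r_xprt, NULL);
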
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 8c5035a2792e..c23b0c13f77f 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -566,9 +566,7 @@ xprt_rdma_free(void *buffer)
                __func__, rep, (rep && rep->rr_func) ? " (with waiter)" : "");
 
        /*
-        * Finish the deregistration. When using mw bind, this was
-        * begun in rpcrdma_reply_handler(). In all other modes, we
-        * do it here, in thread context. The process is considered
+        * Finish the deregistration. The process is considered
         * complete when the rr_func vector becomes NULL - this
         * was put in place during rpcrdma_reply_handler() - the wait
         * call below will not block if the dereg is "done". If
@@ -580,11 +578,6 @@ xprt_rdma_free(void *buffer)
                        &req->rl_segments[i], r_xprt, NULL);
        }
 
-       if (rep && wait_event_interruptible(rep->rr_unbind, !rep->rr_func)) {
-               rep->rr_func = NULL;    /* abandon the callback */
-               req->rl_reply = NULL;
-       }
-
        if (req->rl_iov.length == 0) {  /* see allocate above */
                struct rpcrdma_req *oreq = (struct rpcrdma_req *)req->rl_buffer;
                oreq->rl_reply = req->rl_reply;
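
Taken together, the rpc_rdma.c and transport.c hunks dismantle a three-step handshake that the diff scatters across files. A condensed sketch, paraphrased from the deleted lines (not verbatim kernel code):

        /* 1. Tasklet context: rpcrdma_reply_handler() posted a
         *    signalled unbind for the last chunk and armed a waiter:
         */
        rep->rr_func = rpcrdma_unbind_func;
        rpcrdma_deregister_external(&req->rl_segments[i], r_xprt, rep);

        /* 2. CQ upcall: the unbind completion zeroed rr_func and
         *    invoked it, which did wake_up(&rep->rr_unbind).
         *
         * 3. Thread context: xprt_rdma_free() slept until step 2 ran:
         */
        wait_event_interruptible(rep->rr_unbind, !rep->rr_func);

With memory windows gone, no deregistration completes asynchronously any more, so the wait queue, the wake-up callback, and the interrupted-wait cleanup can all be deleted together.
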
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 4a4e4eafdb32..304c7adc83c4 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -152,7 +152,7 @@ void rpcrdma_event_process(struct ib_wc *wc)
        dprintk("RPC: %s: event rep %p status %X opcode %X length %u\n",
                __func__, rep, wc->status, wc->opcode, wc->byte_len);
 
-       if (!rep) /* send or bind completion that we don't care about */
+       if (!rep) /* send completion that we don't care about */
                return;
 
        if (IB_WC_SUCCESS != wc->status) {
@@ -197,8 +197,6 @@ void rpcrdma_event_process(struct ib_wc *wc)
                }
                atomic_set(&rep->rr_buffer->rb_credits, credits);
        }
-               /* fall through */
-       case IB_WC_BIND_MW:
                rpcrdma_schedule_tasklet(rep);
                break;
        default:
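
Note the control-flow consequence here: IB_WC_RECV previously fell through into a tasklet-scheduling arm it shared with IB_WC_BIND_MW. With rep-carrying bind/unbind completions gone, the receive case now reaches rpcrdma_schedule_tasklet() directly, and any send completion arrives with a NULL rep and is dropped by the check shown in the previous hunk.
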
@@ -233,7 +231,7 @@ rpcrdma_cq_poll(struct ib_cq *cq)
 /*
  * rpcrdma_cq_event_upcall
  *
- * This upcall handles recv, send, bind and unbind events.
+ * This upcall handles recv and send events.
  * It is reentrant but processes single events in order to maintain
  * ordering of receives to keep server credits.
  *
@@ -494,16 +492,6 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
        }
 
        switch (memreg) {
-       case RPCRDMA_MEMWINDOWS:
-       case RPCRDMA_MEMWINDOWS_ASYNC:
-               if (!(devattr.device_cap_flags & IB_DEVICE_MEM_WINDOW)) {
-                       dprintk("RPC: %s: MEMWINDOWS registration "
-                               "specified but not supported by adapter, "
-                               "using slower RPCRDMA_REGISTER\n",
-                               __func__);
-                       memreg = RPCRDMA_REGISTER;
-               }
-               break;
        case RPCRDMA_MTHCAFMR:
                if (!ia->ri_id->device->alloc_fmr) {
 #if RPCRDMA_PERSISTENT_REGISTRATION
@@ -567,16 +555,13 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
                                IB_ACCESS_REMOTE_READ;
                goto register_setup;
 #endif
-       case RPCRDMA_MEMWINDOWS_ASYNC:
-       case RPCRDMA_MEMWINDOWS:
-               mem_priv = IB_ACCESS_LOCAL_WRITE |
-                               IB_ACCESS_MW_BIND;
-               goto register_setup;
        case RPCRDMA_MTHCAFMR:
                if (ia->ri_have_dma_lkey)
                        break;
                mem_priv = IB_ACCESS_LOCAL_WRITE;
+#if RPCRDMA_PERSISTENT_REGISTRATION
        register_setup:
+#endif
                ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
                if (IS_ERR(ia->ri_bind_mem)) {
                        printk(KERN_ALERT "%s: ib_get_dma_mr for "
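
The new #if/#endif around the register_setup: label is a build fix that falls out of the deletion: the only surviving goto register_setup is the arm just above it, which is itself compiled under RPCRDMA_PERSISTENT_REGISTRATION (see the #endif it sits behind). Left unguarded, the label would be unused - and draw a compiler warning - whenever that option is configured out.
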
@@ -699,14 +684,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
                }
                break;
        }
-       case RPCRDMA_MEMWINDOWS_ASYNC:
-       case RPCRDMA_MEMWINDOWS:
-               /* Add room for mw_binds+unbinds - overkill! */
-               ep->rep_attr.cap.max_send_wr++;
-               ep->rep_attr.cap.max_send_wr *= (2 * RPCRDMA_MAX_SEGS);
-               if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr)
-                       return -EINVAL;
-               break;
        default:
                break;
        }
@@ -728,14 +705,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 
        /* set trigger for requesting send completion */
        ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 /* - 1*/;
-       switch (ia->ri_memreg_strategy) {
-       case RPCRDMA_MEMWINDOWS_ASYNC:
-       case RPCRDMA_MEMWINDOWS:
-               ep->rep_cqinit -= RPCRDMA_MAX_SEGS;
-               break;
-       default:
-               break;
-       }
        if (ep->rep_cqinit <= 2)
                ep->rep_cqinit = 0;
        INIT_CQCOUNT(ep);
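
For context on rep_cqinit, which the deleted switch adjusted: send completions on this CQ are normally suppressed, and the counter decides which send WR gets signalled. A worked example with hypothetical numbers, assuming INIT_CQCOUNT()/DECR_CQCOUNT() behave as their names and the deleted memory-window code suggest (rearm to rep_cqinit; count each posted send down, signalling the one that reaches zero):

        /* Say max_send_wr = 64, so rep_cqinit = 64/2 = 32.         */
        /* Sends 1..31:  DECR_CQCOUNT() > 0  -> posted unsignalled. */
        /* Send 32:      DECR_CQCOUNT() == 0 -> IB_SEND_SIGNALED,   */
        /*               then INIT_CQCOUNT() rearms the window.     */

The deleted branch shrank that window by RPCRDMA_MAX_SEGS to reserve completion budget for bind/unbind work requests; with no such WRs left, the reservation goes too.
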
@@ -743,11 +712,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
        init_waitqueue_head(&ep->rep_connect_wait);
        INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
 
-       /*
-        * Create a single cq for receive dto and mw_bind (only ever
-        * care about unbind, really). Send completions are suppressed.
-        * Use single threaded tasklet upcalls to maintain ordering.
-        */
        ep->rep_cq = ib_create_cq(ia->ri_id->device, rpcrdma_cq_event_upcall,
                                  rpcrdma_cq_async_error_upcall, NULL,
                                  ep->rep_attr.cap.max_recv_wr +
@@ -1020,11 +984,6 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
                len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS *
                                sizeof(struct rpcrdma_mw);
                break;
-       case RPCRDMA_MEMWINDOWS_ASYNC:
-       case RPCRDMA_MEMWINDOWS:
-               len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS *
-                               sizeof(struct rpcrdma_mw);
-               break;
        default:
                break;
        }
@@ -1055,11 +1014,6 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
        }
        p += cdata->padding;
 
-       /*
-        * Allocate the fmr's, or mw's for mw_bind chunk registration.
-        * We "cycle" the mw's in order to minimize rkey reuse,
-        * and also reduce unbind-to-bind collision.
-        */
        INIT_LIST_HEAD(&buf->rb_mws);
        r = (struct rpcrdma_mw *)p;
        switch (ia->ri_memreg_strategy) {
@@ -1107,21 +1061,6 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
                        ++r;
                }
                break;
-       case RPCRDMA_MEMWINDOWS_ASYNC:
-       case RPCRDMA_MEMWINDOWS:
-               /* Allocate one extra request's worth, for full cycling */
-               for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) {
-                       r->r.mw = ib_alloc_mw(ia->ri_pd, IB_MW_TYPE_1);
-                       if (IS_ERR(r->r.mw)) {
-                               rc = PTR_ERR(r->r.mw);
-                               dprintk("RPC: %s: ib_alloc_mw"
-                                       " failed %i\n", __func__, rc);
-                               goto out;
-                       }
-                       list_add(&r->mw_list, &buf->rb_mws);
-                       ++r;
-               }
-               break;
        default:
                break;
        }
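
The loop removed here pairs with the rb_mws sizing hunk earlier in this file: both compute one extra request's worth of windows so that rkeys could be cycled. With hypothetical values of rb_max_requests = 32 and RPCRDMA_MAX_SEGS = 8 (illustrative only, not taken from the source), the deleted code would have created

        (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS = (32 + 1) * 8 = 264

type 1 memory windows via ib_alloc_mw(), each threaded onto buf->rb_mws with list_add(); the matching ib_dealloc_mw() teardown disappears from rpcrdma_buffer_destroy() below.
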
@@ -1170,7 +1109,6 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
                memset(rep, 0, sizeof(struct rpcrdma_rep));
                buf->rb_recv_bufs[i] = rep;
                buf->rb_recv_bufs[i]->rr_buffer = buf;
-               init_waitqueue_head(&rep->rr_unbind);
 
                rc = rpcrdma_register_internal(ia, rep->rr_base,
                                len - offsetof(struct rpcrdma_rep, rr_base),
@@ -1204,7 +1142,6 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 
        /* clean up in reverse order from create
         *   1.  recv mr memory (mr free, then kfree)
-        *   1a. bind mw memory
         *   2.  send mr memory (mr free, then kfree)
         *   3.  padding (if any) [moved to rpcrdma_ep_destroy]
         *   4.  arrays
@@ -1248,15 +1185,6 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
                                        " failed %i\n",
                                        __func__, rc);
                        break;
-               case RPCRDMA_MEMWINDOWS_ASYNC:
-               case RPCRDMA_MEMWINDOWS:
-                       rc = ib_dealloc_mw(r->r.mw);
-                       if (rc)
-                               dprintk("RPC: %s:"
-                                       " ib_dealloc_mw"
-                                       " failed %i\n",
-                                       __func__, rc);
-                       break;
                default:
                        break;
                }
@@ -1331,15 +1259,12 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
        req->rl_niovs = 0;
        if (req->rl_reply) {
                buffers->rb_recv_bufs[--buffers->rb_recv_index] = req->rl_reply;
-               init_waitqueue_head(&req->rl_reply->rr_unbind);
                req->rl_reply->rr_func = NULL;
                req->rl_reply = NULL;
        }
        switch (ia->ri_memreg_strategy) {
        case RPCRDMA_FRMR:
        case RPCRDMA_MTHCAFMR:
-       case RPCRDMA_MEMWINDOWS_ASYNC:
-       case RPCRDMA_MEMWINDOWS:
                /*
                 * Cycle mw's back in reverse order, and "spin" them.
                 * This delays and scrambles reuse as much as possible.
@@ -1384,8 +1309,7 @@ rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
 
 /*
  * Put reply buffers back into pool when not attached to
- * request. This happens in error conditions, and when
- * aborting unbinds. Pre-decrement counter/array index.
+ * request. This happens in error conditions.
  */
 void
 rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
@@ -1688,74 +1612,6 @@ rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg,
 }
 
 static int
-rpcrdma_register_memwin_external(struct rpcrdma_mr_seg *seg,
-                       int *nsegs, int writing, struct rpcrdma_ia *ia,
-                       struct rpcrdma_xprt *r_xprt)
-{
-       int mem_priv = (writing ? IB_ACCESS_REMOTE_WRITE :
-                                 IB_ACCESS_REMOTE_READ);
-       struct ib_mw_bind param;
-       int rc;
-
-       *nsegs = 1;
-       rpcrdma_map_one(ia, seg, writing);
-       param.bind_info.mr = ia->ri_bind_mem;
-       param.wr_id = 0ULL;     /* no send cookie */
-       param.bind_info.addr = seg->mr_dma;
-       param.bind_info.length = seg->mr_len;
-       param.send_flags = 0;
-       param.bind_info.mw_access_flags = mem_priv;
-
-       DECR_CQCOUNT(&r_xprt->rx_ep);
-       rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, &param);
-       if (rc) {
-               dprintk("RPC: %s: failed ib_bind_mw "
-                       "%u@0x%llx status %i\n",
-                       __func__, seg->mr_len,
-                       (unsigned long long)seg->mr_dma, rc);
-               rpcrdma_unmap_one(ia, seg);
-       } else {
-               seg->mr_rkey = seg->mr_chunk.rl_mw->r.mw->rkey;
-               seg->mr_base = param.bind_info.addr;
-               seg->mr_nsegs = 1;
-       }
-       return rc;
-}
-
-static int
-rpcrdma_deregister_memwin_external(struct rpcrdma_mr_seg *seg,
-                       struct rpcrdma_ia *ia,
-                       struct rpcrdma_xprt *r_xprt, void **r)
-{
-       struct ib_mw_bind param;
-       LIST_HEAD(l);
-       int rc;
-
-       BUG_ON(seg->mr_nsegs != 1);
-       param.bind_info.mr = ia->ri_bind_mem;
-       param.bind_info.addr = 0ULL;    /* unbind */
-       param.bind_info.length = 0;
-       param.bind_info.mw_access_flags = 0;
-       if (*r) {
-               param.wr_id = (u64) (unsigned long) *r;
-               param.send_flags = IB_SEND_SIGNALED;
-               INIT_CQCOUNT(&r_xprt->rx_ep);
-       } else {
-               param.wr_id = 0ULL;
-               param.send_flags = 0;
-               DECR_CQCOUNT(&r_xprt->rx_ep);
-       }
-       rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, &param);
-       rpcrdma_unmap_one(ia, seg);
-       if (rc)
-               dprintk("RPC: %s: failed ib_(un)bind_mw,"
-                       " status %i\n", __func__, rc);
-       else
-               *r = NULL;      /* will upcall on completion */
-       return rc;
-}
-
-static int
 rpcrdma_register_default_external(struct rpcrdma_mr_seg *seg,
                        int *nsegs, int writing, struct rpcrdma_ia *ia)
 {
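
Both deleted helpers drove the hardware through the same verb, ib_bind_mw(); only the struct ib_mw_bind parameter block distinguished bind from unbind. A condensed view, paraphrased from the deleted lines:

        /* bind:   bind_info.addr = seg->mr_dma, length = seg->mr_len,
         *         mw_access_flags = REMOTE_READ or REMOTE_WRITE,
         *         wr_id = 0, unsignalled (DECR_CQCOUNT).
         *
         * unbind: bind_info.addr = 0, length = 0, access flags = 0;
         *         when a waiter exists it rides along as the WR cookie:
         *             param.wr_id = (u64)(unsigned long)rep;
         *             param.send_flags = IB_SEND_SIGNALED;
         *         so the completion upcall can locate the rep to wake.
         */
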
@@ -1845,12 +1701,6 @@ rpcrdma_register_external(struct rpcrdma_mr_seg *seg,
                rc = rpcrdma_register_fmr_external(seg, &nsegs, writing, ia);
                break;
 
-       /* Registration using memory windows */
-       case RPCRDMA_MEMWINDOWS_ASYNC:
-       case RPCRDMA_MEMWINDOWS:
-               rc = rpcrdma_register_memwin_external(seg, &nsegs, writing, ia, r_xprt);
-               break;
-
        /* Default registration each time */
        default:
                rc = rpcrdma_register_default_external(seg, &nsegs, writing, ia);
@@ -1887,11 +1737,6 @@ rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg,
                rc = rpcrdma_deregister_fmr_external(seg, ia);
                break;
 
-       case RPCRDMA_MEMWINDOWS_ASYNC:
-       case RPCRDMA_MEMWINDOWS:
-               rc = rpcrdma_deregister_memwin_external(seg, ia, r_xprt, &r);
-               break;
-
        default:
                rc = rpcrdma_deregister_default_external(seg, ia);
                break;
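
After these two hunks the memory-window arms vanish from both dispatch switches. A sketch of the deregistration dispatch that remains (condensed; the FRMR arm is not shown in this hunk, and its signature here is an assumption based on the file's naming pattern):

        switch (ia->ri_memreg_strategy) {
        case RPCRDMA_FRMR:
                rc = rpcrdma_deregister_frmr_external(seg, ia, r_xprt);
                break;
        case RPCRDMA_MTHCAFMR:
                rc = rpcrdma_deregister_fmr_external(seg, ia);
                break;
        default:
                rc = rpcrdma_deregister_default_external(seg, ia);
                break;
        }

Note that the local cookie r, which only the memory-window arm consumed (passed as &r for the signalled unbind), no longer has a user in this function.
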
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index c620d1332933..bf08ee0b81e0 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -127,7 +127,6 @@ struct rpcrdma_rep {
        struct rpc_xprt *rr_xprt;       /* needed for request/reply matching */
        void (*rr_func)(struct rpcrdma_rep *);/* called by tasklet in softint */
        struct list_head rr_list;       /* tasklet list */
-       wait_queue_head_t rr_unbind;    /* optional unbind wait */
        struct ib_sge rr_iov;           /* for posting */
        struct ib_mr *rr_handle;        /* handle for mem in rr_iov */
        char    rr_base[MAX_RPCRDMAHDR]; /* minimal inline receive buffer */
@@ -162,7 +161,6 @@ struct rpcrdma_mr_seg {                /* chunk descriptors */
        struct ib_mr    *rl_mr;         /* if registered directly */
        struct rpcrdma_mw {             /* if registered from region */
                union {
-                       struct ib_mw    *mw;
                        struct ib_fmr   *fmr;
                        struct {
                                struct ib_fast_reg_page_list *fr_pgl;