Diffstat (limited to 'net/sunrpc/xprtrdma')
 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c  | 18
 net/sunrpc/xprtrdma/svc_rdma_sendto.c    | 80
 net/sunrpc/xprtrdma/svc_rdma_transport.c | 18
 3 files changed, 78 insertions(+), 38 deletions(-)
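Every hunk below makes the same substitution: ib_dma_map_single(), which needs a kernel virtual address and therefore a page_address() lookup, is replaced by ib_dma_map_page(), which takes the struct page plus a byte offset directly and so also works for highmem pages that have no permanent kernel mapping. A minimal sketch of the two call shapes, assuming dev and page are a valid ib_device and a caller-owned page (both are stand-ins, not names from this patch):

	/* Before: map by kernel virtual address, forcing page_address(). */
	u64 before = ib_dma_map_single(dev, page_address(page),
				       PAGE_SIZE, DMA_FROM_DEVICE);

	/* After: map by page + in-page offset; no page_address() needed. */
	u64 after = ib_dma_map_page(dev, page, 0,
				    PAGE_SIZE, DMA_FROM_DEVICE);

Either way the returned bus address must be validated with ib_dma_mapping_error() before use, as every call site below does.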
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 0194de814933..926bdb44f3de 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -263,9 +263,9 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
 	frmr->page_list_len = PAGE_ALIGN(byte_count) >> PAGE_SHIFT;
 	for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
 		frmr->page_list->page_list[page_no] =
-			ib_dma_map_single(xprt->sc_cm_id->device,
-					  page_address(rqstp->rq_arg.pages[page_no]),
-					  PAGE_SIZE, DMA_FROM_DEVICE);
+			ib_dma_map_page(xprt->sc_cm_id->device,
+					rqstp->rq_arg.pages[page_no], 0,
+					PAGE_SIZE, DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 					 frmr->page_list->page_list[page_no]))
 			goto fatal_err;
@@ -309,17 +309,21 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
 			   int count)
 {
 	int i;
+	unsigned long off;
 
 	ctxt->count = count;
 	ctxt->direction = DMA_FROM_DEVICE;
 	for (i = 0; i < count; i++) {
 		ctxt->sge[i].length = 0; /* in case map fails */
 		if (!frmr) {
+			BUG_ON(0 == virt_to_page(vec[i].iov_base));
+			off = (unsigned long)vec[i].iov_base & ~PAGE_MASK;
 			ctxt->sge[i].addr =
-				ib_dma_map_single(xprt->sc_cm_id->device,
-						  vec[i].iov_base,
-						  vec[i].iov_len,
-						  DMA_FROM_DEVICE);
+				ib_dma_map_page(xprt->sc_cm_id->device,
+						virt_to_page(vec[i].iov_base),
+						off,
+						vec[i].iov_len,
+						DMA_FROM_DEVICE);
 			if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 						 ctxt->sge[i].addr))
 				return -EINVAL;
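rdma_set_ctxt_sge() maps kvecs whose base addresses are not necessarily page-aligned, so each address is first decomposed into its containing page and an in-page offset. A short sketch of that decomposition, valid only for lowmem addresses where virt_to_page() is defined (buf is a hypothetical kernel virtual address):

	void *buf = vec[i].iov_base;		/* may be unaligned */
	struct page *pg = virt_to_page(buf);	/* containing page */
	unsigned long off = (unsigned long)buf & ~PAGE_MASK;
	/* e.g. with 4K pages, an address ending ...1c40 gives off == 0xc40 */

The new BUG_ON(0 == virt_to_page(vec[i].iov_base)) catches a bogus translation before the mapping is attempted.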
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index b15e1ebb2bfa..d4f5e0e43f09 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -70,8 +70,8 @@
  * on extra page for the RPCRMDA header.
  */
 static int fast_reg_xdr(struct svcxprt_rdma *xprt,
-			struct xdr_buf *xdr,
-			struct svc_rdma_req_map *vec)
+		 struct xdr_buf *xdr,
+		 struct svc_rdma_req_map *vec)
 {
 	int sge_no;
 	u32 sge_bytes;
@@ -96,21 +96,25 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
 	vec->count = 2;
 	sge_no++;
 
-	/* Build the FRMR */
+	/* Map the XDR head */
 	frmr->kva = frva;
 	frmr->direction = DMA_TO_DEVICE;
 	frmr->access_flags = 0;
 	frmr->map_len = PAGE_SIZE;
 	frmr->page_list_len = 1;
+	page_off = (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
 	frmr->page_list->page_list[page_no] =
-		ib_dma_map_single(xprt->sc_cm_id->device,
-				  (void *)xdr->head[0].iov_base,
-				  PAGE_SIZE, DMA_TO_DEVICE);
+		ib_dma_map_page(xprt->sc_cm_id->device,
+				virt_to_page(xdr->head[0].iov_base),
+				page_off,
+				PAGE_SIZE - page_off,
+				DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 				 frmr->page_list->page_list[page_no]))
 		goto fatal_err;
 	atomic_inc(&xprt->sc_dma_used);
 
+	/* Map the XDR page list */
 	page_off = xdr->page_base;
 	page_bytes = xdr->page_len + page_off;
 	if (!page_bytes)
@@ -128,9 +132,9 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
 		page_bytes -= sge_bytes;
 
 		frmr->page_list->page_list[page_no] =
-			ib_dma_map_single(xprt->sc_cm_id->device,
-					  page_address(page),
-					  PAGE_SIZE, DMA_TO_DEVICE);
+			ib_dma_map_page(xprt->sc_cm_id->device,
+					page, page_off,
+					sge_bytes, DMA_TO_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 					 frmr->page_list->page_list[page_no]))
 			goto fatal_err;
@@ -166,8 +170,10 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
 		vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
 
 		frmr->page_list->page_list[page_no] =
-			ib_dma_map_single(xprt->sc_cm_id->device, va, PAGE_SIZE,
-					  DMA_TO_DEVICE);
+			ib_dma_map_page(xprt->sc_cm_id->device, virt_to_page(va),
+					page_off,
+					PAGE_SIZE,
+					DMA_TO_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 					 frmr->page_list->page_list[page_no]))
 			goto fatal_err;
@@ -245,6 +251,35 @@ static int map_xdr(struct svcxprt_rdma *xprt,
 	return 0;
 }
 
+static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
+			      struct xdr_buf *xdr,
+			      u32 xdr_off, size_t len, int dir)
+{
+	struct page *page;
+	dma_addr_t dma_addr;
+	if (xdr_off < xdr->head[0].iov_len) {
+		/* This offset is in the head */
+		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
+		page = virt_to_page(xdr->head[0].iov_base);
+	} else {
+		xdr_off -= xdr->head[0].iov_len;
+		if (xdr_off < xdr->page_len) {
+			/* This offset is in the page list */
+			page = xdr->pages[xdr_off >> PAGE_SHIFT];
+			xdr_off &= ~PAGE_MASK;
+		} else {
+			/* This offset is in the tail */
+			xdr_off -= xdr->page_len;
+			xdr_off += (unsigned long)
+				xdr->tail[0].iov_base & ~PAGE_MASK;
+			page = virt_to_page(xdr->tail[0].iov_base);
+		}
+	}
+	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
+				   min_t(size_t, PAGE_SIZE, len), dir);
+	return dma_addr;
+}
+
 /* Assumptions:
  * - We are using FRMR
  * - or -
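The new dma_map_xdr() helper turns a linear offset into an xdr_buf into a (page, in-page offset) pair by testing the three regions in order (head kvec, page list, tail kvec), then maps at most one page's worth starting there; note the min_t() clamp on the mapped length. A hedged sketch of how a send path can walk a buffer with it — the loop bounds, remaining counter, and error handling here are simplified stand-ins, not code from this patch:

	u32 xdr_off = 0;
	while (remaining) {
		size_t n = min_t(size_t, remaining, PAGE_SIZE);
		dma_addr_t addr = dma_map_xdr(xprt, &rqstp->rq_res,
					      xdr_off, n, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, addr))
			return -EINVAL;	/* caller unmaps what was mapped */
		/* ... store addr and n into the next SGE ... */
		xdr_off += n;
		remaining -= n;
	}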
@@ -293,10 +328,9 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
 		sge[sge_no].length = sge_bytes;
 		if (!vec->frmr) {
 			sge[sge_no].addr =
-				ib_dma_map_single(xprt->sc_cm_id->device,
-						  (void *)
-						  vec->sge[xdr_sge_no].iov_base + sge_off,
-						  sge_bytes, DMA_TO_DEVICE);
+				dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
+					    sge_bytes, DMA_TO_DEVICE);
+			xdr_off += sge_bytes;
 			if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 						 sge[sge_no].addr))
 				goto err;
@@ -494,7 +528,8 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
  * In all three cases, this function prepares the RPCRDMA header in
  * sge[0], the 'type' parameter indicates the type to place in the
  * RPCRDMA header, and the 'byte_count' field indicates how much of
- * the XDR to include in this RDMA_SEND.
+ * the XDR to include in this RDMA_SEND. NB: The offset of the payload
+ * to send is zero in the XDR.
  */
 static int send_reply(struct svcxprt_rdma *rdma,
 		      struct svc_rqst *rqstp,
@@ -536,23 +571,24 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
 	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
 	ctxt->sge[0].addr =
-	    ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
-			      ctxt->sge[0].length, DMA_TO_DEVICE);
+	    ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
+			    ctxt->sge[0].length, DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
 		goto err;
 	atomic_inc(&rdma->sc_dma_used);
 
 	ctxt->direction = DMA_TO_DEVICE;
 
-	/* Determine how many of our SGE are to be transmitted */
+	/* Map the payload indicated by 'byte_count' */
 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
+		int xdr_off = 0;
 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
 		byte_count -= sge_bytes;
 		if (!vec->frmr) {
 			ctxt->sge[sge_no].addr =
-				ib_dma_map_single(rdma->sc_cm_id->device,
-						  vec->sge[sge_no].iov_base,
-						  sge_bytes, DMA_TO_DEVICE);
+				dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
+					    sge_bytes, DMA_TO_DEVICE);
+			xdr_off += sge_bytes;
 			if (ib_dma_mapping_error(rdma->sc_cm_id->device,
 						 ctxt->sge[sge_no].addr))
 				goto err;
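Taken together, the sendto.c hunks split reply construction into two mapping styles: the RPCRDMA header page is mapped whole with ib_dma_map_page(page, 0, ...), while the payload SGEs are mapped out of rqstp->rq_res by linear offset through dma_map_xdr(), advancing xdr_off by sge_bytes per SGE. A condensed sketch of that shape (error unwinding and the FRMR branch omitted, and xdr_off hoisted out of the loop for readability — the patch itself declares it inside the loop body):

	ctxt->sge[0].addr = ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
					    ctxt->sge[0].length, DMA_TO_DEVICE);
	/* ... */
	int xdr_off = 0;
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		ctxt->sge[sge_no].addr = dma_map_xdr(rdma, &rqstp->rq_res,
						     xdr_off, sge_bytes,
						     DMA_TO_DEVICE);
		xdr_off += sge_bytes;
	}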
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 950a206600c0..e87e000e984c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -121,7 +121,7 @@ void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
 		 */
 		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
 			atomic_dec(&xprt->sc_dma_used);
-			ib_dma_unmap_single(xprt->sc_cm_id->device,
+			ib_dma_unmap_page(xprt->sc_cm_id->device,
 					    ctxt->sge[i].addr,
 					    ctxt->sge[i].length,
 					    ctxt->direction);
@@ -503,8 +503,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 		BUG_ON(sge_no >= xprt->sc_max_sge);
 		page = svc_rdma_get_page();
 		ctxt->pages[sge_no] = page;
-		pa = ib_dma_map_single(xprt->sc_cm_id->device,
-				     page_address(page), PAGE_SIZE,
-				     DMA_FROM_DEVICE);
+		pa = ib_dma_map_page(xprt->sc_cm_id->device,
+				     page, 0, PAGE_SIZE,
+				     DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
 			goto err_put_ctxt;
@@ -800,8 +800,8 @@ static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
 		if (ib_dma_mapping_error(frmr->mr->device, addr))
 			continue;
 		atomic_dec(&xprt->sc_dma_used);
-		ib_dma_unmap_single(frmr->mr->device, addr, PAGE_SIZE,
-				    frmr->direction);
+		ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE,
+				  frmr->direction);
 	}
 }
 
@@ -1276,7 +1276,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 				   atomic_read(&xprt->sc_sq_count) <
 				   xprt->sc_sq_depth);
 			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
-				return 0;
+				return -ENOTCONN;
 			continue;
 		}
 		/* Take a transport ref for each WR posted */
@@ -1322,8 +1322,8 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
 	/* Prepare SGE for local address */
-	sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
-				     page_address(p), PAGE_SIZE, DMA_FROM_DEVICE);
+	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
+				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 	if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
 		put_page(p);
 		return;
@@ -1350,7 +1350,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	if (ret) {
 		dprintk("svcrdma: Error %d posting send for protocol error\n",
 			ret);
-		ib_dma_unmap_single(xprt->sc_cm_id->device,
+		ib_dma_unmap_page(xprt->sc_cm_id->device,
 				    sge.addr, PAGE_SIZE,
 				    DMA_FROM_DEVICE);
 		svc_rdma_put_context(ctxt, 1);
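The transport.c hunks keep the DMA API symmetric: an address obtained from ib_dma_map_page() must be released with ib_dma_unmap_page(), using the same length and direction that were used for the map. A minimal sketch of the pairing these call sites enforce (dev and page are stand-ins, not names from this patch):

	u64 addr = ib_dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(dev, addr))
		return -EINVAL;
	/* ... post the buffer and wait for the completion ... */
	ib_dma_unmap_page(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE);

The svc_rdma_send() hunk is a small behavioral fix riding along: posting on a transport already marked XPT_CLOSE now returns -ENOTCONN instead of 0, so callers no longer mistake a dropped work request for success.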