Diffstat (limited to 'net/sunrpc/xprtrdma/svc_rdma_sendto.c')
 -rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c  166
 1 file changed, 79 insertions(+), 87 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index fb82b1b683f8..a19b22b452a3 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -63,52 +63,44 @@
  * SGE[2..sge_count-2] data from xdr->pages[]
  * SGE[sge_count-1] data from xdr->tail.
  *
+ * The max SGE we need is the length of the XDR / pagesize + one for
+ * head + one for tail + one for RPCRDMA header. Since RPCSVC_MAXPAGES
+ * reserves a page for both the request and the reply header, and this
+ * array is only concerned with the reply we are assured that we have
+ * one extra page for the RPCRDMA header.
  */
-static struct ib_sge *xdr_to_sge(struct svcxprt_rdma *xprt,
-                                 struct xdr_buf *xdr,
-                                 struct ib_sge *sge,
-                                 int *sge_count)
+static void xdr_to_sge(struct svcxprt_rdma *xprt,
+                       struct xdr_buf *xdr,
+                       struct svc_rdma_req_map *vec)
 {
-        /* Max we need is the length of the XDR / pagesize + one for
-         * head + one for tail + one for RPCRDMA header
-         */
         int sge_max = (xdr->len+PAGE_SIZE-1) / PAGE_SIZE + 3;
         int sge_no;
-        u32 byte_count = xdr->len;
         u32 sge_bytes;
         u32 page_bytes;
-        int page_off;
+        u32 page_off;
         int page_no;
 
+        BUG_ON(xdr->len !=
+               (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));
+
         /* Skip the first sge, this is for the RPCRDMA header */
         sge_no = 1;
 
         /* Head SGE */
-        sge[sge_no].addr = ib_dma_map_single(xprt->sc_cm_id->device,
-                                             xdr->head[0].iov_base,
-                                             xdr->head[0].iov_len,
-                                             DMA_TO_DEVICE);
-        sge_bytes = min_t(u32, byte_count, xdr->head[0].iov_len);
-        byte_count -= sge_bytes;
-        sge[sge_no].length = sge_bytes;
-        sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
+        vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
+        vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
         sge_no++;
 
         /* pages SGE */
         page_no = 0;
         page_bytes = xdr->page_len;
         page_off = xdr->page_base;
-        while (byte_count && page_bytes) {
-                sge_bytes = min_t(u32, byte_count, (PAGE_SIZE-page_off));
-                sge[sge_no].addr =
-                        ib_dma_map_page(xprt->sc_cm_id->device,
-                                        xdr->pages[page_no], page_off,
-                                        sge_bytes, DMA_TO_DEVICE);
-                sge_bytes = min(sge_bytes, page_bytes);
-                byte_count -= sge_bytes;
+        while (page_bytes) {
+                vec->sge[sge_no].iov_base =
+                        page_address(xdr->pages[page_no]) + page_off;
+                sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
                 page_bytes -= sge_bytes;
-                sge[sge_no].length = sge_bytes;
-                sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
+                vec->sge[sge_no].iov_len = sge_bytes;
 
                 sge_no++;
                 page_no++;
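
A quick check of the SGE arithmetic described in the comment added in the hunk above. This is a self-contained userspace sketch, not kernel code: PAGE_SIZE and struct mock_xdr_buf are illustrative stand-ins carrying only the fields the formula uses.

#include <stdio.h>

#define PAGE_SIZE 4096  /* assumed 4 KiB pages for the example */

/* Hypothetical mock of the three xdr_buf fields the formula needs. */
struct mock_xdr_buf {
        unsigned int head_len;  /* xdr->head[0].iov_len */
        unsigned int page_len;  /* xdr->page_len */
        unsigned int tail_len;  /* xdr->tail[0].iov_len */
};

int main(void)
{
        struct mock_xdr_buf xdr = { 100, 8192, 20 };
        unsigned int len = xdr.head_len + xdr.page_len + xdr.tail_len;

        /* Round the XDR length up to whole pages, then add one SGE each
         * for the head, the tail, and the RPCRDMA header. */
        int sge_max = (len + PAGE_SIZE - 1) / PAGE_SIZE + 3;

        printf("xdr->len=%u -> sge_max=%d\n", len, sge_max); /* 8312 -> 6 */
        return 0;
}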
@@ -116,36 +108,24 @@ static struct ib_sge *xdr_to_sge(struct svcxprt_rdma *xprt,
         }
 
         /* Tail SGE */
-        if (byte_count && xdr->tail[0].iov_len) {
-                sge[sge_no].addr =
-                        ib_dma_map_single(xprt->sc_cm_id->device,
-                                          xdr->tail[0].iov_base,
-                                          xdr->tail[0].iov_len,
-                                          DMA_TO_DEVICE);
-                sge_bytes = min_t(u32, byte_count, xdr->tail[0].iov_len);
-                byte_count -= sge_bytes;
-                sge[sge_no].length = sge_bytes;
-                sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
+        if (xdr->tail[0].iov_len) {
+                vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
+                vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
                 sge_no++;
         }
 
         BUG_ON(sge_no > sge_max);
-        BUG_ON(byte_count != 0);
-
-        *sge_count = sge_no;
-        return sge;
+        vec->count = sge_no;
 }
 
-
 /* Assumptions:
  * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
  */
 static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                       u32 rmr, u64 to,
                       u32 xdr_off, int write_len,
-                      struct ib_sge *xdr_sge, int sge_count)
+                      struct svc_rdma_req_map *vec)
 {
-        struct svc_rdma_op_ctxt *tmp_sge_ctxt;
         struct ib_send_wr write_wr;
         struct ib_sge *sge;
         int xdr_sge_no;
@@ -154,25 +134,23 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
         int sge_off;
         int bc;
         struct svc_rdma_op_ctxt *ctxt;
-        int ret = 0;
 
-        BUG_ON(sge_count > RPCSVC_MAXPAGES);
+        BUG_ON(vec->count > RPCSVC_MAXPAGES);
         dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
-                "write_len=%d, xdr_sge=%p, sge_count=%d\n",
+                "write_len=%d, vec->sge=%p, vec->count=%lu\n",
                 rmr, (unsigned long long)to, xdr_off,
-                write_len, xdr_sge, sge_count);
+                write_len, vec->sge, vec->count);
 
         ctxt = svc_rdma_get_context(xprt);
-        ctxt->count = 0;
-        tmp_sge_ctxt = svc_rdma_get_context(xprt);
-        sge = tmp_sge_ctxt->sge;
+        ctxt->direction = DMA_TO_DEVICE;
+        sge = ctxt->sge;
 
         /* Find the SGE associated with xdr_off */
-        for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < sge_count;
+        for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
              xdr_sge_no++) {
-                if (xdr_sge[xdr_sge_no].length > bc)
+                if (vec->sge[xdr_sge_no].iov_len > bc)
                         break;
-                bc -= xdr_sge[xdr_sge_no].length;
+                bc -= vec->sge[xdr_sge_no].iov_len;
         }
 
         sge_off = bc;
@@ -180,21 +158,28 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
         sge_no = 0;
 
         /* Copy the remaining SGE */
-        while (bc != 0 && xdr_sge_no < sge_count) {
-                sge[sge_no].addr = xdr_sge[xdr_sge_no].addr + sge_off;
-                sge[sge_no].lkey = xdr_sge[xdr_sge_no].lkey;
+        while (bc != 0 && xdr_sge_no < vec->count) {
+                sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
                 sge_bytes = min((size_t)bc,
-                        (size_t)(xdr_sge[xdr_sge_no].length-sge_off));
+                        (size_t)(vec->sge[xdr_sge_no].iov_len-sge_off));
                 sge[sge_no].length = sge_bytes;
-
+                atomic_inc(&xprt->sc_dma_used);
+                sge[sge_no].addr =
+                        ib_dma_map_single(xprt->sc_cm_id->device,
+                                          (void *)
+                                          vec->sge[xdr_sge_no].iov_base + sge_off,
+                                          sge_bytes, DMA_TO_DEVICE);
+                if (dma_mapping_error(sge[sge_no].addr))
+                        goto err;
                 sge_off = 0;
                 sge_no++;
+                ctxt->count++;
                 xdr_sge_no++;
                 bc -= sge_bytes;
         }
 
         BUG_ON(bc != 0);
-        BUG_ON(xdr_sge_no > sge_count);
+        BUG_ON(xdr_sge_no > vec->count);
 
         /* Prepare WRITE WR */
         memset(&write_wr, 0, sizeof write_wr);
@@ -209,21 +194,20 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
 
         /* Post It */
         atomic_inc(&rdma_stat_write);
-        if (svc_rdma_send(xprt, &write_wr)) {
-                svc_rdma_put_context(ctxt, 1);
-                /* Fatal error, close transport */
-                ret = -EIO;
-        }
-        svc_rdma_put_context(tmp_sge_ctxt, 0);
-        return ret;
+        if (svc_rdma_send(xprt, &write_wr))
+                goto err;
+        return 0;
+ err:
+        svc_rdma_put_context(ctxt, 0);
+        /* Fatal error, close transport */
+        return -EIO;
 }
 
 static int send_write_chunks(struct svcxprt_rdma *xprt,
                              struct rpcrdma_msg *rdma_argp,
                              struct rpcrdma_msg *rdma_resp,
                              struct svc_rqst *rqstp,
-                             struct ib_sge *sge,
-                             int sge_count)
+                             struct svc_rdma_req_map *vec)
 {
         u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
         int write_len;
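
The reworked error path in send_write() above replaces the ret-variable-and-fallthrough pattern with a single err: label. A minimal standalone sketch of that cleanup idiom follows; get_context(), put_context() and post_send() are illustrative stubs, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stubs standing in for context allocation and WR posting. */
static void *get_context(void) { return malloc(16); }
static void put_context(void *ctxt) { free(ctxt); }
static int post_send(void *ctxt) { (void)ctxt; return 0; /* nonzero = failure */ }

static int send_one(void)
{
        void *ctxt = get_context();

        if (!ctxt)
                return -1;
        if (post_send(ctxt))
                goto err;       /* every failure funnels to one cleanup label */
        /* The kernel code leaves the context for the send-completion handler
         * to release; this sketch has no completion path, so free it here. */
        put_context(ctxt);
        return 0;
err:
        put_context(ctxt);
        return -1;      /* fatal error: the caller closes the transport */
}

int main(void)
{
        printf("send_one() = %d\n", send_one());
        return 0;
}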
@@ -269,8 +253,7 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
                                  rs_offset + chunk_off,
                                  xdr_off,
                                  this_write,
-                                 sge,
-                                 sge_count);
+                                 vec);
                 if (ret) {
                         dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                 ret);
@@ -292,8 +275,7 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
                              struct rpcrdma_msg *rdma_argp,
                              struct rpcrdma_msg *rdma_resp,
                              struct svc_rqst *rqstp,
-                             struct ib_sge *sge,
-                             int sge_count)
+                             struct svc_rdma_req_map *vec)
 {
         u32 xfer_len = rqstp->rq_res.len;
         int write_len;
@@ -341,8 +323,7 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
                                  rs_offset + chunk_off,
                                  xdr_off,
                                  this_write,
-                                 sge,
-                                 sge_count);
+                                 vec);
                 if (ret) {
                         dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                 ret);
@@ -380,7 +361,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
                       struct page *page,
                       struct rpcrdma_msg *rdma_resp,
                       struct svc_rdma_op_ctxt *ctxt,
-                      int sge_count,
+                      struct svc_rdma_req_map *vec,
                       int byte_count)
 {
         struct ib_send_wr send_wr;
@@ -405,6 +386,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
         ctxt->count = 1;
 
         /* Prepare the SGE for the RPCRDMA Header */
+        atomic_inc(&rdma->sc_dma_used);
         ctxt->sge[0].addr =
                 ib_dma_map_page(rdma->sc_cm_id->device,
                                 page, 0, PAGE_SIZE, DMA_TO_DEVICE);
@@ -413,10 +395,16 @@ static int send_reply(struct svcxprt_rdma *rdma,
         ctxt->sge[0].lkey = rdma->sc_phys_mr->lkey;
 
         /* Determine how many of our SGE are to be transmitted */
-        for (sge_no = 1; byte_count && sge_no < sge_count; sge_no++) {
-                sge_bytes = min((size_t)ctxt->sge[sge_no].length,
-                                (size_t)byte_count);
+        for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
+                sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
                 byte_count -= sge_bytes;
+                atomic_inc(&rdma->sc_dma_used);
+                ctxt->sge[sge_no].addr =
+                        ib_dma_map_single(rdma->sc_cm_id->device,
+                                          vec->sge[sge_no].iov_base,
+                                          sge_bytes, DMA_TO_DEVICE);
+                ctxt->sge[sge_no].length = sge_bytes;
+                ctxt->sge[sge_no].lkey = rdma->sc_phys_mr->lkey;
         }
         BUG_ON(byte_count != 0);
 
@@ -428,8 +416,10 @@ static int send_reply(struct svcxprt_rdma *rdma,
                 ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
                 ctxt->count++;
                 rqstp->rq_respages[page_no] = NULL;
+                /* If there are more pages than SGE, terminate SGE list */
+                if (page_no+1 >= sge_no)
+                        ctxt->sge[page_no+1].length = 0;
         }
-
         BUG_ON(sge_no > rdma->sc_max_sge);
         memset(&send_wr, 0, sizeof send_wr);
         ctxt->wr_op = IB_WR_SEND;
@@ -473,20 +463,20 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
         enum rpcrdma_proc reply_type;
         int ret;
         int inline_bytes;
-        struct ib_sge *sge;
-        int sge_count = 0;
         struct page *res_page;
         struct svc_rdma_op_ctxt *ctxt;
+        struct svc_rdma_req_map *vec;
 
         dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);
 
         /* Get the RDMA request header. */
         rdma_argp = xdr_start(&rqstp->rq_arg);
 
-        /* Build an SGE for the XDR */
+        /* Build a req vec for the XDR */
         ctxt = svc_rdma_get_context(rdma);
         ctxt->direction = DMA_TO_DEVICE;
-        sge = xdr_to_sge(rdma, &rqstp->rq_res, ctxt->sge, &sge_count);
+        vec = svc_rdma_get_req_map();
+        xdr_to_sge(rdma, &rqstp->rq_res, vec);
 
         inline_bytes = rqstp->rq_res.len;
 
@@ -503,7 +493,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 
         /* Send any write-chunk data and build resp write-list */
         ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
-                                rqstp, sge, sge_count);
+                                rqstp, vec);
         if (ret < 0) {
                 printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
                        ret);
@@ -513,7 +503,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 
         /* Send any reply-list data and update resp reply-list */
         ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
-                                rqstp, sge, sge_count);
+                                rqstp, vec);
         if (ret < 0) {
                 printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
                        ret);
@@ -521,11 +511,13 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
         }
         inline_bytes -= ret;
 
-        ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, sge_count,
+        ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
                          inline_bytes);
+        svc_rdma_put_req_map(vec);
         dprintk("svcrdma: send_reply returns %d\n", ret);
         return ret;
  error:
+        svc_rdma_put_req_map(vec);
         svc_rdma_put_context(ctxt, 0);
         put_page(res_page);
         return ret;
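
Taken together, the patch splits reply transmission into two phases: xdr_to_sge() now only records iov_base/iov_len pairs in a svc_rdma_req_map, and the DMA mapping is deferred to the moment each WR is built. Below is a simplified userspace sketch of that shape; the struct names and the "fake map" step are illustrative stand-ins, where the real code uses ib_dma_map_single() and dma_mapping_error().

#include <stdio.h>
#include <stddef.h>

#define MAX_SGE 8

/* Illustrative analogues of struct kvec, svc_rdma_req_map and ib_sge. */
struct kvec    { void *iov_base; size_t iov_len; };
struct req_map { struct kvec sge[MAX_SGE]; unsigned long count; };
struct ib_sge_mock { unsigned long addr; unsigned int length; };

/* Phase 1: record where the reply payload lives; nothing is mapped yet. */
static void build_map(struct req_map *vec, void *buf, size_t len)
{
        vec->sge[0].iov_base = buf;
        vec->sge[0].iov_len = len;
        vec->count = 1;
}

/* Phase 2: "map" each entry just before posting the WR, so a mapping
 * failure can bail out before any hardware resources are committed. */
static int map_for_send(const struct req_map *vec, struct ib_sge_mock *sge)
{
        unsigned long i;

        for (i = 0; i < vec->count; i++) {
                sge[i].addr = (unsigned long)vec->sge[i].iov_base; /* fake map */
                sge[i].length = (unsigned int)vec->sge[i].iov_len;
                if (!sge[i].addr)
                        return -1;      /* plays the role of dma_mapping_error() */
        }
        return 0;
}

int main(void)
{
        char payload[] = "reply bytes";
        struct req_map vec;
        struct ib_sge_mock sge[MAX_SGE];

        build_map(&vec, payload, sizeof(payload));
        printf("map_for_send() = %d, %lu sge\n", map_for_send(&vec, sge), vec.count);
        return 0;
}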