author	Chuck Lever <chuck.lever@oracle.com>	2016-03-01 13:05:36 -0500
committer	J. Bruce Fields <bfields@redhat.com>	2016-03-01 16:06:32 -0500
commit	08ae4e7fedc6a853ad69d6d8abb760b55988608a (patch)
tree	753202037514310e96260ac600a14caf7f0bcd13
parent	4ce85c8cf8a8ecfff932642cf20677d7efc90ad3 (diff)
svcrdma: Find client-provided write and reply chunks once per reply
The client provides the location of Write chunks into which the
server writes bulk payload. The client provides these when the
Upper Layer Protocol wants direct data placement and the Binding
allows it. (For NFS, this is READ and READLINK operations).

The client also provides the location of a Reply chunk into which
the server writes the non-bulk part of an RPC reply. The client
provides this chunk whenever it believes the reply can be larger
than its receive buffers.

The server then uses the presence of these chunks to determine how
it will form its reply message.

svc_rdma_sendto() was looking for Write and Reply chunks multiple
times for every reply message. It would be more efficient to do it
just once.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
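The pattern the patch adopts is easy to see in miniature. The following
is a minimal, self-contained sketch in plain C, not the kernel's svcrdma
code; every name in it is a simplified stand-in invented for illustration.
It shows the shape of the change: locate each client-provided chunk list
once per reply, pick the reply type from what was found, and invoke a
sender only when its chunk is actually present.

/* sketch.c: find the client's Write and Reply chunks once per reply.
 * All types and helpers are hypothetical stand-ins, not svcrdma's. */
#include <stdio.h>

struct chunk_list { int nchunks; };

/* Stand-ins for svc_rdma_get_write_array()/svc_rdma_get_reply_array():
 * return NULL when the client provided no such chunk list. */
static struct chunk_list *find_write_list(int has_write)
{
	static struct chunk_list wr = { .nchunks = 2 };
	return has_write ? &wr : NULL;
}

static struct chunk_list *find_reply_chunk(int has_reply)
{
	static struct chunk_list rp = { .nchunks = 1 };
	return has_reply ? &rp : NULL;
}

static void form_reply(int has_write, int has_reply)
{
	/* Locate each chunk list exactly once per reply ... */
	struct chunk_list *wr_ary = find_write_list(has_write);
	struct chunk_list *rp_ary = find_reply_chunk(has_reply);

	/* ... choose the reply type from what the client provided ... */
	printf("reply type: %s\n", rp_ary ? "RDMA_NOMSG" : "RDMA_MSG");

	/* ... and skip a sender entirely when its chunk is absent,
	 * rather than letting it re-parse the header and return 0. */
	if (wr_ary)
		printf("RDMA Write %d write chunk(s)\n", wr_ary->nchunks);
	if (rp_ary)
		printf("RDMA Write reply body into the Reply chunk\n");
}

int main(void)
{
	form_reply(1, 0);	/* e.g. NFS READ: Write chunks only */
	form_reply(0, 1);	/* large non-bulk reply: Reply chunk */
	return 0;
}

Compiled and run, the sketch prints RDMA_MSG for the write-chunk-only
case and RDMA_NOMSG when a Reply chunk is present, mirroring the
reply_type selection in svc_rdma_sendto() in the patch below.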
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_sendto.c	80
1 file changed, 36 insertions(+), 44 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index df57f3ce6cd2..79fa661295a2 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -166,10 +166,10 @@ svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
  * reply array is present
  */
 static struct rpcrdma_write_array *
-svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
+svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp,
+			 struct rpcrdma_write_array *wr_ary)
 {
 	struct rpcrdma_read_chunk *rch;
-	struct rpcrdma_write_array *wr_ary;
 	struct rpcrdma_write_array *rp_ary;
 
 	/* XXX: Need to fix when reply chunk may occur with read list
@@ -191,7 +191,6 @@ svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
 		goto found_it;
 	}
 
-	wr_ary = svc_rdma_get_write_array(rmsgp);
 	if (wr_ary) {
 		int chunk = be32_to_cpu(wr_ary->wc_nchunks);
 
@@ -302,8 +301,9 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
 	return -EIO;
 }
 
+noinline
 static int send_write_chunks(struct svcxprt_rdma *xprt,
-			     struct rpcrdma_msg *rdma_argp,
+			     struct rpcrdma_write_array *wr_ary,
 			     struct rpcrdma_msg *rdma_resp,
 			     struct svc_rqst *rqstp,
 			     struct svc_rdma_req_map *vec)
@@ -314,25 +314,21 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
 	int chunk_off;
 	int chunk_no;
 	int nchunks;
-	struct rpcrdma_write_array *arg_ary;
 	struct rpcrdma_write_array *res_ary;
 	int ret;
 
-	arg_ary = svc_rdma_get_write_array(rdma_argp);
-	if (!arg_ary)
-		return 0;
 	res_ary = (struct rpcrdma_write_array *)
 		&rdma_resp->rm_body.rm_chunks[1];
 
 	/* Write chunks start at the pagelist */
-	nchunks = be32_to_cpu(arg_ary->wc_nchunks);
+	nchunks = be32_to_cpu(wr_ary->wc_nchunks);
 	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
 	     xfer_len && chunk_no < nchunks;
 	     chunk_no++) {
 		struct rpcrdma_segment *arg_ch;
 		u64 rs_offset;
 
-		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
+		arg_ch = &wr_ary->wc_array[chunk_no].wc_target;
 		write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length));
 
 		/* Prepare the response chunk given the length actually
@@ -350,11 +346,8 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
 				 xdr_off,
 				 write_len,
 				 vec);
-		if (ret <= 0) {
-			dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
-				ret);
-			return -EIO;
-		}
+		if (ret <= 0)
+			goto out_err;
 		chunk_off += ret;
 		xdr_off += ret;
 		xfer_len -= ret;
@@ -365,10 +358,15 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
 	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);
 
 	return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
+
+out_err:
+	pr_err("svcrdma: failed to send write chunks, rc=%d\n", ret);
+	return -EIO;
 }
 
+noinline
 static int send_reply_chunks(struct svcxprt_rdma *xprt,
-			     struct rpcrdma_msg *rdma_argp,
+			     struct rpcrdma_write_array *rp_ary,
 			     struct rpcrdma_msg *rdma_resp,
 			     struct svc_rqst *rqstp,
 			     struct svc_rdma_req_map *vec)
@@ -380,25 +378,21 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
 	int chunk_off;
 	int nchunks;
 	struct rpcrdma_segment *ch;
-	struct rpcrdma_write_array *arg_ary;
 	struct rpcrdma_write_array *res_ary;
 	int ret;
 
-	arg_ary = svc_rdma_get_reply_array(rdma_argp);
-	if (!arg_ary)
-		return 0;
 	/* XXX: need to fix when reply lists occur with read-list and or
 	 * write-list */
 	res_ary = (struct rpcrdma_write_array *)
 		&rdma_resp->rm_body.rm_chunks[2];
 
 	/* xdr offset starts at RPC message */
-	nchunks = be32_to_cpu(arg_ary->wc_nchunks);
+	nchunks = be32_to_cpu(rp_ary->wc_nchunks);
 	for (xdr_off = 0, chunk_no = 0;
 	     xfer_len && chunk_no < nchunks;
 	     chunk_no++) {
 		u64 rs_offset;
-		ch = &arg_ary->wc_array[chunk_no].wc_target;
+		ch = &rp_ary->wc_array[chunk_no].wc_target;
 		write_len = min(xfer_len, be32_to_cpu(ch->rs_length));
 
 		/* Prepare the reply chunk given the length actually
@@ -415,11 +409,8 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
 				 xdr_off,
 				 write_len,
 				 vec);
-		if (ret <= 0) {
-			dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
-				ret);
-			return -EIO;
-		}
+		if (ret <= 0)
+			goto out_err;
 		chunk_off += ret;
 		xdr_off += ret;
 		xfer_len -= ret;
@@ -430,6 +421,10 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
 	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);
 
 	return rqstp->rq_res.len;
+
+out_err:
+	pr_err("svcrdma: failed to send reply chunks, rc=%d\n", ret);
+	return -EIO;
 }
 
 /* This function prepares the portion of the RPCRDMA message to be
@@ -573,7 +568,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 		container_of(xprt, struct svcxprt_rdma, sc_xprt);
 	struct rpcrdma_msg *rdma_argp;
 	struct rpcrdma_msg *rdma_resp;
-	struct rpcrdma_write_array *reply_ary;
+	struct rpcrdma_write_array *wr_ary, *rp_ary;
 	enum rpcrdma_proc reply_type;
 	int ret;
 	int inline_bytes;
@@ -587,6 +582,8 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	 * places this at the start of page 0.
 	 */
 	rdma_argp = page_address(rqstp->rq_pages[0]);
+	wr_ary = svc_rdma_get_write_array(rdma_argp);
+	rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);
 
 	/* Build an req vec for the XDR */
 	ctxt = svc_rdma_get_context(rdma);
@@ -603,8 +600,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	if (!res_page)
 		goto err0;
 	rdma_resp = page_address(res_page);
-	reply_ary = svc_rdma_get_reply_array(rdma_argp);
-	if (reply_ary)
+	if (rp_ary)
 		reply_type = RDMA_NOMSG;
 	else
 		reply_type = RDMA_MSG;
@@ -612,24 +608,20 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 		rdma_resp, reply_type);
 
 	/* Send any write-chunk data and build resp write-list */
-	ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
-				rqstp, vec);
-	if (ret < 0) {
-		printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
-		       ret);
-		goto err1;
-	}
-	inline_bytes -= ret;
+	if (wr_ary) {
+		ret = send_write_chunks(rdma, wr_ary, rdma_resp, rqstp, vec);
+		if (ret < 0)
+			goto err1;
+		inline_bytes -= ret;
+	}
 
 	/* Send any reply-list data and update resp reply-list */
-	ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
-				rqstp, vec);
-	if (ret < 0) {
-		printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
-		       ret);
-		goto err1;
-	}
-	inline_bytes -= ret;
+	if (rp_ary) {
+		ret = send_reply_chunks(rdma, rp_ary, rdma_resp, rqstp, vec);
+		if (ret < 0)
+			goto err1;
+		inline_bytes -= ret;
+	}
 
 	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
 			 inline_bytes);