Diffstat (limited to 'net')
 -rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c  | 71
 -rw-r--r--  net/sunrpc/xprtrdma/transport.c |  5
 -rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h | 10
 3 files changed, 34 insertions, 52 deletions
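
Taken together, the hunks below (a) move enum rpcrdma_chunktype out of xprt_rdma.h and into rpc_rdma.c, its only remaining user, (b) delete rpcrdma_marshal_chunks() and fold its dispatch back into rpcrdma_marshal_req(), and (c) drop the rl_rtype/rl_wtype fields from struct rpcrdma_req in favor of locals. What follows is a condensed sketch of rpcrdma_marshal_req() after the patch, reconstructed from these hunks alone; the inline pullup, padding, and iovec setup in the middle of the real function are elided.

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_msg *headerp = rdmab_to_msg(req->rl_rdmabuf);
	enum rpcrdma_chunktype rtype, wtype;	/* locals now, not req->rl_rtype/rl_wtype */
	ssize_t hdrlen;

	/* ... choose wtype from rq_rcv_buf and rtype from rq_snd_buf ... */

	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch)
		return -EIO;	/* two chunk lists per RPC are unsupported */

	/* ... inline pullup and padding for the rtype == rpcrdma_noch case ... */

	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, rtype);
		wtype = rtype;	/* simplify dprintk */
	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
					       headerp, wtype);
	}
	if (hdrlen < 0)
		return hdrlen;

	/* ... fill req->rl_send_iov[] and return 0 ... */
	return 0;
}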
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 91ffde82fa0c..41456d9e5a7d 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -53,6 +53,14 @@
 # define RPCDBG_FACILITY	RPCDBG_TRANS
 #endif
 
+enum rpcrdma_chunktype {
+	rpcrdma_noch = 0,
+	rpcrdma_readch,
+	rpcrdma_areadch,
+	rpcrdma_writech,
+	rpcrdma_replych
+};
+
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 static const char transfertypes[][12] = {
 	"pure inline",	/* no chunks */
@@ -284,28 +292,6 @@ out:
 }
 
 /*
- * Marshal chunks. This routine returns the header length
- * consumed by marshaling.
- *
- * Returns positive RPC/RDMA header size, or negative errno.
- */
-
-ssize_t
-rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
-{
-	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
-	struct rpcrdma_msg *headerp = rdmab_to_msg(req->rl_rdmabuf);
-
-	if (req->rl_rtype != rpcrdma_noch)
-		result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
-					       headerp, req->rl_rtype);
-	else if (req->rl_wtype != rpcrdma_noch)
-		result = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
-					       headerp, req->rl_wtype);
-	return result;
-}
-
-/*
  * Copy write data inline.
  * This function is used for "small" requests. Data which is passed
  * to RPC via iovecs (or page list) is copied directly into the
@@ -397,6 +383,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	char *base;
 	size_t rpclen, padlen;
 	ssize_t hdrlen;
+	enum rpcrdma_chunktype rtype, wtype;
 	struct rpcrdma_msg *headerp;
 
 	/*
@@ -433,13 +420,13 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * into pages; otherwise use reply chunks.
 	 */
 	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
-		req->rl_wtype = rpcrdma_noch;
+		wtype = rpcrdma_noch;
 	else if (rqst->rq_rcv_buf.page_len == 0)
-		req->rl_wtype = rpcrdma_replych;
+		wtype = rpcrdma_replych;
 	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
-		req->rl_wtype = rpcrdma_writech;
+		wtype = rpcrdma_writech;
 	else
-		req->rl_wtype = rpcrdma_replych;
+		wtype = rpcrdma_replych;
 
 	/*
 	 * Chunks needed for arguments?
@@ -456,16 +443,16 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * TBD check NFSv4 setacl
 	 */
 	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
-		req->rl_rtype = rpcrdma_noch;
+		rtype = rpcrdma_noch;
 	else if (rqst->rq_snd_buf.page_len == 0)
-		req->rl_rtype = rpcrdma_areadch;
+		rtype = rpcrdma_areadch;
 	else
-		req->rl_rtype = rpcrdma_readch;
+		rtype = rpcrdma_readch;
 
 	/* The following simplification is not true forever */
-	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype == rpcrdma_replych)
-		req->rl_wtype = rpcrdma_noch;
-	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype != rpcrdma_noch) {
+	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
+		wtype = rpcrdma_noch;
+	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
 		dprintk("RPC: %s: cannot marshal multiple chunk lists\n",
 			__func__);
 		return -EIO;
@@ -479,7 +466,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * When padding is in use and applies to the transfer, insert
 	 * it and change the message type.
 	 */
-	if (req->rl_rtype == rpcrdma_noch) {
+	if (rtype == rpcrdma_noch) {
 
 		padlen = rpcrdma_inline_pullup(rqst,
 						RPCRDMA_INLINE_PAD_VALUE(rqst));
@@ -494,7 +481,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
 			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
 			hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
-			if (req->rl_wtype != rpcrdma_noch) {
+			if (wtype != rpcrdma_noch) {
 				dprintk("RPC: %s: invalid chunk list\n",
 					__func__);
 				return -EIO;
@@ -515,18 +502,26 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 			 * on receive. Therefore, we request a reply chunk
 			 * for non-writes wherever feasible and efficient.
 			 */
-			if (req->rl_wtype == rpcrdma_noch)
-				req->rl_wtype = rpcrdma_replych;
+			if (wtype == rpcrdma_noch)
+				wtype = rpcrdma_replych;
 		}
 	}
 
-	hdrlen = rpcrdma_marshal_chunks(rqst, hdrlen);
+	if (rtype != rpcrdma_noch) {
+		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
+					       headerp, rtype);
+		wtype = rtype;	/* simplify dprintk */
+
+	} else if (wtype != rpcrdma_noch) {
+		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
+					       headerp, wtype);
+	}
 	if (hdrlen < 0)
 		return hdrlen;
 
 	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
 		" headerp 0x%p base 0x%p lkey 0x%x\n",
-		__func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
+		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
 		headerp, base, rdmab_lkey(req->rl_rdmabuf));
 
 	/*
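
For readers outside this code: the chunk types selected in rpcrdma_marshal_req() correspond to the RPC/RDMA transfer models of RFC 5666. The identifiers below are the ones from the patch; the annotations are an editor's gloss, not comments present in the source.

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,	/* no chunks: the message fits inline */
	rpcrdma_readch,		/* bulk call arguments moved via a read chunk list
				 * (the server pulls them with RDMA Read) */
	rpcrdma_areadch,	/* the entire call sent as a position-zero read chunk */
	rpcrdma_writech,	/* bulk results returned via a write chunk list
				 * (the server pushes them with RDMA Write) */
	rpcrdma_replych		/* the entire reply returned via a reply chunk */
};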
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 9be7f97205ba..97f656292feb 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -608,10 +608,7 @@ xprt_rdma_send_request(struct rpc_task *task)
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 	int rc = 0;
 
-	if (req->rl_niovs == 0)
-		rc = rpcrdma_marshal_req(rqst);
-	else if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_ALLPHYSICAL)
-		rc = rpcrdma_marshal_chunks(rqst, 0);
+	rc = rpcrdma_marshal_req(rqst);
 	if (rc < 0)
 		goto failed_marshal;
 
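
The removed conditional above is the behavioral heart of the patch: previously a request that had already been marshaled (rl_niovs != 0) was retransmitted with only its chunk lists rebuilt, and not even that under the RPCRDMA_ALLPHYSICAL memory-registration strategy; now every transmission re-marshals the request in full, which is what lets rl_rtype/rl_wtype disappear from struct rpcrdma_req. Condensed, with annotations that are the editor's rather than the source's:

/* Before: marshal fully only on the first transmit; on retransmit,
 * rebuild just the chunk lists unless using ALLPHYSICAL.
 */
if (req->rl_niovs == 0)
	rc = rpcrdma_marshal_req(rqst);
else if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_ALLPHYSICAL)
	rc = rpcrdma_marshal_chunks(rqst, 0);

/* After: a single path; each transmission rebuilds header and chunks. */
rc = rpcrdma_marshal_req(rqst);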
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 0a16fb6f0885..c8afd83e8b75 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -143,14 +143,6 @@ rdmab_to_msg(struct rpcrdma_regbuf *rb)
 	return (struct rpcrdma_msg *)rb->rg_base;
 }
 
-enum rpcrdma_chunktype {
-	rpcrdma_noch = 0,
-	rpcrdma_readch,
-	rpcrdma_areadch,
-	rpcrdma_writech,
-	rpcrdma_replych
-};
-
 /*
  * struct rpcrdma_rep -- this structure encapsulates state required to recv
  * and complete a reply, asychronously. It needs several pieces of
@@ -258,7 +250,6 @@ struct rpcrdma_req {
 	unsigned int	rl_niovs;	/* 0, 2 or 4 */
 	unsigned int	rl_nchunks;	/* non-zero if chunks */
 	unsigned int	rl_connect_cookie;	/* retry detection */
-	enum rpcrdma_chunktype	rl_rtype, rl_wtype;
 	struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
 	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
 	struct ib_sge	rl_send_iov[4];	/* for active requests */
@@ -418,7 +409,6 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
 /*
  * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  */
-ssize_t rpcrdma_marshal_chunks(struct rpc_rqst *, ssize_t);
 int rpcrdma_marshal_req(struct rpc_rqst *);
 size_t rpcrdma_max_payload(struct rpcrdma_xprt *);
 
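
Net effect on the data structure: chunk-type state no longer persists in struct rpcrdma_req between calls, since nothing reads it after rpcrdma_marshal_req() returns; rtype and wtype can live on that function's stack. The affected fragment of the struct after the patch (members not shown here are untouched by this diff):

struct rpcrdma_req {
	/* ... */
	unsigned int		rl_niovs;		/* 0, 2 or 4 */
	unsigned int		rl_nchunks;		/* non-zero if chunks */
	unsigned int		rl_connect_cookie;	/* retry detection */
	struct rpcrdma_buffer	*rl_buffer;	/* home base for this structure */
	struct rpcrdma_rep	*rl_reply;	/* holder for reply buffer */
	struct ib_sge		rl_send_iov[4];	/* for active requests */
	/* ... */
};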