author		Chuck Lever <chuck.lever@oracle.com>	2017-10-20 10:48:03 -0400
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2017-11-17 13:47:56 -0500
commit		a062a2a3efc5fece106d96d4a5165f3f23b5cbda (patch)
tree		efb5b6a91d7a1b06154d85db7643861ff8735159 /net/sunrpc
parent		857f9acab9343788fe59f7be3a4710131b705db4 (diff)
xprtrdma: "Unoptimize" rpcrdma_prepare_hdr_sge()
Commit 655fec6987be ("xprtrdma: Use gathered Send for large inline
messages") assumed that, since the zeroeth element of the Send SGE
array always pointed to req->rl_rdmabuf, it needed to be initialized
just once. This was a valid assumption because the Send SGE array and
rl_rdmabuf both live in the same rpcrdma_req.

In a subsequent patch, the Send SGE array will be separated from the
rpcrdma_req, so the zeroeth element of the SGE array needs to be
initialized every time.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
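[Editor's note: to make the invariant concrete, here is a minimal sketch of
the struct relationship the original optimization relied on. rl_rdmabuf and
rl_send_sge are taken from the diff below; RPCRDMA_MAX_SEND_SGES is assumed
from the upstream xprt_rdma.h of this period, and all other members are
elided.]

/* Simplified sketch, not the full upstream definition: because the
 * Send SGE array is embedded in rpcrdma_req alongside rl_rdmabuf,
 * rl_send_sge[0].addr and .lkey could be set once, when the regbuf
 * was first DMA-mapped, and then reused for every Send.
 */
struct rpcrdma_req {
	struct rpcrdma_regbuf	*rl_rdmabuf;	/* RPC-over-RDMA header */
	struct ib_sge		rl_send_sge[RPCRDMA_MAX_SEND_SGES];
	/* ... many other members elided ... */
};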
Diffstat (limited to 'net/sunrpc')
-rw-r--r--	net/sunrpc/xprtrdma/rpc_rdma.c	12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index e3ece9843f9d..7fd102960a81 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -533,7 +533,7 @@ rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 			sge->addr, sge->length, DMA_TO_DEVICE);
 }
 
-/* Prepare the RPC-over-RDMA header SGE.
+/* Prepare an SGE for the RPC-over-RDMA transport header.
  */
 static bool
 rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
@@ -542,13 +542,11 @@ rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
 	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
 	struct ib_sge *sge = &req->rl_send_sge[0];
 
-	if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
-		if (!__rpcrdma_dma_map_regbuf(ia, rb))
-			goto out_regbuf;
-		sge->addr = rdmab_addr(rb);
-		sge->lkey = rdmab_lkey(rb);
-	}
+	if (!rpcrdma_dma_map_regbuf(ia, rb))
+		goto out_regbuf;
+	sge->addr = rdmab_addr(rb);
 	sge->length = len;
+	sge->lkey = rdmab_lkey(rb);
 
 	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
 				      sge->length, DMA_TO_DEVICE);
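[Editor's note: for readability, the hunks above reassemble into the
following post-patch body of rpcrdma_prepare_hdr_sge(). The u32 len
parameter type, the out_regbuf error path, and the function's tail fall
outside the hunks shown; the first is assumed from the upstream source
and the rest are elided.]

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			u32 len)
{
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = &req->rl_send_sge[0];

	/* The regbuf is mapped and the SGE fully initialized on every
	 * call, since the SGE array will no longer live in the same
	 * structure as rl_rdmabuf.
	 */
	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
				      sge->length, DMA_TO_DEVICE);
	/* ... remainder, including the out_regbuf label, not shown ... */
}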