author     Chuck Lever <chuck.lever@oracle.com>          2015-03-30 14:35:44 -0400
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>    2015-03-31 09:52:53 -0400
commit     d654788e98f74f2df8dfc6079fa314938f739486 (patch)
tree       c1ce64224f93f867ba4220f3f4cebb0b3f0fd9ee
parent     e46ac34c3c34e408435656a5fed605c4c787d081 (diff)
xprtrdma: Make rpcrdma_{un}map_one() into inline functions
These functions are called in a loop for each page transferred via
RDMA READ or WRITE. Extract loop invariants and inline them to reduce
CPU overhead.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Devesh Sharma <Devesh.Sharma@Emulex.Com>
Tested-by: Meghana Cheripady <Meghana.Cheripady@Emulex.Com>
Tested-by: Veeresh U. Kokatnur <veereshuk@chelsio.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
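The change itself is plain loop-invariant hoisting combined with inlining: the device pointer and the DMA data direction depend only on the connection and the writing flag, so callers compute them once (device = ia->ri_id->device, direction = rpcrdma_data_dir(writing)) ahead of the per-segment loop, and the map/unmap helpers become static inlines that take those values as arguments. As a rough standalone illustration of the pattern only -- the struct and function names below are invented for the example and are not the xprtrdma definitions:

/*
 * Minimal sketch of the pattern: hoist the loop invariants and inline
 * the per-segment helper.  fake_device, fake_seg and map_dir are
 * placeholders for this example, not kernel types.
 */
#include <stdio.h>
#include <stdbool.h>

enum map_dir { DIR_TO_DEVICE, DIR_FROM_DEVICE };

struct fake_device { const char *name; };
struct fake_seg { unsigned long addr; enum map_dir dir; };

/* Inlined helper: the caller passes in the invariants instead of the
 * helper re-deriving them on every call. */
static inline void
map_one(struct fake_device *device, struct fake_seg *seg,
	enum map_dir direction)
{
	seg->dir = direction;
	printf("map %#lx on %s, dir %d\n", seg->addr, device->name, seg->dir);
}

int main(void)
{
	struct fake_device dev = { .name = "exampledev0" };
	struct fake_seg segs[4] = {
		{ .addr = 0x1000 }, { .addr = 0x2000 },
		{ .addr = 0x3000 }, { .addr = 0x4000 },
	};
	bool writing = true;

	/* Computed once, outside the hot per-segment loop. */
	struct fake_device *device = &dev;
	enum map_dir direction = writing ? DIR_FROM_DEVICE : DIR_TO_DEVICE;

	for (int i = 0; i < 4; i++)
		map_one(device, &segs[i], direction);
	return 0;
}

In the patch the hoisted invariants are ia->ri_id->device and rpcrdma_data_dir(writing), computed before the for-loops in fmr_op_map() and frwr_op_map(), and rpcrdma_map_one()/rpcrdma_unmap_one() move into xprt_rdma.h as static inlines, which is the CPU saving the commit message refers to.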
-rw-r--r--  net/sunrpc/xprtrdma/fmr_ops.c        10
-rw-r--r--  net/sunrpc/xprtrdma/frwr_ops.c       10
-rw-r--r--  net/sunrpc/xprtrdma/physical_ops.c   10
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c          44
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h      45
5 files changed, 73 insertions, 46 deletions
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index e8a9837f8d63..a91ba2c8ef1e 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -85,6 +85,8 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	   int nsegs, bool writing)
 {
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+	struct ib_device *device = ia->ri_id->device;
+	enum dma_data_direction direction = rpcrdma_data_dir(writing);
 	struct rpcrdma_mr_seg *seg1 = seg;
 	struct rpcrdma_mw *mw = seg1->rl_mw;
 	u64 physaddrs[RPCRDMA_MAX_DATA_SEGS];
@@ -97,7 +99,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	if (nsegs > RPCRDMA_MAX_FMR_SGES)
 		nsegs = RPCRDMA_MAX_FMR_SGES;
 	for (i = 0; i < nsegs;) {
-		rpcrdma_map_one(ia, seg, writing);
+		rpcrdma_map_one(device, seg, direction);
 		physaddrs[i] = seg->mr_dma;
 		len += seg->mr_len;
 		++seg;
@@ -123,7 +125,7 @@ out_maperr:
 		__func__, len, (unsigned long long)seg1->mr_dma,
 		pageoff, i, rc);
 	while (i--)
-		rpcrdma_unmap_one(ia, --seg);
+		rpcrdma_unmap_one(device, --seg);
 	return rc;
 }
 
@@ -135,14 +137,16 @@ fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 {
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct rpcrdma_mr_seg *seg1 = seg;
+	struct ib_device *device;
 	int rc, nsegs = seg->mr_nsegs;
 	LIST_HEAD(l);
 
 	list_add(&seg1->rl_mw->r.fmr->list, &l);
 	rc = ib_unmap_fmr(&l);
 	read_lock(&ia->ri_qplock);
+	device = ia->ri_id->device;
 	while (seg1->mr_nsegs--)
-		rpcrdma_unmap_one(ia, seg++);
+		rpcrdma_unmap_one(device, seg++);
 	read_unlock(&ia->ri_qplock);
 	if (rc)
 		goto out_err;
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index ea59c1b435ff..0a7b9df70133 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -178,6 +178,8 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	    int nsegs, bool writing)
 {
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+	struct ib_device *device = ia->ri_id->device;
+	enum dma_data_direction direction = rpcrdma_data_dir(writing);
 	struct rpcrdma_mr_seg *seg1 = seg;
 	struct rpcrdma_mw *mw = seg1->rl_mw;
 	struct rpcrdma_frmr *frmr = &mw->r.frmr;
@@ -197,7 +199,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	if (nsegs > ia->ri_max_frmr_depth)
 		nsegs = ia->ri_max_frmr_depth;
 	for (page_no = i = 0; i < nsegs;) {
-		rpcrdma_map_one(ia, seg, writing);
+		rpcrdma_map_one(device, seg, direction);
 		pa = seg->mr_dma;
 		for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
 			frmr->fr_pgl->page_list[page_no++] = pa;
@@ -247,7 +249,7 @@ out_senderr:
 	ib_update_fast_reg_key(mr, --key);
 	frmr->fr_state = FRMR_IS_INVALID;
 	while (i--)
-		rpcrdma_unmap_one(ia, --seg);
+		rpcrdma_unmap_one(device, --seg);
 	return rc;
 }
 
@@ -261,6 +263,7 @@ frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct ib_send_wr invalidate_wr, *bad_wr;
 	int rc, nsegs = seg->mr_nsegs;
+	struct ib_device *device;
 
 	seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
 
@@ -271,8 +274,9 @@ frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 	DECR_CQCOUNT(&r_xprt->rx_ep);
 
 	read_lock(&ia->ri_qplock);
+	device = ia->ri_id->device;
 	while (seg1->mr_nsegs--)
-		rpcrdma_unmap_one(ia, seg++);
+		rpcrdma_unmap_one(device, seg++);
 	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
 	read_unlock(&ia->ri_qplock);
 	if (rc)
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
index 0ba130bed1fc..ba518af16787 100644
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -50,7 +50,8 @@ physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 {
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
-	rpcrdma_map_one(ia, seg, writing);
+	rpcrdma_map_one(ia->ri_id->device, seg,
+			rpcrdma_data_dir(writing));
 	seg->mr_rkey = ia->ri_bind_mem->rkey;
 	seg->mr_base = seg->mr_dma;
 	seg->mr_nsegs = 1;
@@ -62,7 +63,12 @@ physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 static int
 physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 {
-	rpcrdma_unmap_one(&r_xprt->rx_ia, seg);
+	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+
+	read_lock(&ia->ri_qplock);
+	rpcrdma_unmap_one(ia->ri_id->device, seg);
+	read_unlock(&ia->ri_qplock);
+
 	return 1;
 }
 
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index cac06f290a26..4870d272e006 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1436,6 +1436,14 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
  * Wrappers for internal-use kmalloc memory registration, used by buffer code.
  */
 
+void
+rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg)
+{
+	dprintk("RPC: map_one: offset %p iova %llx len %zu\n",
+		seg->mr_offset,
+		(unsigned long long)seg->mr_dma, seg->mr_dmalen);
+}
+
 static int
 rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
 			  struct ib_mr **mrp, struct ib_sge *iov)
@@ -1561,42 +1569,6 @@ rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
 }
 
 /*
- * Wrappers for chunk registration, shared by read/write chunk code.
- */
-
-void
-rpcrdma_map_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg, bool writing)
-{
-	seg->mr_dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-	seg->mr_dmalen = seg->mr_len;
-	if (seg->mr_page)
-		seg->mr_dma = ib_dma_map_page(ia->ri_id->device,
-				seg->mr_page, offset_in_page(seg->mr_offset),
-				seg->mr_dmalen, seg->mr_dir);
-	else
-		seg->mr_dma = ib_dma_map_single(ia->ri_id->device,
-				seg->mr_offset,
-				seg->mr_dmalen, seg->mr_dir);
-	if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) {
-		dprintk("RPC: %s: mr_dma %llx mr_offset %p mr_dma_len %zu\n",
-			__func__,
-			(unsigned long long)seg->mr_dma,
-			seg->mr_offset, seg->mr_dmalen);
-	}
-}
-
-void
-rpcrdma_unmap_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg)
-{
-	if (seg->mr_page)
-		ib_dma_unmap_page(ia->ri_id->device,
-				seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
-	else
-		ib_dma_unmap_single(ia->ri_id->device,
-				seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
-}
-
-/*
  * Prepost any receive buffer, then post send.
  *
  * Receive buffer is donated to hardware, reclaimed upon recv completion.
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 54bcbe47075d..78e0b8beaa36 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -424,8 +424,49 @@ void rpcrdma_free_regbuf(struct rpcrdma_ia *,
 			 struct rpcrdma_regbuf *);
 
 unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *);
-void rpcrdma_map_one(struct rpcrdma_ia *, struct rpcrdma_mr_seg *, bool);
-void rpcrdma_unmap_one(struct rpcrdma_ia *, struct rpcrdma_mr_seg *);
+
+/*
+ * Wrappers for chunk registration, shared by read/write chunk code.
+ */
+
+void rpcrdma_mapping_error(struct rpcrdma_mr_seg *);
+
+static inline enum dma_data_direction
+rpcrdma_data_dir(bool writing)
+{
+	return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+}
+
+static inline void
+rpcrdma_map_one(struct ib_device *device, struct rpcrdma_mr_seg *seg,
+		enum dma_data_direction direction)
+{
+	seg->mr_dir = direction;
+	seg->mr_dmalen = seg->mr_len;
+
+	if (seg->mr_page)
+		seg->mr_dma = ib_dma_map_page(device,
+				seg->mr_page, offset_in_page(seg->mr_offset),
+				seg->mr_dmalen, seg->mr_dir);
+	else
+		seg->mr_dma = ib_dma_map_single(device,
+				seg->mr_offset,
+				seg->mr_dmalen, seg->mr_dir);
+
+	if (ib_dma_mapping_error(device, seg->mr_dma))
+		rpcrdma_mapping_error(seg);
+}
+
+static inline void
+rpcrdma_unmap_one(struct ib_device *device, struct rpcrdma_mr_seg *seg)
+{
+	if (seg->mr_page)
+		ib_dma_unmap_page(device,
+			seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
+	else
+		ib_dma_unmap_single(device,
+			seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
+}
 
 /*
  * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c