author     Chuck Lever <chuck.lever@oracle.com>        2017-12-14 20:57:47 -0500
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>  2018-01-16 11:19:50 -0500
commit     ce5b3717828356ce2c61e5a2a830df970fc90fb9 (patch)
tree       47ddc073a57349ca01c162dbed957062d802e217 /net/sunrpc
parent     30b5416bf0fd3bfef55543343ad1e85d32e32de4 (diff)
xprtrdma: Replace all usage of "frmr" with "frwr"
Clean up: Over time, the industry has adopted the term "frwr" instead of
"frmr". The term "frwr" is now more widely recognized.

For the past couple of years I've attempted to add new code using "frwr",
but there still remains plenty of older code that still uses "frmr".
Replace all usage of "frmr" to avoid confusion.

While we're churning code, rename variables unhelpfully called "f" to
"frwr", to improve code clarity.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
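For orientation, a condensed sketch of the renamed definitions follows; it is abridged from the net/sunrpc/xprtrdma/xprt_rdma.h hunk later in this patch (field list shortened, not the complete structure):

/* Condensed, abridged view of the renamed definitions (see the
 * xprt_rdma.h hunk below for the full context).
 */
enum rpcrdma_frwr_state {               /* was: enum rpcrdma_frmr_state */
        FRWR_IS_INVALID,        /* ready to be used */
        FRWR_IS_VALID,          /* in use */
        FRWR_FLUSHED_FR,        /* flushed FASTREG WR */
        FRWR_FLUSHED_LI,        /* flushed LOCALINV WR */
};

struct rpcrdma_frwr {                   /* was: struct rpcrdma_frmr */
        struct ib_mr            *fr_mr;
        struct ib_cqe           fr_cqe;
        enum rpcrdma_frwr_state fr_state;
        struct completion       fr_linv_done;
        /* ... */
};

Local variables formerly named "f" now read "frwr", for example
"struct rpcrdma_frwr *frwr = &r->frwr;" in frwr_op_init_mr().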
Diffstat (limited to 'net/sunrpc')
-rw-r--r--   net/sunrpc/xprtrdma/frwr_ops.c    176
-rw-r--r--   net/sunrpc/xprtrdma/transport.c     2
-rw-r--r--   net/sunrpc/xprtrdma/verbs.c         2
-rw-r--r--   net/sunrpc/xprtrdma/xprt_rdma.h    18
4 files changed, 99 insertions(+), 99 deletions(-)
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index e1f73037b554..185eb69e5fb5 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -1,11 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2015 Oracle. All rights reserved.
+ * Copyright (c) 2015, 2017 Oracle. All rights reserved.
  * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
  */

 /* Lightweight memory registration using Fast Registration Work
- * Requests (FRWR). Also referred to sometimes as FRMR mode.
+ * Requests (FRWR).
  *
  * FRWR features ordered asynchronous registration and deregistration
  * of arbitrarily sized memory regions. This is the fastest and safest
@@ -15,9 +15,9 @@
 /* Normal operation
  *
  * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
- * Work Request (frmr_op_map). When the RDMA operation is finished, this
+ * Work Request (frwr_op_map). When the RDMA operation is finished, this
  * Memory Region is invalidated using a LOCAL_INV Work Request
- * (frmr_op_unmap).
+ * (frwr_op_unmap).
  *
  * Typically these Work Requests are not signaled, and neither are RDMA
  * SEND Work Requests (with the exception of signaling occasionally to
@@ -98,12 +98,12 @@ out_not_supported:
 static int
 frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
 {
-        unsigned int depth = ia->ri_max_frmr_depth;
-        struct rpcrdma_frmr *f = &r->frmr;
+        unsigned int depth = ia->ri_max_frwr_depth;
+        struct rpcrdma_frwr *frwr = &r->frwr;
         int rc;

-        f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
-        if (IS_ERR(f->fr_mr))
+        frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
+        if (IS_ERR(frwr->fr_mr))
                 goto out_mr_err;

         r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
@@ -111,11 +111,11 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
                 goto out_list_err;

         sg_init_table(r->mw_sg, depth);
-        init_completion(&f->fr_linv_done);
+        init_completion(&frwr->fr_linv_done);
         return 0;

 out_mr_err:
-        rc = PTR_ERR(f->fr_mr);
+        rc = PTR_ERR(frwr->fr_mr);
         dprintk("RPC: %s: ib_alloc_mr status %i\n",
                 __func__, rc);
         return rc;
@@ -124,7 +124,7 @@ out_list_err:
         rc = -ENOMEM;
         dprintk("RPC: %s: sg allocation failure\n",
                 __func__);
-        ib_dereg_mr(f->fr_mr);
+        ib_dereg_mr(frwr->fr_mr);
         return rc;
 }

@@ -137,7 +137,7 @@ frwr_op_release_mr(struct rpcrdma_mw *r)
         if (!list_empty(&r->mw_list))
                 list_del(&r->mw_list);

-        rc = ib_dereg_mr(r->frmr.fr_mr);
+        rc = ib_dereg_mr(r->frwr.fr_mr);
         if (rc)
                 pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
                        r, rc);
@@ -148,41 +148,41 @@ frwr_op_release_mr(struct rpcrdma_mw *r)
 static int
 __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
 {
-        struct rpcrdma_frmr *f = &r->frmr;
+        struct rpcrdma_frwr *frwr = &r->frwr;
         int rc;

-        rc = ib_dereg_mr(f->fr_mr);
+        rc = ib_dereg_mr(frwr->fr_mr);
         if (rc) {
                 pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
                         rc, r);
                 return rc;
         }

-        f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
-                               ia->ri_max_frmr_depth);
-        if (IS_ERR(f->fr_mr)) {
+        frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
+                                  ia->ri_max_frwr_depth);
+        if (IS_ERR(frwr->fr_mr)) {
                 pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
-                        PTR_ERR(f->fr_mr), r);
-                return PTR_ERR(f->fr_mr);
+                        PTR_ERR(frwr->fr_mr), r);
+                return PTR_ERR(frwr->fr_mr);
         }

-        dprintk("RPC: %s: recovered FRMR %p\n", __func__, f);
-        f->fr_state = FRMR_IS_INVALID;
+        dprintk("RPC: %s: recovered FRWR %p\n", __func__, frwr);
+        frwr->fr_state = FRWR_IS_INVALID;
         return 0;
 }

-/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
+/* Reset of a single FRWR. Generate a fresh rkey by replacing the MR.
  */
 static void
 frwr_op_recover_mr(struct rpcrdma_mw *mw)
 {
-        enum rpcrdma_frmr_state state = mw->frmr.fr_state;
+        enum rpcrdma_frwr_state state = mw->frwr.fr_state;
         struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
         struct rpcrdma_ia *ia = &r_xprt->rx_ia;
         int rc;

         rc = __frwr_reset_mr(ia, mw);
-        if (state != FRMR_FLUSHED_LI)
+        if (state != FRWR_FLUSHED_LI)
                 ib_dma_unmap_sg(ia->ri_device,
                                 mw->mw_sg, mw->mw_nents, mw->mw_dir);
         if (rc)
@@ -193,7 +193,7 @@ frwr_op_recover_mr(struct rpcrdma_mw *mw)
         return;

 out_release:
-        pr_err("rpcrdma: FRMR reset failed %d, %p release\n", rc, mw);
+        pr_err("rpcrdma: FRWR reset failed %d, %p release\n", rc, mw);
         r_xprt->rx_stats.mrs_orphaned++;

         spin_lock(&r_xprt->rx_buf.rb_mwlock);
@@ -214,31 +214,31 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
         if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
                 ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

-        ia->ri_max_frmr_depth =
+        ia->ri_max_frwr_depth =
                 min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                       attrs->max_fast_reg_page_list_len);
         dprintk("RPC: %s: device's max FR page list len = %u\n",
-                __func__, ia->ri_max_frmr_depth);
+                __func__, ia->ri_max_frwr_depth);

-        /* Add room for frmr register and invalidate WRs.
-         * 1. FRMR reg WR for head
-         * 2. FRMR invalidate WR for head
-         * 3. N FRMR reg WRs for pagelist
-         * 4. N FRMR invalidate WRs for pagelist
-         * 5. FRMR reg WR for tail
-         * 6. FRMR invalidate WR for tail
+        /* Add room for frwr register and invalidate WRs.
+         * 1. FRWR reg WR for head
+         * 2. FRWR invalidate WR for head
+         * 3. N FRWR reg WRs for pagelist
+         * 4. N FRWR invalidate WRs for pagelist
+         * 5. FRWR reg WR for tail
+         * 6. FRWR invalidate WR for tail
          * 7. The RDMA_SEND WR
          */
         depth = 7;

-        /* Calculate N if the device max FRMR depth is smaller than
+        /* Calculate N if the device max FRWR depth is smaller than
          * RPCRDMA_MAX_DATA_SEGS.
          */
-        if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
-                delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
+        if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
+                delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
                 do {
-                        depth += 2; /* FRMR reg + invalidate */
-                        delta -= ia->ri_max_frmr_depth;
+                        depth += 2; /* FRWR reg + invalidate */
+                        delta -= ia->ri_max_frwr_depth;
                 } while (delta > 0);
         }

@@ -252,7 +252,7 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
         }

         ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
-                                ia->ri_max_frmr_depth);
+                                ia->ri_max_frwr_depth);
         return 0;
 }

@@ -265,7 +265,7 @@ frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
         struct rpcrdma_ia *ia = &r_xprt->rx_ia;

         return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
-                     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
+                     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frwr_depth);
 }

 static void
@@ -286,14 +286,14 @@ __frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
 static void
 frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
 {
-        struct rpcrdma_frmr *frmr;
+        struct rpcrdma_frwr *frwr;
         struct ib_cqe *cqe;

         /* WARNING: Only wr_cqe and status are reliable at this point */
         if (wc->status != IB_WC_SUCCESS) {
                 cqe = wc->wr_cqe;
-                frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
-                frmr->fr_state = FRMR_FLUSHED_FR;
+                frwr = container_of(cqe, struct rpcrdma_frwr, fr_cqe);
+                frwr->fr_state = FRWR_FLUSHED_FR;
                 __frwr_sendcompletion_flush(wc, "fastreg");
         }
 }
@@ -307,14 +307,14 @@ frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
 static void
 frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
 {
-        struct rpcrdma_frmr *frmr;
+        struct rpcrdma_frwr *frwr;
         struct ib_cqe *cqe;

         /* WARNING: Only wr_cqe and status are reliable at this point */
         if (wc->status != IB_WC_SUCCESS) {
                 cqe = wc->wr_cqe;
-                frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
-                frmr->fr_state = FRMR_FLUSHED_LI;
+                frwr = container_of(cqe, struct rpcrdma_frwr, fr_cqe);
+                frwr->fr_state = FRWR_FLUSHED_LI;
                 __frwr_sendcompletion_flush(wc, "localinv");
         }
 }
@@ -329,17 +329,17 @@ frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
 static void
 frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
 {
-        struct rpcrdma_frmr *frmr;
+        struct rpcrdma_frwr *frwr;
         struct ib_cqe *cqe;

         /* WARNING: Only wr_cqe and status are reliable at this point */
         cqe = wc->wr_cqe;
-        frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
+        frwr = container_of(cqe, struct rpcrdma_frwr, fr_cqe);
         if (wc->status != IB_WC_SUCCESS) {
-                frmr->fr_state = FRMR_FLUSHED_LI;
+                frwr->fr_state = FRWR_FLUSHED_LI;
                 __frwr_sendcompletion_flush(wc, "localinv");
         }
-        complete(&frmr->fr_linv_done);
+        complete(&frwr->fr_linv_done);
 }

 /* Post a REG_MR Work Request to register a memory region
@@ -351,8 +351,8 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 {
         struct rpcrdma_ia *ia = &r_xprt->rx_ia;
         bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
+        struct rpcrdma_frwr *frwr;
         struct rpcrdma_mw *mw;
-        struct rpcrdma_frmr *frmr;
         struct ib_mr *mr;
         struct ib_reg_wr *reg_wr;
         struct ib_send_wr *bad_wr;
@@ -366,14 +366,13 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
                 mw = rpcrdma_get_mw(r_xprt);
                 if (!mw)
                         return ERR_PTR(-ENOBUFS);
-        } while (mw->frmr.fr_state != FRMR_IS_INVALID);
-        frmr = &mw->frmr;
-        frmr->fr_state = FRMR_IS_VALID;
-        mr = frmr->fr_mr;
-        reg_wr = &frmr->fr_regwr;
-
-        if (nsegs > ia->ri_max_frmr_depth)
-                nsegs = ia->ri_max_frmr_depth;
+        } while (mw->frwr.fr_state != FRWR_IS_INVALID);
+        frwr = &mw->frwr;
+        frwr->fr_state = FRWR_IS_VALID;
+        mr = frwr->fr_mr;
+
+        if (nsegs > ia->ri_max_frwr_depth)
+                nsegs = ia->ri_max_frwr_depth;
         for (i = 0; i < nsegs;) {
                 if (seg->mr_page)
                         sg_set_page(&mw->mw_sg[i],
@@ -402,16 +401,17 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
         if (unlikely(n != mw->mw_nents))
                 goto out_mapmr_err;

-        dprintk("RPC: %s: Using frmr %p to map %u segments (%llu bytes)\n",
-                __func__, frmr, mw->mw_nents, mr->length);
+        dprintk("RPC: %s: Using frwr %p to map %u segments (%llu bytes)\n",
+                __func__, frwr, mw->mw_nents, mr->length);

         key = (u8)(mr->rkey & 0x000000FF);
         ib_update_fast_reg_key(mr, ++key);

+        reg_wr = &frwr->fr_regwr;
         reg_wr->wr.next = NULL;
         reg_wr->wr.opcode = IB_WR_REG_MR;
-        frmr->fr_cqe.done = frwr_wc_fastreg;
-        reg_wr->wr.wr_cqe = &frmr->fr_cqe;
+        frwr->fr_cqe.done = frwr_wc_fastreg;
+        reg_wr->wr.wr_cqe = &frwr->fr_cqe;
         reg_wr->wr.num_sge = 0;
         reg_wr->wr.send_flags = 0;
         reg_wr->mr = mr;
@@ -434,18 +434,18 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 out_dmamap_err:
         pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
                mw->mw_sg, i);
-        frmr->fr_state = FRMR_IS_INVALID;
+        frwr->fr_state = FRWR_IS_INVALID;
         rpcrdma_put_mw(r_xprt, mw);
         return ERR_PTR(-EIO);

 out_mapmr_err:
         pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
-               frmr->fr_mr, n, mw->mw_nents);
+               frwr->fr_mr, n, mw->mw_nents);
         rpcrdma_defer_mr_recovery(mw);
         return ERR_PTR(-EIO);

 out_senderr:
-        pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
+        pr_err("rpcrdma: FRWR registration ib_post_send returned %i\n", rc);
         rpcrdma_defer_mr_recovery(mw);
         return ERR_PTR(-ENOTCONN);
 }
@@ -462,7 +462,7 @@ frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mws)
                 struct rpcrdma_xprt *r_xprt = mw->mw_xprt;

                 list_del(&mw->mw_list);
-                mw->frmr.fr_state = FRMR_IS_INVALID;
+                mw->frwr.fr_state = FRWR_IS_INVALID;
                 ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
                                 mw->mw_sg, mw->mw_nents, mw->mw_dir);
                 rpcrdma_put_mw(r_xprt, mw);
@@ -483,7 +483,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
 {
         struct ib_send_wr *first, **prev, *last, *bad_wr;
         struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-        struct rpcrdma_frmr *f;
+        struct rpcrdma_frwr *frwr;
         struct rpcrdma_mw *mw;
         int count, rc;

@@ -492,20 +492,20 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
          * Chain the LOCAL_INV Work Requests and post them with
          * a single ib_post_send() call.
          */
-        f = NULL;
+        frwr = NULL;
         count = 0;
         prev = &first;
         list_for_each_entry(mw, mws, mw_list) {
-                mw->frmr.fr_state = FRMR_IS_INVALID;
+                mw->frwr.fr_state = FRWR_IS_INVALID;

-                f = &mw->frmr;
-                dprintk("RPC: %s: invalidating frmr %p\n",
-                        __func__, f);
+                frwr = &mw->frwr;
+                dprintk("RPC: %s: invalidating frwr %p\n",
+                        __func__, frwr);

-                f->fr_cqe.done = frwr_wc_localinv;
-                last = &f->fr_invwr;
+                frwr->fr_cqe.done = frwr_wc_localinv;
+                last = &frwr->fr_invwr;
                 memset(last, 0, sizeof(*last));
-                last->wr_cqe = &f->fr_cqe;
+                last->wr_cqe = &frwr->fr_cqe;
                 last->opcode = IB_WR_LOCAL_INV;
                 last->ex.invalidate_rkey = mw->mw_handle;
                 count++;
@@ -513,7 +513,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
                 *prev = last;
                 prev = &last->next;
         }
-        if (!f)
+        if (!frwr)
                 goto unmap;

         /* Strong send queue ordering guarantees that when the
@@ -521,8 +521,8 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
          * are complete.
          */
         last->send_flags = IB_SEND_SIGNALED;
-        f->fr_cqe.done = frwr_wc_localinv_wake;
-        reinit_completion(&f->fr_linv_done);
+        frwr->fr_cqe.done = frwr_wc_localinv_wake;
+        reinit_completion(&frwr->fr_linv_done);

         /* Transport disconnect drains the receive CQ before it
          * replaces the QP. The RPC reply handler won't call us
@@ -532,7 +532,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
         bad_wr = NULL;
         rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
         if (bad_wr != first)
-                wait_for_completion(&f->fr_linv_done);
+                wait_for_completion(&frwr->fr_linv_done);
         if (rc)
                 goto reset_mrs;

@@ -542,8 +542,8 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
 unmap:
         while (!list_empty(mws)) {
                 mw = rpcrdma_pop_mw(mws);
-                dprintk("RPC: %s: DMA unmapping frmr %p\n",
-                        __func__, &mw->frmr);
+                dprintk("RPC: %s: DMA unmapping frwr %p\n",
+                        __func__, &mw->frwr);
                 ib_dma_unmap_sg(ia->ri_device,
                                 mw->mw_sg, mw->mw_nents, mw->mw_dir);
                 rpcrdma_put_mw(r_xprt, mw);
@@ -551,15 +551,15 @@ unmap:
         return;

 reset_mrs:
-        pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);
+        pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);

         /* Find and reset the MRs in the LOCAL_INV WRs that did not
          * get posted.
          */
         while (bad_wr) {
-                f = container_of(bad_wr, struct rpcrdma_frmr,
+                frwr = container_of(bad_wr, struct rpcrdma_frwr,
                                  fr_invwr);
-                mw = container_of(f, struct rpcrdma_mw, frmr);
+                mw = container_of(frwr, struct rpcrdma_mw, frwr);

                 __frwr_reset_mr(ia, mw);

diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index be8c4e62d3f2..ddf0d87812ef 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -67,7 +67,7 @@
 static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
 unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
 static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
-unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
+unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRWR;
 int xprt_rdma_pad_optimize;

 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index d6c737d4c36b..840579919ad0 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -388,7 +388,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
         }

         switch (xprt_rdma_memreg_strategy) {
-        case RPCRDMA_FRMR:
+        case RPCRDMA_FRWR:
                 if (frwr_is_supported(ia)) {
                         ia->ri_ops = &rpcrdma_frwr_memreg_ops;
                         break;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index e084130d3d84..f52269afaa09 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -73,7 +73,7 @@ struct rpcrdma_ia {
         struct completion       ri_remove_done;
         int                     ri_async_rc;
         unsigned int            ri_max_segs;
-        unsigned int            ri_max_frmr_depth;
+        unsigned int            ri_max_frwr_depth;
         unsigned int            ri_max_inline_write;
         unsigned int            ri_max_inline_read;
         unsigned int            ri_max_send_sges;
@@ -242,17 +242,17 @@ enum {
  * rpcrdma_deregister_external() uses this metadata to unmap and
  * release these resources when an RPC is complete.
  */
-enum rpcrdma_frmr_state {
-        FRMR_IS_INVALID,        /* ready to be used */
-        FRMR_IS_VALID,          /* in use */
-        FRMR_FLUSHED_FR,        /* flushed FASTREG WR */
-        FRMR_FLUSHED_LI,        /* flushed LOCALINV WR */
+enum rpcrdma_frwr_state {
+        FRWR_IS_INVALID,        /* ready to be used */
+        FRWR_IS_VALID,          /* in use */
+        FRWR_FLUSHED_FR,        /* flushed FASTREG WR */
+        FRWR_FLUSHED_LI,        /* flushed LOCALINV WR */
 };

-struct rpcrdma_frmr {
+struct rpcrdma_frwr {
         struct ib_mr            *fr_mr;
         struct ib_cqe           fr_cqe;
-        enum rpcrdma_frmr_state fr_state;
+        enum rpcrdma_frwr_state fr_state;
         struct completion       fr_linv_done;
         union {
                 struct ib_reg_wr        fr_regwr;
@@ -272,7 +272,7 @@ struct rpcrdma_mw {
         enum dma_data_direction mw_dir;
         union {
                 struct rpcrdma_fmr      fmr;
-                struct rpcrdma_frmr     frmr;
+                struct rpcrdma_frwr     frwr;
         };
         struct rpcrdma_xprt     *mw_xprt;
         u32                     mw_handle;