author     Chuck Lever <chuck.lever@oracle.com>        2016-06-29 13:52:29 -0400
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>  2016-07-11 15:50:43 -0400
commit     d48b1d295079f5e45b5c38683b7be353af1b2bda (patch)
tree       d9c696c1ae0ad03ca621fbe0f17bc1b88908296a
parent     564471d2f2f1ddaf02119b8759813666db93abba (diff)
xprtrdma: Move init and release helpers
Clean up: Moving these helpers into a separate patch makes later
patches more readable.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
-rw-r--r--  net/sunrpc/xprtrdma/fmr_ops.c   118
-rw-r--r--  net/sunrpc/xprtrdma/frwr_ops.c   90
2 files changed, 119 insertions(+), 89 deletions(-)
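
The patch moves each registration mode's setup and teardown into a paired init/release helper: acquire resources in order, unwind through shared goto labels on failure, and free everything in one mirror-image release function. As a rough illustration of that idiom, here is a minimal userspace C sketch; the "widget" names are invented, and calloc()/free() stand in for the kcalloc()/kfree() and ib_alloc_fmr()/ib_dealloc_fmr() calls used below.

#include <stdlib.h>
#include <string.h>

struct widget {
        unsigned long *physaddrs;       /* like mw->fmr.physaddrs */
        int *sg;                        /* like mw->mw_sg */
};

static int widget_init(struct widget *w, size_t nsegs)
{
        /* callers of __fmr_init() kzalloc() the containing struct,
         * so every pointer starts out NULL */
        memset(w, 0, sizeof(*w));

        w->physaddrs = calloc(nsegs, sizeof(*w->physaddrs));
        if (!w->physaddrs)
                goto out_free;

        w->sg = calloc(nsegs, sizeof(*w->sg));
        if (!w->sg)
                goto out_free;

        return 0;

out_free:
        /* free(NULL) is a no-op, so one label unwinds any partial state */
        free(w->sg);
        free(w->physaddrs);
        return -1;
}

static void widget_release(struct widget *w)
{
        /* mirror image of widget_init(): teardown lives in one place */
        free(w->sg);
        free(w->physaddrs);
}

int main(void)
{
        struct widget w;

        if (widget_init(&w, 64))
                return 1;
        /* ... use w ... */
        widget_release(&w);
        return 0;
}

Centralizing teardown this way is what lets fmr_op_destroy() below shrink to a call to __fmr_release() plus kfree(), and later patches can add or reorder per-MR resources without touching every caller.
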
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index c748ff6f6877..d52458496bc7 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -35,6 +35,12 @@
 /* Maximum scatter/gather per FMR */
 #define RPCRDMA_MAX_FMR_SGES (64)
 
+/* Access mode of externally registered pages */
+enum {
+        RPCRDMA_FMR_ACCESS_FLAGS = IB_ACCESS_REMOTE_WRITE |
+                                   IB_ACCESS_REMOTE_READ,
+};
+
 static struct workqueue_struct *fmr_recovery_wq;
 
 #define FMR_RECOVERY_WQ_FLAGS (WQ_UNBOUND)
@@ -60,6 +66,44 @@ fmr_destroy_recovery_wq(void)
 }
 
 static int
+__fmr_init(struct rpcrdma_mw *mw, struct ib_pd *pd)
+{
+        static struct ib_fmr_attr fmr_attr = {
+                .max_pages = RPCRDMA_MAX_FMR_SGES,
+                .max_maps = 1,
+                .page_shift = PAGE_SHIFT
+        };
+
+        mw->fmr.physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
+                                    sizeof(u64), GFP_KERNEL);
+        if (!mw->fmr.physaddrs)
+                goto out_free;
+
+        mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
+                            sizeof(*mw->mw_sg), GFP_KERNEL);
+        if (!mw->mw_sg)
+                goto out_free;
+
+        sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);
+
+        mw->fmr.fmr = ib_alloc_fmr(pd, RPCRDMA_FMR_ACCESS_FLAGS,
+                                   &fmr_attr);
+        if (IS_ERR(mw->fmr.fmr))
+                goto out_fmr_err;
+
+        return 0;
+
+out_fmr_err:
+        dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
+                PTR_ERR(mw->fmr.fmr));
+
+out_free:
+        kfree(mw->mw_sg);
+        kfree(mw->fmr.physaddrs);
+        return -ENOMEM;
+}
+
+static int
 __fmr_unmap(struct rpcrdma_mw *mw)
 {
         LIST_HEAD(l);
@@ -71,6 +115,30 @@ __fmr_unmap(struct rpcrdma_mw *mw)
         return rc;
 }
 
+static void
+__fmr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
+{
+        struct ib_device *device = r_xprt->rx_ia.ri_device;
+        int nsegs = seg->mr_nsegs;
+
+        while (nsegs--)
+                rpcrdma_unmap_one(device, seg++);
+}
+
+static void
+__fmr_release(struct rpcrdma_mw *r)
+{
+        int rc;
+
+        kfree(r->fmr.physaddrs);
+        kfree(r->mw_sg);
+
+        rc = ib_dealloc_fmr(r->fmr.fmr);
+        if (rc)
+                pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
+                       r, rc);
+}
+
 /* Deferred reset of a single FMR. Generate a fresh rkey by
  * replacing the MR. There's no recovery if this fails.
  */
@@ -119,12 +187,6 @@ static int
 fmr_op_init(struct rpcrdma_xprt *r_xprt)
 {
         struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-        int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
-        struct ib_fmr_attr fmr_attr = {
-                .max_pages = RPCRDMA_MAX_FMR_SGES,
-                .max_maps = 1,
-                .page_shift = PAGE_SHIFT
-        };
         struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
         struct rpcrdma_mw *r;
         int i, rc;
@@ -138,35 +200,22 @@ fmr_op_init(struct rpcrdma_xprt *r_xprt)
         i *= buf->rb_max_requests;      /* one set for each RPC slot */
         dprintk("RPC: %s: initalizing %d FMRs\n", __func__, i);
 
-        rc = -ENOMEM;
         while (i--) {
                 r = kzalloc(sizeof(*r), GFP_KERNEL);
                 if (!r)
-                        goto out;
-
-                r->fmr.physaddrs = kmalloc(RPCRDMA_MAX_FMR_SGES *
-                                           sizeof(u64), GFP_KERNEL);
-                if (!r->fmr.physaddrs)
-                        goto out_free;
+                        return -ENOMEM;
 
-                r->fmr.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr);
-                if (IS_ERR(r->fmr.fmr))
-                        goto out_fmr_err;
+                rc = __fmr_init(r, pd);
+                if (rc) {
+                        kfree(r);
+                        return rc;
+                }
 
                 r->mw_xprt = r_xprt;
                 list_add(&r->mw_list, &buf->rb_mws);
                 list_add(&r->mw_all, &buf->rb_all);
         }
         return 0;
-
-out_fmr_err:
-        rc = PTR_ERR(r->fmr.fmr);
-        dprintk("RPC: %s: ib_alloc_fmr status %i\n", __func__, rc);
-        kfree(r->fmr.physaddrs);
-out_free:
-        kfree(r);
-out:
-        return rc;
 }
 
 /* Use the ib_map_phys_fmr() verb to register a memory region
@@ -235,16 +284,6 @@ out_maperr:
         return rc;
 }
 
-static void
-__fmr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
-{
-        struct ib_device *device = r_xprt->rx_ia.ri_device;
-        int nsegs = seg->mr_nsegs;
-
-        while (nsegs--)
-                rpcrdma_unmap_one(device, seg++);
-}
-
 /* Invalidate all memory regions that were registered for "req".
  *
  * Sleeps until it is safe for the host CPU to access the
@@ -337,18 +376,11 @@ static void
 fmr_op_destroy(struct rpcrdma_buffer *buf)
 {
         struct rpcrdma_mw *r;
-        int rc;
 
         while (!list_empty(&buf->rb_all)) {
                 r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
                 list_del(&r->mw_all);
-                kfree(r->fmr.physaddrs);
-
-                rc = ib_dealloc_fmr(r->fmr.fmr);
-                if (rc)
-                        dprintk("RPC: %s: ib_dealloc_fmr failed %i\n",
-                                __func__, rc);
-
+                __fmr_release(r);
                 kfree(r);
         }
 }
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index f02ab80aa6ee..9cd60bf0917d 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -99,6 +99,50 @@ frwr_destroy_recovery_wq(void)
 }
 
 static int
+__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, unsigned int depth)
+{
+        struct rpcrdma_frmr *f = &r->frmr;
+        int rc;
+
+        f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
+        if (IS_ERR(f->fr_mr))
+                goto out_mr_err;
+
+        r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
+        if (!r->mw_sg)
+                goto out_list_err;
+
+        sg_init_table(r->mw_sg, depth);
+        init_completion(&f->fr_linv_done);
+        return 0;
+
+out_mr_err:
+        rc = PTR_ERR(f->fr_mr);
+        dprintk("RPC: %s: ib_alloc_mr status %i\n",
+                __func__, rc);
+        return rc;
+
+out_list_err:
+        rc = -ENOMEM;
+        dprintk("RPC: %s: sg allocation failure\n",
+                __func__);
+        ib_dereg_mr(f->fr_mr);
+        return rc;
+}
+
+static void
+__frwr_release(struct rpcrdma_mw *r)
+{
+        int rc;
+
+        rc = ib_dereg_mr(r->frmr.fr_mr);
+        if (rc)
+                pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
+                       r, rc);
+        kfree(r->mw_sg);
+}
+
+static int
 __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
 {
         struct rpcrdma_frmr *f = &r->frmr;
@@ -165,52 +209,6 @@ __frwr_queue_recovery(struct rpcrdma_mw *r)
 }
 
 static int
-__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, unsigned int depth)
-{
-        struct rpcrdma_frmr *f = &r->frmr;
-        int rc;
-
-        f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
-        if (IS_ERR(f->fr_mr))
-                goto out_mr_err;
-
-        r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
-        if (!r->mw_sg)
-                goto out_list_err;
-
-        sg_init_table(r->mw_sg, depth);
-
-        init_completion(&f->fr_linv_done);
-
-        return 0;
-
-out_mr_err:
-        rc = PTR_ERR(f->fr_mr);
-        dprintk("RPC: %s: ib_alloc_mr status %i\n",
-                __func__, rc);
-        return rc;
-
-out_list_err:
-        rc = -ENOMEM;
-        dprintk("RPC: %s: sg allocation failure\n",
-                __func__);
-        ib_dereg_mr(f->fr_mr);
-        return rc;
-}
-
-static void
-__frwr_release(struct rpcrdma_mw *r)
-{
-        int rc;
-
-        rc = ib_dereg_mr(r->frmr.fr_mr);
-        if (rc)
-                dprintk("RPC: %s: ib_dereg_mr status %i\n",
-                        __func__, rc);
-        kfree(r->mw_sg);
-}
-
-static int
 frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
              struct rpcrdma_create_data_internal *cdata)
 {