author    Shlomo Pongratz <shlomop@mellanox.com>	2013-07-28 05:35:37 -0400
committer Roland Dreier <roland@purestorage.com>	2013-08-09 20:18:08 -0400
commit    986db0d6c08125bdf50d8ffdc3b0307aa2871e3e (patch)
tree      630ea8d21de9764dd39afbbcb066824ae922866f /drivers
parent    f91424cf5b0f22a33dea4ccfb0780ee45517b3c8 (diff)
IB/iser: Restructure allocation/deallocation of connection resources
This is a preparation step for a patch that accepts the number of max SCSI commands to be supported per session from user-space iSCSI tools.

Move the allocation of the login buffer, the FMR pool and its associated page vector from iser_create_ib_conn_res() (which is called before we actually know how many commands should be supported) to iser_alloc_rx_descriptors() (which is called during the iSCSI connection bind step, where this quantity is known).

Also do some small refactoring around the deallocation to make that path similar to the allocation one.

Signed-off-by: Shlomo Pongratz <shlomop@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
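Editor's note: the following is a minimal, hedged user-space C sketch of the control flow the patch establishes in iser_alloc_rx_descriptors() and its teardown counterpart: allocate the FMR pool first, then the login buffer, then the RX descriptors, and unwind with gotos in reverse order on failure. The conn_resources struct and the alloc_or_null()/free_conn_resources() helpers are simplified stand-ins invented for illustration, not the driver's real types or functions.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the driver's per-connection resources. */
struct conn_resources {
	void *fmr_pool;		/* stands in for the FMR pool + page_vec */
	void *login_buf;	/* stands in for the login request/response buffer */
	void *rx_descs;		/* stands in for the RX descriptor array */
};

/* Hypothetical helper modelling an allocation step that can fail. */
static void *alloc_or_null(size_t size)
{
	return malloc(size);
}

/*
 * Mirrors the ordering the patch sets up in iser_alloc_rx_descriptors():
 * each failure label undoes only the steps that already succeeded.
 */
static int alloc_conn_resources(struct conn_resources *c)
{
	c->fmr_pool = alloc_or_null(64);	/* like iser_create_fmr_pool() */
	if (!c->fmr_pool)
		goto create_fmr_pool_failed;

	c->login_buf = alloc_or_null(128);	/* like iser_alloc_login_buf() */
	if (!c->login_buf)
		goto alloc_login_buf_fail;

	c->rx_descs = alloc_or_null(256);	/* like the rx_descs kmalloc */
	if (!c->rx_descs)
		goto rx_desc_alloc_fail;

	return 0;

rx_desc_alloc_fail:
	free(c->login_buf);
	c->login_buf = NULL;
alloc_login_buf_fail:
	free(c->fmr_pool);
	c->fmr_pool = NULL;
create_fmr_pool_failed:
	fprintf(stderr, "failed allocating rx descriptors / data buffers\n");
	return -1;
}

/* Deallocation mirrors the allocation path, as the commit message describes. */
static void free_conn_resources(struct conn_resources *c)
{
	free(c->rx_descs);
	c->rx_descs = NULL;	/* make sure we never free anything twice */
	free(c->login_buf);
	c->login_buf = NULL;
	free(c->fmr_pool);
	c->fmr_pool = NULL;
}

int main(void)
{
	struct conn_resources c = { 0 };

	if (alloc_conn_resources(&c) == 0)
		free_conn_resources(&c);
	return 0;
}

The goto labels are named for the step that failed (create_fmr_pool_failed, alloc_login_buf_fail, rx_desc_alloc_fail), matching the patch's convention, so the cleanup path never touches a resource that was not yet allocated.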
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h     |   2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c |  92
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c     | 128
3 files changed, 151 insertions(+), 71 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index d694bcd479fe..fee8829053e0 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -395,4 +395,6 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
 int iser_initialize_task_headers(struct iscsi_task *task,
 				 struct iser_tx_desc *tx_desc);
 int iser_alloc_rx_descriptors(struct iser_conn *ib_conn);
+int iser_create_fmr_pool(struct iser_conn *ib_conn);
+void iser_free_fmr_pool(struct iser_conn *ib_conn);
 #endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index b6d81a86c976..626d950b64a8 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -170,6 +170,76 @@ static void iser_create_send_desc(struct iser_conn *ib_conn,
 	}
 }
 
+static void iser_free_login_buf(struct iser_conn *ib_conn)
+{
+	if (!ib_conn->login_buf)
+		return;
+
+	if (ib_conn->login_req_dma)
+		ib_dma_unmap_single(ib_conn->device->ib_device,
+				    ib_conn->login_req_dma,
+				    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+
+	if (ib_conn->login_resp_dma)
+		ib_dma_unmap_single(ib_conn->device->ib_device,
+				    ib_conn->login_resp_dma,
+				    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+
+	kfree(ib_conn->login_buf);
+
+	/* make sure we never redo any unmapping */
+	ib_conn->login_req_dma = 0;
+	ib_conn->login_resp_dma = 0;
+	ib_conn->login_buf = NULL;
+}
+
+static int iser_alloc_login_buf(struct iser_conn *ib_conn)
+{
+	struct iser_device *device;
+	int req_err, resp_err;
+
+	BUG_ON(ib_conn->device == NULL);
+
+	device = ib_conn->device;
+
+	ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
+				     ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+	if (!ib_conn->login_buf)
+		goto out_err;
+
+	ib_conn->login_req_buf  = ib_conn->login_buf;
+	ib_conn->login_resp_buf = ib_conn->login_buf +
+						ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+	ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
+				(void *)ib_conn->login_req_buf,
+				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+
+	ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
+				(void *)ib_conn->login_resp_buf,
+				ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+
+	req_err  = ib_dma_mapping_error(device->ib_device,
+					ib_conn->login_req_dma);
+	resp_err = ib_dma_mapping_error(device->ib_device,
+					ib_conn->login_resp_dma);
+
+	if (req_err || resp_err) {
+		if (req_err)
+			ib_conn->login_req_dma = 0;
+		if (resp_err)
+			ib_conn->login_resp_dma = 0;
+		goto free_login_buf;
+	}
+	return 0;
+
+free_login_buf:
+	iser_free_login_buf(ib_conn);
+
+out_err:
+	iser_err("unable to alloc or map login buf\n");
+	return -ENOMEM;
+}
 
 int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
 {
@@ -179,6 +249,12 @@ int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
 	struct ib_sge *rx_sg;
 	struct iser_device *device = ib_conn->device;
 
+	if (iser_create_fmr_pool(ib_conn))
+		goto create_fmr_pool_failed;
+
+	if (iser_alloc_login_buf(ib_conn))
+		goto alloc_login_buf_fail;
+
 	ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
 				sizeof(struct iser_rx_desc), GFP_KERNEL);
 	if (!ib_conn->rx_descs)
@@ -207,10 +283,14 @@ rx_desc_dma_map_failed:
 	rx_desc = ib_conn->rx_descs;
 	for (j = 0; j < i; j++, rx_desc++)
 		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
 				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 	kfree(ib_conn->rx_descs);
 	ib_conn->rx_descs = NULL;
 rx_desc_alloc_fail:
+	iser_free_login_buf(ib_conn);
+alloc_login_buf_fail:
+	iser_free_fmr_pool(ib_conn);
+create_fmr_pool_failed:
 	iser_err("failed allocating rx descriptors / data buffers\n");
 	return -ENOMEM;
 }
@@ -222,13 +302,19 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
 	struct iser_device *device = ib_conn->device;
 
 	if (!ib_conn->rx_descs)
-		return;
+		goto free_login_buf;
 
 	rx_desc = ib_conn->rx_descs;
 	for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
 		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
 				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 	kfree(ib_conn->rx_descs);
+	/* make sure we never redo any unmapping */
+	ib_conn->rx_descs = NULL;
+
+free_login_buf:
+	iser_free_login_buf(ib_conn);
+	iser_free_fmr_pool(ib_conn);
 }
 
 static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 2c4941d0656b..b72e349790d2 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -178,56 +178,23 @@ static void iser_free_device_ib_res(struct iser_device *device)
 }
 
 /**
- * iser_create_ib_conn_res - Creates FMR pool and Queue-Pair (QP)
+ * iser_create_fmr_pool - Creates FMR pool and page_vector
  *
- * returns 0 on success, -1 on failure
+ * returns 0 on success, or errno code on failure
  */
-static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+int iser_create_fmr_pool(struct iser_conn *ib_conn)
 {
-	struct iser_device *device;
-	struct ib_qp_init_attr init_attr;
-	int req_err, resp_err, ret = -ENOMEM;
+	struct iser_device *device = ib_conn->device;
 	struct ib_fmr_pool_param params;
-	int index, min_index = 0;
+	int ret = -ENOMEM;
-
-	BUG_ON(ib_conn->device == NULL);
-
-	device = ib_conn->device;
-
-	ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
-				     ISER_RX_LOGIN_SIZE, GFP_KERNEL);
-	if (!ib_conn->login_buf)
-		goto out_err;
-
-	ib_conn->login_req_buf  = ib_conn->login_buf;
-	ib_conn->login_resp_buf = ib_conn->login_buf + ISCSI_DEF_MAX_RECV_SEG_LEN;
-
-	ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
-				(void *)ib_conn->login_req_buf,
-				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
-
-	ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
-				(void *)ib_conn->login_resp_buf,
-				ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
-
-	req_err  = ib_dma_mapping_error(device->ib_device, ib_conn->login_req_dma);
-	resp_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_resp_dma);
-
-	if (req_err || resp_err) {
-		if (req_err)
-			ib_conn->login_req_dma = 0;
-		if (resp_err)
-			ib_conn->login_resp_dma = 0;
-		goto out_err;
-	}
 
 	ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
-				    (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
+				    (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE+1)),
 				    GFP_KERNEL);
 	if (!ib_conn->page_vec)
-		goto out_err;
+		return ret;
 
-	ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
+	ib_conn->page_vec->pages = (u64 *)(ib_conn->page_vec + 1);
 
 	params.page_shift        = SHIFT_4K;
 	/* when the first/last SG element are not start/end *
@@ -244,15 +211,56 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 					    IB_ACCESS_REMOTE_READ);
 
 	ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
+	if (!IS_ERR(ib_conn->fmr_pool))
+		return 0;
+
+	/* no FMR => no need for page_vec */
+	kfree(ib_conn->page_vec);
+	ib_conn->page_vec = NULL;
+
 	ret = PTR_ERR(ib_conn->fmr_pool);
-	if (IS_ERR(ib_conn->fmr_pool) && ret != -ENOSYS) {
-		ib_conn->fmr_pool = NULL;
-		goto out_err;
-	} else if (ret == -ENOSYS) {
-		ib_conn->fmr_pool = NULL;
+	ib_conn->fmr_pool = NULL;
+	if (ret != -ENOSYS) {
+		iser_err("FMR allocation failed, err %d\n", ret);
+		return ret;
+	} else {
 		iser_warn("FMRs are not supported, using unaligned mode\n");
-		ret = 0;
+		return 0;
 	}
+}
+
+/**
+ * iser_free_fmr_pool - releases the FMR pool and page vec
+ */
+void iser_free_fmr_pool(struct iser_conn *ib_conn)
+{
+	iser_info("freeing conn %p fmr pool %p\n",
+		  ib_conn, ib_conn->fmr_pool);
+
+	if (ib_conn->fmr_pool != NULL)
+		ib_destroy_fmr_pool(ib_conn->fmr_pool);
+
+	ib_conn->fmr_pool = NULL;
+
+	kfree(ib_conn->page_vec);
+	ib_conn->page_vec = NULL;
+}
+
+/**
+ * iser_create_ib_conn_res - Queue-Pair (QP)
+ *
+ * returns 0 on success, -1 on failure
+ */
+static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+{
+	struct iser_device *device;
+	struct ib_qp_init_attr init_attr;
+	int ret = -ENOMEM;
+	int index, min_index = 0;
+
+	BUG_ON(ib_conn->device == NULL);
+
+	device = ib_conn->device;
 
 	memset(&init_attr, 0, sizeof init_attr);
 
@@ -282,9 +290,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 		goto out_err;
 
 	ib_conn->qp = ib_conn->cma_id->qp;
-	iser_info("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
+	iser_info("setting conn %p cma_id %p qp %p\n",
 		  ib_conn, ib_conn->cma_id,
-		  ib_conn->fmr_pool, ib_conn->cma_id->qp);
+		  ib_conn->cma_id->qp);
 	return ret;
 
 out_err:
@@ -293,7 +301,7 @@ out_err:
 }
 
 /**
- * releases the FMR pool and QP objects, returns 0 on success,
+ * releases the QP objects, returns 0 on success,
  * -1 on failure
  */
 static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
@@ -301,13 +309,11 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
 	int cq_index;
 	BUG_ON(ib_conn == NULL);
 
-	iser_info("freeing conn %p cma_id %p fmr pool %p qp %p\n",
+	iser_info("freeing conn %p cma_id %p qp %p\n",
 		  ib_conn, ib_conn->cma_id,
-		  ib_conn->fmr_pool, ib_conn->qp);
+		  ib_conn->qp);
 
 	/* qp is created only once both addr & route are resolved */
-	if (ib_conn->fmr_pool != NULL)
-		ib_destroy_fmr_pool(ib_conn->fmr_pool);
 
 	if (ib_conn->qp != NULL) {
 		cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
@@ -316,21 +322,7 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
 		rdma_destroy_qp(ib_conn->cma_id);
 	}
 
-	ib_conn->fmr_pool = NULL;
 	ib_conn->qp = NULL;
-	kfree(ib_conn->page_vec);
-
-	if (ib_conn->login_buf) {
-		if (ib_conn->login_req_dma)
-			ib_dma_unmap_single(ib_conn->device->ib_device,
-				ib_conn->login_req_dma,
-				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
-		if (ib_conn->login_resp_dma)
-			ib_dma_unmap_single(ib_conn->device->ib_device,
-				ib_conn->login_resp_dma,
-				ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
-		kfree(ib_conn->login_buf);
-	}
 
 	return 0;
 }