Diffstat (limited to 'drivers/infiniband/ulp/isert/ib_isert.c')
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c  222
1 file changed, 117 insertions(+), 105 deletions(-)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 9804fca6bf06..2b161be3c1a3 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -47,10 +47,10 @@ static int
 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr);
 static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr);
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
@@ -227,11 +227,11 @@ isert_create_device_ib_res(struct isert_device *device)
 
 	/* asign function handlers */
 	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
-		device->use_frwr = 1;
-		device->reg_rdma_mem = isert_reg_rdma_frwr;
-		device->unreg_rdma_mem = isert_unreg_rdma_frwr;
+		device->use_fastreg = 1;
+		device->reg_rdma_mem = isert_reg_rdma;
+		device->unreg_rdma_mem = isert_unreg_rdma;
 	} else {
-		device->use_frwr = 0;
+		device->use_fastreg = 0;
 		device->reg_rdma_mem = isert_map_rdma;
 		device->unreg_rdma_mem = isert_unmap_cmd;
 	}
@@ -239,9 +239,10 @@ isert_create_device_ib_res(struct isert_device *device)
 	device->cqs_used = min_t(int, num_online_cpus(),
 				 device->ib_device->num_comp_vectors);
 	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
-	pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n",
+	pr_debug("Using %d CQs, device %s supports %d vectors support "
+		 "Fast registration %d\n",
 		 device->cqs_used, device->ib_device->name,
-		 device->ib_device->num_comp_vectors, device->use_frwr);
+		 device->ib_device->num_comp_vectors, device->use_fastreg);
 	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
 				device->cqs_used, GFP_KERNEL);
 	if (!device->cq_desc) {
@@ -250,13 +251,6 @@ isert_create_device_ib_res(struct isert_device *device)
 	}
 	cq_desc = device->cq_desc;
 
-	device->dev_pd = ib_alloc_pd(ib_dev);
-	if (IS_ERR(device->dev_pd)) {
-		ret = PTR_ERR(device->dev_pd);
-		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
-		goto out_cq_desc;
-	}
-
 	for (i = 0; i < device->cqs_used; i++) {
 		cq_desc[i].device = device;
 		cq_desc[i].cq_index = i;
@@ -294,13 +288,6 @@ isert_create_device_ib_res(struct isert_device *device)
 			goto out_cq;
 	}
 
-	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(device->dev_mr)) {
-		ret = PTR_ERR(device->dev_mr);
-		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
-		goto out_cq;
-	}
-
 	return 0;
 
 out_cq:
@@ -316,9 +303,6 @@ out_cq:
 			ib_destroy_cq(device->dev_tx_cq[j]);
 		}
 	}
-	ib_dealloc_pd(device->dev_pd);
-
-out_cq_desc:
 	kfree(device->cq_desc);
 
 	return ret;
@@ -341,8 +325,6 @@ isert_free_device_ib_res(struct isert_device *device)
 		device->dev_tx_cq[i] = NULL;
 	}
 
-	ib_dereg_mr(device->dev_mr);
-	ib_dealloc_pd(device->dev_pd);
 	kfree(device->cq_desc);
 }
 
@@ -398,18 +380,18 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
 }
 
 static void
-isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
+isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 {
 	struct fast_reg_descriptor *fr_desc, *tmp;
 	int i = 0;
 
-	if (list_empty(&isert_conn->conn_frwr_pool))
+	if (list_empty(&isert_conn->conn_fr_pool))
 		return;
 
-	pr_debug("Freeing conn %p frwr pool", isert_conn);
+	pr_debug("Freeing conn %p fastreg pool", isert_conn);
 
 	list_for_each_entry_safe(fr_desc, tmp,
-				 &isert_conn->conn_frwr_pool, list) {
+				 &isert_conn->conn_fr_pool, list) {
 		list_del(&fr_desc->list);
 		ib_free_fast_reg_page_list(fr_desc->data_frpl);
 		ib_dereg_mr(fr_desc->data_mr);
@@ -417,20 +399,47 @@ isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
 		++i;
 	}
 
-	if (i < isert_conn->conn_frwr_pool_size)
+	if (i < isert_conn->conn_fr_pool_size)
 		pr_warn("Pool still has %d regions registered\n",
-			isert_conn->conn_frwr_pool_size - i);
+			isert_conn->conn_fr_pool_size - i);
+}
+
+static int
+isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
+		     struct fast_reg_descriptor *fr_desc)
+{
+	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
+							 ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(fr_desc->data_frpl)) {
+		pr_err("Failed to allocate data frpl err=%ld\n",
+		       PTR_ERR(fr_desc->data_frpl));
+		return PTR_ERR(fr_desc->data_frpl);
+	}
+
+	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(fr_desc->data_mr)) {
+		pr_err("Failed to allocate data frmr err=%ld\n",
+		       PTR_ERR(fr_desc->data_mr));
+		ib_free_fast_reg_page_list(fr_desc->data_frpl);
+		return PTR_ERR(fr_desc->data_mr);
+	}
+	pr_debug("Create fr_desc %p page_list %p\n",
+		 fr_desc, fr_desc->data_frpl->page_list);
+
+	fr_desc->valid = true;
+
+	return 0;
 }
 
 static int
-isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
 {
 	struct fast_reg_descriptor *fr_desc;
 	struct isert_device *device = isert_conn->conn_device;
 	int i, ret;
 
-	INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
-	isert_conn->conn_frwr_pool_size = 0;
+	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
+	isert_conn->conn_fr_pool_size = 0;
 	for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
 		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
 		if (!fr_desc) {
@@ -439,40 +448,25 @@ isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
 			goto err;
 		}
 
-		fr_desc->data_frpl =
-			ib_alloc_fast_reg_page_list(device->ib_device,
-						    ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(fr_desc->data_frpl)) {
-			pr_err("Failed to allocate fr_pg_list err=%ld\n",
-			       PTR_ERR(fr_desc->data_frpl));
-			ret = PTR_ERR(fr_desc->data_frpl);
-			goto err;
-		}
-
-		fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
-							ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(fr_desc->data_mr)) {
-			pr_err("Failed to allocate frmr err=%ld\n",
-			       PTR_ERR(fr_desc->data_mr));
-			ret = PTR_ERR(fr_desc->data_mr);
-			ib_free_fast_reg_page_list(fr_desc->data_frpl);
+		ret = isert_create_fr_desc(device->ib_device,
+					   isert_conn->conn_pd, fr_desc);
+		if (ret) {
+			pr_err("Failed to create fastreg descriptor err=%d\n",
+			       ret);
 			goto err;
 		}
-		pr_debug("Create fr_desc %p page_list %p\n",
-			 fr_desc, fr_desc->data_frpl->page_list);
 
-		fr_desc->valid = true;
-		list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
-		isert_conn->conn_frwr_pool_size++;
+		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
+		isert_conn->conn_fr_pool_size++;
 	}
 
-	pr_debug("Creating conn %p frwr pool size=%d",
-		 isert_conn, isert_conn->conn_frwr_pool_size);
+	pr_debug("Creating conn %p fastreg pool size=%d",
+		 isert_conn, isert_conn->conn_fr_pool_size);
 
 	return 0;
 
 err:
-	isert_conn_free_frwr_pool(isert_conn);
+	isert_conn_free_fastreg_pool(isert_conn);
 	return ret;
 }
 
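The new isert_create_fr_desc() helper and the pool loop above touch only four fields of struct fast_reg_descriptor (list, data_frpl, data_mr, valid); the actual definition lives in ib_isert.h, which is not part of this diff. A minimal sketch of the layout these hunks imply, where the comment on the valid flag is an assumption rather than something shown in the patch:

struct fast_reg_descriptor {
	struct list_head		list;		/* entry in isert_conn->conn_fr_pool */
	struct ib_mr			*data_mr;	/* from ib_alloc_fast_reg_mr() */
	struct ib_fast_reg_page_list	*data_frpl;	/* from ib_alloc_fast_reg_page_list() */
	bool				valid;		/* presumably: MR key needs no local invalidate before reuse */
};

One descriptor is created for each of the ISCSI_DEF_XMIT_CMDS_MAX command slots, so every outstanding command can take a descriptor from conn_fr_pool without allocating on the I/O path.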
@@ -558,14 +552,29 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	}
 
 	isert_conn->conn_device = device;
-	isert_conn->conn_pd = device->dev_pd;
-	isert_conn->conn_mr = device->dev_mr;
+	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
+	if (IS_ERR(isert_conn->conn_pd)) {
+		ret = PTR_ERR(isert_conn->conn_pd);
+		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+		       isert_conn, ret);
+		goto out_pd;
+	}
 
-	if (device->use_frwr) {
-		ret = isert_conn_create_frwr_pool(isert_conn);
+	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
+					    IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(isert_conn->conn_mr)) {
+		ret = PTR_ERR(isert_conn->conn_mr);
+		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+		       isert_conn, ret);
+		goto out_mr;
+	}
+
+	if (device->use_fastreg) {
+		ret = isert_conn_create_fastreg_pool(isert_conn);
 		if (ret) {
-			pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
-			goto out_frwr;
+			pr_err("Conn: %p failed to create fastreg pool\n",
+			       isert_conn);
+			goto out_fastreg;
 		}
 	}
 
@@ -582,9 +591,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	return 0;
 
 out_conn_dev:
-	if (device->use_frwr)
-		isert_conn_free_frwr_pool(isert_conn);
-out_frwr:
+	if (device->use_fastreg)
+		isert_conn_free_fastreg_pool(isert_conn);
+out_fastreg:
+	ib_dereg_mr(isert_conn->conn_mr);
+out_mr:
+	ib_dealloc_pd(isert_conn->conn_pd);
+out_pd:
 	isert_device_try_release(device);
 out_rsp_dma_map:
 	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
@@ -608,8 +621,8 @@ isert_connect_release(struct isert_conn *isert_conn)
 
 	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 
-	if (device && device->use_frwr)
-		isert_conn_free_frwr_pool(isert_conn);
+	if (device && device->use_fastreg)
+		isert_conn_free_fastreg_pool(isert_conn);
 
 	if (isert_conn->conn_qp) {
 		cq_index = ((struct isert_cq_desc *)
@@ -623,6 +636,9 @@ isert_connect_release(struct isert_conn *isert_conn)
 	isert_free_rx_descriptors(isert_conn);
 	rdma_destroy_id(isert_conn->conn_cm_id);
 
+	ib_dereg_mr(isert_conn->conn_mr);
+	ib_dealloc_pd(isert_conn->conn_pd);
+
 	if (isert_conn->login_buf) {
 		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
 				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
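Together with the isert_connect_request() hunk above, this completes moving the PD and DMA MR from the shared isert_device (the removed dev_pd/dev_mr) into each isert_conn: the resources are now allocated per connection at connect time and released here, MR before PD. A condensed sketch of the resulting pairing, with the error handling trimmed:

	/* isert_connect_request(): per-connection setup */
	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
					    IB_ACCESS_LOCAL_WRITE);

	/* isert_connect_release(): teardown in reverse order */
	ib_dereg_mr(isert_conn->conn_mr);
	ib_dealloc_pd(isert_conn->conn_pd);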
@@ -1024,13 +1040,13 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
 }
 
 static struct iscsi_cmd
-*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
+*isert_allocate_cmd(struct iscsi_conn *conn)
 {
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct isert_cmd *isert_cmd;
 	struct iscsi_cmd *cmd;
 
-	cmd = iscsit_allocate_cmd(conn, gfp);
+	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 	if (!cmd) {
 		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
 		return NULL;
@@ -1219,7 +1235,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 
 	switch (opcode) {
 	case ISCSI_OP_SCSI_CMD:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1233,7 +1249,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 					rx_desc, (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_NOOP_OUT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1246,7 +1262,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1254,7 +1270,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_LOGOUT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1265,7 +1281,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						    HZ);
 		break;
 	case ISCSI_OP_TEXT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1404,25 +1420,25 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 }
 
 static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	LIST_HEAD(unmap_list);
 
-	pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
+	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
 
 	if (wr->fr_desc) {
-		pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
+		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
 			 isert_cmd, wr->fr_desc);
 		spin_lock_bh(&isert_conn->conn_lock);
-		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
+		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
 		spin_unlock_bh(&isert_conn->conn_lock);
 		wr->fr_desc = NULL;
 	}
 
 	if (wr->sge) {
-		pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
+		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
 		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
 				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
 				DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -2163,26 +2179,22 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
 
 static int
 isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
-		  struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
-		  struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
+		  struct isert_conn *isert_conn, struct scatterlist *sg_start,
+		  struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
+		  unsigned int data_len)
 {
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
-	struct scatterlist *sg_start;
-	u32 sg_off, page_off;
 	struct ib_send_wr fr_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
+	int ret, pagelist_len;
+	u32 page_off;
 	u8 key;
-	int ret, sg_nents, pagelist_len;
 
-	sg_off = offset / PAGE_SIZE;
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
-			 ISCSI_ISER_SG_TABLESIZE);
+	sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
 	page_off = offset % PAGE_SIZE;
 
-	pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
-		 isert_cmd, fr_desc, sg_nents, sg_off, offset);
+	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
+		 fr_desc, sg_nents, offset);
 
 	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
 					     &fr_desc->data_frpl->page_list[0]);
@@ -2232,8 +2244,8 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 }
 
 static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr)
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
@@ -2251,9 +2263,9 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
 		data_left = se_cmd->data_length;
 	} else {
-		sg_off = cmd->write_data_done / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
 		offset = cmd->write_data_done;
+		sg_off = offset / PAGE_SIZE;
+		data_left = se_cmd->data_length - cmd->write_data_done;
 		isert_cmd->tx_desc.isert_cmd = isert_cmd;
 	}
 
@@ -2311,16 +2323,16 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		wr->fr_desc = NULL;
 	} else {
 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
-		fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
 					   struct fast_reg_descriptor, list);
 		list_del(&fr_desc->list);
 		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
 		wr->fr_desc = fr_desc;
 
-		ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
-					ib_sge, offset, data_len);
+		ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
+					ib_sge, sg_nents, offset, data_len);
 		if (ret) {
-			list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+			list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
 			goto unmap_sg;
 		}
 	}