path: root/drivers/infiniband
author      Linus Torvalds <torvalds@linux-foundation.org>    2014-04-12 19:51:08 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>    2014-04-12 19:51:08 -0400
commit      141eaccd018ef0476e94b180026d973db35460fd (patch)
tree        a1c8f5215bd4e5545dee6c56e8bb9b454c818b33 /drivers/infiniband
parent      93094449060ae00213ba30ad9eaa485b448fe94b (diff)
parent      b076808051f2c80d38e03fb2f1294f525c7a446d (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "Here are the target pending updates for v3.15-rc1.  Apologies in
  advance for waiting until the second to last day of the merge window
  to send these out.

  The highlights this round include:

   - iser-target support for T10 PI (DIF) offloads (Sagi + Or)
   - Fix Task Aborted Status (TAS) handling in target-core (Alex Leung)
   - Pass in transport supported PI at session initialization (Sagi + MKP + nab)
   - Add WRITE_INSERT + READ_STRIP T10 PI support in target-core (nab + Sagi)
   - Fix iscsi-target ERL=2 ASYNC_EVENT connection pointer bug (nab)
   - Fix tcm_fc use-after-free of ft_tpg (Andy Grover)
   - Use correct ib_sg_dma primitives in ib_isert (Mike Marciniszyn)

  Also, note the virtio-scsi + vhost-scsi changes to expose T10 PI
  metadata into the KVM guest have been left out for now, as there were
  a few comments from MST + Paolo that were not able to be addressed in
  time for v3.15.  Please expect this feature for v3.16-rc1"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (43 commits)
  ib_srpt: Use correct ib_sg_dma primitives
  target/tcm_fc: Rename ft_tport_create to ft_tport_get
  target/tcm_fc: Rename ft_{add,del}_lport to {add,del}_wwn
  target/tcm_fc: Rename structs and list members for clarity
  target/tcm_fc: Limit to 1 TPG per wwn
  target/tcm_fc: Don't export ft_lport_list
  target/tcm_fc: Fix use-after-free of ft_tpg
  target: Add check to prevent Abort Task from aborting itself
  target: Enable READ_STRIP emulation in target_complete_ok_work
  target/sbc: Add sbc_dif_read_strip software emulation
  target: Enable WRITE_INSERT emulation in target_execute_cmd
  target/sbc: Add sbc_dif_generate software emulation
  target/sbc: Only expose PI read_cap16 bits when supported by fabric
  target/spc: Only expose PI mode page bits when supported by fabric
  target/spc: Only expose PI inquiry bits when supported by fabric
  target: Pass in transport supported PI at session initialization
  target/iblock: Fix double bioset_integrity_free bug
  Target/sbc: Initialize COMPARE_AND_WRITE write_sg scatterlist
  target/rd: T10-Dif: RAM disk is allocating more space than required.
  iscsi-target: Fix ERL=2 ASYNC_EVENT connection pointer bug
  ...
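The session-initialization plumbing from that list is visible in the ib_srpt hunk below, where transport_init_session() now takes the fabric's supported protection operations. A minimal sketch of the contract as these diffs use it — example_make_session() is a hypothetical caller, not part of the pull:

/* Sketch: a fabric with no PI offload passes TARGET_PROT_NORMAL at
 * session init (as ib_srpt does below); target-core can still emulate
 * WRITE_INSERT/READ_STRIP in software via the sbc_dif_generate() and
 * sbc_dif_read_strip() helpers named in the commit list above. */
static struct se_session *example_make_session(void)
{
        return transport_init_session(TARGET_PROT_NORMAL);
}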
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c  828
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h   38
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c     27
3 files changed, 690 insertions(+), 203 deletions(-)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 8ee228e9ab5a..c98fdb185931 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -51,6 +51,8 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr);
+static int
+isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
@@ -87,7 +89,8 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
 }
 
 static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
+                    u8 protection)
 {
         struct isert_device *device = isert_conn->conn_device;
         struct ib_qp_init_attr attr;
@@ -119,6 +122,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
         attr.cap.max_recv_sge = 1;
         attr.sq_sig_type = IB_SIGNAL_REQ_WR;
         attr.qp_type = IB_QPT_RC;
+        if (protection)
+                attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
 
         pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
                  cma_id->device);
@@ -226,7 +231,8 @@ isert_create_device_ib_res(struct isert_device *device)
                 return ret;
 
         /* asign function handlers */
-        if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+        if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
+            dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
                 device->use_fastreg = 1;
                 device->reg_rdma_mem = isert_reg_rdma;
                 device->unreg_rdma_mem = isert_unreg_rdma;
@@ -236,13 +242,18 @@ isert_create_device_ib_res(struct isert_device *device)
                 device->unreg_rdma_mem = isert_unmap_cmd;
         }
 
+        /* Check signature cap */
+        device->pi_capable = dev_attr->device_cap_flags &
+                             IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+
         device->cqs_used = min_t(int, num_online_cpus(),
                                  device->ib_device->num_comp_vectors);
         device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
         pr_debug("Using %d CQs, device %s supports %d vectors support "
-                 "Fast registration %d\n",
+                 "Fast registration %d pi_capable %d\n",
                  device->cqs_used, device->ib_device->name,
-                 device->ib_device->num_comp_vectors, device->use_fastreg);
+                 device->ib_device->num_comp_vectors, device->use_fastreg,
+                 device->pi_capable);
         device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
                                   device->cqs_used, GFP_KERNEL);
         if (!device->cq_desc) {
@@ -395,6 +406,12 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
                 list_del(&fr_desc->list);
                 ib_free_fast_reg_page_list(fr_desc->data_frpl);
                 ib_dereg_mr(fr_desc->data_mr);
+                if (fr_desc->pi_ctx) {
+                        ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
+                        ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
+                        ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
+                        kfree(fr_desc->pi_ctx);
+                }
                 kfree(fr_desc);
                 ++i;
         }
@@ -406,8 +423,10 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 
 static int
 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
-                     struct fast_reg_descriptor *fr_desc)
+                     struct fast_reg_descriptor *fr_desc, u8 protection)
 {
+        int ret;
+
         fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
                                                          ISCSI_ISER_SG_TABLESIZE);
         if (IS_ERR(fr_desc->data_frpl)) {
@@ -420,27 +439,88 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
         if (IS_ERR(fr_desc->data_mr)) {
                 pr_err("Failed to allocate data frmr err=%ld\n",
                        PTR_ERR(fr_desc->data_mr));
-                ib_free_fast_reg_page_list(fr_desc->data_frpl);
-                return PTR_ERR(fr_desc->data_mr);
+                ret = PTR_ERR(fr_desc->data_mr);
+                goto err_data_frpl;
         }
         pr_debug("Create fr_desc %p page_list %p\n",
                  fr_desc, fr_desc->data_frpl->page_list);
+        fr_desc->ind |= ISERT_DATA_KEY_VALID;
+
+        if (protection) {
+                struct ib_mr_init_attr mr_init_attr = {0};
+                struct pi_context *pi_ctx;
+
+                fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
+                if (!fr_desc->pi_ctx) {
+                        pr_err("Failed to allocate pi context\n");
+                        ret = -ENOMEM;
+                        goto err_data_mr;
+                }
+                pi_ctx = fr_desc->pi_ctx;
+
+                pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
+                                                    ISCSI_ISER_SG_TABLESIZE);
+                if (IS_ERR(pi_ctx->prot_frpl)) {
+                        pr_err("Failed to allocate prot frpl err=%ld\n",
+                               PTR_ERR(pi_ctx->prot_frpl));
+                        ret = PTR_ERR(pi_ctx->prot_frpl);
+                        goto err_pi_ctx;
+                }
 
-        fr_desc->valid = true;
+                pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+                if (IS_ERR(pi_ctx->prot_mr)) {
+                        pr_err("Failed to allocate prot frmr err=%ld\n",
+                               PTR_ERR(pi_ctx->prot_mr));
+                        ret = PTR_ERR(pi_ctx->prot_mr);
+                        goto err_prot_frpl;
+                }
+                fr_desc->ind |= ISERT_PROT_KEY_VALID;
+
+                mr_init_attr.max_reg_descriptors = 2;
+                mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+                pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+                if (IS_ERR(pi_ctx->sig_mr)) {
+                        pr_err("Failed to allocate signature enabled mr err=%ld\n",
+                               PTR_ERR(pi_ctx->sig_mr));
+                        ret = PTR_ERR(pi_ctx->sig_mr);
+                        goto err_prot_mr;
+                }
+                fr_desc->ind |= ISERT_SIG_KEY_VALID;
+        }
+        fr_desc->ind &= ~ISERT_PROTECTED;
 
         return 0;
+err_prot_mr:
+        ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
+err_prot_frpl:
+        ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
+err_pi_ctx:
+        kfree(fr_desc->pi_ctx);
+err_data_mr:
+        ib_dereg_mr(fr_desc->data_mr);
+err_data_frpl:
+        ib_free_fast_reg_page_list(fr_desc->data_frpl);
+
+        return ret;
 }
 
 static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
 {
         struct fast_reg_descriptor *fr_desc;
         struct isert_device *device = isert_conn->conn_device;
-        int i, ret;
+        struct se_session *se_sess = isert_conn->conn->sess->se_sess;
+        struct se_node_acl *se_nacl = se_sess->se_node_acl;
+        int i, ret, tag_num;
+        /*
+         * Setup the number of FRMRs based upon the number of tags
+         * available to session in iscsi_target_locate_portal().
+         */
+        tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
+        tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
 
-        INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
         isert_conn->conn_fr_pool_size = 0;
-        for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
+        for (i = 0; i < tag_num; i++) {
                 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
                 if (!fr_desc) {
                         pr_err("Failed to allocate fast_reg descriptor\n");
@@ -449,7 +529,8 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
                 }
 
                 ret = isert_create_fr_desc(device->ib_device,
-                                           isert_conn->conn_pd, fr_desc);
+                                           isert_conn->conn_pd, fr_desc,
+                                           pi_support);
                 if (ret) {
                         pr_err("Failed to create fastreg descriptor err=%d\n",
                                ret);
@@ -480,6 +561,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
         struct isert_device *device;
         struct ib_device *ib_dev = cma_id->device;
         int ret = 0;
+        u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
 
         pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
                  cma_id, cma_id->context);
@@ -498,6 +580,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
         kref_get(&isert_conn->conn_kref);
         mutex_init(&isert_conn->conn_mutex);
         spin_lock_init(&isert_conn->conn_lock);
+        INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 
         cma_id->context = isert_conn;
         isert_conn->conn_cm_id = cma_id;
@@ -569,16 +652,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                 goto out_mr;
         }
 
-        if (device->use_fastreg) {
-                ret = isert_conn_create_fastreg_pool(isert_conn);
-                if (ret) {
-                        pr_err("Conn: %p failed to create fastreg pool\n",
-                               isert_conn);
-                        goto out_fastreg;
-                }
+        if (pi_support && !device->pi_capable) {
+                pr_err("Protection information requested but not supported\n");
+                ret = -EINVAL;
+                goto out_mr;
         }
 
-        ret = isert_conn_setup_qp(isert_conn, cma_id);
+        ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
         if (ret)
                 goto out_conn_dev;
 
@@ -591,9 +671,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
         return 0;
 
 out_conn_dev:
-        if (device->use_fastreg)
-                isert_conn_free_fastreg_pool(isert_conn);
-out_fastreg:
         ib_dereg_mr(isert_conn->conn_mr);
 out_mr:
         ib_dealloc_pd(isert_conn->conn_pd);
@@ -967,6 +1044,18 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
         }
         if (!login->login_failed) {
                 if (login->login_complete) {
+                        if (isert_conn->conn_device->use_fastreg) {
+                                u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
+
+                                ret = isert_conn_create_fastreg_pool(isert_conn,
+                                                                     pi_support);
+                                if (ret) {
+                                        pr_err("Conn: %p failed to create"
+                                               " fastreg pool\n", isert_conn);
+                                        return ret;
+                                }
+                        }
+
                         ret = isert_alloc_rx_descriptors(isert_conn);
                         if (ret)
                                 return ret;
@@ -1392,19 +1481,60 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
         }
 }
 
+static int
+isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+                   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
+                   enum iser_ib_op_code op, struct isert_data_buf *data)
+{
+        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+        data->dma_dir = op == ISER_IB_RDMA_WRITE ?
+                          DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+        data->len = length - offset;
+        data->offset = offset;
+        data->sg_off = data->offset / PAGE_SIZE;
+
+        data->sg = &sg[data->sg_off];
+        data->nents = min_t(unsigned int, nents - data->sg_off,
+                                          ISCSI_ISER_SG_TABLESIZE);
+        data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
+                                        PAGE_SIZE);
+
+        data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
+                                        data->dma_dir);
+        if (unlikely(!data->dma_nents)) {
+                pr_err("Cmd: unable to dma map SGs %p\n", sg);
+                return -EINVAL;
+        }
+
+        pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+                 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
+
+        return 0;
+}
+
+static void
+isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
+{
+        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+        ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
+        memset(data, 0, sizeof(*data));
+}
+
+
+
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 
         pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
-        if (wr->sge) {
+
+        if (wr->data.sg) {
                 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
-                ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-                                (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-                                DMA_TO_DEVICE : DMA_FROM_DEVICE);
-                wr->sge = NULL;
+                isert_unmap_data_buf(isert_conn, &wr->data);
         }
 
         if (wr->send_wr) {
@@ -1424,7 +1554,6 @@ static void
 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
         LIST_HEAD(unmap_list);
 
         pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
@@ -1432,18 +1561,19 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
         if (wr->fr_desc) {
                 pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
                          isert_cmd, wr->fr_desc);
+                if (wr->fr_desc->ind & ISERT_PROTECTED) {
+                        isert_unmap_data_buf(isert_conn, &wr->prot);
+                        wr->fr_desc->ind &= ~ISERT_PROTECTED;
+                }
                 spin_lock_bh(&isert_conn->conn_lock);
                 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
                 spin_unlock_bh(&isert_conn->conn_lock);
                 wr->fr_desc = NULL;
         }
 
-        if (wr->sge) {
+        if (wr->data.sg) {
                 pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
-                ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-                                (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-                                DMA_TO_DEVICE : DMA_FROM_DEVICE);
-                wr->sge = NULL;
+                isert_unmap_data_buf(isert_conn, &wr->data);
         }
 
         wr->ib_sge = NULL;
@@ -1451,7 +1581,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 }
 
 static void
-isert_put_cmd(struct isert_cmd *isert_cmd)
+isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 {
         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
         struct isert_conn *isert_conn = isert_cmd->conn;
@@ -1467,8 +1597,21 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
                 list_del_init(&cmd->i_conn_node);
         spin_unlock_bh(&conn->cmd_lock);
 
-        if (cmd->data_direction == DMA_TO_DEVICE)
+        if (cmd->data_direction == DMA_TO_DEVICE) {
                 iscsit_stop_dataout_timer(cmd);
+                /*
+                 * Check for special case during comp_err where
+                 * WRITE_PENDING has been handed off from core,
+                 * but requires an extra target_put_sess_cmd()
+                 * before transport_generic_free_cmd() below.
+                 */
+                if (comp_err &&
+                    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
+                        struct se_cmd *se_cmd = &cmd->se_cmd;
+
+                        target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+                }
+        }
 
         device->unreg_rdma_mem(isert_cmd, isert_conn);
         transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1523,7 +1666,7 @@ isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
 
 static void
 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
-                     struct ib_device *ib_dev)
+                     struct ib_device *ib_dev, bool comp_err)
 {
         if (isert_cmd->pdu_buf_dma != 0) {
                 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
@@ -1533,7 +1676,77 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
         }
 
         isert_unmap_tx_desc(tx_desc, ib_dev);
-        isert_put_cmd(isert_cmd);
+        isert_put_cmd(isert_cmd, comp_err);
+}
+
+static int
+isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
+{
+        struct ib_mr_status mr_status;
+        int ret;
+
+        ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+        if (ret) {
+                pr_err("ib_check_mr_status failed, ret %d\n", ret);
+                goto fail_mr_status;
+        }
+
+        if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+                u64 sec_offset_err;
+                u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
+
+                switch (mr_status.sig_err.err_type) {
+                case IB_SIG_BAD_GUARD:
+                        se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+                        break;
+                case IB_SIG_BAD_REFTAG:
+                        se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+                        break;
+                case IB_SIG_BAD_APPTAG:
+                        se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+                        break;
+                }
+                sec_offset_err = mr_status.sig_err.sig_err_offset;
+                do_div(sec_offset_err, block_size);
+                se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
+
+                pr_err("isert: PI error found type %d at sector 0x%llx "
+                       "expected 0x%x vs actual 0x%x\n",
+                       mr_status.sig_err.err_type,
+                       (unsigned long long)se_cmd->bad_sector,
+                       mr_status.sig_err.expected,
+                       mr_status.sig_err.actual);
+                ret = 1;
+        }
+
+fail_mr_status:
+        return ret;
+}
+
+static void
+isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
+                            struct isert_cmd *isert_cmd)
+{
+        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+        struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+        struct se_cmd *se_cmd = &cmd->se_cmd;
+        struct isert_conn *isert_conn = isert_cmd->conn;
+        struct isert_device *device = isert_conn->conn_device;
+        int ret = 0;
+
+        if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+                ret = isert_check_pi_status(se_cmd,
+                                            wr->fr_desc->pi_ctx->sig_mr);
+                wr->fr_desc->ind &= ~ISERT_PROTECTED;
+        }
+
+        device->unreg_rdma_mem(isert_cmd, isert_conn);
+        wr->send_wr_num = 0;
+        if (ret)
+                transport_send_check_condition_and_sense(se_cmd,
+                                                         se_cmd->pi_err, 0);
+        else
+                isert_put_response(isert_conn->conn, cmd);
 }
 
 static void
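The bad_sector arithmetic in isert_check_pi_status() above divides the byte offset reported by the HCA by the protected interval, i.e. the logical block plus its 8-byte DIF tuple. A worked example with assumed values — example_bad_sector() is illustrative only, not part of the patch:

/* Sketch: with 512-byte blocks the protected interval is 520 bytes, so
 * a reported sig_err_offset of 1040 bytes on a command starting at
 * LBA 100 points at sector 102.  do_div() divides in place. */
static u64 example_bad_sector(void)
{
        u64 sec_offset_err = 1040;      /* byte offset from mr_status */
        u32 block_size = 512 + 8;       /* block + DIF tuple */

        do_div(sec_offset_err, block_size);     /* 1040 / 520 == 2 */
        return sec_offset_err + 100;            /* t_task_lba == 100 */
}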
@@ -1545,10 +1758,17 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
         struct se_cmd *se_cmd = &cmd->se_cmd;
         struct isert_conn *isert_conn = isert_cmd->conn;
         struct isert_device *device = isert_conn->conn_device;
+        int ret = 0;
+
+        if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+                ret = isert_check_pi_status(se_cmd,
+                                            wr->fr_desc->pi_ctx->sig_mr);
+                wr->fr_desc->ind &= ~ISERT_PROTECTED;
+        }
 
         iscsit_stop_dataout_timer(cmd);
         device->unreg_rdma_mem(isert_cmd, isert_conn);
-        cmd->write_data_done = wr->cur_rdma_length;
+        cmd->write_data_done = wr->data.len;
         wr->send_wr_num = 0;
 
         pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
@@ -1557,7 +1777,11 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
         cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
         spin_unlock_bh(&cmd->istate_lock);
 
-        target_execute_cmd(se_cmd);
+        if (ret)
+                transport_send_check_condition_and_sense(se_cmd,
+                                                         se_cmd->pi_err, 0);
+        else
+                target_execute_cmd(se_cmd);
 }
 
 static void
@@ -1577,14 +1801,14 @@ isert_do_control_comp(struct work_struct *work)
                 iscsit_tmr_post_handler(cmd, cmd->conn);
 
                 cmd->i_state = ISTATE_SENT_STATUS;
-                isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+                isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
                 break;
         case ISTATE_SEND_REJECT:
                 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
                 atomic_dec(&isert_conn->post_send_buf_count);
 
                 cmd->i_state = ISTATE_SENT_STATUS;
-                isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+                isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
                 break;
         case ISTATE_SEND_LOGOUTRSP:
                 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
@@ -1598,7 +1822,7 @@ isert_do_control_comp(struct work_struct *work)
         case ISTATE_SEND_TEXTRSP:
                 atomic_dec(&isert_conn->post_send_buf_count);
                 cmd->i_state = ISTATE_SENT_STATUS;
-                isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+                isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
                 break;
         default:
                 pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
@@ -1626,10 +1850,21 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
                 queue_work(isert_comp_wq, &isert_cmd->comp_work);
                 return;
         }
-        atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+
+        /**
+         * If send_wr_num is 0 this means that we got
+         * RDMA completion and we cleared it and we should
+         * simply decrement the response post. else the
+         * response is incorporated in send_wr_num, just
+         * sub it.
+         **/
+        if (wr->send_wr_num)
+                atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+        else
+                atomic_dec(&isert_conn->post_send_buf_count);
 
         cmd->i_state = ISTATE_SENT_STATUS;
-        isert_completion_put(tx_desc, isert_cmd, ib_dev);
+        isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
 }
 
 static void
@@ -1658,8 +1893,9 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
                                          isert_conn, ib_dev);
                 break;
         case ISER_IB_RDMA_WRITE:
-                pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
-                dump_stack();
+                pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
+                atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+                isert_completion_rdma_write(tx_desc, isert_cmd);
                 break;
         case ISER_IB_RDMA_READ:
                 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
@@ -1709,8 +1945,20 @@ isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_de
                 llnode = llist_next(llnode);
                 wr = &t->isert_cmd->rdma_wr;
 
-                atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
-                isert_completion_put(t, t->isert_cmd, ib_dev);
+                /**
+                 * If send_wr_num is 0 this means that we got
+                 * RDMA completion and we cleared it and we should
+                 * simply decrement the response post. else the
+                 * response is incorporated in send_wr_num, just
+                 * sub it.
+                 **/
+                if (wr->send_wr_num)
+                        atomic_sub(wr->send_wr_num,
+                                   &isert_conn->post_send_buf_count);
+                else
+                        atomic_dec(&isert_conn->post_send_buf_count);
+
+                isert_completion_put(t, t->isert_cmd, ib_dev, true);
         }
 }
 
@@ -1728,15 +1976,27 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn
                 llnode = llist_next(llnode);
                 wr = &t->isert_cmd->rdma_wr;
 
-                atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
-                isert_completion_put(t, t->isert_cmd, ib_dev);
+                /**
+                 * If send_wr_num is 0 this means that we got
+                 * RDMA completion and we cleared it and we should
+                 * simply decrement the response post. else the
+                 * response is incorporated in send_wr_num, just
+                 * sub it.
+                 **/
+                if (wr->send_wr_num)
+                        atomic_sub(wr->send_wr_num,
+                                   &isert_conn->post_send_buf_count);
+                else
+                        atomic_dec(&isert_conn->post_send_buf_count);
+
+                isert_completion_put(t, t->isert_cmd, ib_dev, true);
         }
         tx_desc->comp_llnode_batch = NULL;
 
         if (!isert_cmd)
                 isert_unmap_tx_desc(tx_desc, ib_dev);
         else
-                isert_completion_put(tx_desc, isert_cmd, ib_dev);
+                isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
 }
 
 static void
@@ -1918,6 +2178,36 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
         return isert_post_response(isert_conn, isert_cmd);
 }
 
+static void
+isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+        struct isert_device *device = isert_conn->conn_device;
+
+        spin_lock_bh(&conn->cmd_lock);
+        if (!list_empty(&cmd->i_conn_node))
+                list_del_init(&cmd->i_conn_node);
+        spin_unlock_bh(&conn->cmd_lock);
+
+        if (cmd->data_direction == DMA_TO_DEVICE)
+                iscsit_stop_dataout_timer(cmd);
+
+        device->unreg_rdma_mem(isert_cmd, isert_conn);
+}
+
+static enum target_prot_op
+isert_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+        struct isert_device *device = isert_conn->conn_device;
+
+        if (device->pi_capable)
+                return TARGET_PROT_ALL;
+
+        return TARGET_PROT_NORMAL;
+}
+
 static int
 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
                 bool nopout_response)
@@ -2099,54 +2389,39 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         struct se_cmd *se_cmd = &cmd->se_cmd;
         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+        struct isert_data_buf *data = &wr->data;
         struct ib_send_wr *send_wr;
         struct ib_sge *ib_sge;
-        struct scatterlist *sg_start;
-        u32 sg_off = 0, sg_nents;
-        u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
-        int ret = 0, count, i, ib_sge_cnt;
+        u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
+        int ret = 0, i, ib_sge_cnt;
 
-        if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-                data_left = se_cmd->data_length;
-        } else {
-                sg_off = cmd->write_data_done / PAGE_SIZE;
-                data_left = se_cmd->data_length - cmd->write_data_done;
-                offset = cmd->write_data_done;
-                isert_cmd->tx_desc.isert_cmd = isert_cmd;
-        }
+        isert_cmd->tx_desc.isert_cmd = isert_cmd;
 
-        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-        sg_nents = se_cmd->t_data_nents - sg_off;
+        offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+        ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+                                 se_cmd->t_data_nents, se_cmd->data_length,
+                                 offset, wr->iser_ib_op, &wr->data);
+        if (ret)
+                return ret;
 
-        count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-                              (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-                              DMA_TO_DEVICE : DMA_FROM_DEVICE);
-        if (unlikely(!count)) {
-                pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-                return -EINVAL;
-        }
-        wr->sge = sg_start;
-        wr->num_sge = sg_nents;
-        wr->cur_rdma_length = data_left;
-        pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-                 isert_cmd, count, sg_start, sg_nents, data_left);
+        data_left = data->len;
+        offset = data->offset;
 
-        ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
+        ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
         if (!ib_sge) {
                 pr_warn("Unable to allocate ib_sge\n");
                 ret = -ENOMEM;
-                goto unmap_sg;
+                goto unmap_cmd;
         }
         wr->ib_sge = ib_sge;
 
-        wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
+        wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
         wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
                               GFP_KERNEL);
         if (!wr->send_wr) {
                 pr_debug("Unable to allocate wr->send_wr\n");
                 ret = -ENOMEM;
-                goto unmap_sg;
+                goto unmap_cmd;
         }
 
         wr->isert_cmd = isert_cmd;
@@ -2185,10 +2460,9 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         }
 
         return 0;
-unmap_sg:
-        ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-                        (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-                        DMA_TO_DEVICE : DMA_FROM_DEVICE);
+unmap_cmd:
+        isert_unmap_data_buf(isert_conn, data);
+
         return ret;
 }
 
@@ -2232,49 +2506,70 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
 }
 
 static int
-isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
-                  struct isert_conn *isert_conn, struct scatterlist *sg_start,
-                  struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
-                  unsigned int data_len)
+isert_fast_reg_mr(struct isert_conn *isert_conn,
+                  struct fast_reg_descriptor *fr_desc,
+                  struct isert_data_buf *mem,
+                  enum isert_indicator ind,
+                  struct ib_sge *sge)
 {
         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+        struct ib_mr *mr;
+        struct ib_fast_reg_page_list *frpl;
         struct ib_send_wr fr_wr, inv_wr;
         struct ib_send_wr *bad_wr, *wr = NULL;
         int ret, pagelist_len;
         u32 page_off;
         u8 key;
 
-        sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
-        page_off = offset % PAGE_SIZE;
+        if (mem->dma_nents == 1) {
+                sge->lkey = isert_conn->conn_mr->lkey;
+                sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
+                sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
+                pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
+                         __func__, __LINE__, sge->addr, sge->length,
+                         sge->lkey);
+                return 0;
+        }
+
+        if (ind == ISERT_DATA_KEY_VALID) {
+                /* Registering data buffer */
+                mr = fr_desc->data_mr;
+                frpl = fr_desc->data_frpl;
+        } else {
+                /* Registering protection buffer */
+                mr = fr_desc->pi_ctx->prot_mr;
+                frpl = fr_desc->pi_ctx->prot_frpl;
+        }
+
+        page_off = mem->offset % PAGE_SIZE;
 
         pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
-                 fr_desc, sg_nents, offset);
+                 fr_desc, mem->nents, mem->offset);
 
-        pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
-                                             &fr_desc->data_frpl->page_list[0]);
+        pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
+                                             &frpl->page_list[0]);
 
-        if (!fr_desc->valid) {
+        if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
                 memset(&inv_wr, 0, sizeof(inv_wr));
                 inv_wr.wr_id = ISER_FASTREG_LI_WRID;
                 inv_wr.opcode = IB_WR_LOCAL_INV;
-                inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
+                inv_wr.ex.invalidate_rkey = mr->rkey;
                 wr = &inv_wr;
                 /* Bump the key */
-                key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
-                ib_update_fast_reg_key(fr_desc->data_mr, ++key);
+                key = (u8)(mr->rkey & 0x000000FF);
+                ib_update_fast_reg_key(mr, ++key);
         }
 
         /* Prepare FASTREG WR */
         memset(&fr_wr, 0, sizeof(fr_wr));
         fr_wr.wr_id = ISER_FASTREG_LI_WRID;
         fr_wr.opcode = IB_WR_FAST_REG_MR;
-        fr_wr.wr.fast_reg.iova_start =
-                fr_desc->data_frpl->page_list[0] + page_off;
-        fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
+        fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
+        fr_wr.wr.fast_reg.page_list = frpl;
         fr_wr.wr.fast_reg.page_list_len = pagelist_len;
         fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
-        fr_wr.wr.fast_reg.length = data_len;
-        fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
+        fr_wr.wr.fast_reg.length = mem->len;
+        fr_wr.wr.fast_reg.rkey = mr->rkey;
         fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
 
         if (!wr)
@@ -2287,15 +2582,157 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
                 pr_err("fast registration failed, ret:%d\n", ret);
                 return ret;
         }
-        fr_desc->valid = false;
+        fr_desc->ind &= ~ind;
+
+        sge->lkey = mr->lkey;
+        sge->addr = frpl->page_list[0] + page_off;
+        sge->length = mem->len;
+
+        pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
+                 __func__, __LINE__, sge->addr, sge->length,
+                 sge->lkey);
+
+        return ret;
+}
+
+static inline enum ib_t10_dif_type
+se2ib_prot_type(enum target_prot_type prot_type)
+{
+        switch (prot_type) {
+        case TARGET_DIF_TYPE0_PROT:
+                return IB_T10DIF_NONE;
+        case TARGET_DIF_TYPE1_PROT:
+                return IB_T10DIF_TYPE1;
+        case TARGET_DIF_TYPE2_PROT:
+                return IB_T10DIF_TYPE2;
+        case TARGET_DIF_TYPE3_PROT:
+                return IB_T10DIF_TYPE3;
+        default:
+                return IB_T10DIF_NONE;
+        }
+}
 
-        ib_sge->lkey = fr_desc->data_mr->lkey;
-        ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
-        ib_sge->length = data_len;
+static int
+isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
+{
+        enum ib_t10_dif_type ib_prot_type = se2ib_prot_type(se_cmd->prot_type);
+
+        sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
+        sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
+        sig_attrs->mem.sig.dif.pi_interval =
+                                se_cmd->se_dev->dev_attrib.block_size;
+        sig_attrs->wire.sig.dif.pi_interval =
+                                se_cmd->se_dev->dev_attrib.block_size;
+
+        switch (se_cmd->prot_op) {
+        case TARGET_PROT_DIN_INSERT:
+        case TARGET_PROT_DOUT_STRIP:
+                sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
+                sig_attrs->wire.sig.dif.type = ib_prot_type;
+                sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+                sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+                break;
+        case TARGET_PROT_DOUT_INSERT:
+        case TARGET_PROT_DIN_STRIP:
+                sig_attrs->mem.sig.dif.type = ib_prot_type;
+                sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+                sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+                sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
+                break;
+        case TARGET_PROT_DIN_PASS:
+        case TARGET_PROT_DOUT_PASS:
+                sig_attrs->mem.sig.dif.type = ib_prot_type;
+                sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+                sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+                sig_attrs->wire.sig.dif.type = ib_prot_type;
+                sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+                sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+                break;
+        default:
+                pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
+                return -EINVAL;
+        }
 
-        pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
-                 ib_sge->addr, ib_sge->length, ib_sge->lkey);
+        return 0;
+}
+
+static inline u8
+isert_set_prot_checks(u8 prot_checks)
+{
+        return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
+               (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
+               (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
+}
+
+static int
+isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
+                 struct fast_reg_descriptor *fr_desc,
+                 struct ib_sge *data_sge, struct ib_sge *prot_sge,
+                 struct ib_sge *sig_sge)
+{
+        struct ib_send_wr sig_wr, inv_wr;
+        struct ib_send_wr *bad_wr, *wr = NULL;
+        struct pi_context *pi_ctx = fr_desc->pi_ctx;
+        struct ib_sig_attrs sig_attrs;
+        int ret;
+        u32 key;
+
+        memset(&sig_attrs, 0, sizeof(sig_attrs));
+        ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
+        if (ret)
+                goto err;
+
+        sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
+
+        if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
+                memset(&inv_wr, 0, sizeof(inv_wr));
+                inv_wr.opcode = IB_WR_LOCAL_INV;
+                inv_wr.wr_id = ISER_FASTREG_LI_WRID;
+                inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+                wr = &inv_wr;
+                /* Bump the key */
+                key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
+                ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
+        }
+
+        memset(&sig_wr, 0, sizeof(sig_wr));
+        sig_wr.opcode = IB_WR_REG_SIG_MR;
+        sig_wr.wr_id = ISER_FASTREG_LI_WRID;
+        sig_wr.sg_list = data_sge;
+        sig_wr.num_sge = 1;
+        sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
+        sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
+        sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
+        if (se_cmd->t_prot_sg)
+                sig_wr.wr.sig_handover.prot = prot_sge;
+
+        if (!wr)
+                wr = &sig_wr;
+        else
+                wr->next = &sig_wr;
+
+        ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+        if (ret) {
+                pr_err("fast registration failed, ret:%d\n", ret);
+                goto err;
+        }
+        fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
+
+        sig_sge->lkey = pi_ctx->sig_mr->lkey;
+        sig_sge->addr = 0;
+        sig_sge->length = se_cmd->data_length;
+        if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
+            se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
+                /*
+                 * We have protection guards on the wire
+                 * so we need to set a larget transfer
+                 */
+                sig_sge->length += se_cmd->prot_length;
 
+        pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
+                 sig_sge->addr, sig_sge->length,
+                 sig_sge->lkey);
+err:
         return ret;
 }
 
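The switch in isert_set_sig_attrs() above programs the two signature domains: mem describes protection information in local buffers, wire describes what travels on the network. A summary of the mapping, assuming DIF TYPE1:

/*
 * Summary of the isert_set_sig_attrs() cases above (TYPE1 assumed):
 *
 *   prot_op                    mem domain         wire domain
 *   DIN_INSERT / DOUT_STRIP    IB_T10DIF_NONE     TYPE1 + CRC guard
 *   DOUT_INSERT / DIN_STRIP    TYPE1 + CRC guard  IB_T10DIF_NONE
 *   DIN_PASS / DOUT_PASS       TYPE1 + CRC guard  TYPE1 + CRC guard
 *
 * "insert"/"strip" make the HCA generate or validate-and-drop PI on
 * one side only; "pass" carries PI end to end, which is also why
 * isert_reg_sig_mr() grows sig_sge->length by prot_length except for
 * DIN_STRIP and DOUT_INSERT.
 */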
@@ -2305,62 +2742,82 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 {
         struct se_cmd *se_cmd = &cmd->se_cmd;
         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
-        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+        struct isert_conn *isert_conn = conn->context;
+        struct ib_sge data_sge;
         struct ib_send_wr *send_wr;
-        struct ib_sge *ib_sge;
-        struct scatterlist *sg_start;
-        struct fast_reg_descriptor *fr_desc;
-        u32 sg_off = 0, sg_nents;
-        u32 offset = 0, data_len, data_left, rdma_write_max;
-        int ret = 0, count;
+        struct fast_reg_descriptor *fr_desc = NULL;
+        u32 offset;
+        int ret = 0;
         unsigned long flags;
 
-        if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-                data_left = se_cmd->data_length;
-        } else {
-                offset = cmd->write_data_done;
-                sg_off = offset / PAGE_SIZE;
-                data_left = se_cmd->data_length - cmd->write_data_done;
-                isert_cmd->tx_desc.isert_cmd = isert_cmd;
-        }
+        isert_cmd->tx_desc.isert_cmd = isert_cmd;
 
-        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-        sg_nents = se_cmd->t_data_nents - sg_off;
+        offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+        ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+                                 se_cmd->t_data_nents, se_cmd->data_length,
+                                 offset, wr->iser_ib_op, &wr->data);
+        if (ret)
+                return ret;
 
-        count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-                              (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-                              DMA_TO_DEVICE : DMA_FROM_DEVICE);
-        if (unlikely(!count)) {
-                pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-                return -EINVAL;
+        if (wr->data.dma_nents != 1 ||
+            se_cmd->prot_op != TARGET_PROT_NORMAL) {
+                spin_lock_irqsave(&isert_conn->conn_lock, flags);
+                fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
+                                           struct fast_reg_descriptor, list);
+                list_del(&fr_desc->list);
+                spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+                wr->fr_desc = fr_desc;
         }
-        wr->sge = sg_start;
-        wr->num_sge = sg_nents;
-        pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-                 isert_cmd, count, sg_start, sg_nents, data_left);
 
-        memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
-        ib_sge = &wr->s_ib_sge;
-        wr->ib_sge = ib_sge;
+        ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
+                                ISERT_DATA_KEY_VALID, &data_sge);
+        if (ret)
+                goto unmap_cmd;
+
+        if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
+                struct ib_sge prot_sge, sig_sge;
+
+                if (se_cmd->t_prot_sg) {
+                        ret = isert_map_data_buf(isert_conn, isert_cmd,
+                                                 se_cmd->t_prot_sg,
+                                                 se_cmd->t_prot_nents,
+                                                 se_cmd->prot_length,
+                                                 0, wr->iser_ib_op, &wr->prot);
+                        if (ret)
+                                goto unmap_cmd;
+
+                        ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
+                                                ISERT_PROT_KEY_VALID, &prot_sge);
+                        if (ret)
+                                goto unmap_prot_cmd;
+                }
+
+                ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
+                                       &data_sge, &prot_sge, &sig_sge);
+                if (ret)
+                        goto unmap_prot_cmd;
 
+                fr_desc->ind |= ISERT_PROTECTED;
+                memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
+        } else
+                memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
+
+        wr->ib_sge = &wr->s_ib_sge;
         wr->send_wr_num = 1;
         memset(&wr->s_send_wr, 0, sizeof(*send_wr));
         wr->send_wr = &wr->s_send_wr;
-
         wr->isert_cmd = isert_cmd;
-        rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
 
         send_wr = &isert_cmd->rdma_wr.s_send_wr;
-        send_wr->sg_list = ib_sge;
+        send_wr->sg_list = &wr->s_ib_sge;
         send_wr->num_sge = 1;
         send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
                 send_wr->opcode = IB_WR_RDMA_WRITE;
                 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
                 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
-                send_wr->send_flags = 0;
-                send_wr->next = &isert_cmd->tx_desc.send_wr;
+                send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
+                                      0 : IB_SEND_SIGNALED;
         } else {
                 send_wr->opcode = IB_WR_RDMA_READ;
                 send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
@@ -2368,37 +2825,18 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                 send_wr->send_flags = IB_SEND_SIGNALED;
         }
 
-        data_len = min(data_left, rdma_write_max);
-        wr->cur_rdma_length = data_len;
-
-        /* if there is a single dma entry, dma mr is sufficient */
-        if (count == 1) {
-                ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
-                ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
-                ib_sge->lkey = isert_conn->conn_mr->lkey;
-                wr->fr_desc = NULL;
-        } else {
+        return 0;
+unmap_prot_cmd:
+        if (se_cmd->t_prot_sg)
+                isert_unmap_data_buf(isert_conn, &wr->prot);
+unmap_cmd:
+        if (fr_desc) {
                 spin_lock_irqsave(&isert_conn->conn_lock, flags);
-                fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
-                                           struct fast_reg_descriptor, list);
-                list_del(&fr_desc->list);
+                list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
                 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
-                wr->fr_desc = fr_desc;
-
-                ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
-                                        ib_sge, sg_nents, offset, data_len);
-                if (ret) {
-                        list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
-                        goto unmap_sg;
-                }
         }
+        isert_unmap_data_buf(isert_conn, &wr->data);
 
-        return 0;
-
-unmap_sg:
-        ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-                        (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-                        DMA_TO_DEVICE : DMA_FROM_DEVICE);
         return ret;
 }
 
@@ -2422,25 +2860,35 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
                 return rc;
         }
 
-        /*
-         * Build isert_conn->tx_desc for iSCSI response PDU and attach
-         */
-        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
-        iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
-                             &isert_cmd->tx_desc.iscsi_header);
-        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-        isert_init_send_wr(isert_conn, isert_cmd,
-                           &isert_cmd->tx_desc.send_wr, true);
+        if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
+                /*
+                 * Build isert_conn->tx_desc for iSCSI response PDU and attach
+                 */
+                isert_create_send_desc(isert_conn, isert_cmd,
+                                       &isert_cmd->tx_desc);
+                iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
+                                     &isert_cmd->tx_desc.iscsi_header);
+                isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+                isert_init_send_wr(isert_conn, isert_cmd,
+                                   &isert_cmd->tx_desc.send_wr, true);
+                isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
+                wr->send_wr_num += 1;
+        }
 
-        atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+        atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
 
         rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
         if (rc) {
                 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-                atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+                atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
         }
-        pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
-                 isert_cmd);
+
+        if (se_cmd->prot_op == TARGET_PROT_NORMAL)
+                pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
+                         "READ\n", isert_cmd);
+        else
+                pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
+                         isert_cmd);
 
         return 1;
 }
@@ -2815,6 +3263,8 @@ static struct iscsit_transport iser_target_transport = {
         .iscsit_get_dataout     = isert_get_dataout,
         .iscsit_queue_data_in   = isert_put_datain,
         .iscsit_queue_status    = isert_put_response,
+        .iscsit_aborted_task    = isert_aborted_task,
+        .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
 };
 
 static int __init isert_init(void)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index f6ae7f5dd408..4c072ae34c01 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -50,11 +50,35 @@ struct iser_tx_desc {
         struct ib_send_wr send_wr;
 } __packed;
 
+enum isert_indicator {
+        ISERT_PROTECTED         = 1 << 0,
+        ISERT_DATA_KEY_VALID    = 1 << 1,
+        ISERT_PROT_KEY_VALID    = 1 << 2,
+        ISERT_SIG_KEY_VALID     = 1 << 3,
+};
+
+struct pi_context {
+        struct ib_mr                   *prot_mr;
+        struct ib_fast_reg_page_list   *prot_frpl;
+        struct ib_mr                   *sig_mr;
+};
+
 struct fast_reg_descriptor {
         struct list_head                list;
         struct ib_mr                   *data_mr;
         struct ib_fast_reg_page_list   *data_frpl;
-        bool                            valid;
+        u8                              ind;
+        struct pi_context              *pi_ctx;
+};
+
+struct isert_data_buf {
+        struct scatterlist     *sg;
+        int                     nents;
+        u32                     sg_off;
+        u32                     len; /* cur_rdma_length */
+        u32                     offset;
+        unsigned int            dma_nents;
+        enum dma_data_direction dma_dir;
 };
 
 struct isert_rdma_wr {
@@ -63,12 +87,11 @@ struct isert_rdma_wr {
         enum iser_ib_op_code    iser_ib_op;
         struct ib_sge          *ib_sge;
         struct ib_sge           s_ib_sge;
-        int                     num_sge;
-        struct scatterlist     *sge;
         int                     send_wr_num;
         struct ib_send_wr      *send_wr;
         struct ib_send_wr       s_send_wr;
-        u32                     cur_rdma_length;
+        struct isert_data_buf   data;
+        struct isert_data_buf   prot;
         struct fast_reg_descriptor *fr_desc;
 };
 
@@ -141,6 +164,7 @@ struct isert_cq_desc {
 
 struct isert_device {
         int                     use_fastreg;
+        bool                    pi_capable;
         int                     cqs_used;
         int                     refcount;
         int                     cq_active_qps[ISERT_MAX_CQ];
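The bool valid flag becomes the u8 ind bitmask above because one descriptor now tracks three separately registered keys (data, protection, signature) plus an in-flight PI transaction. A small sketch of the intended lifecycle — example_key_needs_inv() is hypothetical, not part of the header:

/* Sketch: a *_KEY_VALID bit means the MR key is fresh and no
 * IB_WR_LOCAL_INV is needed before the next fast registration; the
 * registration paths clear the bit once the key is consumed, and
 * ISERT_PROTECTED marks a PI transaction in flight until the RDMA
 * completion has checked the signature MR status. */
static bool example_key_needs_inv(struct fast_reg_descriptor *fr_desc,
                                  enum isert_indicator ind)
{
        return !(fr_desc->ind & ind);
}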
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0e537d8d0e47..fe09f2788b15 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1078,6 +1078,7 @@ static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
                                  struct srpt_send_ioctx *ioctx)
 {
+        struct ib_device *dev = ch->sport->sdev->device;
         struct se_cmd *cmd;
         struct scatterlist *sg, *sg_orig;
         int sg_cnt;
@@ -1124,7 +1125,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 
         db = ioctx->rbufs;
         tsize = cmd->data_length;
-        dma_len = sg_dma_len(&sg[0]);
+        dma_len = ib_sg_dma_len(dev, &sg[0]);
         riu = ioctx->rdma_ius;
 
         /*
@@ -1155,7 +1156,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
                                 ++j;
                                 if (j < count) {
                                         sg = sg_next(sg);
-                                        dma_len = sg_dma_len(sg);
+                                        dma_len = ib_sg_dma_len(
+                                                        dev, sg);
                                 }
                         }
                 } else {
@@ -1192,8 +1194,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
         tsize = cmd->data_length;
         riu = ioctx->rdma_ius;
         sg = sg_orig;
-        dma_len = sg_dma_len(&sg[0]);
-        dma_addr = sg_dma_address(&sg[0]);
+        dma_len = ib_sg_dma_len(dev, &sg[0]);
+        dma_addr = ib_sg_dma_address(dev, &sg[0]);
 
         /* this second loop is really mapped sg_addres to rdma_iu->ib_sge */
         for (i = 0, j = 0;
@@ -1216,8 +1218,10 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
                                 ++j;
                                 if (j < count) {
                                         sg = sg_next(sg);
-                                        dma_len = sg_dma_len(sg);
-                                        dma_addr = sg_dma_address(sg);
+                                        dma_len = ib_sg_dma_len(
+                                                        dev, sg);
+                                        dma_addr = ib_sg_dma_address(
+                                                        dev, sg);
                                 }
                         }
                 } else {
@@ -2580,7 +2584,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
                 goto destroy_ib;
         }
 
-        ch->sess = transport_init_session();
+        ch->sess = transport_init_session(TARGET_PROT_NORMAL);
         if (IS_ERR(ch->sess)) {
                 rej->reason = __constant_cpu_to_be32(
                                 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
@@ -3081,6 +3085,14 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
         srpt_queue_response(cmd);
 }
 
+static void srpt_aborted_task(struct se_cmd *cmd)
+{
+        struct srpt_send_ioctx *ioctx = container_of(cmd,
+                                struct srpt_send_ioctx, cmd);
+
+        srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+}
+
 static int srpt_queue_status(struct se_cmd *cmd)
 {
         struct srpt_send_ioctx *ioctx;
@@ -3928,6 +3940,7 @@ static struct target_core_fabric_ops srpt_template = {
         .queue_data_in                  = srpt_queue_data_in,
         .queue_status                   = srpt_queue_status,
         .queue_tm_rsp                   = srpt_queue_tm_rsp,
+        .aborted_task                   = srpt_aborted_task,
         /*
          * Setup function pointers for generic logic in
          * target_core_fabric_configfs.c
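The ib_srpt conversion above swaps the generic sg_dma_len()/sg_dma_address() accessors for the ib_sg_dma_*() wrappers, which let an IB device with its own DMA mapping (e.g. ipath/qib) report the addresses the HCA actually uses. A minimal sketch of the pattern — example_walk_sg() is illustrative only:

/* Sketch: walking a DMA-mapped scatterlist with the IB accessors, as
 * the srpt hunks above now do.  The wrappers defer to the device's
 * dma_ops when present and fall back to sg_dma_*() otherwise. */
static void example_walk_sg(struct ib_device *dev, struct scatterlist *sg,
                            int count)
{
        int i;

        for (i = 0; i < count; i++, sg = sg_next(sg))
                pr_debug("sge %d: addr 0x%llx len %u\n", i,
                         (unsigned long long)ib_sg_dma_address(dev, sg),
                         ib_sg_dma_len(dev, sg));
}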