author		Linus Torvalds <torvalds@linux-foundation.org>		2014-04-12 19:51:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>		2014-04-12 19:51:08 -0400
commit		141eaccd018ef0476e94b180026d973db35460fd (patch)
tree		a1c8f5215bd4e5545dee6c56e8bb9b454c818b33 /drivers
parent		93094449060ae00213ba30ad9eaa485b448fe94b (diff)
parent		b076808051f2c80d38e03fb2f1294f525c7a446d (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "Here are the target pending updates for v3.15-rc1. Apologies in
  advance for waiting until the second to last day of the merge window
  to send these out.

  The highlights this round include:

   - iser-target support for T10 PI (DIF) offloads (Sagi + Or)
   - Fix Task Aborted Status (TAS) handling in target-core (Alex Leung)
   - Pass in transport supported PI at session initialization (Sagi + MKP + nab)
   - Add WRITE_INSERT + READ_STRIP T10 PI support in target-core (nab + Sagi)
   - Fix iscsi-target ERL=2 ASYNC_EVENT connection pointer bug (nab)
   - Fix tcm_fc use-after-free of ft_tpg (Andy Grover)
   - Use correct ib_sg_dma primitives in ib_isert (Mike Marciniszyn)

  Also, note the virtio-scsi + vhost-scsi changes to expose T10 PI
  metadata into a KVM guest have been left out for now, as there were a
  few comments from MST + Paolo that were not able to be addressed in
  time for v3.15. Please expect this feature for v3.16-rc1."

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (43 commits)
  ib_srpt: Use correct ib_sg_dma primitives
  target/tcm_fc: Rename ft_tport_create to ft_tport_get
  target/tcm_fc: Rename ft_{add,del}_lport to {add,del}_wwn
  target/tcm_fc: Rename structs and list members for clarity
  target/tcm_fc: Limit to 1 TPG per wwn
  target/tcm_fc: Don't export ft_lport_list
  target/tcm_fc: Fix use-after-free of ft_tpg
  target: Add check to prevent Abort Task from aborting itself
  target: Enable READ_STRIP emulation in target_complete_ok_work
  target/sbc: Add sbc_dif_read_strip software emulation
  target: Enable WRITE_INSERT emulation in target_execute_cmd
  target/sbc: Add sbc_dif_generate software emulation
  target/sbc: Only expose PI read_cap16 bits when supported by fabric
  target/spc: Only expose PI mode page bits when supported by fabric
  target/spc: Only expose PI inquiry bits when supported by fabric
  target: Pass in transport supported PI at session initialization
  target/iblock: Fix double bioset_integrity_free bug
  Target/sbc: Initialize COMPARE_AND_WRITE write_sg scatterlist
  target/rd: T10-Dif: RAM disk is allocating more space than required.
  iscsi-target: Fix ERL=2 ASYNC_EVENT connection pointer bug
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 828
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 38
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 27
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 18
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 33
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 14
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 5
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 21
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.h | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h | 1
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 12
-rw-r--r--  drivers/target/sbp/sbp_target.c | 8
-rw-r--r--  drivers/target/target_core_alua.c | 95
-rw-r--r--  drivers/target/target_core_configfs.c | 4
-rw-r--r--  drivers/target/target_core_file.c | 40
-rw-r--r--  drivers/target/target_core_iblock.c | 5
-rw-r--r--  drivers/target/target_core_rd.c | 14
-rw-r--r--  drivers/target/target_core_sbc.c | 178
-rw-r--r--  drivers/target/target_core_spc.c | 49
-rw-r--r--  drivers/target/target_core_tmr.c | 23
-rw-r--r--  drivers/target/target_core_transport.c | 92
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h | 13
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 5
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 76
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 10
-rw-r--r--  drivers/usb/gadget/tcm_usb_gadget.c | 8
-rw-r--r--  drivers/vhost/scsi.c | 9
29 files changed, 1213 insertions(+), 422 deletions(-)
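The centerpiece of this series is T10 Protection Information (T10 PI, also called DIF): each logical block is accompanied by an 8-byte tuple of guard, application and reference tags, which iser-target can now hand to a signature-capable HCA for offload and which target-core can otherwise generate (WRITE_INSERT) and verify/strip (READ_STRIP) in software. The following stand-alone sketch shows what that tuple carries and how a Type 1 guard tag is computed; the struct layout, helper name and bitwise CRC are illustrative stand-ins for the kernel's own tuple definition and crc_t10dif() helper, not code from this merge (endianness handling is also omitted):

/* Hedged sketch: per-block T10-DIF tuple and guard-tag CRC16 (poly 0x8BB7). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct t10_pi_tuple_sketch {		/* appended after each logical block */
	uint16_t guard_tag;		/* CRC16 of the block data */
	uint16_t app_tag;		/* application tag, often 0 */
	uint32_t ref_tag;		/* low 32 bits of the LBA (Type 1) */
};

static uint16_t crc_t10dif_sketch(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;

	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8BB7)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	uint8_t block[512];
	struct t10_pi_tuple_sketch pi;
	uint64_t lba = 16;		/* arbitrary example LBA */

	memset(block, 0xab, sizeof(block));
	pi.guard_tag = crc_t10dif_sketch(block, sizeof(block));
	pi.app_tag = 0;
	pi.ref_tag = (uint32_t)lba;	/* Type 1 reference tag */
	printf("guard 0x%04x ref 0x%08x\n", pi.guard_tag, pi.ref_tag);
	return 0;
}

A verify pass (READ_STRIP in software, or the HCA's signature handover) recomputes the guard over the data and checks the ref tag against the expected LBA; the isert_check_pi_status() helper added in the diff below maps exactly those failure classes onto the TCM_LOGICAL_BLOCK_*_CHECK_FAILED sense codes.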
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 8ee228e9ab5a..c98fdb185931 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -51,6 +51,8 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
+static int
+isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
@@ -87,7 +89,8 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
 }
 
 static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
+		    u8 protection)
 {
 	struct isert_device *device = isert_conn->conn_device;
 	struct ib_qp_init_attr attr;
@@ -119,6 +122,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 	attr.cap.max_recv_sge = 1;
 	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	attr.qp_type = IB_QPT_RC;
+	if (protection)
+		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
 
 	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
 		 cma_id->device);
@@ -226,7 +231,8 @@ isert_create_device_ib_res(struct isert_device *device)
 		return ret;
 
 	/* asign function handlers */
-	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
+	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
 		device->use_fastreg = 1;
 		device->reg_rdma_mem = isert_reg_rdma;
 		device->unreg_rdma_mem = isert_unreg_rdma;
@@ -236,13 +242,18 @@ isert_create_device_ib_res(struct isert_device *device)
 		device->unreg_rdma_mem = isert_unmap_cmd;
 	}
 
+	/* Check signature cap */
+	device->pi_capable = dev_attr->device_cap_flags &
+			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+
 	device->cqs_used = min_t(int, num_online_cpus(),
 				 device->ib_device->num_comp_vectors);
 	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
 	pr_debug("Using %d CQs, device %s supports %d vectors support "
-		 "Fast registration %d\n",
+		 "Fast registration %d pi_capable %d\n",
 		 device->cqs_used, device->ib_device->name,
-		 device->ib_device->num_comp_vectors, device->use_fastreg);
+		 device->ib_device->num_comp_vectors, device->use_fastreg,
+		 device->pi_capable);
 	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
 				  device->cqs_used, GFP_KERNEL);
 	if (!device->cq_desc) {
@@ -395,6 +406,12 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 		list_del(&fr_desc->list);
 		ib_free_fast_reg_page_list(fr_desc->data_frpl);
 		ib_dereg_mr(fr_desc->data_mr);
+		if (fr_desc->pi_ctx) {
+			ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
+			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
+			ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
+			kfree(fr_desc->pi_ctx);
+		}
 		kfree(fr_desc);
 		++i;
 	}
@@ -406,8 +423,10 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 
 static int
 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
-		     struct fast_reg_descriptor *fr_desc)
+		     struct fast_reg_descriptor *fr_desc, u8 protection)
 {
+	int ret;
+
 	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
 							 ISCSI_ISER_SG_TABLESIZE);
 	if (IS_ERR(fr_desc->data_frpl)) {
@@ -420,27 +439,88 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
 	if (IS_ERR(fr_desc->data_mr)) {
 		pr_err("Failed to allocate data frmr err=%ld\n",
 		       PTR_ERR(fr_desc->data_mr));
-		ib_free_fast_reg_page_list(fr_desc->data_frpl);
-		return PTR_ERR(fr_desc->data_mr);
+		ret = PTR_ERR(fr_desc->data_mr);
+		goto err_data_frpl;
 	}
 	pr_debug("Create fr_desc %p page_list %p\n",
 		 fr_desc, fr_desc->data_frpl->page_list);
+	fr_desc->ind |= ISERT_DATA_KEY_VALID;
+
+	if (protection) {
+		struct ib_mr_init_attr mr_init_attr = {0};
+		struct pi_context *pi_ctx;
+
+		fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
+		if (!fr_desc->pi_ctx) {
+			pr_err("Failed to allocate pi context\n");
+			ret = -ENOMEM;
+			goto err_data_mr;
+		}
+		pi_ctx = fr_desc->pi_ctx;
+
+		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
+						    ISCSI_ISER_SG_TABLESIZE);
+		if (IS_ERR(pi_ctx->prot_frpl)) {
+			pr_err("Failed to allocate prot frpl err=%ld\n",
+			       PTR_ERR(pi_ctx->prot_frpl));
+			ret = PTR_ERR(pi_ctx->prot_frpl);
+			goto err_pi_ctx;
+		}
 
-	fr_desc->valid = true;
+		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+		if (IS_ERR(pi_ctx->prot_mr)) {
+			pr_err("Failed to allocate prot frmr err=%ld\n",
+			       PTR_ERR(pi_ctx->prot_mr));
+			ret = PTR_ERR(pi_ctx->prot_mr);
+			goto err_prot_frpl;
+		}
+		fr_desc->ind |= ISERT_PROT_KEY_VALID;
+
+		mr_init_attr.max_reg_descriptors = 2;
+		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+		if (IS_ERR(pi_ctx->sig_mr)) {
+			pr_err("Failed to allocate signature enabled mr err=%ld\n",
+			       PTR_ERR(pi_ctx->sig_mr));
+			ret = PTR_ERR(pi_ctx->sig_mr);
+			goto err_prot_mr;
+		}
+		fr_desc->ind |= ISERT_SIG_KEY_VALID;
+	}
+	fr_desc->ind &= ~ISERT_PROTECTED;
 
 	return 0;
+err_prot_mr:
+	ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
+err_prot_frpl:
+	ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
+err_pi_ctx:
+	kfree(fr_desc->pi_ctx);
+err_data_mr:
+	ib_dereg_mr(fr_desc->data_mr);
+err_data_frpl:
+	ib_free_fast_reg_page_list(fr_desc->data_frpl);
+
+	return ret;
 }
 
 static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
 {
 	struct fast_reg_descriptor *fr_desc;
 	struct isert_device *device = isert_conn->conn_device;
-	int i, ret;
+	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
+	struct se_node_acl *se_nacl = se_sess->se_node_acl;
+	int i, ret, tag_num;
+	/*
+	 * Setup the number of FRMRs based upon the number of tags
+	 * available to session in iscsi_target_locate_portal().
+	 */
+	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
+	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
 
-	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 	isert_conn->conn_fr_pool_size = 0;
-	for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
+	for (i = 0; i < tag_num; i++) {
 		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
 		if (!fr_desc) {
 			pr_err("Failed to allocate fast_reg descriptor\n");
@@ -449,7 +529,8 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
 		}
 
 		ret = isert_create_fr_desc(device->ib_device,
-					   isert_conn->conn_pd, fr_desc);
+					   isert_conn->conn_pd, fr_desc,
+					   pi_support);
 		if (ret) {
 			pr_err("Failed to create fastreg descriptor err=%d\n",
 			       ret);
@@ -480,6 +561,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	struct isert_device *device;
 	struct ib_device *ib_dev = cma_id->device;
 	int ret = 0;
+	u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
 
 	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
 		 cma_id, cma_id->context);
@@ -498,6 +580,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	kref_get(&isert_conn->conn_kref);
 	mutex_init(&isert_conn->conn_mutex);
 	spin_lock_init(&isert_conn->conn_lock);
+	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 
 	cma_id->context = isert_conn;
 	isert_conn->conn_cm_id = cma_id;
@@ -569,16 +652,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 		goto out_mr;
 	}
 
-	if (device->use_fastreg) {
-		ret = isert_conn_create_fastreg_pool(isert_conn);
-		if (ret) {
-			pr_err("Conn: %p failed to create fastreg pool\n",
-			       isert_conn);
-			goto out_fastreg;
-		}
+	if (pi_support && !device->pi_capable) {
+		pr_err("Protection information requested but not supported\n");
+		ret = -EINVAL;
+		goto out_mr;
 	}
 
-	ret = isert_conn_setup_qp(isert_conn, cma_id);
+	ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
 	if (ret)
 		goto out_conn_dev;
 
@@ -591,9 +671,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	return 0;
 
 out_conn_dev:
-	if (device->use_fastreg)
-		isert_conn_free_fastreg_pool(isert_conn);
-out_fastreg:
 	ib_dereg_mr(isert_conn->conn_mr);
 out_mr:
 	ib_dealloc_pd(isert_conn->conn_pd);
@@ -967,6 +1044,18 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 	}
 	if (!login->login_failed) {
 		if (login->login_complete) {
+			if (isert_conn->conn_device->use_fastreg) {
+				u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
+
+				ret = isert_conn_create_fastreg_pool(isert_conn,
+								     pi_support);
+				if (ret) {
+					pr_err("Conn: %p failed to create"
+					       " fastreg pool\n", isert_conn);
+					return ret;
+				}
+			}
+
 			ret = isert_alloc_rx_descriptors(isert_conn);
 			if (ret)
 				return ret;
@@ -1392,19 +1481,60 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
 	}
 }
 
+static int
+isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
+		   enum iser_ib_op_code op, struct isert_data_buf *data)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
+			  DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	data->len = length - offset;
+	data->offset = offset;
+	data->sg_off = data->offset / PAGE_SIZE;
+
+	data->sg = &sg[data->sg_off];
+	data->nents = min_t(unsigned int, nents - data->sg_off,
+					  ISCSI_ISER_SG_TABLESIZE);
+	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
+					PAGE_SIZE);
+
+	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
+					data->dma_dir);
+	if (unlikely(!data->dma_nents)) {
+		pr_err("Cmd: unable to dma map SGs %p\n", sg);
+		return -EINVAL;
+	}
+
+	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+		 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
+
+	return 0;
+}
+
+static void
+isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
+	memset(data, 0, sizeof(*data));
+}
+
+
+
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 
 	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
-	if (wr->sge) {
+
+	if (wr->data.sg) {
 		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-				DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		wr->sge = NULL;
+		isert_unmap_data_buf(isert_conn, &wr->data);
 	}
 
 	if (wr->send_wr) {
@@ -1424,7 +1554,6 @@ static void
 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	LIST_HEAD(unmap_list);
 
 	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
@@ -1432,18 +1561,19 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 	if (wr->fr_desc) {
 		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
 			 isert_cmd, wr->fr_desc);
+		if (wr->fr_desc->ind & ISERT_PROTECTED) {
+			isert_unmap_data_buf(isert_conn, &wr->prot);
+			wr->fr_desc->ind &= ~ISERT_PROTECTED;
+		}
 		spin_lock_bh(&isert_conn->conn_lock);
 		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
 		spin_unlock_bh(&isert_conn->conn_lock);
 		wr->fr_desc = NULL;
 	}
 
-	if (wr->sge) {
+	if (wr->data.sg) {
 		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-				DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		wr->sge = NULL;
+		isert_unmap_data_buf(isert_conn, &wr->data);
 	}
 
 	wr->ib_sge = NULL;
@@ -1451,7 +1581,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 }
 
 static void
-isert_put_cmd(struct isert_cmd *isert_cmd)
+isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 {
 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct isert_conn *isert_conn = isert_cmd->conn;
@@ -1467,8 +1597,21 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
 		list_del_init(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
-	if (cmd->data_direction == DMA_TO_DEVICE)
+	if (cmd->data_direction == DMA_TO_DEVICE) {
 		iscsit_stop_dataout_timer(cmd);
+		/*
+		 * Check for special case during comp_err where
+		 * WRITE_PENDING has been handed off from core,
+		 * but requires an extra target_put_sess_cmd()
+		 * before transport_generic_free_cmd() below.
+		 */
+		if (comp_err &&
+		    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
+			struct se_cmd *se_cmd = &cmd->se_cmd;
+
+			target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+		}
+	}
 
 	device->unreg_rdma_mem(isert_cmd, isert_conn);
 	transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1523,7 +1666,7 @@ isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
 
 static void
 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
-		     struct ib_device *ib_dev)
+		     struct ib_device *ib_dev, bool comp_err)
 {
 	if (isert_cmd->pdu_buf_dma != 0) {
 		pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
@@ -1533,7 +1676,77 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
 	}
 
 	isert_unmap_tx_desc(tx_desc, ib_dev);
-	isert_put_cmd(isert_cmd);
+	isert_put_cmd(isert_cmd, comp_err);
+}
+
+static int
+isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
+{
+	struct ib_mr_status mr_status;
+	int ret;
+
+	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+	if (ret) {
+		pr_err("ib_check_mr_status failed, ret %d\n", ret);
+		goto fail_mr_status;
+	}
+
+	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+		u64 sec_offset_err;
+		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
+
+		switch (mr_status.sig_err.err_type) {
+		case IB_SIG_BAD_GUARD:
+			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+			break;
+		case IB_SIG_BAD_REFTAG:
+			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+			break;
+		case IB_SIG_BAD_APPTAG:
+			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+			break;
+		}
+		sec_offset_err = mr_status.sig_err.sig_err_offset;
+		do_div(sec_offset_err, block_size);
+		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
+
+		pr_err("isert: PI error found type %d at sector 0x%llx "
+		       "expected 0x%x vs actual 0x%x\n",
+		       mr_status.sig_err.err_type,
+		       (unsigned long long)se_cmd->bad_sector,
+		       mr_status.sig_err.expected,
+		       mr_status.sig_err.actual);
+		ret = 1;
+	}
+
+fail_mr_status:
+	return ret;
+}
+
+static void
+isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
+			    struct isert_cmd *isert_cmd)
+{
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct isert_conn *isert_conn = isert_cmd->conn;
+	struct isert_device *device = isert_conn->conn_device;
+	int ret = 0;
+
+	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+		ret = isert_check_pi_status(se_cmd,
+					    wr->fr_desc->pi_ctx->sig_mr);
+		wr->fr_desc->ind &= ~ISERT_PROTECTED;
+	}
+
+	device->unreg_rdma_mem(isert_cmd, isert_conn);
+	wr->send_wr_num = 0;
+	if (ret)
+		transport_send_check_condition_and_sense(se_cmd,
+							 se_cmd->pi_err, 0);
+	else
+		isert_put_response(isert_conn->conn, cmd);
 }
 
 static void
@@ -1545,10 +1758,17 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_conn *isert_conn = isert_cmd->conn;
 	struct isert_device *device = isert_conn->conn_device;
+	int ret = 0;
+
+	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+		ret = isert_check_pi_status(se_cmd,
+					    wr->fr_desc->pi_ctx->sig_mr);
+		wr->fr_desc->ind &= ~ISERT_PROTECTED;
+	}
 
 	iscsit_stop_dataout_timer(cmd);
 	device->unreg_rdma_mem(isert_cmd, isert_conn);
-	cmd->write_data_done = wr->cur_rdma_length;
+	cmd->write_data_done = wr->data.len;
 	wr->send_wr_num = 0;
 
 	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
@@ -1557,7 +1777,11 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
 	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
 	spin_unlock_bh(&cmd->istate_lock);
 
-	target_execute_cmd(se_cmd);
+	if (ret)
+		transport_send_check_condition_and_sense(se_cmd,
+							 se_cmd->pi_err, 0);
+	else
+		target_execute_cmd(se_cmd);
 }
 
 static void
@@ -1577,14 +1801,14 @@ isert_do_control_comp(struct work_struct *work)
 		iscsit_tmr_post_handler(cmd, cmd->conn);
 
 		cmd->i_state = ISTATE_SENT_STATUS;
-		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
 		break;
 	case ISTATE_SEND_REJECT:
 		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
 		atomic_dec(&isert_conn->post_send_buf_count);
 
 		cmd->i_state = ISTATE_SENT_STATUS;
-		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
 		break;
 	case ISTATE_SEND_LOGOUTRSP:
 		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
@@ -1598,7 +1822,7 @@ isert_do_control_comp(struct work_struct *work)
 	case ISTATE_SEND_TEXTRSP:
 		atomic_dec(&isert_conn->post_send_buf_count);
 		cmd->i_state = ISTATE_SENT_STATUS;
-		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
 		break;
 	default:
 		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
@@ -1626,10 +1850,21 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
 		queue_work(isert_comp_wq, &isert_cmd->comp_work);
 		return;
 	}
-	atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+
+	/**
+	 * If send_wr_num is 0 this means that we got
+	 * RDMA completion and we cleared it and we should
+	 * simply decrement the response post. else the
+	 * response is incorporated in send_wr_num, just
+	 * sub it.
+	 **/
+	if (wr->send_wr_num)
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+	else
+		atomic_dec(&isert_conn->post_send_buf_count);
 
 	cmd->i_state = ISTATE_SENT_STATUS;
-	isert_completion_put(tx_desc, isert_cmd, ib_dev);
+	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
 }
 
 static void
@@ -1658,8 +1893,9 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
 					  isert_conn, ib_dev);
 		break;
 	case ISER_IB_RDMA_WRITE:
-		pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
-		dump_stack();
+		pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+		isert_completion_rdma_write(tx_desc, isert_cmd);
 		break;
 	case ISER_IB_RDMA_READ:
 		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
@@ -1709,8 +1945,20 @@ isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_de
 		llnode = llist_next(llnode);
 		wr = &t->isert_cmd->rdma_wr;
 
-		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
-		isert_completion_put(t, t->isert_cmd, ib_dev);
+		/**
+		 * If send_wr_num is 0 this means that we got
+		 * RDMA completion and we cleared it and we should
+		 * simply decrement the response post. else the
+		 * response is incorporated in send_wr_num, just
+		 * sub it.
+		 **/
+		if (wr->send_wr_num)
+			atomic_sub(wr->send_wr_num,
+				   &isert_conn->post_send_buf_count);
+		else
+			atomic_dec(&isert_conn->post_send_buf_count);
+
+		isert_completion_put(t, t->isert_cmd, ib_dev, true);
 	}
 }
 
@@ -1728,15 +1976,27 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn
 		llnode = llist_next(llnode);
 		wr = &t->isert_cmd->rdma_wr;
 
-		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
-		isert_completion_put(t, t->isert_cmd, ib_dev);
+		/**
+		 * If send_wr_num is 0 this means that we got
+		 * RDMA completion and we cleared it and we should
+		 * simply decrement the response post. else the
+		 * response is incorporated in send_wr_num, just
+		 * sub it.
+		 **/
+		if (wr->send_wr_num)
+			atomic_sub(wr->send_wr_num,
+				   &isert_conn->post_send_buf_count);
+		else
+			atomic_dec(&isert_conn->post_send_buf_count);
+
+		isert_completion_put(t, t->isert_cmd, ib_dev, true);
 	}
 	tx_desc->comp_llnode_batch = NULL;
 
 	if (!isert_cmd)
 		isert_unmap_tx_desc(tx_desc, ib_dev);
 	else
-		isert_completion_put(tx_desc, isert_cmd, ib_dev);
+		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
 }
 
 static void
@@ -1918,6 +2178,36 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 	return isert_post_response(isert_conn, isert_cmd);
 }
 
+static void
+isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct isert_device *device = isert_conn->conn_device;
+
+	spin_lock_bh(&conn->cmd_lock);
+	if (!list_empty(&cmd->i_conn_node))
+		list_del_init(&cmd->i_conn_node);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	if (cmd->data_direction == DMA_TO_DEVICE)
+		iscsit_stop_dataout_timer(cmd);
+
+	device->unreg_rdma_mem(isert_cmd, isert_conn);
+}
+
+static enum target_prot_op
+isert_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct isert_device *device = isert_conn->conn_device;
+
+	if (device->pi_capable)
+		return TARGET_PROT_ALL;
+
+	return TARGET_PROT_NORMAL;
+}
+
 static int
 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 		bool nopout_response)
@@ -2099,54 +2389,39 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_data_buf *data = &wr->data;
 	struct ib_send_wr *send_wr;
 	struct ib_sge *ib_sge;
-	struct scatterlist *sg_start;
-	u32 sg_off = 0, sg_nents;
-	u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
-	int ret = 0, count, i, ib_sge_cnt;
+	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
+	int ret = 0, i, ib_sge_cnt;
 
-	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-		data_left = se_cmd->data_length;
-	} else {
-		sg_off = cmd->write_data_done / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
-		offset = cmd->write_data_done;
-		isert_cmd->tx_desc.isert_cmd = isert_cmd;
-	}
+	isert_cmd->tx_desc.isert_cmd = isert_cmd;
 
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = se_cmd->t_data_nents - sg_off;
+	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+				 se_cmd->t_data_nents, se_cmd->data_length,
+				 offset, wr->iser_ib_op, &wr->data);
+	if (ret)
+		return ret;
 
-	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (unlikely(!count)) {
-		pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-		return -EINVAL;
-	}
-	wr->sge = sg_start;
-	wr->num_sge = sg_nents;
-	wr->cur_rdma_length = data_left;
-	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-		 isert_cmd, count, sg_start, sg_nents, data_left);
+	data_left = data->len;
+	offset = data->offset;
 
-	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
+	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
 	if (!ib_sge) {
 		pr_warn("Unable to allocate ib_sge\n");
 		ret = -ENOMEM;
-		goto unmap_sg;
+		goto unmap_cmd;
 	}
 	wr->ib_sge = ib_sge;
 
-	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
+	wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
 	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
 			      GFP_KERNEL);
 	if (!wr->send_wr) {
 		pr_debug("Unable to allocate wr->send_wr\n");
 		ret = -ENOMEM;
-		goto unmap_sg;
+		goto unmap_cmd;
 	}
 
 	wr->isert_cmd = isert_cmd;
@@ -2185,10 +2460,9 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	}
 
 	return 0;
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			DMA_TO_DEVICE : DMA_FROM_DEVICE);
+unmap_cmd:
+	isert_unmap_data_buf(isert_conn, data);
+
 	return ret;
 }
 
@@ -2232,49 +2506,70 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
 }
 
 static int
-isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
-		  struct isert_conn *isert_conn, struct scatterlist *sg_start,
-		  struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
-		  unsigned int data_len)
+isert_fast_reg_mr(struct isert_conn *isert_conn,
+		  struct fast_reg_descriptor *fr_desc,
+		  struct isert_data_buf *mem,
+		  enum isert_indicator ind,
+		  struct ib_sge *sge)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct ib_mr *mr;
+	struct ib_fast_reg_page_list *frpl;
 	struct ib_send_wr fr_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
 	int ret, pagelist_len;
 	u32 page_off;
 	u8 key;
 
-	sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
-	page_off = offset % PAGE_SIZE;
+	if (mem->dma_nents == 1) {
+		sge->lkey = isert_conn->conn_mr->lkey;
+		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
+		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
+		pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
+			 __func__, __LINE__, sge->addr, sge->length,
+			 sge->lkey);
+		return 0;
+	}
+
+	if (ind == ISERT_DATA_KEY_VALID) {
+		/* Registering data buffer */
+		mr = fr_desc->data_mr;
+		frpl = fr_desc->data_frpl;
+	} else {
+		/* Registering protection buffer */
+		mr = fr_desc->pi_ctx->prot_mr;
+		frpl = fr_desc->pi_ctx->prot_frpl;
+	}
+
+	page_off = mem->offset % PAGE_SIZE;
 
 	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
-		 fr_desc, sg_nents, offset);
+		 fr_desc, mem->nents, mem->offset);
 
-	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
-					     &fr_desc->data_frpl->page_list[0]);
+	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
+					     &frpl->page_list[0]);
 
-	if (!fr_desc->valid) {
+	if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
 		memset(&inv_wr, 0, sizeof(inv_wr));
 		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
 		inv_wr.opcode = IB_WR_LOCAL_INV;
-		inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
+		inv_wr.ex.invalidate_rkey = mr->rkey;
 		wr = &inv_wr;
 		/* Bump the key */
-		key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
-		ib_update_fast_reg_key(fr_desc->data_mr, ++key);
+		key = (u8)(mr->rkey & 0x000000FF);
+		ib_update_fast_reg_key(mr, ++key);
 	}
 
 	/* Prepare FASTREG WR */
 	memset(&fr_wr, 0, sizeof(fr_wr));
 	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
 	fr_wr.opcode = IB_WR_FAST_REG_MR;
-	fr_wr.wr.fast_reg.iova_start =
-		fr_desc->data_frpl->page_list[0] + page_off;
-	fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
+	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
+	fr_wr.wr.fast_reg.page_list = frpl;
 	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
 	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
-	fr_wr.wr.fast_reg.length = data_len;
-	fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
+	fr_wr.wr.fast_reg.length = mem->len;
+	fr_wr.wr.fast_reg.rkey = mr->rkey;
 	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
 
 	if (!wr)
@@ -2287,15 +2582,157 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 		pr_err("fast registration failed, ret:%d\n", ret);
 		return ret;
 	}
-	fr_desc->valid = false;
+	fr_desc->ind &= ~ind;
+
+	sge->lkey = mr->lkey;
+	sge->addr = frpl->page_list[0] + page_off;
+	sge->length = mem->len;
+
+	pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
+		 __func__, __LINE__, sge->addr, sge->length,
+		 sge->lkey);
+
+	return ret;
+}
+
+static inline enum ib_t10_dif_type
+se2ib_prot_type(enum target_prot_type prot_type)
+{
+	switch (prot_type) {
+	case TARGET_DIF_TYPE0_PROT:
+		return IB_T10DIF_NONE;
+	case TARGET_DIF_TYPE1_PROT:
+		return IB_T10DIF_TYPE1;
+	case TARGET_DIF_TYPE2_PROT:
+		return IB_T10DIF_TYPE2;
+	case TARGET_DIF_TYPE3_PROT:
+		return IB_T10DIF_TYPE3;
+	default:
+		return IB_T10DIF_NONE;
+	}
+}
 
-	ib_sge->lkey = fr_desc->data_mr->lkey;
-	ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
-	ib_sge->length = data_len;
+static int
+isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
+{
+	enum ib_t10_dif_type ib_prot_type = se2ib_prot_type(se_cmd->prot_type);
+
+	sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
+	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
+	sig_attrs->mem.sig.dif.pi_interval =
+				se_cmd->se_dev->dev_attrib.block_size;
+	sig_attrs->wire.sig.dif.pi_interval =
+				se_cmd->se_dev->dev_attrib.block_size;
+
+	switch (se_cmd->prot_op) {
+	case TARGET_PROT_DIN_INSERT:
+	case TARGET_PROT_DOUT_STRIP:
+		sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
+		sig_attrs->wire.sig.dif.type = ib_prot_type;
+		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+		break;
+	case TARGET_PROT_DOUT_INSERT:
+	case TARGET_PROT_DIN_STRIP:
+		sig_attrs->mem.sig.dif.type = ib_prot_type;
+		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+		sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
+		break;
+	case TARGET_PROT_DIN_PASS:
+	case TARGET_PROT_DOUT_PASS:
+		sig_attrs->mem.sig.dif.type = ib_prot_type;
+		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+		sig_attrs->wire.sig.dif.type = ib_prot_type;
+		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+		sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+		break;
+	default:
+		pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
+		return -EINVAL;
+	}
 
-	pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
-		 ib_sge->addr, ib_sge->length, ib_sge->lkey);
+	return 0;
+}
+
+static inline u8
+isert_set_prot_checks(u8 prot_checks)
+{
+	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
+	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
+	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
+}
+
+static int
+isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
+		 struct fast_reg_descriptor *fr_desc,
+		 struct ib_sge *data_sge, struct ib_sge *prot_sge,
+		 struct ib_sge *sig_sge)
+{
+	struct ib_send_wr sig_wr, inv_wr;
+	struct ib_send_wr *bad_wr, *wr = NULL;
+	struct pi_context *pi_ctx = fr_desc->pi_ctx;
+	struct ib_sig_attrs sig_attrs;
+	int ret;
+	u32 key;
+
+	memset(&sig_attrs, 0, sizeof(sig_attrs));
+	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
+	if (ret)
+		goto err;
+
+	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
+
+	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
+		memset(&inv_wr, 0, sizeof(inv_wr));
+		inv_wr.opcode = IB_WR_LOCAL_INV;
+		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
+		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+		wr = &inv_wr;
+		/* Bump the key */
+		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
+		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
+	}
+
+	memset(&sig_wr, 0, sizeof(sig_wr));
+	sig_wr.opcode = IB_WR_REG_SIG_MR;
+	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
+	sig_wr.sg_list = data_sge;
+	sig_wr.num_sge = 1;
+	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
+	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
+	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
+	if (se_cmd->t_prot_sg)
+		sig_wr.wr.sig_handover.prot = prot_sge;
+
+	if (!wr)
+		wr = &sig_wr;
+	else
+		wr->next = &sig_wr;
+
+	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+	if (ret) {
+		pr_err("fast registration failed, ret:%d\n", ret);
+		goto err;
+	}
+	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
+
+	sig_sge->lkey = pi_ctx->sig_mr->lkey;
+	sig_sge->addr = 0;
+	sig_sge->length = se_cmd->data_length;
+	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
+	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
+		/*
+		 * We have protection guards on the wire
+		 * so we need to set a larget transfer
+		 */
+		sig_sge->length += se_cmd->prot_length;
 
+	pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
+		 sig_sge->addr, sig_sge->length,
+		 sig_sge->lkey);
+err:
 	return ret;
 }
 
@@ -2305,62 +2742,82 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
-	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_conn *isert_conn = conn->context;
+	struct ib_sge data_sge;
 	struct ib_send_wr *send_wr;
-	struct ib_sge *ib_sge;
-	struct scatterlist *sg_start;
-	struct fast_reg_descriptor *fr_desc;
-	u32 sg_off = 0, sg_nents;
-	u32 offset = 0, data_len, data_left, rdma_write_max;
-	int ret = 0, count;
+	struct fast_reg_descriptor *fr_desc = NULL;
+	u32 offset;
+	int ret = 0;
 	unsigned long flags;
 
-	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-		data_left = se_cmd->data_length;
-	} else {
-		offset = cmd->write_data_done;
-		sg_off = offset / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
-		isert_cmd->tx_desc.isert_cmd = isert_cmd;
-	}
+	isert_cmd->tx_desc.isert_cmd = isert_cmd;
 
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = se_cmd->t_data_nents - sg_off;
+	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+				 se_cmd->t_data_nents, se_cmd->data_length,
+				 offset, wr->iser_ib_op, &wr->data);
+	if (ret)
+		return ret;
 
-	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (unlikely(!count)) {
-		pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-		return -EINVAL;
+	if (wr->data.dma_nents != 1 ||
+	    se_cmd->prot_op != TARGET_PROT_NORMAL) {
+		spin_lock_irqsave(&isert_conn->conn_lock, flags);
+		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
+					   struct fast_reg_descriptor, list);
+		list_del(&fr_desc->list);
+		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+		wr->fr_desc = fr_desc;
 	}
-	wr->sge = sg_start;
-	wr->num_sge = sg_nents;
-	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-		 isert_cmd, count, sg_start, sg_nents, data_left);
 
-	memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
-	ib_sge = &wr->s_ib_sge;
-	wr->ib_sge = ib_sge;
+	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
+				ISERT_DATA_KEY_VALID, &data_sge);
+	if (ret)
+		goto unmap_cmd;
+
+	if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
+		struct ib_sge prot_sge, sig_sge;
+
+		if (se_cmd->t_prot_sg) {
+			ret = isert_map_data_buf(isert_conn, isert_cmd,
+						 se_cmd->t_prot_sg,
+						 se_cmd->t_prot_nents,
+						 se_cmd->prot_length,
+						 0, wr->iser_ib_op, &wr->prot);
+			if (ret)
+				goto unmap_cmd;
+
+			ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
+						ISERT_PROT_KEY_VALID, &prot_sge);
+			if (ret)
+				goto unmap_prot_cmd;
+		}
+
+		ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
+				       &data_sge, &prot_sge, &sig_sge);
+		if (ret)
+			goto unmap_prot_cmd;
 
+		fr_desc->ind |= ISERT_PROTECTED;
+		memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
+	} else
+		memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
+
+	wr->ib_sge = &wr->s_ib_sge;
 	wr->send_wr_num = 1;
 	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
 	wr->send_wr = &wr->s_send_wr;
-
 	wr->isert_cmd = isert_cmd;
-	rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
 
 	send_wr = &isert_cmd->rdma_wr.s_send_wr;
-	send_wr->sg_list = ib_sge;
+	send_wr->sg_list = &wr->s_ib_sge;
 	send_wr->num_sge = 1;
 	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
 		send_wr->opcode = IB_WR_RDMA_WRITE;
 		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
 		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
-		send_wr->send_flags = 0;
-		send_wr->next = &isert_cmd->tx_desc.send_wr;
+		send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
+				      0 : IB_SEND_SIGNALED;
 	} else {
 		send_wr->opcode = IB_WR_RDMA_READ;
 		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
@@ -2368,37 +2825,18 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		send_wr->send_flags = IB_SEND_SIGNALED;
 	}
 
-	data_len = min(data_left, rdma_write_max);
-	wr->cur_rdma_length = data_len;
-
-	/* if there is a single dma entry, dma mr is sufficient */
-	if (count == 1) {
-		ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
-		ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
-		ib_sge->lkey = isert_conn->conn_mr->lkey;
-		wr->fr_desc = NULL;
-	} else {
+	return 0;
+unmap_prot_cmd:
+	if (se_cmd->t_prot_sg)
+		isert_unmap_data_buf(isert_conn, &wr->prot);
+unmap_cmd:
+	if (fr_desc) {
 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
-		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
-					   struct fast_reg_descriptor, list);
-		list_del(&fr_desc->list);
+		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
 		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
-		wr->fr_desc = fr_desc;
-
-		ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
-					ib_sge, sg_nents, offset, data_len);
-		if (ret) {
-			list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
-			goto unmap_sg;
-		}
 	}
+	isert_unmap_data_buf(isert_conn, &wr->data);
 
-	return 0;
-
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	return ret;
 }
 
@@ -2422,25 +2860,35 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		return rc;
 	}
 
-	/*
-	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
-	 */
-	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
-	iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
-			     &isert_cmd->tx_desc.iscsi_header);
-	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-	isert_init_send_wr(isert_conn, isert_cmd,
-			   &isert_cmd->tx_desc.send_wr, true);
+	if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
+		/*
+		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
+		 */
+		isert_create_send_desc(isert_conn, isert_cmd,
+				       &isert_cmd->tx_desc);
+		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
+				     &isert_cmd->tx_desc.iscsi_header);
+		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+		isert_init_send_wr(isert_conn, isert_cmd,
+				   &isert_cmd->tx_desc.send_wr, true);
+		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
+		wr->send_wr_num += 1;
+	}
 
-	atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
 
 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
 	if (rc) {
 		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
 	}
-	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
-		 isert_cmd);
+
+	if (se_cmd->prot_op == TARGET_PROT_NORMAL)
+		pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
+			 "READ\n", isert_cmd);
+	else
+		pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
+			 isert_cmd);
 
 	return 1;
 }
@@ -2815,6 +3263,8 @@ static struct iscsit_transport iser_target_transport = {
 	.iscsit_get_dataout	= isert_get_dataout,
 	.iscsit_queue_data_in	= isert_put_datain,
 	.iscsit_queue_status	= isert_put_response,
+	.iscsit_aborted_task	= isert_aborted_task,
+	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
 };
 
 static int __init isert_init(void)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index f6ae7f5dd408..4c072ae34c01 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -50,11 +50,35 @@ struct iser_tx_desc {
     struct ib_send_wr send_wr;
 } __packed;
 
+enum isert_indicator {
+    ISERT_PROTECTED = 1 << 0,
+    ISERT_DATA_KEY_VALID = 1 << 1,
+    ISERT_PROT_KEY_VALID = 1 << 2,
+    ISERT_SIG_KEY_VALID = 1 << 3,
+};
+
+struct pi_context {
+    struct ib_mr *prot_mr;
+    struct ib_fast_reg_page_list *prot_frpl;
+    struct ib_mr *sig_mr;
+};
+
 struct fast_reg_descriptor {
     struct list_head list;
     struct ib_mr *data_mr;
     struct ib_fast_reg_page_list *data_frpl;
-    bool valid;
+    u8 ind;
+    struct pi_context *pi_ctx;
+};
+
+struct isert_data_buf {
+    struct scatterlist *sg;
+    int nents;
+    u32 sg_off;
+    u32 len; /* cur_rdma_length */
+    u32 offset;
+    unsigned int dma_nents;
+    enum dma_data_direction dma_dir;
 };
 
 struct isert_rdma_wr {
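
A single "valid" flag can no longer describe a descriptor that may now carry up to three registered keys (data, protection, signature), so it becomes the "ind" bitmask. A standalone sketch of how such a mask is set and tested; the enum values are copied from the hunk above, everything else is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    enum isert_indicator {
        ISERT_PROTECTED = 1 << 0,
        ISERT_DATA_KEY_VALID = 1 << 1,
        ISERT_PROT_KEY_VALID = 1 << 2,
        ISERT_SIG_KEY_VALID = 1 << 3,
    };

    int main(void)
    {
        uint8_t ind = 0;

        ind |= ISERT_DATA_KEY_VALID | ISERT_SIG_KEY_VALID; /* after fast-reg */
        ind &= ~ISERT_DATA_KEY_VALID;                      /* key invalidated */

        printf("data=%d prot=%d sig=%d protected=%d\n",
               !!(ind & ISERT_DATA_KEY_VALID),
               !!(ind & ISERT_PROT_KEY_VALID),
               !!(ind & ISERT_SIG_KEY_VALID),
               !!(ind & ISERT_PROTECTED));
        return 0;
    }
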
@@ -63,12 +87,11 @@ struct isert_rdma_wr {
     enum iser_ib_op_code iser_ib_op;
     struct ib_sge *ib_sge;
     struct ib_sge s_ib_sge;
-    int num_sge;
-    struct scatterlist *sge;
     int send_wr_num;
     struct ib_send_wr *send_wr;
     struct ib_send_wr s_send_wr;
-    u32 cur_rdma_length;
+    struct isert_data_buf data;
+    struct isert_data_buf prot;
     struct fast_reg_descriptor *fr_desc;
 };
 
@@ -141,6 +164,7 @@ struct isert_cq_desc {
 
 struct isert_device {
     int use_fastreg;
+    bool pi_capable;
     int cqs_used;
     int refcount;
     int cq_active_qps[ISERT_MAX_CQ];
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0e537d8d0e47..fe09f2788b15 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1078,6 +1078,7 @@ static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
                  struct srpt_send_ioctx *ioctx)
 {
+    struct ib_device *dev = ch->sport->sdev->device;
     struct se_cmd *cmd;
     struct scatterlist *sg, *sg_orig;
     int sg_cnt;
@@ -1124,7 +1125,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 
     db = ioctx->rbufs;
     tsize = cmd->data_length;
-    dma_len = sg_dma_len(&sg[0]);
+    dma_len = ib_sg_dma_len(dev, &sg[0]);
     riu = ioctx->rdma_ius;
 
     /*
@@ -1155,7 +1156,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
             ++j;
             if (j < count) {
                 sg = sg_next(sg);
-                dma_len = sg_dma_len(sg);
+                dma_len = ib_sg_dma_len(
+                        dev, sg);
             }
         }
     } else {
@@ -1192,8 +1194,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
     tsize = cmd->data_length;
     riu = ioctx->rdma_ius;
     sg = sg_orig;
-    dma_len = sg_dma_len(&sg[0]);
-    dma_addr = sg_dma_address(&sg[0]);
+    dma_len = ib_sg_dma_len(dev, &sg[0]);
+    dma_addr = ib_sg_dma_address(dev, &sg[0]);
 
     /* this second loop is really mapped sg_addres to rdma_iu->ib_sge */
     for (i = 0, j = 0;
@@ -1216,8 +1218,10 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
             ++j;
             if (j < count) {
                 sg = sg_next(sg);
-                dma_len = sg_dma_len(sg);
-                dma_addr = sg_dma_address(sg);
+                dma_len = ib_sg_dma_len(
+                        dev, sg);
+                dma_addr = ib_sg_dma_address(
+                        dev, sg);
             }
         }
     } else {
@@ -2580,7 +2584,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
         goto destroy_ib;
     }
 
-    ch->sess = transport_init_session();
+    ch->sess = transport_init_session(TARGET_PROT_NORMAL);
     if (IS_ERR(ch->sess)) {
         rej->reason = __constant_cpu_to_be32(
                 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
@@ -3081,6 +3085,14 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
     srpt_queue_response(cmd);
 }
 
+static void srpt_aborted_task(struct se_cmd *cmd)
+{
+    struct srpt_send_ioctx *ioctx = container_of(cmd,
+                struct srpt_send_ioctx, cmd);
+
+    srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+}
+
 static int srpt_queue_status(struct se_cmd *cmd)
 {
     struct srpt_send_ioctx *ioctx;
@@ -3928,6 +3940,7 @@ static struct target_core_fabric_ops srpt_template = {
     .queue_data_in = srpt_queue_data_in,
     .queue_status = srpt_queue_status,
     .queue_tm_rsp = srpt_queue_tm_rsp,
+    .aborted_task = srpt_aborted_task,
     /*
      * Setup function pointers for generic logic in
      * target_core_fabric_configfs.c
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 788c4fe2b0c9..68fb66fdb757 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -684,6 +684,20 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
     qlt_xmit_tm_rsp(mcmd);
 }
 
+static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
+{
+    struct qla_tgt_cmd *cmd = container_of(se_cmd,
+                struct qla_tgt_cmd, se_cmd);
+    struct scsi_qla_host *vha = cmd->vha;
+    struct qla_hw_data *ha = vha->hw;
+
+    if (!cmd->sg_mapped)
+        return;
+
+    pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+    cmd->sg_mapped = 0;
+}
+
 /* Local pointer to allocated TCM configfs fabric module */
 struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
 struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
@@ -1468,7 +1482,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
     }
     se_tpg = &tpg->se_tpg;
 
-    se_sess = transport_init_session();
+    se_sess = transport_init_session(TARGET_PROT_NORMAL);
     if (IS_ERR(se_sess)) {
         pr_err("Unable to initialize struct se_session\n");
         return PTR_ERR(se_sess);
@@ -1877,6 +1891,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
     .queue_data_in = tcm_qla2xxx_queue_data_in,
     .queue_status = tcm_qla2xxx_queue_status,
     .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
+    .aborted_task = tcm_qla2xxx_aborted_task,
     /*
      * Setup function pointers for generic logic in
      * target_core_fabric_configfs.c
@@ -1926,6 +1941,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
     .queue_data_in = tcm_qla2xxx_queue_data_in,
     .queue_status = tcm_qla2xxx_queue_status,
     .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
+    .aborted_task = tcm_qla2xxx_aborted_task,
     /*
      * Setup function pointers for generic logic in
      * target_core_fabric_configfs.c
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b83ec378d04f..78cab13bbb1b 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -499,6 +499,23 @@ static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
     return 0;
 }
 
+static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+    bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
+
+    spin_lock_bh(&conn->cmd_lock);
+    if (!list_empty(&cmd->i_conn_node))
+        list_del_init(&cmd->i_conn_node);
+    spin_unlock_bh(&conn->cmd_lock);
+
+    __iscsit_free_cmd(cmd, scsi_cmd, true);
+}
+
+static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+    return TARGET_PROT_NORMAL;
+}
+
 static struct iscsit_transport iscsi_target_transport = {
     .name = "iSCSI/TCP",
     .transport_type = ISCSI_TCP,
@@ -513,6 +530,8 @@ static struct iscsit_transport iscsi_target_transport = {
     .iscsit_response_queue = iscsit_response_queue,
     .iscsit_queue_data_in = iscsit_queue_rsp,
     .iscsit_queue_status = iscsit_queue_rsp,
+    .iscsit_aborted_task = iscsit_aborted_task,
+    .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
 };
 
 static int __init iscsi_target_init_module(void)
@@ -1503,6 +1522,16 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 {
     u32 payload_length = ntoh24(hdr->dlength);
 
+    if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+        pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
+        if (!cmd)
+            return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+                        (unsigned char *)hdr);
+
+        return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+                    (unsigned char *)hdr);
+    }
+
     if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
         pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
             " not set, protocol error.\n");
@@ -2468,6 +2497,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
 {
     struct iscsi_cmd *cmd;
     struct iscsi_conn *conn_p;
+    bool found = false;
 
     /*
      * Only send a Asynchronous Message on connections whos network
@@ -2476,11 +2506,12 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
     list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
         if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
             iscsit_inc_conn_usage_count(conn_p);
+            found = true;
             break;
         }
     }
 
-    if (!conn_p)
+    if (!found)
         return;
 
     cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
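
The bug fixed above is a classic one: list_for_each_entry() never leaves its cursor NULL, so "if (!conn_p)" could not detect the no-match case and a stale pointer was used. A minimal userspace analogue of why the explicit found flag is needed, with a plain array loop standing in for the list walk:

    #include <stdio.h>
    #include <stdbool.h>

    struct conn { int state; };

    int main(void)
    {
        struct conn conns[3] = { {0}, {0}, {0} };   /* none logged in */
        struct conn *conn_p = NULL;
        bool found = false;
        int i;

        for (i = 0; i < 3; i++) {
            conn_p = &conns[i];
            if (conn_p->state == 1) {   /* LOGGED_IN stand-in */
                found = true;
                break;
            }
        }

        /* After a full walk conn_p still points at the last element, so
         * the old "if (!conn_p)" test never fired; "if (!found)" does. */
        printf("conn_p is %s, found=%d\n",
               conn_p ? "non-NULL" : "NULL", found);
        return 0;
    }
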
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 1c0088fe9e99..ae03f3e5de1e 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1052,6 +1052,11 @@ TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR);
  */
 DEF_TPG_ATTRIB(default_erl);
 TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_t10_pi
+ */
+DEF_TPG_ATTRIB(t10_pi);
+TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR);
 
 static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
     &iscsi_tpg_attrib_authentication.attr,
@@ -1064,6 +1069,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
     &iscsi_tpg_attrib_prod_mode_write_protect.attr,
     &iscsi_tpg_attrib_demo_mode_discovery.attr,
     &iscsi_tpg_attrib_default_erl.attr,
+    &iscsi_tpg_attrib_t10_pi.attr,
     NULL,
 };
 
@@ -1815,6 +1821,13 @@ static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
     iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 }
 
+static void lio_aborted_task(struct se_cmd *se_cmd)
+{
+    struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+    cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
+}
+
 static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
 {
     struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
@@ -1999,6 +2012,7 @@ int iscsi_target_register_configfs(void)
     fabric->tf_ops.queue_data_in = &lio_queue_data_in;
     fabric->tf_ops.queue_status = &lio_queue_status;
     fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
+    fabric->tf_ops.aborted_task = &lio_aborted_task;
     /*
      * Setup function pointers for generic logic in target_core_fabric_configfs.c
      */
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 48f7b3bf4e8c..886d74d6f3d4 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -58,7 +58,8 @@
 #define TA_DEMO_MODE_DISCOVERY 1
 #define TA_DEFAULT_ERL 0
 #define TA_CACHE_CORE_NPS 0
-
+/* T10 protection information disabled by default */
+#define TA_DEFAULT_T10_PI 0
 
 #define ISCSI_IOV_DATA_BUFFER 5
 
@@ -765,6 +766,7 @@ struct iscsi_tpg_attrib {
     u32 prod_mode_write_protect;
     u32 demo_mode_discovery;
     u32 default_erl;
+    u8 t10_pi;
     struct iscsi_portal_group *tpg;
 };
 
@@ -787,6 +789,7 @@ struct iscsi_np {
     void *np_context;
     struct iscsit_transport *np_transport;
     struct list_head np_list;
+    struct iscsi_tpg_np *tpg_np;
 } ____cacheline_aligned;
 
 struct iscsi_tpg_np {
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index e29279e6b577..8739b98f6f93 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -259,6 +259,7 @@ static int iscsi_login_zero_tsih_s1(
 {
     struct iscsi_session *sess = NULL;
     struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+    enum target_prot_op sup_pro_ops;
     int ret;
 
     sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
@@ -320,8 +321,9 @@ static int iscsi_login_zero_tsih_s1(
         kfree(sess);
         return -ENOMEM;
     }
+    sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
 
-    sess->se_sess = transport_init_session();
+    sess->se_sess = transport_init_session(sup_pro_ops);
     if (IS_ERR(sess->se_sess)) {
         iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                 ISCSI_LOGIN_STATUS_NO_RESOURCES);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 44a5471de00f..eb96b20dc09e 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -225,6 +225,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
     a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
     a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
     a->default_erl = TA_DEFAULT_ERL;
+    a->t10_pi = TA_DEFAULT_T10_PI;
 }
 
 int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -500,6 +501,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
     init_completion(&tpg_np->tpg_np_comp);
     kref_init(&tpg_np->tpg_np_kref);
     tpg_np->tpg_np = np;
+    np->tpg_np = tpg_np;
     tpg_np->tpg = tpg;
 
     spin_lock(&tpg->tpg_np_lock);
@@ -858,3 +860,22 @@ int iscsit_ta_default_erl(
 
     return 0;
 }
+
+int iscsit_ta_t10_pi(
+    struct iscsi_portal_group *tpg,
+    u32 flag)
+{
+    struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+    if ((flag != 0) && (flag != 1)) {
+        pr_err("Illegal value %d\n", flag);
+        return -EINVAL;
+    }
+
+    a->t10_pi = flag;
+    pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:"
+        " %s\n", tpg->tpgt, (a->t10_pi) ?
+        "ON" : "OFF");
+
+    return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 213c0fc7fdc9..0a182f2aa8a2 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -39,5 +39,6 @@ extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
 
 #endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index e655b042ed18..53e157cb8c54 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -705,8 +705,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
 }
 EXPORT_SYMBOL(iscsit_release_cmd);
 
-static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
+void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
               bool check_queues)
 {
     struct iscsi_conn *conn = cmd->conn;
 
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 561a424d1980..a68508c4fec8 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -30,6 +30,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co
 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
 extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
 extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool);
 extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
 extern int iscsit_check_session_usage_count(struct iscsi_session *);
 extern void iscsit_dec_session_usage_count(struct iscsi_session *);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index fadad7c5f635..c886ad1c39fb 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -212,6 +212,10 @@ static void tcm_loop_submission_work(struct work_struct *work)
         se_cmd->se_cmd_flags |= SCF_BIDI;
 
     }
+
+    if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
+        se_cmd->prot_pto = true;
+
     rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
             &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
             scsi_bufflen(sc), tcm_loop_sam_attr(sc),
@@ -915,6 +919,11 @@ static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
     wake_up(&tl_tmr->tl_tmr_wait);
 }
 
+static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
+{
+    return;
+}
+
 static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
 {
     switch (tl_hba->tl_proto_id) {
@@ -1009,7 +1018,7 @@ static int tcm_loop_make_nexus(
     /*
      * Initialize the struct se_session pointer
      */
-    tl_nexus->se_sess = transport_init_session();
+    tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL);
     if (IS_ERR(tl_nexus->se_sess)) {
         ret = PTR_ERR(tl_nexus->se_sess);
         goto out;
@@ -1483,6 +1492,7 @@ static int tcm_loop_register_configfs(void)
     fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
     fabric->tf_ops.queue_status = &tcm_loop_queue_status;
     fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
+    fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;
 
     /*
      * Setup function pointers for generic logic in target_core_fabric_configfs.c
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 24884cac19ce..e7e93727553c 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -210,7 +210,7 @@ static struct sbp_session *sbp_session_create(
         return ERR_PTR(-ENOMEM);
     }
 
-    sess->se_sess = transport_init_session();
+    sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
     if (IS_ERR(sess->se_sess)) {
         pr_err("failed to init se_session\n");
 
@@ -1846,6 +1846,11 @@ static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
 {
 }
 
+static void sbp_aborted_task(struct se_cmd *se_cmd)
+{
+    return;
+}
+
 static int sbp_check_stop_free(struct se_cmd *se_cmd)
 {
     struct sbp_target_request *req = container_of(se_cmd,
@@ -2526,6 +2531,7 @@ static struct target_core_fabric_ops sbp_ops = {
     .queue_data_in = sbp_queue_data_in,
     .queue_status = sbp_queue_status,
     .queue_tm_rsp = sbp_queue_tm_rsp,
+    .aborted_task = sbp_aborted_task,
     .check_stop_free = sbp_check_stop_free,
 
     .fabric_make_wwn = sbp_make_tport,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index c3d9df6aaf5f..fcbe6125b73e 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -455,11 +455,26 @@ out:
     return rc;
 }
 
-static inline int core_alua_state_nonoptimized(
+static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
+{
+    /*
+     * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+     * The ALUA additional sense code qualifier (ASCQ) is determined
+     * by the ALUA primary or secondary access state..
+     */
+    pr_debug("[%s]: ALUA TG Port not available, "
+        "SenseKey: NOT_READY, ASC/ASCQ: "
+        "0x04/0x%02x\n",
+        cmd->se_tfo->get_fabric_name(), alua_ascq);
+
+    cmd->scsi_asc = 0x04;
+    cmd->scsi_ascq = alua_ascq;
+}
+
+static inline void core_alua_state_nonoptimized(
     struct se_cmd *cmd,
     unsigned char *cdb,
-    int nonop_delay_msecs,
-    u8 *alua_ascq)
+    int nonop_delay_msecs)
 {
     /*
      * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
@@ -468,13 +483,11 @@ static inline int core_alua_state_nonoptimized(
      */
     cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
     cmd->alua_nonop_delay = nonop_delay_msecs;
-    return 0;
 }
 
 static inline int core_alua_state_lba_dependent(
     struct se_cmd *cmd,
-    struct t10_alua_tg_pt_gp *tg_pt_gp,
-    u8 *alua_ascq)
+    struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
     struct se_device *dev = cmd->se_dev;
     u64 segment_size, segment_mult, sectors, lba;
@@ -520,7 +533,7 @@ static inline int core_alua_state_lba_dependent(
     }
     if (!cur_map) {
         spin_unlock(&dev->t10_alua.lba_map_lock);
-        *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
         return 1;
     }
     list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
@@ -531,11 +544,11 @@ static inline int core_alua_state_lba_dependent(
         switch(map_mem->lba_map_mem_alua_state) {
         case ALUA_ACCESS_STATE_STANDBY:
             spin_unlock(&dev->t10_alua.lba_map_lock);
-            *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+            set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
             return 1;
         case ALUA_ACCESS_STATE_UNAVAILABLE:
             spin_unlock(&dev->t10_alua.lba_map_lock);
-            *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+            set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
             return 1;
         default:
             break;
@@ -548,8 +561,7 @@ static inline int core_alua_state_lba_dependent(
 
 static inline int core_alua_state_standby(
     struct se_cmd *cmd,
-    unsigned char *cdb,
-    u8 *alua_ascq)
+    unsigned char *cdb)
 {
     /*
      * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
@@ -570,7 +582,7 @@ static inline int core_alua_state_standby(
         case MI_REPORT_TARGET_PGS:
             return 0;
         default:
-            *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+            set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
             return 1;
         }
     case MAINTENANCE_OUT:
@@ -578,7 +590,7 @@ static inline int core_alua_state_standby(
         case MO_SET_TARGET_PGS:
             return 0;
         default:
-            *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+            set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
             return 1;
         }
     case REQUEST_SENSE:
@@ -588,7 +600,7 @@ static inline int core_alua_state_standby(
     case WRITE_BUFFER:
         return 0;
     default:
-        *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
         return 1;
     }
 
@@ -597,8 +609,7 @@ static inline int core_alua_state_standby(
 
 static inline int core_alua_state_unavailable(
     struct se_cmd *cmd,
-    unsigned char *cdb,
-    u8 *alua_ascq)
+    unsigned char *cdb)
 {
     /*
      * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
@@ -613,7 +624,7 @@ static inline int core_alua_state_unavailable(
         case MI_REPORT_TARGET_PGS:
             return 0;
         default:
-            *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+            set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
             return 1;
         }
     case MAINTENANCE_OUT:
@@ -621,7 +632,7 @@ static inline int core_alua_state_unavailable(
         case MO_SET_TARGET_PGS:
             return 0;
         default:
-            *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+            set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
             return 1;
         }
     case REQUEST_SENSE:
@@ -629,7 +640,7 @@ static inline int core_alua_state_unavailable(
     case WRITE_BUFFER:
         return 0;
     default:
-        *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
         return 1;
     }
 
@@ -638,8 +649,7 @@ static inline int core_alua_state_unavailable(
 
 static inline int core_alua_state_transition(
     struct se_cmd *cmd,
-    unsigned char *cdb,
-    u8 *alua_ascq)
+    unsigned char *cdb)
 {
     /*
      * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
@@ -654,7 +664,7 @@ static inline int core_alua_state_transition(
         case MI_REPORT_TARGET_PGS:
             return 0;
         default:
-            *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+            set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
             return 1;
         }
     case REQUEST_SENSE:
@@ -662,7 +672,7 @@ static inline int core_alua_state_transition(
     case WRITE_BUFFER:
         return 0;
     default:
-        *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+        set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
         return 1;
     }
 
@@ -684,8 +694,6 @@ target_alua_state_check(struct se_cmd *cmd)
     struct t10_alua_tg_pt_gp *tg_pt_gp;
     struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
     int out_alua_state, nonop_delay_msecs;
-    u8 alua_ascq;
-    int ret;
 
     if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
         return 0;
@@ -701,9 +709,8 @@ target_alua_state_check(struct se_cmd *cmd)
     if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
         pr_debug("ALUA: Got secondary offline status for local"
                 " target port\n");
-        alua_ascq = ASCQ_04H_ALUA_OFFLINE;
-        ret = 1;
-        goto out;
+        set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
+        return TCM_CHECK_CONDITION_NOT_READY;
     }
     /*
      * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
@@ -731,20 +738,23 @@ target_alua_state_check(struct se_cmd *cmd)
 
     switch (out_alua_state) {
     case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
-        ret = core_alua_state_nonoptimized(cmd, cdb,
-                    nonop_delay_msecs, &alua_ascq);
+        core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
         break;
     case ALUA_ACCESS_STATE_STANDBY:
-        ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
+        if (core_alua_state_standby(cmd, cdb))
+            return TCM_CHECK_CONDITION_NOT_READY;
         break;
     case ALUA_ACCESS_STATE_UNAVAILABLE:
-        ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
+        if (core_alua_state_unavailable(cmd, cdb))
+            return TCM_CHECK_CONDITION_NOT_READY;
         break;
     case ALUA_ACCESS_STATE_TRANSITION:
-        ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
+        if (core_alua_state_transition(cmd, cdb))
+            return TCM_CHECK_CONDITION_NOT_READY;
         break;
     case ALUA_ACCESS_STATE_LBA_DEPENDENT:
-        ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
+        if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
+            return TCM_CHECK_CONDITION_NOT_READY;
         break;
     /*
      * OFFLINE is a secondary ALUA target port group access state, that is
@@ -757,23 +767,6 @@ target_alua_state_check(struct se_cmd *cmd)
         return TCM_INVALID_CDB_FIELD;
     }
 
-out:
-    if (ret > 0) {
-        /*
-         * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
-         * The ALUA additional sense code qualifier (ASCQ) is determined
-         * by the ALUA primary or secondary access state..
-         */
-        pr_debug("[%s]: ALUA TG Port not available, "
-            "SenseKey: NOT_READY, ASC/ASCQ: "
-            "0x04/0x%02x\n",
-            cmd->se_tfo->get_fabric_name(), alua_ascq);
-
-        cmd->scsi_asc = 0x04;
-        cmd->scsi_ascq = alua_ascq;
-        return TCM_CHECK_CONDITION_NOT_READY;
-    }
-
     return 0;
 }
 
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index f0e85b119692..60a9ae6df763 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -457,6 +457,10 @@ static int target_fabric_tf_ops_check(
         pr_err("Missing tfo->queue_tm_rsp()\n");
         return -EINVAL;
     }
+    if (!tfo->aborted_task) {
+        pr_err("Missing tfo->aborted_task()\n");
+        return -EINVAL;
+    }
     /*
      * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
      * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index cf991a91a8a9..7d6cddaec525 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -854,25 +854,6 @@ static int fd_init_prot(struct se_device *dev)
     return 0;
 }
 
-static void fd_init_format_buf(struct se_device *dev, unsigned char *buf,
-                   u32 unit_size, u32 *ref_tag, u16 app_tag,
-                   bool inc_reftag)
-{
-    unsigned char *p = buf;
-    int i;
-
-    for (i = 0; i < unit_size; i += dev->prot_length) {
-        *((u16 *)&p[0]) = 0xffff;
-        *((__be16 *)&p[2]) = cpu_to_be16(app_tag);
-        *((__be32 *)&p[4]) = cpu_to_be32(*ref_tag);
-
-        if (inc_reftag)
-            (*ref_tag)++;
-
-        p += dev->prot_length;
-    }
-}
-
 static int fd_format_prot(struct se_device *dev)
 {
     struct fd_dev *fd_dev = FD_DEV(dev);
@@ -880,10 +861,8 @@ static int fd_format_prot(struct se_device *dev)
     sector_t prot_length, prot;
     unsigned char *buf;
     loff_t pos = 0;
-    u32 ref_tag = 0;
     int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
     int rc, ret = 0, size, len;
-    bool inc_reftag = false;
 
     if (!dev->dev_attrib.pi_prot_type) {
         pr_err("Unable to format_prot while pi_prot_type == 0\n");
@@ -894,37 +873,20 @@ static int fd_format_prot(struct se_device *dev)
         return -ENODEV;
     }
 
-    switch (dev->dev_attrib.pi_prot_type) {
-    case TARGET_DIF_TYPE3_PROT:
-        ref_tag = 0xffffffff;
-        break;
-    case TARGET_DIF_TYPE2_PROT:
-    case TARGET_DIF_TYPE1_PROT:
-        inc_reftag = true;
-        break;
-    default:
-        break;
-    }
-
     buf = vzalloc(unit_size);
     if (!buf) {
         pr_err("Unable to allocate FILEIO prot buf\n");
         return -ENOMEM;
     }
-
     prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
     size = prot_length;
 
     pr_debug("Using FILEIO prot_length: %llu\n",
          (unsigned long long)prot_length);
 
+    memset(buf, 0xff, unit_size);
     for (prot = 0; prot < prot_length; prot += unit_size) {
-
-        fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff,
-                   inc_reftag);
-
         len = min(unit_size, size);
-
         rc = kernel_write(prot_fd, buf, len, pos);
         if (rc != len) {
             pr_err("vfs_write to prot file failed: %d\n", rc);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 554d4f75a75a..9e0232cca92e 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -203,10 +203,9 @@ static void iblock_free_device(struct se_device *dev)
 
     if (ib_dev->ibd_bd != NULL)
         blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
-    if (ib_dev->ibd_bio_set != NULL) {
-        bioset_integrity_free(ib_dev->ibd_bio_set);
+    if (ib_dev->ibd_bio_set != NULL)
         bioset_free(ib_dev->ibd_bio_set);
-    }
+
     kfree(ib_dev);
 }
 
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 66a5aba5a0d9..b920db3388cd 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -242,7 +242,7 @@ static void rd_release_prot_space(struct rd_dev *rd_dev)
     rd_dev->sg_prot_count = 0;
 }
 
-static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
+static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
 {
     struct rd_dev_sg_table *sg_table;
     u32 total_sg_needed, sg_tables;
@@ -252,8 +252,13 @@ static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
 
     if (rd_dev->rd_flags & RDF_NULLIO)
         return 0;
-
-    total_sg_needed = rd_dev->rd_page_count / prot_length;
+    /*
+     * prot_length=8byte dif data
+     * tot sg needed = rd_page_count * (PGSZ/block_size) *
+     *                 (prot_length/block_size) + pad
+     * PGSZ canceled each other.
+     */
+    total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
 
     sg_tables = (total_sg_needed / max_sg_per_table) + 1;
 
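
The sizing comment above is easier to follow with numbers plugged in: every data block needs prot_length bytes of PI, and the PAGE_SIZE factors cancel, leaving rd_page_count * prot_length / block_size pages of protection space plus one page of pad. A standalone check using assumed example values (4K pages, 512-byte blocks, the 8-byte DIF tuple, and a 1024-page ramdisk):

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_size = 4096, block_size = 512, prot_length = 8;
        unsigned long rd_page_count = 1024;    /* hypothetical ramdisk size */

        unsigned long data_blocks = rd_page_count * (page_size / block_size);
        unsigned long prot_bytes  = data_blocks * prot_length;
        unsigned long prot_pages  = prot_bytes / page_size;

        /* PAGE_SIZE cancels: same as rd_page_count * prot_length / block_size */
        printf("long form: %lu, short form: %lu, with pad: %lu\n",
               prot_pages,
               rd_page_count * prot_length / block_size,
               rd_page_count * prot_length / block_size + 1);
        return 0;
    }
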
@@ -606,7 +611,8 @@ static int rd_init_prot(struct se_device *dev)
     if (!dev->dev_attrib.pi_prot_type)
         return 0;
 
-    return rd_build_prot_space(rd_dev, dev->prot_length);
+    return rd_build_prot_space(rd_dev, dev->prot_length,
+                   dev->dev_attrib.block_size);
 }
 
 static void rd_free_prot(struct se_device *dev)
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 77e6531fb0a1..e0229592ec55 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -89,6 +89,7 @@ static sense_reason_t
 sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 {
     struct se_device *dev = cmd->se_dev;
+    struct se_session *sess = cmd->se_sess;
     unsigned char *rbuf;
     unsigned char buf[32];
     unsigned long long blocks = dev->transport->get_blocks(dev);
@@ -109,8 +110,10 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
     /*
      * Set P_TYPE and PROT_EN bits for DIF support
      */
-    if (dev->dev_attrib.pi_prot_type)
-        buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+    if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+        if (dev->dev_attrib.pi_prot_type)
+            buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+    }
 
     if (dev->transport->get_lbppbe)
         buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
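
For reference, byte 12 of the READ CAPACITY (16) payload encodes P_TYPE in bits 3:1 and PROT_EN in bit 0, which is exactly what the (pi_prot_type - 1) << 1 | 0x1 expression builds. A standalone sketch of the resulting byte for the three DIF types:

    #include <stdio.h>

    int main(void)
    {
        int pi_prot_type;

        /* READ CAPACITY (16) byte 12: P_TYPE in bits 3:1, PROT_EN in bit 0 */
        for (pi_prot_type = 1; pi_prot_type <= 3; pi_prot_type++)
            printf("DIF Type%d -> byte 12 = 0x%02x\n", pi_prot_type,
                   (pi_prot_type - 1) << 1 | 0x1);
        return 0;   /* prints 0x01, 0x03, 0x05 */
    }
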
@@ -425,13 +428,14 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
         goto out;
     }
 
-    write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+    write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
                GFP_KERNEL);
     if (!write_sg) {
         pr_err("Unable to allocate compare_and_write sg\n");
         ret = TCM_OUT_OF_RESOURCES;
         goto out;
     }
+    sg_init_table(write_sg, cmd->t_data_nents);
     /*
      * Setup verify and write data payloads from total NumberLBAs.
      */
@@ -569,30 +573,85 @@ sbc_compare_and_write(struct se_cmd *cmd)
     return TCM_NO_SENSE;
 }
 
+static int
+sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
+               bool is_write, struct se_cmd *cmd)
+{
+    if (is_write) {
+        cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
+                     TARGET_PROT_DOUT_INSERT;
+        switch (protect) {
+        case 0x0:
+        case 0x3:
+            cmd->prot_checks = 0;
+            break;
+        case 0x1:
+        case 0x5:
+            cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+            if (prot_type == TARGET_DIF_TYPE1_PROT)
+                cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+            break;
+        case 0x2:
+            if (prot_type == TARGET_DIF_TYPE1_PROT)
+                cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+            break;
+        case 0x4:
+            cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+            break;
+        default:
+            pr_err("Unsupported protect field %d\n", protect);
+            return -EINVAL;
+        }
+    } else {
+        cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
+                     TARGET_PROT_DIN_STRIP;
+        switch (protect) {
+        case 0x0:
+        case 0x1:
+        case 0x5:
+            cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+            if (prot_type == TARGET_DIF_TYPE1_PROT)
+                cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+            break;
+        case 0x2:
+            if (prot_type == TARGET_DIF_TYPE1_PROT)
+                cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+            break;
+        case 0x3:
+            cmd->prot_checks = 0;
+            break;
+        case 0x4:
+            cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+            break;
+        default:
+            pr_err("Unsupported protect field %d\n", protect);
+            return -EINVAL;
+        }
+    }
+
+    return 0;
+}
+
 static bool
 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
-           u32 sectors)
+           u32 sectors, bool is_write)
 {
-    if (!cmd->t_prot_sg || !cmd->t_prot_nents)
+    u8 protect = cdb[1] >> 5;
+
+    if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
         return true;
 
     switch (dev->dev_attrib.pi_prot_type) {
     case TARGET_DIF_TYPE3_PROT:
-        if (!(cdb[1] & 0xe0))
-            return true;
-
         cmd->reftag_seed = 0xffffffff;
         break;
     case TARGET_DIF_TYPE2_PROT:
-        if (cdb[1] & 0xe0)
+        if (protect)
             return false;
 
         cmd->reftag_seed = cmd->t_task_lba;
         break;
     case TARGET_DIF_TYPE1_PROT:
-        if (!(cdb[1] & 0xe0))
-            return true;
-
         cmd->reftag_seed = cmd->t_task_lba;
         break;
     case TARGET_DIF_TYPE0_PROT:
@@ -600,9 +659,15 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
         return true;
     }
 
+    if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
+                   is_write, cmd))
+        return false;
+
     cmd->prot_type = dev->dev_attrib.pi_prot_type;
     cmd->prot_length = dev->prot_length * sectors;
-    cmd->prot_handover = PROT_SEPERATED;
+    pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n",
+         __func__, cmd->prot_type, cmd->prot_length,
+         cmd->prot_op, cmd->prot_checks);
 
     return true;
 }
@@ -628,7 +693,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
         sectors = transport_get_sectors_10(cdb);
         cmd->t_task_lba = transport_lba_32(cdb);
 
-        if (!sbc_check_prot(dev, cmd, cdb, sectors))
+        if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
             return TCM_UNSUPPORTED_SCSI_OPCODE;
 
         cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -639,7 +704,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
         sectors = transport_get_sectors_12(cdb);
         cmd->t_task_lba = transport_lba_32(cdb);
 
-        if (!sbc_check_prot(dev, cmd, cdb, sectors))
+        if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
             return TCM_UNSUPPORTED_SCSI_OPCODE;
 
         cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -650,7 +715,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
         sectors = transport_get_sectors_16(cdb);
         cmd->t_task_lba = transport_lba_64(cdb);
 
-        if (!sbc_check_prot(dev, cmd, cdb, sectors))
+        if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
             return TCM_UNSUPPORTED_SCSI_OPCODE;
 
         cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -669,7 +734,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
         sectors = transport_get_sectors_10(cdb);
         cmd->t_task_lba = transport_lba_32(cdb);
 
-        if (!sbc_check_prot(dev, cmd, cdb, sectors))
+        if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
             return TCM_UNSUPPORTED_SCSI_OPCODE;
 
         if (cdb[1] & 0x8)
@@ -682,7 +747,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
         sectors = transport_get_sectors_12(cdb);
         cmd->t_task_lba = transport_lba_32(cdb);
 
-        if (!sbc_check_prot(dev, cmd, cdb, sectors))
+        if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
             return TCM_UNSUPPORTED_SCSI_OPCODE;
 
         if (cdb[1] & 0x8)
@@ -695,7 +760,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
         sectors = transport_get_sectors_16(cdb);
         cmd->t_task_lba = transport_lba_64(cdb);
 
-        if (!sbc_check_prot(dev, cmd, cdb, sectors))
+        if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
             return TCM_UNSUPPORTED_SCSI_OPCODE;
 
         if (cdb[1] & 0x8)
@@ -1031,6 +1096,50 @@ err:
 }
 EXPORT_SYMBOL(sbc_execute_unmap);
 
+void
+sbc_dif_generate(struct se_cmd *cmd)
+{
+    struct se_device *dev = cmd->se_dev;
+    struct se_dif_v1_tuple *sdt;
+    struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+    sector_t sector = cmd->t_task_lba;
+    void *daddr, *paddr;
+    int i, j, offset = 0;
+
+    for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+        daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+        paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+        for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+            if (offset >= psg->length) {
+                kunmap_atomic(paddr);
+                psg = sg_next(psg);
+                paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+                offset = 0;
+            }
+
+            sdt = paddr + offset;
+            sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
+                        dev->dev_attrib.block_size));
+            if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+                sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
+            sdt->app_tag = 0;
+
+            pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
+                 " app_tag: 0x%04x ref_tag: %u\n",
+                 (unsigned long long)sector, sdt->guard_tag,
+                 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+            sector++;
+            offset += sizeof(struct se_dif_v1_tuple);
+        }
+
+        kunmap_atomic(paddr);
+        kunmap_atomic(daddr);
+    }
+}
+
 static sense_reason_t
 sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
           const void *p, sector_t sector, unsigned int ei_lba)
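
What WRITE_INSERT emulation writes per block is an 8-byte tuple: a CRC16 guard over the block data, a zero application tag, and, for Type1, the low 32 bits of the LBA as the reference tag, all big-endian on the wire. A standalone sketch with a bitwise CRC using the T10-DIF polynomial 0x8BB7 (init 0, no reflection, no final xor), which should match what the kernel's crc_t10dif() computes; the block contents and LBA are assumed example data:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static uint16_t crc_t10dif(const uint8_t *buf, size_t len)
    {
        uint16_t crc = 0;
        size_t i;
        int j;

        for (i = 0; i < len; i++) {
            crc ^= (uint16_t)buf[i] << 8;
            for (j = 0; j < 8; j++)
                crc = (crc & 0x8000) ? (crc << 1) ^ 0x8BB7 : crc << 1;
        }
        return crc;
    }

    int main(void)
    {
        uint8_t block[512];
        uint64_t lba = 12345;   /* hypothetical Type1 sector */

        memset(block, 0xab, sizeof(block));

        /* guard over the data, app tag 0, low 32 LBA bits as ref tag */
        printf("guard=0x%04x app=0x0000 ref=%u\n",
               crc_t10dif(block, sizeof(block)),
               (unsigned int)(lba & 0xffffffff));
        return 0;
    }
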
@@ -1162,9 +1271,9 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
 }
 EXPORT_SYMBOL(sbc_dif_verify_write);
 
-sense_reason_t
-sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+static sense_reason_t
+__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
             unsigned int ei_lba, struct scatterlist *sg, int sg_off)
 {
     struct se_device *dev = cmd->se_dev;
     struct se_dif_v1_tuple *sdt;
@@ -1217,8 +1326,31 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
         kunmap_atomic(paddr);
         kunmap_atomic(daddr);
     }
-    sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
 
     return 0;
 }
+
+sense_reason_t
+sbc_dif_read_strip(struct se_cmd *cmd)
+{
+    struct se_device *dev = cmd->se_dev;
+    u32 sectors = cmd->prot_length / dev->prot_length;
+
+    return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
+                     cmd->t_prot_sg, 0);
+}
+
+sense_reason_t
+sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+            unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+    sense_reason_t rc;
+
+    rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
+    if (rc)
+        return rc;
+
+    sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
+    return 0;
+}
 EXPORT_SYMBOL(sbc_dif_verify_read);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 3bebc71ea033..8653666612a8 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -71,6 +71,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
 {
     struct se_lun *lun = cmd->se_lun;
     struct se_device *dev = cmd->se_dev;
+    struct se_session *sess = cmd->se_sess;
 
     /* Set RMB (removable media) for tape devices */
     if (dev->transport->get_device_type(dev) == TYPE_TAPE)
@@ -101,10 +102,13 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
     if (dev->dev_attrib.emulate_3pc)
         buf[5] |= 0x8;
     /*
-     * Set Protection (PROTECT) bit when DIF has been enabled.
+     * Set Protection (PROTECT) bit when DIF has been enabled on the
+     * device, and the transport supports VERIFY + PASS.
      */
-    if (dev->dev_attrib.pi_prot_type)
-        buf[5] |= 0x1;
+    if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+        if (dev->dev_attrib.pi_prot_type)
+            buf[5] |= 0x1;
+    }
 
     buf[7] = 0x2; /* CmdQue=1 */
 
@@ -473,16 +477,19 @@ static sense_reason_t
 spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
 
 	buf[3] = 0x3c;
 	/*
 	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
 	 * only for TYPE3 protection.
 	 */
-	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
-		buf[4] = 0x5;
-	else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
-		buf[4] = 0x4;
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+			buf[4] = 0x5;
+		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
+			buf[4] = 0x4;
+	}
 
 	/* Set HEADSUP, ORDSUP, SIMPSUP */
 	buf[5] = 0x07;
@@ -762,7 +769,7 @@ out:
 	return ret;
 }
 
-static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
 {
 	p[0] = 0x01;
 	p[1] = 0x0a;
@@ -775,8 +782,11 @@ out:
 	return 12;
 }
 
-static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
 {
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+
 	p[0] = 0x0a;
 	p[1] = 0x0a;
 
@@ -868,8 +878,10 @@ static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
 	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
 	 * TAG field.
 	 */
-	if (dev->dev_attrib.pi_prot_type)
-		p[5] |= 0x80;
+	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+		if (dev->dev_attrib.pi_prot_type)
+			p[5] |= 0x80;
+	}
 
 	p[8] = 0xff;
 	p[9] = 0xff;
@@ -879,8 +891,10 @@ out:
 	return 12;
 }
 
-static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
 {
+	struct se_device *dev = cmd->se_dev;
+
 	p[0] = 0x08;
 	p[1] = 0x12;
 
@@ -896,7 +910,7 @@ out:
 	return 20;
 }
 
-static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
+static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
 {
 	p[0] = 0x1c;
 	p[1] = 0x0a;
@@ -912,7 +926,7 @@ out:
 static struct {
 	uint8_t page;
 	uint8_t subpage;
-	int (*emulate)(struct se_device *, u8, unsigned char *);
+	int (*emulate)(struct se_cmd *, u8, unsigned char *);
 } modesense_handlers[] = {
 	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
 	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
@@ -1050,7 +1064,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 		 * the only two possibilities).
 		 */
 		if ((modesense_handlers[i].subpage & ~subpage) == 0) {
-			ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
+			ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
 			if (!ten && length + ret >= 255)
 				break;
 			length += ret;
@@ -1063,7 +1077,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
 		if (modesense_handlers[i].page == page &&
 		    modesense_handlers[i].subpage == subpage) {
-			length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
+			length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
 			goto set_length;
 		}
 
@@ -1095,7 +1109,6 @@ set_length:
 
 static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_dev;
 	char *cdb = cmd->t_task_cdb;
 	bool ten = cdb[0] == MODE_SELECT_10;
 	int off = ten ? 8 : 4;
@@ -1131,7 +1144,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
 		if (modesense_handlers[i].page == page &&
 		    modesense_handlers[i].subpage == subpage) {
 			memset(tbuf, 0, SE_MODE_PAGE_BUF);
-			length = modesense_handlers[i].emulate(dev, 0, tbuf);
+			length = modesense_handlers[i].emulate(cmd, 0, tbuf);
 			goto check_contents;
 		}
 
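
The three target_core_spc.c changes above share one gating pattern: PI bits (the standard-INQUIRY PROTECT bit, the EVPD 0x86 GRD_CHK/REF_CHK bits, and the control mode page ATO bit) are reported only when the session's fabric advertised DIN_PASS/DOUT_PASS at creation time. A condensed sketch of the predicate, assuming the flags and attributes used in the diff (the helper name is invented):

static bool spc_pi_visible(struct se_session *sess, struct se_device *dev)
{
	/* The fabric must be able to pass PI to/from the initiator... */
	if (!(sess->sup_prot_ops & (TARGET_PROT_DIN_PASS |
				    TARGET_PROT_DOUT_PASS)))
		return false;
	/* ...and the backend must actually have a DIF type enabled. */
	return dev->dev_attrib.pi_prot_type != 0;
}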
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 70c638f730af..f7cd95e8111a 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -87,14 +87,17 @@ static void core_tmr_handle_tas_abort(
 	struct se_cmd *cmd,
 	int tas)
 {
+	bool remove = true;
 	/*
 	 * TASK ABORTED status (TAS) bit support
 	 */
 	if ((tmr_nacl &&
-	     (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
+	     (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
+		remove = false;
 		transport_send_task_abort(cmd);
+	}
 
-	transport_cmd_finish_abort(cmd, 0);
+	transport_cmd_finish_abort(cmd, remove);
 }
 
 static int target_check_cdb_and_preempt(struct list_head *list,
@@ -127,6 +130,11 @@ void core_tmr_abort_task(
 
 		if (dev != se_cmd->se_dev)
 			continue;
+
+		/* skip se_cmd associated with tmr */
+		if (tmr->task_cmd == se_cmd)
+			continue;
+
 		ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
 		if (tmr->ref_task_tag != ref_tag)
 			continue;
@@ -150,18 +158,9 @@ void core_tmr_abort_task(
 
 		cancel_work_sync(&se_cmd->work);
 		transport_wait_for_tasks(se_cmd);
-		/*
-		 * Now send SAM_STAT_TASK_ABORTED status for the referenced
-		 * se_cmd descriptor..
-		 */
-		transport_send_task_abort(se_cmd);
-		/*
-		 * Also deal with possible extra acknowledge reference..
-		 */
-		if (se_cmd->se_cmd_flags & SCF_ACK_KREF)
-			target_put_sess_cmd(se_sess, se_cmd);
 
 		target_put_sess_cmd(se_sess, se_cmd);
+		transport_cmd_finish_abort(se_cmd, true);
 
 		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
 				" ref_tag: %d\n", ref_tag);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 2956250b7225..d4b98690a736 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -235,7 +235,7 @@ void transport_subsystem_check_init(void)
 	sub_api_initialized = 1;
 }
 
-struct se_session *transport_init_session(void)
+struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
 {
 	struct se_session *se_sess;
 
@@ -251,6 +251,7 @@ struct se_session *transport_init_session(void)
 	INIT_LIST_HEAD(&se_sess->sess_wait_list);
 	spin_lock_init(&se_sess->sess_cmd_lock);
 	kref_init(&se_sess->sess_kref);
+	se_sess->sup_prot_ops = sup_prot_ops;
 
 	return se_sess;
 }
@@ -288,12 +289,13 @@ int transport_alloc_session_tags(struct se_session *se_sess,
 EXPORT_SYMBOL(transport_alloc_session_tags);
 
 struct se_session *transport_init_session_tags(unsigned int tag_num,
-					       unsigned int tag_size)
+					       unsigned int tag_size,
+					       enum target_prot_op sup_prot_ops)
 {
 	struct se_session *se_sess;
 	int rc;
 
-	se_sess = transport_init_session();
+	se_sess = transport_init_session(sup_prot_ops);
 	if (IS_ERR(se_sess))
 		return se_sess;
 
@@ -603,6 +605,15 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
+	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+		transport_lun_remove_cmd(cmd);
+	/*
+	 * Allow the fabric driver to unmap any resources before
+	 * releasing the descriptor via TFO->release_cmd()
+	 */
+	if (remove)
+		cmd->se_tfo->aborted_task(cmd);
+
 	if (transport_cmd_check_stop_to_fabric(cmd))
 		return;
 	if (remove)
@@ -1365,6 +1376,13 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
 		target_put_sess_cmd(se_sess, se_cmd);
 		return 0;
 	}
+
+	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
+	if (rc != 0) {
+		transport_generic_request_failure(se_cmd, rc);
+		return 0;
+	}
+
 	/*
 	 * Save pointers for SGLs containing protection information,
 	 * if present.
@@ -1374,11 +1392,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
 		se_cmd->t_prot_nents = sgl_prot_count;
 	}
 
-	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
-	if (rc != 0) {
-		transport_generic_request_failure(se_cmd, rc);
-		return 0;
-	}
 	/*
 	 * When a non zero sgl_count has been passed perform SGL passthrough
 	 * mapping for pre-allocated fabric memory instead of having target
@@ -1754,6 +1767,15 @@ void target_execute_cmd(struct se_cmd *cmd)
 	cmd->t_state = TRANSPORT_PROCESSING;
 	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
 	spin_unlock_irq(&cmd->t_state_lock);
+	/*
+	 * Perform WRITE_INSERT of PI using software emulation when backend
+	 * device has PI enabled, if the transport has not already generated
+	 * PI using hardware WRITE_INSERT offload.
+	 */
+	if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) {
+		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
+			sbc_dif_generate(cmd);
+	}
 
 	if (target_handle_task_attr(cmd)) {
 		spin_lock_irq(&cmd->t_state_lock);
@@ -1883,6 +1905,21 @@ static void transport_handle_queue_full(
 	schedule_work(&cmd->se_dev->qf_work_queue);
 }
 
+static bool target_check_read_strip(struct se_cmd *cmd)
+{
+	sense_reason_t rc;
+
+	if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
+		rc = sbc_dif_read_strip(cmd);
+		if (rc) {
+			cmd->pi_err = rc;
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static void target_complete_ok_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -1947,6 +1984,22 @@ static void target_complete_ok_work(struct work_struct *work)
 				cmd->data_length;
 		}
 		spin_unlock(&cmd->se_lun->lun_sep_lock);
+		/*
+		 * Perform READ_STRIP of PI using software emulation when
+		 * backend had PI enabled, if the transport will not be
+		 * performing hardware READ_STRIP offload.
+		 */
+		if (cmd->prot_op == TARGET_PROT_DIN_STRIP &&
+		    target_check_read_strip(cmd)) {
+			ret = transport_send_check_condition_and_sense(cmd,
+						cmd->pi_err, 0);
+			if (ret == -EAGAIN || ret == -ENOMEM)
+				goto queue_full;
+
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop_to_fabric(cmd);
+			return;
+		}
 
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_data_in(cmd);
@@ -2039,6 +2092,10 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
 	cmd->t_bidi_data_sg = NULL;
 	cmd->t_bidi_data_nents = 0;
+
+	transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+	cmd->t_prot_sg = NULL;
+	cmd->t_prot_nents = 0;
 }
 
 /**
@@ -2202,6 +2259,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	}
 
+	if (cmd->prot_op != TARGET_PROT_NORMAL) {
+		ret = target_alloc_sgl(&cmd->t_prot_sg,
+				       &cmd->t_prot_nents,
+				       cmd->prot_length, true);
+		if (ret < 0)
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
 	ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
 			       cmd->data_length, zero_flag);
 	if (ret < 0)
@@ -2770,13 +2835,17 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	if (!(cmd->transport_state & CMD_T_ABORTED))
 		return 0;
 
-	if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+	/*
+	 * If cmd has been aborted but either no status is to be sent or it has
+	 * already been sent, just return
+	 */
+	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
 		return 1;
 
 	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
 		 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
 
-	cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 	trace_target_cmd_complete(cmd);
 	cmd->se_tfo->queue_status(cmd);
@@ -2790,7 +2859,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	unsigned long flags;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
+	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
@@ -2805,6 +2874,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	if (cmd->data_direction == DMA_TO_DEVICE) {
 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
 			cmd->transport_state |= CMD_T_ABORTED;
+			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
 			smp_mb__after_atomic_inc();
 			return;
 		}
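
Every non-RDMA fabric touched by this series passes TARGET_PROT_NORMAL, so the software WRITE_INSERT/READ_STRIP paths added above run whenever the backend has DIF enabled. A transport with hardware PI offload would instead advertise it when creating the session, roughly as below (a sketch of a hypothetical call site; only the flags and the two transport_init_session* signatures come from the series):

/* Fabric nexus setup claiming hardware insert + strip, so the core
 * skips sbc_dif_generate() and sbc_dif_read_strip() for this session.
 */
struct se_session *se_sess;

se_sess = transport_init_session(TARGET_PROT_DOUT_INSERT |
				 TARGET_PROT_DIN_STRIP);
if (IS_ERR(se_sess))
	return PTR_ERR(se_sess);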
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 752863acecb8..a0bcfd3e7e7d 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -94,20 +94,19 @@ struct ft_lun {
  */
 struct ft_tpg {
 	u32 index;
-	struct ft_lport_acl *lport_acl;
+	struct ft_lport_wwn *lport_wwn;
 	struct ft_tport *tport;	/* active tport or NULL */
-	struct list_head list;	/* linkage in ft_lport_acl tpg_list */
 	struct list_head lun_list;	/* head of LUNs */
 	struct se_portal_group se_tpg;
 	struct workqueue_struct *workqueue;
 };
 
-struct ft_lport_acl {
+struct ft_lport_wwn {
 	u64 wwpn;
 	char name[FT_NAMELEN];
-	struct list_head list;
-	struct list_head tpg_list;
-	struct se_wwn fc_lport_wwn;
+	struct list_head ft_wwn_node;
+	struct ft_tpg *tpg;
+	struct se_wwn se_wwn;
 };
 
 /*
@@ -128,7 +127,6 @@ struct ft_cmd {
 	u32 sg_cnt;	/* No. of item in scatterlist */
 };
 
-extern struct list_head ft_lport_list;
 extern struct mutex ft_lport_lock;
 extern struct fc4_prov ft_prov;
 extern struct target_fabric_configfs *ft_configfs;
@@ -163,6 +161,7 @@ int ft_write_pending_status(struct se_cmd *);
 u32 ft_get_task_tag(struct se_cmd *);
 int ft_get_cmd_state(struct se_cmd *);
 void ft_queue_tm_resp(struct se_cmd *);
+void ft_aborted_task(struct se_cmd *);
 
 /*
  * other internal functions.
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 8b2c1aaf81de..01cf37f212c3 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -426,6 +426,11 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
 	ft_send_resp_code(cmd, code);
 }
 
+void ft_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static void ft_send_work(struct work_struct *work);
 
 /*
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index e879da81ad93..efdcb9663a1a 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -50,7 +50,7 @@
 
 struct target_fabric_configfs *ft_configfs;
 
-LIST_HEAD(ft_lport_list);
+static LIST_HEAD(ft_wwn_list);
 DEFINE_MUTEX(ft_lport_lock);
 
 unsigned int ft_debug_logging;
@@ -298,7 +298,7 @@ static struct se_portal_group *ft_add_tpg(
 	struct config_group *group,
 	const char *name)
 {
-	struct ft_lport_acl *lacl;
+	struct ft_lport_wwn *ft_wwn;
 	struct ft_tpg *tpg;
 	struct workqueue_struct *wq;
 	unsigned long index;
@@ -318,12 +318,17 @@ static struct se_portal_group *ft_add_tpg(
 	if (index > UINT_MAX)
 		return NULL;
 
-	lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
+	if ((index != 1)) {
+		pr_err("Error, a single TPG=1 is used for HW port mappings\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
 	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 	if (!tpg)
 		return NULL;
 	tpg->index = index;
-	tpg->lport_acl = lacl;
+	tpg->lport_wwn = ft_wwn;
 	INIT_LIST_HEAD(&tpg->lun_list);
 
 	wq = alloc_workqueue("tcm_fc", 0, 1);
@@ -342,7 +347,7 @@ static struct se_portal_group *ft_add_tpg(
 	tpg->workqueue = wq;
 
 	mutex_lock(&ft_lport_lock);
-	list_add_tail(&tpg->list, &lacl->tpg_list);
+	ft_wwn->tpg = tpg;
 	mutex_unlock(&ft_lport_lock);
 
 	return &tpg->se_tpg;
@@ -351,6 +356,7 @@ static struct se_portal_group *ft_add_tpg(
 static void ft_del_tpg(struct se_portal_group *se_tpg)
 {
 	struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
+	struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;
 
 	pr_debug("del tpg %s\n",
 		 config_item_name(&tpg->se_tpg.tpg_group.cg_item));
@@ -361,7 +367,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
 	synchronize_rcu();
 
 	mutex_lock(&ft_lport_lock);
-	list_del(&tpg->list);
+	ft_wwn->tpg = NULL;
 	if (tpg->tport) {
 		tpg->tport->tpg = NULL;
 		tpg->tport = NULL;
@@ -380,15 +386,11 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
  */
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
 {
-	struct ft_lport_acl *lacl;
-	struct ft_tpg *tpg;
+	struct ft_lport_wwn *ft_wwn;
 
-	list_for_each_entry(lacl, &ft_lport_list, list) {
-		if (lacl->wwpn == lport->wwpn) {
-			list_for_each_entry(tpg, &lacl->tpg_list, list)
-				return tpg; /* XXX for now return first entry */
-			return NULL;
-		}
+	list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
+		if (ft_wwn->wwpn == lport->wwpn)
+			return ft_wwn->tpg;
 	}
 	return NULL;
 }
@@ -401,50 +403,49 @@ struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
  * Add lport to allowed config.
  * The name is the WWPN in lower-case ASCII, colon-separated bytes.
  */
-static struct se_wwn *ft_add_lport(
+static struct se_wwn *ft_add_wwn(
 	struct target_fabric_configfs *tf,
 	struct config_group *group,
 	const char *name)
 {
-	struct ft_lport_acl *lacl;
-	struct ft_lport_acl *old_lacl;
+	struct ft_lport_wwn *ft_wwn;
+	struct ft_lport_wwn *old_ft_wwn;
 	u64 wwpn;
 
-	pr_debug("add lport %s\n", name);
+	pr_debug("add wwn %s\n", name);
 	if (ft_parse_wwn(name, &wwpn, 1) < 0)
 		return NULL;
-	lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
-	if (!lacl)
+	ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
+	if (!ft_wwn)
 		return NULL;
-	lacl->wwpn = wwpn;
-	INIT_LIST_HEAD(&lacl->tpg_list);
+	ft_wwn->wwpn = wwpn;
 
 	mutex_lock(&ft_lport_lock);
-	list_for_each_entry(old_lacl, &ft_lport_list, list) {
-		if (old_lacl->wwpn == wwpn) {
+	list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
+		if (old_ft_wwn->wwpn == wwpn) {
 			mutex_unlock(&ft_lport_lock);
-			kfree(lacl);
+			kfree(ft_wwn);
 			return NULL;
 		}
 	}
-	list_add_tail(&lacl->list, &ft_lport_list);
-	ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
+	list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
+	ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
 	mutex_unlock(&ft_lport_lock);
 
-	return &lacl->fc_lport_wwn;
+	return &ft_wwn->se_wwn;
 }
 
-static void ft_del_lport(struct se_wwn *wwn)
+static void ft_del_wwn(struct se_wwn *wwn)
 {
-	struct ft_lport_acl *lacl = container_of(wwn,
-			struct ft_lport_acl, fc_lport_wwn);
+	struct ft_lport_wwn *ft_wwn = container_of(wwn,
+			struct ft_lport_wwn, se_wwn);
 
-	pr_debug("del lport %s\n", lacl->name);
+	pr_debug("del wwn %s\n", ft_wwn->name);
 	mutex_lock(&ft_lport_lock);
-	list_del(&lacl->list);
+	list_del(&ft_wwn->ft_wwn_node);
 	mutex_unlock(&ft_lport_lock);
 
-	kfree(lacl);
+	kfree(ft_wwn);
 }
 
 static ssize_t ft_wwn_show_attr_version(
@@ -471,7 +472,7 @@ static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
 	struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
 
-	return tpg->lport_acl->name;
+	return tpg->lport_wwn->name;
 }
 
 static u16 ft_get_tag(struct se_portal_group *se_tpg)
@@ -536,12 +537,13 @@ static struct target_core_fabric_ops ft_fabric_ops = {
 	.queue_data_in = ft_queue_data_in,
 	.queue_status = ft_queue_status,
 	.queue_tm_rsp = ft_queue_tm_resp,
+	.aborted_task = ft_aborted_task,
 	/*
 	 * Setup function pointers for generic logic in
 	 * target_core_fabric_configfs.c
 	 */
-	.fabric_make_wwn = &ft_add_lport,
-	.fabric_drop_wwn = &ft_del_lport,
+	.fabric_make_wwn = &ft_add_wwn,
+	.fabric_drop_wwn = &ft_del_wwn,
 	.fabric_make_tpg = &ft_add_tpg,
 	.fabric_drop_tpg = &ft_del_tpg,
 	.fabric_post_link = NULL,
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index ae52c08dad09..21ce50880c79 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -51,7 +51,7 @@ static void ft_sess_delete_all(struct ft_tport *);
  * Lookup or allocate target local port.
  * Caller holds ft_lport_lock.
  */
-static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+static struct ft_tport *ft_tport_get(struct fc_lport *lport)
 {
 	struct ft_tpg *tpg;
 	struct ft_tport *tport;
@@ -68,6 +68,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
 
 	if (tport) {
 		tport->tpg = tpg;
+		tpg->tport = tport;
 		return tport;
 	}
 
@@ -114,7 +115,7 @@ static void ft_tport_delete(struct ft_tport *tport)
 void ft_lport_add(struct fc_lport *lport, void *arg)
 {
 	mutex_lock(&ft_lport_lock);
-	ft_tport_create(lport);
+	ft_tport_get(lport);
 	mutex_unlock(&ft_lport_lock);
 }
 
@@ -211,7 +212,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
 		return NULL;
 
 	sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
-						    sizeof(struct ft_cmd));
+						    sizeof(struct ft_cmd),
+						    TARGET_PROT_NORMAL);
 	if (IS_ERR(sess->se_sess)) {
 		kfree(sess);
 		return NULL;
@@ -350,7 +352,7 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
 	struct ft_node_acl *acl;
 	u32 fcp_parm;
 
-	tport = ft_tport_create(rdata->local_port);
+	tport = ft_tport_get(rdata->local_port);
 	if (!tport)
 		goto not_target;	/* not a target for this local port */
 
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index 460c266b8e24..f058c0368d61 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -1471,6 +1471,11 @@ static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
 {
 }
 
+static void usbg_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static const char *usbg_check_wwn(const char *name)
 {
 	const char *n;
@@ -1726,7 +1731,7 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
 		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
 		goto err_unlock;
 	}
-	tv_nexus->tvn_se_sess = transport_init_session();
+	tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
 	if (IS_ERR(tv_nexus->tvn_se_sess))
 		goto err_free;
 
@@ -1897,6 +1902,7 @@ static struct target_core_fabric_ops usbg_ops = {
 	.queue_data_in = usbg_send_read_response,
 	.queue_status = usbg_send_status_response,
 	.queue_tm_rsp = usbg_queue_tm_rsp,
+	.aborted_task = usbg_aborted_task,
 	.check_stop_free = usbg_check_stop_free,
 
 	.fabric_make_wwn = usbg_make_tport,
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e48d4a672580..cf50ce93975b 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -539,6 +539,11 @@ static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
 	return;
 }
 
+static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
+{
+	return;
+}
+
 static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
 {
 	vs->vs_events_nr--;
@@ -1740,7 +1745,8 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
 	 */
 	tv_nexus->tvn_se_sess = transport_init_session_tags(
 					TCM_VHOST_DEFAULT_TAGS,
-					sizeof(struct tcm_vhost_cmd));
+					sizeof(struct tcm_vhost_cmd),
+					TARGET_PROT_NORMAL);
 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
 		mutex_unlock(&tpg->tv_tpg_mutex);
 		kfree(tv_nexus);
@@ -2131,6 +2137,7 @@ static struct target_core_fabric_ops tcm_vhost_ops = {
 	.queue_data_in = tcm_vhost_queue_data_in,
 	.queue_status = tcm_vhost_queue_status,
 	.queue_tm_rsp = tcm_vhost_queue_tm_rsp,
+	.aborted_task = tcm_vhost_aborted_task,
 	/*
 	 * Setup callers for generic logic in target_core_fabric_configfs.c
 	 */
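
For reference, the sup_prot_ops values threaded through the session-init changes above are bit flags, so a fabric can advertise any combination of insert, strip, and pass operations per direction. The definition below is reproduced from include/target/target_core_base.h as it stands at this point in the series (included here for convenience; consult the header for the authoritative copy):

enum target_prot_op {
	TARGET_PROT_NORMAL	= 0,
	TARGET_PROT_DIN_INSERT	= (1 << 0),
	TARGET_PROT_DOUT_INSERT	= (1 << 1),
	TARGET_PROT_DIN_STRIP	= (1 << 2),
	TARGET_PROT_DOUT_STRIP	= (1 << 3),
	TARGET_PROT_DIN_PASS	= (1 << 4),
	TARGET_PROT_DOUT_PASS	= (1 << 5),
};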