author	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-31 18:31:23 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-31 18:31:23 -0500
commit	4e13c5d0212f25d69a97606b9d5a85edb52a7737 (patch)
tree	002f59b9151f42a6388656762f0e7963d08b89ef
parent	deb2a1d29bf0168ff2575e714e5c1f156be663fb (diff)
parent	5259a06ef97068b710f45d092a587e8d740f750f (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - add support for SCSI Referrals (Hannes)
   - add support for T10 DIF into target core (nab + mkp)
   - add support for T10 DIF emulation in FILEIO + RAMDISK backends (Sagi + nab)
   - add support for T10 DIF -> bio_integrity passthrough in IBLOCK backend (nab)
   - prep changes to iser-target for >= v3.15 T10 DIF support (Sagi)
   - add support for qla2xxx N_Port ID Virtualization - NPIV (Saurav + Quinn)
   - allow percpu_ida_alloc() to receive task state bitmask (Kent)
   - fix >= v3.12 iscsi-target session reset hung task regression (nab)
   - fix >= v3.13 percpu_ref se_lun->lun_ref_active race (nab)
   - fix a long-standing network portal creation race (Andy)"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (51 commits)
  target: Fix percpu_ref_put race in transport_lun_remove_cmd
  target/iscsi: Fix network portal creation race
  target: Report bad sector in sense data for DIF errors
  iscsi-target: Convert gfp_t parameter to task state bitmask
  iscsi-target: Fix connection reset hang with percpu_ida_alloc
  percpu_ida: Make percpu_ida_alloc + callers accept task state bitmask
  iscsi-target: Pre-allocate more tags to avoid ack starvation
  qla2xxx: Configure NPIV fc_vport via tcm_qla2xxx_npiv_make_lport
  qla2xxx: Enhancements to enable NPIV support for QLOGIC ISPs with TCM/LIO.
  qla2xxx: Fix scsi_host leak on qlt_lport_register callback failure
  IB/isert: pass scatterlist instead of cmd to fast_reg_mr routine
  IB/isert: Move fastreg descriptor creation to a function
  IB/isert: Avoid frwr notation, user fastreg
  IB/isert: seperate connection protection domains and dma MRs
  tcm_loop: Enable DIF/DIX modes in SCSI host LLD
  target/rd: Add DIF protection into rd_execute_rw
  target/rd: Add support for protection SGL setup + release
  target/rd: Refactor rd_build_device_space + rd_release_device_space
  target/file: Add DIF protection support to fd_execute_rw
  target/file: Add DIF protection init/format support
  ...
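The percpu_ida change called out above is visible in the first hunks below: percpu_ida_alloc() now takes a task state bitmask instead of a gfp_t, so a blocked allocator can sleep interruptibly and back out on a signal rather than hanging uninterruptibly. A minimal sketch of the new calling convention against lib/percpu_ida follows; the pool name and wrapper are hypothetical:

#include <linux/percpu_ida.h>
#include <linux/sched.h>

static struct percpu_ida my_tag_pool;	/* hypothetical pool, percpu_ida_init()'d elsewhere */

static int my_get_tag(bool can_sleep)
{
	int tag;

	/*
	 * TASK_RUNNING: never sleep, fail fast with a negative return.
	 * TASK_UNINTERRUPTIBLE: sleep until a tag is freed.
	 * TASK_INTERRUPTIBLE: sleep, but return early if a signal is
	 * pending -- this is what lets iscsi-target break the session
	 * reset hang described above.
	 */
	tag = percpu_ida_alloc(&my_tag_pool,
			       can_sleep ? TASK_INTERRUPTIBLE : TASK_RUNNING);
	return tag;	/* >= 0 on success, negative on failure */
}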
-rw-r--r--	block/blk-mq-tag.c	6
-rw-r--r--	drivers/infiniband/ulp/isert/ib_isert.c	222
-rw-r--r--	drivers/infiniband/ulp/isert/ib_isert.h	10
-rw-r--r--	drivers/scsi/qla2xxx/qla_attr.c	2
-rw-r--r--	drivers/scsi/qla2xxx/qla_def.h	12
-rw-r--r--	drivers/scsi/qla2xxx/qla_target.c	171
-rw-r--r--	drivers/scsi/qla2xxx/qla_target.h	4
-rw-r--r--	drivers/scsi/qla2xxx/tcm_qla2xxx.c	180
-rw-r--r--	drivers/scsi/qla2xxx/tcm_qla2xxx.h	4
-rw-r--r--	drivers/target/Kconfig	2
-rw-r--r--	drivers/target/iscsi/iscsi_target.c	48
-rw-r--r--	drivers/target/iscsi/iscsi_target_nego.c	2
-rw-r--r--	drivers/target/iscsi/iscsi_target_util.c	9
-rw-r--r--	drivers/target/iscsi/iscsi_target_util.h	2
-rw-r--r--	drivers/target/loopback/tcm_loop.c	20
-rw-r--r--	drivers/target/target_core_alua.c	558
-rw-r--r--	drivers/target/target_core_alua.h	15
-rw-r--r--	drivers/target/target_core_configfs.c	194
-rw-r--r--	drivers/target/target_core_device.c	108
-rw-r--r--	drivers/target/target_core_fabric_configfs.c	2
-rw-r--r--	drivers/target/target_core_file.c	256
-rw-r--r--	drivers/target/target_core_file.h	9
-rw-r--r--	drivers/target/target_core_iblock.c	93
-rw-r--r--	drivers/target/target_core_internal.h	8
-rw-r--r--	drivers/target/target_core_pr.h	5
-rw-r--r--	drivers/target/target_core_rd.c	252
-rw-r--r--	drivers/target/target_core_rd.h	4
-rw-r--r--	drivers/target/target_core_sbc.c	252
-rw-r--r--	drivers/target/target_core_spc.c	114
-rw-r--r--	drivers/target/target_core_tpg.c	10
-rw-r--r--	drivers/target/target_core_transport.c	95
-rw-r--r--	drivers/target/target_core_ua.c	1
-rw-r--r--	drivers/target/target_core_xcopy.c	4
-rw-r--r--	drivers/target/tcm_fc/tfc_cmd.c	2
-rw-r--r--	drivers/target/tcm_fc/tfc_conf.c	6
-rw-r--r--	drivers/vhost/scsi.c	4
-rw-r--r--	include/linux/percpu_ida.h	3
-rw-r--r--	include/scsi/scsi.h	1
-rw-r--r--	include/target/iscsi/iscsi_transport.h	2
-rw-r--r--	include/target/target_core_backend.h	7
-rw-r--r--	include/target/target_core_base.h	82
-rw-r--r--	include/target/target_core_fabric.h	3
-rw-r--r--	lib/percpu_ida.c	21
43 files changed, 2300 insertions, 505 deletions
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index d64a02fb1f73..5d70edc9855f 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -36,7 +36,8 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
 {
 	int tag;
 
-	tag = percpu_ida_alloc(&tags->free_tags, gfp);
+	tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ?
+			       TASK_UNINTERRUPTIBLE : TASK_RUNNING);
 	if (tag < 0)
 		return BLK_MQ_TAG_FAIL;
 	return tag + tags->nr_reserved_tags;
@@ -52,7 +53,8 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
 		return BLK_MQ_TAG_FAIL;
 	}
 
-	tag = percpu_ida_alloc(&tags->reserved_tags, gfp);
+	tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ?
+			TASK_UNINTERRUPTIBLE : TASK_RUNNING);
 	if (tag < 0)
 		return BLK_MQ_TAG_FAIL;
 	return tag;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 9804fca6bf06..2b161be3c1a3 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -47,10 +47,10 @@ static int
 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr);
 static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr);
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
@@ -227,11 +227,11 @@ isert_create_device_ib_res(struct isert_device *device)
 
 	/* asign function handlers */
 	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
-		device->use_frwr = 1;
-		device->reg_rdma_mem = isert_reg_rdma_frwr;
-		device->unreg_rdma_mem = isert_unreg_rdma_frwr;
+		device->use_fastreg = 1;
+		device->reg_rdma_mem = isert_reg_rdma;
+		device->unreg_rdma_mem = isert_unreg_rdma;
 	} else {
-		device->use_frwr = 0;
+		device->use_fastreg = 0;
 		device->reg_rdma_mem = isert_map_rdma;
 		device->unreg_rdma_mem = isert_unmap_cmd;
 	}
@@ -239,9 +239,10 @@ isert_create_device_ib_res(struct isert_device *device)
 	device->cqs_used = min_t(int, num_online_cpus(),
 				 device->ib_device->num_comp_vectors);
 	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
-	pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n",
+	pr_debug("Using %d CQs, device %s supports %d vectors support "
+		 "Fast registration %d\n",
 		 device->cqs_used, device->ib_device->name,
-		 device->ib_device->num_comp_vectors, device->use_frwr);
+		 device->ib_device->num_comp_vectors, device->use_fastreg);
 	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
 				device->cqs_used, GFP_KERNEL);
 	if (!device->cq_desc) {
@@ -250,13 +251,6 @@ isert_create_device_ib_res(struct isert_device *device)
 	}
 	cq_desc = device->cq_desc;
 
-	device->dev_pd = ib_alloc_pd(ib_dev);
-	if (IS_ERR(device->dev_pd)) {
-		ret = PTR_ERR(device->dev_pd);
-		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
-		goto out_cq_desc;
-	}
-
 	for (i = 0; i < device->cqs_used; i++) {
 		cq_desc[i].device = device;
 		cq_desc[i].cq_index = i;
@@ -294,13 +288,6 @@ isert_create_device_ib_res(struct isert_device *device)
 			goto out_cq;
 	}
 
-	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(device->dev_mr)) {
-		ret = PTR_ERR(device->dev_mr);
-		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
-		goto out_cq;
-	}
-
 	return 0;
 
 out_cq:
@@ -316,9 +303,6 @@ out_cq:
 			ib_destroy_cq(device->dev_tx_cq[j]);
 		}
 	}
-	ib_dealloc_pd(device->dev_pd);
-
-out_cq_desc:
 	kfree(device->cq_desc);
 
 	return ret;
@@ -341,8 +325,6 @@ isert_free_device_ib_res(struct isert_device *device)
 		device->dev_tx_cq[i] = NULL;
 	}
 
-	ib_dereg_mr(device->dev_mr);
-	ib_dealloc_pd(device->dev_pd);
 	kfree(device->cq_desc);
 }
 
@@ -398,18 +380,18 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
 }
 
 static void
-isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
+isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 {
 	struct fast_reg_descriptor *fr_desc, *tmp;
 	int i = 0;
 
-	if (list_empty(&isert_conn->conn_frwr_pool))
+	if (list_empty(&isert_conn->conn_fr_pool))
 		return;
 
-	pr_debug("Freeing conn %p frwr pool", isert_conn);
+	pr_debug("Freeing conn %p fastreg pool", isert_conn);
 
 	list_for_each_entry_safe(fr_desc, tmp,
-				 &isert_conn->conn_frwr_pool, list) {
+				 &isert_conn->conn_fr_pool, list) {
 		list_del(&fr_desc->list);
 		ib_free_fast_reg_page_list(fr_desc->data_frpl);
 		ib_dereg_mr(fr_desc->data_mr);
@@ -417,20 +399,47 @@ isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
 		++i;
 	}
 
-	if (i < isert_conn->conn_frwr_pool_size)
+	if (i < isert_conn->conn_fr_pool_size)
 		pr_warn("Pool still has %d regions registered\n",
-			isert_conn->conn_frwr_pool_size - i);
+			isert_conn->conn_fr_pool_size - i);
+}
+
+static int
+isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
+		     struct fast_reg_descriptor *fr_desc)
+{
+	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
+							 ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(fr_desc->data_frpl)) {
+		pr_err("Failed to allocate data frpl err=%ld\n",
+		       PTR_ERR(fr_desc->data_frpl));
+		return PTR_ERR(fr_desc->data_frpl);
+	}
+
+	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(fr_desc->data_mr)) {
+		pr_err("Failed to allocate data frmr err=%ld\n",
+		       PTR_ERR(fr_desc->data_mr));
+		ib_free_fast_reg_page_list(fr_desc->data_frpl);
+		return PTR_ERR(fr_desc->data_mr);
+	}
+	pr_debug("Create fr_desc %p page_list %p\n",
+		 fr_desc, fr_desc->data_frpl->page_list);
+
+	fr_desc->valid = true;
+
+	return 0;
 }
 
 static int
-isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
 {
 	struct fast_reg_descriptor *fr_desc;
 	struct isert_device *device = isert_conn->conn_device;
 	int i, ret;
 
-	INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
-	isert_conn->conn_frwr_pool_size = 0;
+	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
+	isert_conn->conn_fr_pool_size = 0;
 	for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
 		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
 		if (!fr_desc) {
@@ -439,40 +448,25 @@ isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
 			goto err;
 		}
 
-		fr_desc->data_frpl =
-			ib_alloc_fast_reg_page_list(device->ib_device,
-						    ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(fr_desc->data_frpl)) {
-			pr_err("Failed to allocate fr_pg_list err=%ld\n",
-			       PTR_ERR(fr_desc->data_frpl));
-			ret = PTR_ERR(fr_desc->data_frpl);
-			goto err;
-		}
-
-		fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
-						ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(fr_desc->data_mr)) {
-			pr_err("Failed to allocate frmr err=%ld\n",
-			       PTR_ERR(fr_desc->data_mr));
-			ret = PTR_ERR(fr_desc->data_mr);
-			ib_free_fast_reg_page_list(fr_desc->data_frpl);
-			goto err;
-		}
-		pr_debug("Create fr_desc %p page_list %p\n",
-			 fr_desc, fr_desc->data_frpl->page_list);
-
-		fr_desc->valid = true;
-		list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
-		isert_conn->conn_frwr_pool_size++;
+		ret = isert_create_fr_desc(device->ib_device,
+					   isert_conn->conn_pd, fr_desc);
+		if (ret) {
+			pr_err("Failed to create fastreg descriptor err=%d\n",
+			       ret);
+			goto err;
+		}
+
+		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
+		isert_conn->conn_fr_pool_size++;
 	}
 
-	pr_debug("Creating conn %p frwr pool size=%d",
-		 isert_conn, isert_conn->conn_frwr_pool_size);
+	pr_debug("Creating conn %p fastreg pool size=%d",
+		 isert_conn, isert_conn->conn_fr_pool_size);
 
 	return 0;
 
 err:
-	isert_conn_free_frwr_pool(isert_conn);
+	isert_conn_free_fastreg_pool(isert_conn);
 	return ret;
 }
 
@@ -558,14 +552,29 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	}
 
 	isert_conn->conn_device = device;
-	isert_conn->conn_pd = device->dev_pd;
-	isert_conn->conn_mr = device->dev_mr;
+	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
+	if (IS_ERR(isert_conn->conn_pd)) {
+		ret = PTR_ERR(isert_conn->conn_pd);
+		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+		       isert_conn, ret);
+		goto out_pd;
+	}
 
-	if (device->use_frwr) {
-		ret = isert_conn_create_frwr_pool(isert_conn);
+	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
+					    IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(isert_conn->conn_mr)) {
+		ret = PTR_ERR(isert_conn->conn_mr);
+		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+		       isert_conn, ret);
+		goto out_mr;
+	}
+
+	if (device->use_fastreg) {
+		ret = isert_conn_create_fastreg_pool(isert_conn);
 		if (ret) {
-			pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
-			goto out_frwr;
+			pr_err("Conn: %p failed to create fastreg pool\n",
+			       isert_conn);
+			goto out_fastreg;
 		}
 	}
 
@@ -582,9 +591,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	return 0;
 
 out_conn_dev:
-	if (device->use_frwr)
-		isert_conn_free_frwr_pool(isert_conn);
-out_frwr:
+	if (device->use_fastreg)
+		isert_conn_free_fastreg_pool(isert_conn);
+out_fastreg:
+	ib_dereg_mr(isert_conn->conn_mr);
+out_mr:
+	ib_dealloc_pd(isert_conn->conn_pd);
+out_pd:
 	isert_device_try_release(device);
 out_rsp_dma_map:
 	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
@@ -608,8 +621,8 @@ isert_connect_release(struct isert_conn *isert_conn)
 
 	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 
-	if (device && device->use_frwr)
-		isert_conn_free_frwr_pool(isert_conn);
+	if (device && device->use_fastreg)
+		isert_conn_free_fastreg_pool(isert_conn);
 
 	if (isert_conn->conn_qp) {
 		cq_index = ((struct isert_cq_desc *)
@@ -623,6 +636,9 @@ isert_connect_release(struct isert_conn *isert_conn)
 	isert_free_rx_descriptors(isert_conn);
 	rdma_destroy_id(isert_conn->conn_cm_id);
 
+	ib_dereg_mr(isert_conn->conn_mr);
+	ib_dealloc_pd(isert_conn->conn_pd);
+
 	if (isert_conn->login_buf) {
 		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
 				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
@@ -1024,13 +1040,13 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
 }
 
 static struct iscsi_cmd
-*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
+*isert_allocate_cmd(struct iscsi_conn *conn)
 {
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct isert_cmd *isert_cmd;
 	struct iscsi_cmd *cmd;
 
-	cmd = iscsit_allocate_cmd(conn, gfp);
+	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 	if (!cmd) {
 		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
 		return NULL;
@@ -1219,7 +1235,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 
 	switch (opcode) {
 	case ISCSI_OP_SCSI_CMD:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1233,7 +1249,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 					     rx_desc, (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_NOOP_OUT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1246,7 +1262,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1254,7 +1270,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_LOGOUT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1265,7 +1281,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						  HZ);
 		break;
 	case ISCSI_OP_TEXT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
 
@@ -1404,25 +1420,25 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 }
 
 static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	LIST_HEAD(unmap_list);
 
-	pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
+	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
 
 	if (wr->fr_desc) {
-		pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
+		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
 			 isert_cmd, wr->fr_desc);
 		spin_lock_bh(&isert_conn->conn_lock);
-		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
+		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
 		spin_unlock_bh(&isert_conn->conn_lock);
 		wr->fr_desc = NULL;
 	}
 
 	if (wr->sge) {
-		pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
+		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
 		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
 				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
 				DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -2163,26 +2179,22 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
 
 static int
 isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
-		  struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
-		  struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
+		  struct isert_conn *isert_conn, struct scatterlist *sg_start,
+		  struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
+		  unsigned int data_len)
 {
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
-	struct scatterlist *sg_start;
-	u32 sg_off, page_off;
 	struct ib_send_wr fr_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
+	int ret, pagelist_len;
+	u32 page_off;
 	u8 key;
-	int ret, sg_nents, pagelist_len;
 
-	sg_off = offset / PAGE_SIZE;
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
-			 ISCSI_ISER_SG_TABLESIZE);
+	sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
 	page_off = offset % PAGE_SIZE;
 
-	pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
-		 isert_cmd, fr_desc, sg_nents, sg_off, offset);
+	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
+		 fr_desc, sg_nents, offset);
 
 	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
 					     &fr_desc->data_frpl->page_list[0]);
@@ -2232,8 +2244,8 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 }
 
 static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-		    struct isert_rdma_wr *wr)
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+	       struct isert_rdma_wr *wr)
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
@@ -2251,9 +2263,9 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
 		data_left = se_cmd->data_length;
 	} else {
-		sg_off = cmd->write_data_done / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
 		offset = cmd->write_data_done;
+		sg_off = offset / PAGE_SIZE;
+		data_left = se_cmd->data_length - cmd->write_data_done;
 		isert_cmd->tx_desc.isert_cmd = isert_cmd;
 	}
 
@@ -2311,16 +2323,16 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		wr->fr_desc = NULL;
 	} else {
 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
-		fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
 					   struct fast_reg_descriptor, list);
 		list_del(&fr_desc->list);
 		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
 		wr->fr_desc = fr_desc;
 
-		ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
-					ib_sge, offset, data_len);
+		ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
+					ib_sge, sg_nents, offset, data_len);
 		if (ret) {
-			list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+			list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
 			goto unmap_sg;
 		}
 	}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 691f90ff2d83..708a069002f3 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -119,9 +119,9 @@ struct isert_conn {
 	wait_queue_head_t	conn_wait;
 	wait_queue_head_t	conn_wait_comp_err;
 	struct kref		conn_kref;
-	struct list_head	conn_frwr_pool;
-	int			conn_frwr_pool_size;
-	/* lock to protect frwr_pool */
+	struct list_head	conn_fr_pool;
+	int			conn_fr_pool_size;
+	/* lock to protect fastreg pool */
 	spinlock_t		conn_lock;
 #define ISERT_COMP_BATCH_COUNT	8
 	int			conn_comp_batch;
@@ -139,13 +139,11 @@ struct isert_cq_desc {
 };
 
 struct isert_device {
-	int			use_frwr;
+	int			use_fastreg;
 	int			cqs_used;
 	int			refcount;
 	int			cq_active_qps[ISERT_MAX_CQ];
 	struct ib_device	*ib_device;
-	struct ib_pd		*dev_pd;
-	struct ib_mr		*dev_mr;
 	struct ib_cq		*dev_rx_cq[ISERT_MAX_CQ];
 	struct ib_cq		*dev_tx_cq[ISERT_MAX_CQ];
 	struct isert_cq_desc	*cq_desc;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 570c7fcc0c4d..4a0d7c92181f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1990,6 +1990,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 
 	vha->flags.delete_progress = 1;
 
+	qlt_remove_target(ha, vha);
+
 	fc_remove_host(vha->host);
 
 	scsi_remove_host(vha->host);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 41d6491d7bd9..e1fe95ef23e1 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2750,6 +2750,13 @@ struct qlfc_fw {
 	uint32_t len;
 };
 
+struct scsi_qlt_host {
+	void *target_lport_ptr;
+	struct mutex tgt_mutex;
+	struct mutex tgt_host_action_mutex;
+	struct qla_tgt *qla_tgt;
+};
+
 struct qlt_hw_data {
 	/* Protected by hw lock */
 	uint32_t enable_class_2:1;
@@ -2765,15 +2772,11 @@ struct qlt_hw_data {
 	uint32_t __iomem *atio_q_in;
 	uint32_t __iomem *atio_q_out;
 
-	void *target_lport_ptr;
 	struct qla_tgt_func_tmpl *tgt_ops;
-	struct qla_tgt *qla_tgt;
 	struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
 	uint16_t current_handle;
 
 	struct qla_tgt_vp_map *tgt_vp_map;
-	struct mutex tgt_mutex;
-	struct mutex tgt_host_action_mutex;
 
 	int saved_set;
 	uint16_t saved_exchange_count;
@@ -3435,6 +3438,7 @@ typedef struct scsi_qla_host {
 #define VP_ERR_FAB_LOGOUT	4
 #define VP_ERR_ADAP_NORESOURCES	5
 	struct qla_hw_data *hw;
+	struct scsi_qlt_host vha_tgt;
 	struct req_que *req;
 	int	fw_heartbeat_counter;
 	int	seconds_since_last_heartbeat;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 38a1257e76e1..9e80d61e5a3a 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -590,7 +590,7 @@ static struct qla_tgt_sess *qlt_create_sess(
 
 	/* Check to avoid double sessions */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
+	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
 				sess_list_entry) {
 		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
@@ -627,7 +627,7 @@ static struct qla_tgt_sess *qlt_create_sess(
 
 		return NULL;
 	}
-	sess->tgt = ha->tgt.qla_tgt;
+	sess->tgt = vha->vha_tgt.qla_tgt;
 	sess->vha = vha;
 	sess->s_id = fcport->d_id;
 	sess->loop_id = fcport->loop_id;
@@ -635,7 +635,7 @@ static struct qla_tgt_sess *qlt_create_sess(
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
 	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
-	    sess, ha->tgt.qla_tgt);
+	    sess, vha->vha_tgt.qla_tgt);
 
 	be_sid[0] = sess->s_id.b.domain;
 	be_sid[1] = sess->s_id.b.area;
@@ -662,8 +662,8 @@ static struct qla_tgt_sess *qlt_create_sess(
 	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
-	ha->tgt.qla_tgt->sess_count++;
+	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
+	vha->vha_tgt.qla_tgt->sess_count++;
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -682,7 +682,7 @@ static struct qla_tgt_sess *qlt_create_sess(
 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_sess *sess;
 	unsigned long flags;
 
@@ -692,6 +692,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 	if (!tgt || (fcport->port_type != FCT_INITIATOR))
 		return;
 
+	if (qla_ini_mode_enabled(vha))
+		return;
+
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	if (tgt->tgt_stop) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -701,9 +704,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 	if (!sess) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-		mutex_lock(&ha->tgt.tgt_mutex);
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
 		sess = qlt_create_sess(vha, fcport, false);
-		mutex_unlock(&ha->tgt.tgt_mutex);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 	} else {
@@ -739,7 +742,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_sess *sess;
 	unsigned long flags;
 
@@ -806,12 +809,12 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
 	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
 	 * Lock is needed, because we still can get an incoming packet.
 	 */
-	mutex_lock(&ha->tgt.tgt_mutex);
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	tgt->tgt_stop = 1;
 	qlt_clear_tgt_db(tgt, true);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	mutex_unlock(&ha->tgt.tgt_mutex);
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 	flush_delayed_work(&tgt->sess_del_work);
 
@@ -845,20 +848,21 @@ EXPORT_SYMBOL(qlt_stop_phase1);
 void qlt_stop_phase2(struct qla_tgt *tgt)
 {
 	struct qla_hw_data *ha = tgt->ha;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	unsigned long flags;
 
 	if (tgt->tgt_stopped) {
-		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
 		    "Already in tgt->tgt_stopped state\n");
 		dump_stack();
 		return;
 	}
 
-	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
 	    "Waiting for %d IRQ commands to complete (tgt %p)",
 	    tgt->irq_cmd_count, tgt);
 
-	mutex_lock(&ha->tgt.tgt_mutex);
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	while (tgt->irq_cmd_count != 0) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -868,9 +872,9 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
 	tgt->tgt_stop = 0;
 	tgt->tgt_stopped = 1;
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	mutex_unlock(&ha->tgt.tgt_mutex);
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
-	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
 	    tgt);
 }
 EXPORT_SYMBOL(qlt_stop_phase2);
@@ -878,14 +882,14 @@ EXPORT_SYMBOL(qlt_stop_phase2);
 /* Called from qlt_remove_target() -> qla2x00_remove_one() */
 static void qlt_release(struct qla_tgt *tgt)
 {
-	struct qla_hw_data *ha = tgt->ha;
+	scsi_qla_host_t *vha = tgt->vha;
 
-	if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
 		qlt_stop_phase2(tgt);
 
-	ha->tgt.qla_tgt = NULL;
+	vha->vha_tgt.qla_tgt = NULL;
 
-	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
 	    "Release of tgt %p finished\n", tgt);
 
 	kfree(tgt);
@@ -949,8 +953,8 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
 		return;
 	}
 
-	if (ha->tgt.qla_tgt != NULL)
-		ha->tgt.qla_tgt->notify_ack_expected++;
+	if (vha->vha_tgt.qla_tgt != NULL)
+		vha->vha_tgt.qla_tgt->notify_ack_expected++;
 
 	pkt->entry_type = NOTIFY_ACK_TYPE;
 	pkt->entry_count = 1;
@@ -1054,7 +1058,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
 		/* Other bytes are zero */
 	}
 
-	ha->tgt.qla_tgt->abts_resp_expected++;
+	vha->vha_tgt.qla_tgt->abts_resp_expected++;
 
 	qla2x00_start_iocbs(vha, vha->req);
 }
@@ -1206,7 +1210,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
 		    "qla_target(%d): task abort for non-existant session\n",
 		    vha->vp_idx);
-		rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
+		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
 		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
 		if (rc != 0) {
 			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
@@ -2157,8 +2161,7 @@ static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
 	struct qla_tgt_cmd *cmd, void *ctio)
 {
 	struct qla_tgt_srr_ctio *sc;
-	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_srr_imm *imm;
 
 	tgt->ctio_srr_id++;
@@ -2474,7 +2477,7 @@ static void qlt_do_work(struct work_struct *work)
 	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
 	scsi_qla_host_t *vha = cmd->vha;
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_sess *sess = NULL;
 	struct atio_from_isp *atio = &cmd->atio;
 	unsigned char *cdb;
@@ -2507,10 +2510,10 @@ static void qlt_do_work(struct work_struct *work)
 			goto out_term;
 		}
 
-		mutex_lock(&ha->tgt.tgt_mutex);
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
 		sess = qlt_make_local_sess(vha, s_id);
 		/* sess has an extra creation ref. */
-		mutex_unlock(&ha->tgt.tgt_mutex);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 		if (!sess)
 			goto out_term;
@@ -2576,8 +2579,7 @@ out_term:
 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio)
 {
-	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_cmd *cmd;
 
 	if (unlikely(tgt->tgt_stop)) {
@@ -2597,7 +2599,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 
 	memcpy(&cmd->atio, atio, sizeof(*atio));
 	cmd->state = QLA_TGT_STATE_NEW;
-	cmd->tgt = ha->tgt.qla_tgt;
+	cmd->tgt = vha->vha_tgt.qla_tgt;
 	cmd->vha = vha;
 
 	INIT_WORK(&cmd->work, qlt_do_work);
@@ -2723,7 +2725,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
 	uint32_t lun, unpacked_lun;
 	int lun_size, fn;
 
-	tgt = ha->tgt.qla_tgt;
+	tgt = vha->vha_tgt.qla_tgt;
 
 	lun = a->u.isp24.fcp_cmnd.lun;
 	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
@@ -2797,7 +2799,7 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
 		    "qla_target(%d): task abort for unexisting "
 		    "session\n", vha->vp_idx);
-		return qlt_sched_sess_work(ha->tgt.qla_tgt,
+		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
 		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
 	}
 
@@ -2810,7 +2812,6 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 	struct imm_ntfy_from_isp *iocb)
 {
-	struct qla_hw_data *ha = vha->hw;
 	int res = 0;
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
@@ -2828,7 +2829,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 	case ELS_PDISC:
 	case ELS_ADISC:
 	{
-		struct qla_tgt *tgt = ha->tgt.qla_tgt;
+		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 		if (tgt->link_reinit_iocb_pending) {
 			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
 			    0, 0, 0, 0, 0, 0);
@@ -3202,8 +3203,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
 	struct imm_ntfy_from_isp *iocb)
 {
 	struct qla_tgt_srr_imm *imm;
-	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_srr_ctio *sctio;
 
 	tgt->imm_srr_id++;
@@ -3313,7 +3313,7 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
 
 	case IMM_NTFY_LIP_LINK_REINIT:
 	{
-		struct qla_tgt *tgt = ha->tgt.qla_tgt;
+		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
 		    "qla_target(%d): LINK REINIT (loop %#x, "
 		    "subcode %x)\n", vha->vp_idx,
@@ -3489,7 +3489,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	int rc;
 
 	if (unlikely(tgt == NULL)) {
@@ -3591,7 +3591,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 
 	if (unlikely(tgt == NULL)) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
@@ -3794,7 +3794,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
 	uint16_t *mailbox)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	int login_code;
 
 	ql_dbg(ql_dbg_tgt, vha, 0xe039,
@@ -3924,14 +3924,14 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
 	uint8_t *s_id)
 {
-	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt_sess *sess = NULL;
 	fc_port_t *fcport = NULL;
 	int rc, global_resets;
 	uint16_t loop_id = 0;
 
 retry:
-	global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+	global_resets =
+	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
 
 	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
 	if (rc != 0) {
@@ -3958,12 +3958,13 @@ retry:
 		return NULL;
 
 	if (global_resets !=
-	    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
 		    "qla_target(%d): global reset during session discovery "
 		    "(counter was %d, new %d), retrying", vha->vp_idx,
 		    global_resets,
-		    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+		    atomic_read(&vha->vha_tgt.
+				qla_tgt->tgt_global_resets_count));
 		goto retry;
 	}
 
@@ -3998,10 +3999,10 @@ static void qlt_abort_work(struct qla_tgt *tgt,
 	if (!sess) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-		mutex_lock(&ha->tgt.tgt_mutex);
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
 		sess = qlt_make_local_sess(vha, s_id);
 		/* sess has got an extra creation ref */
-		mutex_unlock(&ha->tgt.tgt_mutex);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 		if (!sess)
@@ -4052,10 +4053,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
 	if (!sess) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-		mutex_lock(&ha->tgt.tgt_mutex);
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
 		sess = qlt_make_local_sess(vha, s_id);
 		/* sess has got an extra creation ref */
-		mutex_unlock(&ha->tgt.tgt_mutex);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 		if (!sess)
@@ -4141,9 +4142,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
 	}
 
 	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
-	    "Registering target for host %ld(%p)", base_vha->host_no, ha);
+	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
 
-	BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
+	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
 
 	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
 	if (!tgt) {
@@ -4171,7 +4172,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
 	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
 	atomic_set(&tgt->tgt_global_resets_count, 0);
 
-	ha->tgt.qla_tgt = tgt;
+	base_vha->vha_tgt.qla_tgt = tgt;
 
 	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
 	    "qla_target(%d): using 64 Bit PCI addressing",
@@ -4192,16 +4193,16 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
 /* Must be called under tgt_host_action_mutex */
 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
 {
-	if (!ha->tgt.qla_tgt)
+	if (!vha->vha_tgt.qla_tgt)
 		return 0;
 
 	mutex_lock(&qla_tgt_mutex);
-	list_del(&ha->tgt.qla_tgt->tgt_list_entry);
+	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
 	mutex_unlock(&qla_tgt_mutex);
 
 	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
 	    vha->host_no, ha);
-	qlt_release(ha->tgt.qla_tgt);
+	qlt_release(vha->vha_tgt.qla_tgt);
 
 	return 0;
 }
@@ -4235,8 +4236,9 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
  * @callback:  lport initialization callback for tcm_qla2xxx code
  * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
  */
-int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
-	int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
+int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
+		       u64 npiv_wwpn, u64 npiv_wwnn,
+		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
 {
 	struct qla_tgt *tgt;
 	struct scsi_qla_host *vha;
@@ -4255,14 +4257,11 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
 		if (!host)
 			continue;
 
-		if (ha->tgt.tgt_ops != NULL)
-			continue;
-
 		if (!(host->hostt->supported_mode & MODE_TARGET))
 			continue;
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
-		if (host->active_mode & MODE_TARGET) {
+		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
 			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
 			    host->host_no);
 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4276,24 +4275,18 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
 			    " qla2xxx scsi_host\n");
 			continue;
 		}
-		qlt_lport_dump(vha, wwpn, b);
+		qlt_lport_dump(vha, phys_wwpn, b);
 
 		if (memcmp(vha->port_name, b, WWN_SIZE)) {
 			scsi_host_put(host);
 			continue;
 		}
-		/*
-		 * Setup passed parameters ahead of invoking callback
-		 */
-		ha->tgt.tgt_ops = qla_tgt_ops;
-		ha->tgt.target_lport_ptr = target_lport_ptr;
-		rc = (*callback)(vha);
-		if (rc != 0) {
-			ha->tgt.tgt_ops = NULL;
-			ha->tgt.target_lport_ptr = NULL;
-			scsi_host_put(host);
-		}
 		mutex_unlock(&qla_tgt_mutex);
+
+		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
+		if (rc != 0)
+			scsi_host_put(host);
+
 		return rc;
 	}
 	mutex_unlock(&qla_tgt_mutex);
@@ -4314,7 +4307,7 @@ void qlt_lport_deregister(struct scsi_qla_host *vha)
 	/*
 	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
 	 */
-	ha->tgt.target_lport_ptr = NULL;
+	vha->vha_tgt.target_lport_ptr = NULL;
 	ha->tgt.tgt_ops = NULL;
 	/*
 	 * Release the Scsi_Host reference for the underlying qla2xxx host
@@ -4376,8 +4369,9 @@ void
 qlt_enable_vha(struct scsi_qla_host *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	unsigned long flags;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
 	if (!tgt) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe069,
@@ -4392,9 +4386,14 @@ qlt_enable_vha(struct scsi_qla_host *vha)
4392 qlt_set_mode(vha); 4386 qlt_set_mode(vha);
4393 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4387 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4394 4388
4395 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 4389 if (vha->vp_idx) {
4396 qla2xxx_wake_dpc(vha); 4390 qla24xx_disable_vp(vha);
4397 qla2x00_wait_for_hba_online(vha); 4391 qla24xx_enable_vp(vha);
4392 } else {
4393 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
4394 qla2xxx_wake_dpc(base_vha);
4395 qla2x00_wait_for_hba_online(base_vha);
4396 }
4398} 4397}
4399EXPORT_SYMBOL(qlt_enable_vha); 4398EXPORT_SYMBOL(qlt_enable_vha);
4400 4399
@@ -4407,7 +4406,7 @@ void
4407qlt_disable_vha(struct scsi_qla_host *vha) 4406qlt_disable_vha(struct scsi_qla_host *vha)
4408{ 4407{
4409 struct qla_hw_data *ha = vha->hw; 4408 struct qla_hw_data *ha = vha->hw;
4410 struct qla_tgt *tgt = ha->tgt.qla_tgt; 4409 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4411 unsigned long flags; 4410 unsigned long flags;
4412 4411
4413 if (!tgt) { 4412 if (!tgt) {
@@ -4438,8 +4437,10 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
4438 if (!qla_tgt_mode_enabled(vha)) 4437 if (!qla_tgt_mode_enabled(vha))
4439 return; 4438 return;
4440 4439
4441 mutex_init(&ha->tgt.tgt_mutex); 4440 vha->vha_tgt.qla_tgt = NULL;
4442 mutex_init(&ha->tgt.tgt_host_action_mutex); 4441
4442 mutex_init(&vha->vha_tgt.tgt_mutex);
4443 mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
4443 4444
4444 qlt_clear_mode(vha); 4445 qlt_clear_mode(vha);
4445 4446
@@ -4450,6 +4451,8 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
4450 * assigning the value appropriately. 4451 * assigning the value appropriately.
4451 */ 4452 */
4452 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 4453 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
4454
4455 qlt_add_target(ha, vha);
4453} 4456}
4454 4457
4455void 4458void
@@ -4768,8 +4771,8 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
4768 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; 4771 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
4769 } 4772 }
4770 4773
4771 mutex_init(&ha->tgt.tgt_mutex); 4774 mutex_init(&base_vha->vha_tgt.tgt_mutex);
4772 mutex_init(&ha->tgt.tgt_host_action_mutex); 4775 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
4773 qlt_clear_mode(base_vha); 4776 qlt_clear_mode(base_vha);
4774} 4777}
4775 4778
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index b33e411f28a0..1d10eecad499 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -932,8 +932,8 @@ void qlt_disable_vha(struct scsi_qla_host *);
  */
 extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
 extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
-extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
-		int (*callback)(struct scsi_qla_host *), void *);
+extern int qlt_lport_register(void *, u64, u64, u64,
+		int (*callback)(struct scsi_qla_host *, void *, u64, u64));
 extern void qlt_lport_deregister(struct scsi_qla_host *);
 extern void qlt_unreg_sess(struct qla_tgt_sess *);
 extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 7eb19be35d46..75a141bbe74d 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -53,16 +53,6 @@
 struct workqueue_struct *tcm_qla2xxx_free_wq;
 struct workqueue_struct *tcm_qla2xxx_cmd_wq;
 
-static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
-{
-	return 1;
-}
-
-static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
-{
-	return 0;
-}
-
 /*
  * Parse WWN.
  * If strict, we require lower-case hex and colon separators to be sure
@@ -174,7 +164,7 @@ static int tcm_qla2xxx_npiv_parse_wwn(
 	*wwnn = 0;
 
 	/* count may include a LF at end of string */
-	if (name[cnt-1] == '\n')
+	if (name[cnt-1] == '\n' || name[cnt-1] == 0)
 		cnt--;
 
 	/* validate we have enough characters for WWPN */
@@ -777,6 +767,9 @@ static void tcm_qla2xxx_put_session(struct se_session *se_sess)
 
 static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
 {
+	if (!sess)
+		return;
+
 	assert_spin_locked(&sess->vha->hw->hardware_lock);
 	kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
 }
@@ -957,7 +950,6 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
 	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
 			struct tcm_qla2xxx_lport, lport_wwn);
 	struct scsi_qla_host *vha = lport->qla_vha;
-	struct qla_hw_data *ha = vha->hw;
 	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
 			struct tcm_qla2xxx_tpg, se_tpg);
 	unsigned long op;
@@ -977,12 +969,12 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
 		atomic_set(&tpg->lport_tpg_enabled, 1);
 		qlt_enable_vha(vha);
 	} else {
-		if (!ha->tgt.qla_tgt) {
-			pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
+		if (!vha->vha_tgt.qla_tgt) {
+			pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n");
 			return -ENODEV;
 		}
 		atomic_set(&tpg->lport_tpg_enabled, 0);
-		qlt_stop_phase1(ha->tgt.qla_tgt);
+		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
 	}
 
 	return count;
@@ -1011,7 +1003,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
 	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
 		return ERR_PTR(-EINVAL);
 
-	if (!lport->qla_npiv_vp && (tpgt != 1)) {
+	if ((tpgt != 1)) {
 		pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
 		return ERR_PTR(-ENOSYS);
 	}
@@ -1038,11 +1030,8 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
 		kfree(tpg);
 		return NULL;
 	}
-	/*
-	 * Setup local TPG=1 pointer for non NPIV mode.
-	 */
-	if (lport->qla_npiv_vp == NULL)
-		lport->tpg_1 = tpg;
+
+	lport->tpg_1 = tpg;
 
 	return &tpg->se_tpg;
 }
@@ -1053,19 +1042,17 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
 			struct tcm_qla2xxx_tpg, se_tpg);
 	struct tcm_qla2xxx_lport *lport = tpg->lport;
 	struct scsi_qla_host *vha = lport->qla_vha;
-	struct qla_hw_data *ha = vha->hw;
 	/*
 	 * Call into qla2x_target.c LLD logic to shutdown the active
 	 * FC Nexuses and disable target mode operation for this qla_hw_data
 	 */
-	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
-		qlt_stop_phase1(ha->tgt.qla_tgt);
+	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
+		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
 
 	core_tpg_deregister(se_tpg);
 	/*
 	 * Clear local TPG=1 pointer for non NPIV mode.
 	 */
-	if (lport->qla_npiv_vp == NULL)
-		lport->tpg_1 = NULL;
+	lport->tpg_1 = NULL;
 
 	kfree(tpg);
@@ -1095,12 +1082,22 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
 	tpg->lport = lport;
 	tpg->lport_tpgt = tpgt;
 
+	/*
+	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+	 * NodeACLs
+	 */
+	tpg->tpg_attrib.generate_node_acls = 1;
+	tpg->tpg_attrib.demo_mode_write_protect = 1;
+	tpg->tpg_attrib.cache_dynamic_acls = 1;
+	tpg->tpg_attrib.demo_mode_login_only = 1;
+
 	ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
 				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
 	if (ret < 0) {
 		kfree(tpg);
 		return NULL;
 	}
+	lport->tpg_1 = tpg;
 	return &tpg->se_tpg;
 }
 
@@ -1111,13 +1108,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
 	scsi_qla_host_t *vha,
 	const uint8_t *s_id)
 {
-	struct qla_hw_data *ha = vha->hw;
 	struct tcm_qla2xxx_lport *lport;
 	struct se_node_acl *se_nacl;
 	struct tcm_qla2xxx_nacl *nacl;
 	u32 key;
 
-	lport = ha->tgt.target_lport_ptr;
+	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
 		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
 		dump_stack();
@@ -1221,13 +1217,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
 	scsi_qla_host_t *vha,
 	const uint16_t loop_id)
 {
-	struct qla_hw_data *ha = vha->hw;
 	struct tcm_qla2xxx_lport *lport;
 	struct se_node_acl *se_nacl;
 	struct tcm_qla2xxx_nacl *nacl;
 	struct tcm_qla2xxx_fc_loopid *fc_loopid;
 
-	lport = ha->tgt.target_lport_ptr;
+	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
 		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
 		dump_stack();
@@ -1341,6 +1336,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
 {
 	struct qla_tgt *tgt = sess->tgt;
 	struct qla_hw_data *ha = tgt->ha;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	struct se_session *se_sess;
 	struct se_node_acl *se_nacl;
 	struct tcm_qla2xxx_lport *lport;
@@ -1357,7 +1353,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
 	se_nacl = se_sess->se_node_acl;
 	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
 
-	lport = ha->tgt.target_lport_ptr;
+	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
 		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
 		dump_stack();
@@ -1391,7 +1387,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 	unsigned char port_name[36];
 	unsigned long flags;
 
-	lport = ha->tgt.target_lport_ptr;
+	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
 		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
 		dump_stack();
@@ -1455,7 +1451,8 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
 {
 	struct qla_tgt *tgt = sess->tgt;
 	struct qla_hw_data *ha = tgt->ha;
-	struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
 	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
 	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
 				struct tcm_qla2xxx_nacl, se_node_acl);
@@ -1562,15 +1559,18 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
 	return 0;
 }
 
-static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha,
+					 void *target_lport_ptr,
+					 u64 npiv_wwpn, u64 npiv_wwnn)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct tcm_qla2xxx_lport *lport;
+	struct tcm_qla2xxx_lport *lport =
+			(struct tcm_qla2xxx_lport *)target_lport_ptr;
 	/*
-	 * Setup local pointer to vha, NPIV VP pointer (if present) and
-	 * vha->tcm_lport pointer
+	 * Setup tgt_ops, local pointer to vha and target_lport_ptr
 	 */
-	lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
+	ha->tgt.tgt_ops = &tcm_qla2xxx_template;
+	vha->vha_tgt.target_lport_ptr = target_lport_ptr;
 	lport->qla_vha = vha;
 
 	return 0;
@@ -1602,8 +1602,8 @@ static struct se_wwn *tcm_qla2xxx_make_lport(
 	if (ret != 0)
 		goto out;
 
-	ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
-					tcm_qla2xxx_lport_register_cb, lport);
+	ret = qlt_lport_register(lport, wwpn, 0, 0,
+				 tcm_qla2xxx_lport_register_cb);
 	if (ret != 0)
 		goto out_lport;
 
@@ -1621,7 +1621,6 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
 	struct tcm_qla2xxx_lport *lport = container_of(wwn,
 			struct tcm_qla2xxx_lport, lport_wwn);
 	struct scsi_qla_host *vha = lport->qla_vha;
-	struct qla_hw_data *ha = vha->hw;
 	struct se_node_acl *node;
 	u32 key = 0;
 
@@ -1630,8 +1629,8 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
 	 * shutdown of struct qla_tgt after the call to
 	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
 	 */
-	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
-		qlt_stop_phase2(ha->tgt.qla_tgt);
+	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
+		qlt_stop_phase2(vha->vha_tgt.qla_tgt);
 
 	qlt_lport_deregister(vha);
 
@@ -1642,17 +1641,70 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
 	kfree(lport);
 }
 
+static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
+					      void *target_lport_ptr,
+					      u64 npiv_wwpn, u64 npiv_wwnn)
+{
+	struct fc_vport *vport;
+	struct Scsi_Host *sh = base_vha->host;
+	struct scsi_qla_host *npiv_vha;
+	struct tcm_qla2xxx_lport *lport =
+			(struct tcm_qla2xxx_lport *)target_lport_ptr;
+	struct fc_vport_identifiers vport_id;
+
+	if (!qla_tgt_mode_enabled(base_vha)) {
+		pr_err("qla2xxx base_vha not enabled for target mode\n");
+		return -EPERM;
+	}
+
+	memset(&vport_id, 0, sizeof(vport_id));
+	vport_id.port_name = npiv_wwpn;
+	vport_id.node_name = npiv_wwnn;
+	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+	vport_id.vport_type = FC_PORTTYPE_NPIV;
+	vport_id.disable = false;
+
+	vport = fc_vport_create(sh, 0, &vport_id);
+	if (!vport) {
+		pr_err("fc_vport_create failed for qla2xxx_npiv\n");
+		return -ENODEV;
+	}
+	/*
+	 * Setup local pointer to NPIV vhba + target_lport_ptr
+	 */
+	npiv_vha = (struct scsi_qla_host *)vport->dd_data;
+	npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
+	lport->qla_vha = npiv_vha;
+
+	scsi_host_get(npiv_vha->host);
+	return 0;
+}
+
+
 static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
 	struct target_fabric_configfs *tf,
 	struct config_group *group,
 	const char *name)
 {
 	struct tcm_qla2xxx_lport *lport;
-	u64 npiv_wwpn, npiv_wwnn;
+	u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
+	char *p, tmp[128];
 	int ret;
 
-	if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
-				&npiv_wwpn, &npiv_wwnn) < 0)
+	snprintf(tmp, 128, "%s", name);
+
+	p = strchr(tmp, '@');
+	if (!p) {
+		pr_err("Unable to locate NPIV '@' seperator\n");
+		return ERR_PTR(-EINVAL);
+	}
+	*p++ = '\0';
+
+	if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0)
+		return ERR_PTR(-EINVAL);
+
+	if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
+				       &npiv_wwpn, &npiv_wwnn) < 0)
 		return ERR_PTR(-EINVAL);
 
 	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
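
tcm_qla2xxx_npiv_make_lport() now expects the configfs directory name to carry both identities, in the form <physical WWPN>@<NPIV WWPN>,<NPIV WWNN>, splitting at the '@' before handing each half to the WWN parsers. A self-contained sketch of that split is below; it assumes plain hex WWNs via strtoull() instead of the driver's colon-separated format, so it is a simplification, not the driver's parser.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "physwwpn@npivwwpn,npivwwnn" the way the lport name is split
 * above; 0x-free hex is a stand-in for the driver's WWN syntax. */
static int parse_npiv_name(const char *name, uint64_t *phys_wwpn,
			   uint64_t *npiv_wwpn, uint64_t *npiv_wwnn)
{
	char tmp[128], *p, *comma;

	snprintf(tmp, sizeof(tmp), "%s", name);

	p = strchr(tmp, '@');
	if (!p)
		return -1;	/* no NPIV separator */
	*p++ = '\0';

	comma = strchr(p, ',');
	if (!comma)
		return -1;	/* need both NPIV WWPN and WWNN */
	*comma++ = '\0';

	*phys_wwpn = strtoull(tmp, NULL, 16);
	*npiv_wwpn = strtoull(p, NULL, 16);
	*npiv_wwnn = strtoull(comma, NULL, 16);
	return 0;
}

int main(void)
{
	uint64_t pw, nw, nn;

	if (parse_npiv_name("21000024ff000001@21000024ff0000a0,20000024ff0000a0",
			    &pw, &nw, &nn))
		return 1;
	printf("phys %016llx npiv %016llx:%016llx\n",
	       (unsigned long long)pw, (unsigned long long)nw,
	       (unsigned long long)nn);
	return 0;
}
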
@@ -1666,12 +1718,19 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
 			TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
 	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
 
-/* FIXME: tcm_qla2xxx_npiv_make_lport */
-	ret = -ENOSYS;
+	ret = tcm_qla2xxx_init_lport(lport);
 	if (ret != 0)
 		goto out;
 
+	ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
+				 tcm_qla2xxx_lport_register_npiv_cb);
+	if (ret != 0)
+		goto out_lport;
+
 	return &lport->lport_wwn;
+out_lport:
+	vfree(lport->lport_loopid_map);
+	btree_destroy32(&lport->lport_fcport_map);
 out:
 	kfree(lport);
 	return ERR_PTR(ret);
@@ -1681,14 +1740,16 @@ static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
 {
 	struct tcm_qla2xxx_lport *lport = container_of(wwn,
 			struct tcm_qla2xxx_lport, lport_wwn);
-	struct scsi_qla_host *vha = lport->qla_vha;
-	struct Scsi_Host *sh = vha->host;
+	struct scsi_qla_host *npiv_vha = lport->qla_vha;
+	struct qla_hw_data *ha = npiv_vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	scsi_host_put(npiv_vha->host);
 	/*
-	 * Notify libfc that we want to release the lport->npiv_vport
+	 * Notify libfc that we want to release the vha->fc_vport
 	 */
-	fc_vport_terminate(lport->npiv_vport);
-
-	scsi_host_put(sh);
+	fc_vport_terminate(npiv_vha->fc_vport);
+	scsi_host_put(base_vha->host);
 	kfree(lport);
 }
 
@@ -1769,14 +1830,16 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
 	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
 	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,
 	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id,
-	.tpg_check_demo_mode		= tcm_qla2xxx_check_false,
-	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_true,
-	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
-	.tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
+	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
+	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
+	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
+	.tpg_check_prod_mode_write_protect =
+	    tcm_qla2xxx_check_prod_write_protect,
 	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
 	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
 	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
 	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
+	.check_stop_free		= tcm_qla2xxx_check_stop_free,
 	.release_cmd			= tcm_qla2xxx_release_cmd,
 	.put_session			= tcm_qla2xxx_put_session,
 	.shutdown_session		= tcm_qla2xxx_shutdown_session,
@@ -1871,7 +1934,8 @@ static int tcm_qla2xxx_register_configfs(void)
 	 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
 	 */
 	npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
-	npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
+	npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
+	    tcm_qla2xxx_tpg_attrs;
 	npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
 	npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
 	npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 771f7b816443..275d8b9a7a34 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -70,12 +70,8 @@ struct tcm_qla2xxx_lport {
 	struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
 	/* Pointer to struct scsi_qla_host from qla2xxx LLD */
 	struct scsi_qla_host *qla_vha;
-	/* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
-	struct scsi_qla_host *qla_npiv_vp;
 	/* Pointer to struct qla_tgt pointer */
 	struct qla_tgt lport_qla_tgt;
-	/* Pointer to struct fc_vport for NPIV vport from libfc */
-	struct fc_vport *npiv_vport;
 	/* Pointer to TPG=1 for non NPIV mode */
 	struct tcm_qla2xxx_tpg *tpg_1;
 	/* Returned by tcm_qla2xxx_make_lport() */
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 18303686eb58..dc2d84ac5a0e 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -3,6 +3,7 @@ menuconfig TARGET_CORE
 	tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
 	depends on SCSI && BLOCK
 	select CONFIGFS_FS
+	select CRC_T10DIF
 	default n
 	help
 	Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
@@ -13,6 +14,7 @@ if TARGET_CORE
 
 config TCM_IBLOCK
 	tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+	select BLK_DEV_INTEGRITY
 	help
 	Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
 	access to Linux/Block devices using BIO
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 00867190413c..7f1a7ce4b771 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -52,7 +52,7 @@
 static LIST_HEAD(g_tiqn_list);
 static LIST_HEAD(g_np_list);
 static DEFINE_SPINLOCK(tiqn_lock);
-static DEFINE_SPINLOCK(np_lock);
+static DEFINE_MUTEX(np_lock);
 
 static struct idr tiqn_idr;
 struct idr sess_idr;
@@ -307,6 +307,9 @@ bool iscsit_check_np_match(
 	return false;
 }
 
+/*
+ * Called with mutex np_lock held
+ */
 static struct iscsi_np *iscsit_get_np(
 	struct __kernel_sockaddr_storage *sockaddr,
 	int network_transport)
@@ -314,11 +317,10 @@ static struct iscsi_np *iscsit_get_np(
 	struct iscsi_np *np;
 	bool match;
 
-	spin_lock_bh(&np_lock);
 	list_for_each_entry(np, &g_np_list, np_list) {
-		spin_lock(&np->np_thread_lock);
+		spin_lock_bh(&np->np_thread_lock);
 		if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
-			spin_unlock(&np->np_thread_lock);
+			spin_unlock_bh(&np->np_thread_lock);
 			continue;
 		}
 
@@ -330,13 +332,11 @@ static struct iscsi_np *iscsit_get_np(
 			 * while iscsi_tpg_add_network_portal() is called.
 			 */
 			np->np_exports++;
-			spin_unlock(&np->np_thread_lock);
-			spin_unlock_bh(&np_lock);
+			spin_unlock_bh(&np->np_thread_lock);
 			return np;
 		}
-		spin_unlock(&np->np_thread_lock);
+		spin_unlock_bh(&np->np_thread_lock);
 	}
-	spin_unlock_bh(&np_lock);
 
 	return NULL;
 }
@@ -350,16 +350,22 @@ struct iscsi_np *iscsit_add_np(
 	struct sockaddr_in6 *sock_in6;
 	struct iscsi_np *np;
 	int ret;
+
+	mutex_lock(&np_lock);
+
 	/*
 	 * Locate the existing struct iscsi_np if already active..
 	 */
 	np = iscsit_get_np(sockaddr, network_transport);
-	if (np)
+	if (np) {
+		mutex_unlock(&np_lock);
 		return np;
+	}
 
 	np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
 	if (!np) {
 		pr_err("Unable to allocate memory for struct iscsi_np\n");
+		mutex_unlock(&np_lock);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -382,6 +388,7 @@ struct iscsi_np *iscsit_add_np(
 	ret = iscsi_target_setup_login_socket(np, sockaddr);
 	if (ret != 0) {
 		kfree(np);
+		mutex_unlock(&np_lock);
 		return ERR_PTR(ret);
 	}
 
@@ -390,6 +397,7 @@ struct iscsi_np *iscsit_add_np(
 		pr_err("Unable to create kthread: iscsi_np\n");
 		ret = PTR_ERR(np->np_thread);
 		kfree(np);
+		mutex_unlock(&np_lock);
 		return ERR_PTR(ret);
 	}
 	/*
@@ -400,10 +408,10 @@ struct iscsi_np *iscsit_add_np(
 	 * point because iscsi_np has not been added to g_np_list yet.
 	 */
 	np->np_exports = 1;
+	np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
 
-	spin_lock_bh(&np_lock);
 	list_add_tail(&np->np_list, &g_np_list);
-	spin_unlock_bh(&np_lock);
+	mutex_unlock(&np_lock);
 
 	pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
 		np->np_ip, np->np_port, np->np_transport->name);
@@ -470,9 +478,9 @@ int iscsit_del_np(struct iscsi_np *np)
 
 	np->np_transport->iscsit_free_np(np);
 
-	spin_lock_bh(&np_lock);
+	mutex_lock(&np_lock);
 	list_del(&np->np_list);
-	spin_unlock_bh(&np_lock);
+	mutex_unlock(&np_lock);
 
 	pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
 		np->np_ip, np->np_port, np->np_transport->name);
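
The portal fix above converts np_lock from a spinlock to a mutex and holds it across the whole lookup-or-create sequence in iscsit_add_np(), closing the window in which two threads could both miss in iscsit_get_np() and create duplicate portals; a mutex also makes the sleeping kzalloc() and socket setup legal where a bh-spinlock did not. A compact userspace model of the same check-then-create-under-one-lock pattern, with all names invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct portal { char addr[64]; struct portal *next; };

static pthread_mutex_t np_lock = PTHREAD_MUTEX_INITIALIZER;
static struct portal *portals;

/* Lookup-or-create under one mutex hold: the gap between the failed
 * lookup and the list insertion is closed, which is the race the patch
 * above fixes for g_np_list. */
static struct portal *add_portal(const char *addr)
{
	struct portal *np;

	pthread_mutex_lock(&np_lock);
	for (np = portals; np; np = np->next) {
		if (!strcmp(np->addr, addr)) {	/* existing portal: reuse */
			pthread_mutex_unlock(&np_lock);
			return np;
		}
	}
	np = calloc(1, sizeof(*np));	/* may sleep: fine under a mutex */
	if (np) {
		snprintf(np->addr, sizeof(np->addr), "%s", addr);
		np->next = portals;
		portals = np;
	}
	pthread_mutex_unlock(&np_lock);
	return np;
}

int main(void)
{
	struct portal *a = add_portal("192.168.0.1:3260");
	struct portal *b = add_portal("192.168.0.1:3260");

	printf("same portal reused: %s\n", a == b ? "yes" : "no");
	return 0;
}
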
@@ -622,7 +630,7 @@ static int iscsit_add_reject(
 {
 	struct iscsi_cmd *cmd;
 
-	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 	if (!cmd)
 		return -1;
 
@@ -2475,7 +2483,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
 	if (!conn_p)
 		return;
 
-	cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+	cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
 	if (!cmd) {
 		iscsit_dec_conn_usage_count(conn_p);
 		return;
@@ -3951,7 +3959,7 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
 
 	switch (hdr->opcode & ISCSI_OPCODE_MASK) {
 	case ISCSI_OP_SCSI_CMD:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 		if (!cmd)
 			goto reject;
 
@@ -3963,28 +3971,28 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
 	case ISCSI_OP_NOOP_OUT:
 		cmd = NULL;
 		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
-			cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+			cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 			if (!cmd)
 				goto reject;
 		}
 		ret = iscsit_handle_nop_out(conn, cmd, buf);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 		if (!cmd)
 			goto reject;
 
 		ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
 		break;
 	case ISCSI_OP_TEXT:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 		if (!cmd)
 			goto reject;
 
 		ret = iscsit_handle_text_cmd(conn, cmd, buf);
 		break;
 	case ISCSI_OP_LOGOUT:
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 		if (!cmd)
 			goto reject;
 
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 83c965c65386..582ba84075ec 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1192,7 +1192,7 @@ get_target:
 	 */
alloc_tags:
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
-	tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS;
+	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
 	tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
 
 	ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
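
The one-line sizing change above grows the session tag pool from roughly 1.5x queue_depth to 2x queue_depth plus ISCSIT_EXTRA_TAGS, so tags pinned by not-yet-acknowledged NopINs cannot starve command allocation. The sketch below just evaluates both formulas; the ISCSIT_MIN_TAGS and ISCSIT_EXTRA_TAGS values are assumed for illustration and may not match the header's definitions.

#include <stdio.h>

#define ISCSIT_MIN_TAGS		16	/* assumed value */
#define ISCSIT_EXTRA_TAGS	8	/* assumed value */

int main(void)
{
	unsigned int queue_depth = 128;
	unsigned int tag_num = queue_depth > ISCSIT_MIN_TAGS ?
			       queue_depth : ISCSIT_MIN_TAGS;	/* max_t() */

	/* old sizing: 1.5x + extra; new sizing: 2x + extra */
	printf("old pool: %u tags\n",
	       tag_num + (tag_num / 2) + ISCSIT_EXTRA_TAGS);
	printf("new pool: %u tags\n",
	       (tag_num * 2) + ISCSIT_EXTRA_TAGS);
	return 0;
}
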
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 0819e688a398..e655b042ed18 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -152,13 +152,16 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
  * May be called from software interrupt (timer) context for allocating
  * iSCSI NopINs.
  */
-struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
+struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
 {
 	struct iscsi_cmd *cmd;
 	struct se_session *se_sess = conn->sess->se_sess;
 	int size, tag;
 
-	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask);
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
+	if (tag < 0)
+		return NULL;
+
 	size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
 	cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
 	memset(cmd, 0, size);
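
With the task-state conversion above, iscsit_allocate_cmd() must now cope with failure: percpu_ida_alloc() called with TASK_RUNNING returns -1 immediately when the pool is empty (the old GFP_ATOMIC callers), while TASK_INTERRUPTIBLE may sleep for a tag yet still fail on a signal, hence the new tag < 0 check. A toy fixed-size pool with the same may-fail, caller-checks-negative contract:

#include <stdio.h>

#define POOL_SIZE 4

static unsigned char tag_in_use[POOL_SIZE];

/* Non-blocking tag allocator: returns a tag index or -1 when exhausted,
 * mirroring percpu_ida_alloc(pool, TASK_RUNNING) failing immediately. */
static int tag_alloc(void)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		if (!tag_in_use[i]) {
			tag_in_use[i] = 1;
			return i;
		}
	}
	return -1;
}

static void tag_free(int tag)
{
	tag_in_use[tag] = 0;
}

int main(void)
{
	int i, tag;

	for (i = 0; i < POOL_SIZE + 1; i++) {
		tag = tag_alloc();
		if (tag < 0) {		/* the new tag < 0 check above */
			printf("pool exhausted after %d tags\n", i);
			break;
		}
		printf("got tag %d\n", tag);
	}
	tag_free(0);
	return 0;
}
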
@@ -926,7 +929,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
 	u8 state;
 	struct iscsi_cmd *cmd;
 
-	cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
+	cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
 	if (!cmd)
 		return -1;
 
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index e4fc34a02f57..561a424d1980 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -9,7 +9,7 @@ extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
 extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
 extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
 extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t);
-extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
 extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
 extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
 extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 1b41e6776152..fadad7c5f635 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -217,7 +217,8 @@ static void tcm_loop_submission_work(struct work_struct *work)
 			scsi_bufflen(sc), tcm_loop_sam_attr(sc),
 			sc->sc_data_direction, 0,
 			scsi_sglist(sc), scsi_sg_count(sc),
-			sgl_bidi, sgl_bidi_count);
+			sgl_bidi, sgl_bidi_count,
+			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
 	if (rc < 0) {
 		set_host_byte(sc, DID_NO_CONNECT);
 		goto out_done;
@@ -462,7 +463,7 @@ static int tcm_loop_driver_probe(struct device *dev)
 {
 	struct tcm_loop_hba *tl_hba;
 	struct Scsi_Host *sh;
-	int error;
+	int error, host_prot;
 
 	tl_hba = to_tcm_loop_hba(dev);
 
@@ -486,6 +487,13 @@ static int tcm_loop_driver_probe(struct device *dev)
 	sh->max_channel = 0;
 	sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
 
+	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+
+	scsi_host_set_prot(sh, host_prot);
+	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
+
 	error = scsi_add_host(sh, &tl_hba->dev);
 	if (error) {
 		pr_err("%s: scsi_add_host failed\n", __func__);
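
With all six DIF/DIX types advertised above, the tcm_loop virtual host receives protection scatterlists alongside data. For Type 1 protection each 512-byte sector carries an 8-byte tuple: a 2-byte guard (CRC16 with polynomial 0x8BB7 when SHOST_DIX_GUARD_CRC is selected, which is what CRC_T10DIF computes), a 2-byte application tag, and a 4-byte reference tag holding the low 32 bits of the LBA. A standalone sketch that builds one such tuple with a bitwise CRC:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC16 with the T10-DIF polynomial 0x8BB7 (the same function
 * the kernel's crc_t10dif() computes, selected above via the guard). */
static uint16_t crc_t10dif(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7
					     : crc << 1;
	}
	return crc;
}

/* DIF Type 1 protection information: one 8-byte tuple per sector. */
struct dif_tuple {
	uint16_t guard;		/* CRC16 of the sector data */
	uint16_t app_tag;
	uint32_t ref_tag;	/* low 32 bits of the LBA for Type 1 */
};

int main(void)
{
	uint8_t sector[512];
	struct dif_tuple t;

	memset(sector, 0xab, sizeof(sector));
	t.guard = crc_t10dif(sector, sizeof(sector));
	t.app_tag = 0;
	t.ref_tag = 1234;	/* this sector's LBA */

	printf("guard=0x%04x ref_tag=%u\n", t.guard, t.ref_tag);
	return 0;
}
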
@@ -1228,7 +1236,7 @@ static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
 
 /* Start items for tcm_loop_naa_cit */
 
-struct se_portal_group *tcm_loop_make_naa_tpg(
+static struct se_portal_group *tcm_loop_make_naa_tpg(
 	struct se_wwn *wwn,
 	struct config_group *group,
 	const char *name)
@@ -1273,7 +1281,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
 	return &tl_tpg->tl_se_tpg;
 }
 
-void tcm_loop_drop_naa_tpg(
+static void tcm_loop_drop_naa_tpg(
 	struct se_portal_group *se_tpg)
 {
 	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
@@ -1305,7 +1313,7 @@ void tcm_loop_drop_naa_tpg(
 
 /* Start items for tcm_loop_cit */
 
-struct se_wwn *tcm_loop_make_scsi_hba(
+static struct se_wwn *tcm_loop_make_scsi_hba(
 	struct target_fabric_configfs *tf,
 	struct config_group *group,
 	const char *name)
@@ -1375,7 +1383,7 @@ out:
 	return ERR_PTR(ret);
 }
 
-void tcm_loop_drop_scsi_hba(
+static void tcm_loop_drop_scsi_hba(
 	struct se_wwn *wwn)
 {
 	struct tcm_loop_hba *tl_hba = container_of(wwn,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fdcee326bfbc..12da9b386169 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -41,11 +41,14 @@
 #include "target_core_alua.h"
 #include "target_core_ua.h"
 
-static sense_reason_t core_alua_check_transition(int state, int *primary);
+static sense_reason_t core_alua_check_transition(int state, int valid,
+						 int *primary);
 static int core_alua_set_tg_pt_secondary_state(
 		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
 		struct se_port *port, int explicit, int offline);
 
+static char *core_alua_dump_state(int state);
+
 static u16 alua_lu_gps_counter;
 static u32 alua_lu_gps_count;
 
@@ -55,6 +58,86 @@ static LIST_HEAD(lu_gps_list);
 struct t10_alua_lu_gp *default_lu_gp;
 
 /*
+ * REPORT REFERRALS
+ *
+ * See sbc3r35 section 5.23
+ */
+sense_reason_t
+target_emulate_report_referrals(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct t10_alua_lba_map *map;
+	struct t10_alua_lba_map_member *map_mem;
+	unsigned char *buf;
+	u32 rd_len = 0, off;
+
+	if (cmd->data_length < 4) {
+		pr_warn("REPORT REFERRALS allocation length %u too"
+			" small\n", cmd->data_length);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	off = 4;
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	if (list_empty(&dev->t10_alua.lba_map_list)) {
+		spin_unlock(&dev->t10_alua.lba_map_lock);
+		transport_kunmap_data_sg(cmd);
+
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+			    lba_map_list) {
+		int desc_num = off + 3;
+		int pg_num;
+
+		off += 4;
+		if (cmd->data_length > off)
+			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
+		off += 8;
+		if (cmd->data_length > off)
+			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
+		off += 8;
+		rd_len += 20;
+		pg_num = 0;
+		list_for_each_entry(map_mem, &map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			int alua_state = map_mem->lba_map_mem_alua_state;
+			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
+
+			if (cmd->data_length > off)
+				buf[off] = alua_state & 0x0f;
+			off += 2;
+			if (cmd->data_length > off)
+				buf[off] = (alua_pg_id >> 8) & 0xff;
+			off++;
+			if (cmd->data_length > off)
+				buf[off] = (alua_pg_id & 0xff);
+			off++;
+			rd_len += 4;
+			pg_num++;
+		}
+		if (cmd->data_length > desc_num)
+			buf[desc_num] = pg_num;
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+
+	/*
+	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
+	 */
+	put_unaligned_be16(rd_len, &buf[2]);
+
+	transport_kunmap_data_sg(cmd);
+
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
+/*
  * REPORT_TARGET_PORT_GROUPS
  *
  * See spc4r17 section 6.27
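
target_emulate_report_referrals() above lays out the DataIN payload as a 4-byte header whose bytes 2-3 carry RETURN DATA LENGTH, followed by one 20-byte descriptor per LBA map (4 header bytes with the port-group count at offset 3, then first and last LBA as big-endian 64-bit values) and a 4-byte target-port-group entry per member; every store is guarded by cmd->data_length > off so a short allocation length truncates safely. A userspace decoder for that layout, with an invented one-segment example payload:

#include <stdint.h>
#include <stdio.h>

static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

/* Walk a REPORT REFERRALS DataIN payload laid out as in the emulation
 * above: header, then per-segment descriptors with trailing TPG entries. */
static void decode_referrals(const uint8_t *buf, uint32_t len)
{
	uint32_t rd_len = ((uint32_t)buf[2] << 8) | buf[3];
	uint32_t off = 4, end = 4 + rd_len < len ? 4 + rd_len : len;

	while (off + 20 <= end) {
		uint8_t pg_num = buf[off + 3];
		uint64_t first = get_be64(&buf[off + 4]);
		uint64_t last = get_be64(&buf[off + 12]);
		uint32_t i;

		printf("segment LBA %llu..%llu, %u port groups\n",
		       (unsigned long long)first, (unsigned long long)last,
		       pg_num);
		off += 20;
		for (i = 0; i < pg_num && off + 4 <= end; i++, off += 4)
			printf("  state 0x%x tpg %u\n", buf[off] & 0x0f,
			       ((uint32_t)buf[off + 2] << 8) | buf[off + 3]);
	}
}

int main(void)
{
	/* one segment, LBA 0..1023, one TPG (id 1, state 0 = A/O) */
	uint8_t buf[32] = { 0 };

	buf[2] = 0; buf[3] = 24;		/* RETURN DATA LENGTH */
	buf[7] = 1;				/* pg_num */
	buf[18] = 0x03; buf[19] = 0xff;		/* last LBA = 1023 */
	buf[27] = 1;				/* tpg id 1 */
	decode_referrals(buf, sizeof(buf));
	return 0;
}
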
@@ -210,7 +293,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 	unsigned char *ptr;
 	sense_reason_t rc = TCM_NO_SENSE;
 	u32 len = 4; /* Skip over RESERVED area in header */
-	int alua_access_state, primary = 0;
+	int alua_access_state, primary = 0, valid_states;
 	u16 tg_pt_id, rtpi;
 
 	if (!l_port)
@@ -252,6 +335,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
 		goto out;
 	}
+	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
 
 	ptr = &buf[4]; /* Skip over RESERVED area in header */
 
@@ -263,7 +347,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 		 * the state is a primary or secondary target port asymmetric
 		 * access state.
 		 */
-		rc = core_alua_check_transition(alua_access_state, &primary);
+		rc = core_alua_check_transition(alua_access_state,
+						valid_states, &primary);
 		if (rc) {
 			/*
 			 * If the SET TARGET PORT GROUPS attempts to establish
@@ -386,6 +471,81 @@ static inline int core_alua_state_nonoptimized(
 	return 0;
 }
 
+static inline int core_alua_state_lba_dependent(
+	struct se_cmd *cmd,
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	u8 *alua_ascq)
+{
+	struct se_device *dev = cmd->se_dev;
+	u64 segment_size, segment_mult, sectors, lba;
+
+	/* Only need to check for cdb actually containing LBAs */
+	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
+		return 0;
+
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	segment_size = dev->t10_alua.lba_map_segment_size;
+	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
+	sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+	lba = cmd->t_task_lba;
+	while (lba < cmd->t_task_lba + sectors) {
+		struct t10_alua_lba_map *cur_map = NULL, *map;
+		struct t10_alua_lba_map_member *map_mem;
+
+		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+				    lba_map_list) {
+			u64 start_lba, last_lba;
+			u64 first_lba = map->lba_map_first_lba;
+
+			if (segment_mult) {
+				u64 tmp = lba;
+				start_lba = sector_div(tmp, segment_size * segment_mult);
+
+				last_lba = first_lba + segment_size - 1;
+				if (start_lba >= first_lba &&
+				    start_lba <= last_lba) {
+					lba += segment_size;
+					cur_map = map;
+					break;
+				}
+			} else {
+				last_lba = map->lba_map_last_lba;
+				if (lba >= first_lba && lba <= last_lba) {
+					lba = last_lba + 1;
+					cur_map = map;
+					break;
+				}
+			}
+		}
+		if (!cur_map) {
+			spin_unlock(&dev->t10_alua.lba_map_lock);
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			return 1;
+		}
+		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			if (map_mem->lba_map_mem_alua_pg_id !=
+			    tg_pt_gp->tg_pt_gp_id)
+				continue;
+			switch(map_mem->lba_map_mem_alua_state) {
+			case ALUA_ACCESS_STATE_STANDBY:
+				spin_unlock(&dev->t10_alua.lba_map_lock);
+				*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+				return 1;
+			case ALUA_ACCESS_STATE_UNAVAILABLE:
+				spin_unlock(&dev->t10_alua.lba_map_lock);
+				*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+				return 1;
+			default:
+				break;
+			}
+		}
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	return 0;
+}
+
 static inline int core_alua_state_standby(
 	struct se_cmd *cmd,
 	unsigned char *cdb,
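
core_alua_state_lba_dependent() above walks each LBA a command touches and resolves it to a referral segment: with a segment multiplier configured, the LBA is first folded into the repeating window via sector_div() (i.e. lba mod segment_size * segment_mult), otherwise each map entry is matched against its explicit first/last range. A simplified demonstration of that lookup arithmetic, with invented segment values:

#include <stdint.h>
#include <stdio.h>

/* Map an LBA to a referral segment roughly the way the lookup above
 * does: with a multiplier, fold the LBA into the repeating window
 * first. Four equal segments here are invented for illustration. */
static int lba_to_segment(uint64_t lba, uint64_t segment_size,
			  uint64_t segment_mult)
{
	uint64_t first_lba = 0;
	int seg;

	if (segment_mult)
		lba %= segment_size * segment_mult; /* sector_div() remainder */

	for (seg = 0; seg < 4; seg++) {
		uint64_t last_lba = first_lba + segment_size - 1;

		if (lba >= first_lba && lba <= last_lba)
			return seg;
		first_lba = last_lba + 1;
	}
	return -1;	/* outside every segment: TG PT unavailable */
}

int main(void)
{
	uint64_t lba;

	for (lba = 0; lba < 4096; lba += 1000)
		printf("lba %4llu -> segment %d\n", (unsigned long long)lba,
		       lba_to_segment(lba, 512, 2));
	return 0;
}
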
@@ -583,6 +743,9 @@ target_alua_state_check(struct se_cmd *cmd)
 	case ALUA_ACCESS_STATE_TRANSITION:
 		ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
 		break;
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
+		break;
 	/*
 	 * OFFLINE is a secondary ALUA target port group access state, that is
 	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
@@ -618,17 +781,36 @@ out:
 * Check implicit and explicit ALUA state change request.
 */
static sense_reason_t
-core_alua_check_transition(int state, int *primary)
+core_alua_check_transition(int state, int valid, int *primary)
 {
+	/*
+	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+	 * defined as primary target port asymmetric access states.
+	 */
 	switch (state) {
 	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+		if (!(valid & ALUA_AO_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		if (!(valid & ALUA_AN_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
 	case ALUA_ACCESS_STATE_STANDBY:
+		if (!(valid & ALUA_S_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
 	case ALUA_ACCESS_STATE_UNAVAILABLE:
-		/*
-		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
-		 * defined as primary target port asymmetric access states.
-		 */
+		if (!(valid & ALUA_U_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		if (!(valid & ALUA_LBD_SUP))
+			goto not_supported;
 		*primary = 1;
 		break;
 	case ALUA_ACCESS_STATE_OFFLINE:
@@ -636,14 +818,27 @@ core_alua_check_transition(int state, int *primary)
 		 * OFFLINE state is defined as a secondary target port
 		 * asymmetric access state.
 		 */
+		if (!(valid & ALUA_O_SUP))
+			goto not_supported;
 		*primary = 0;
 		break;
+	case ALUA_ACCESS_STATE_TRANSITION:
+		/*
+		 * Transitioning is set internally, and
+		 * cannot be selected manually.
+		 */
+		goto not_supported;
 	default:
 		pr_err("Unknown ALUA access state: 0x%02x\n", state);
 		return TCM_INVALID_PARAMETER_LIST;
 	}
 
 	return 0;
+
+not_supported:
+	pr_err("ALUA access state %s not supported",
+	       core_alua_dump_state(state));
+	return TCM_INVALID_PARAMETER_LIST;
 }
 
 static char *core_alua_dump_state(int state)
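
core_alua_check_transition() now consults the group's tg_pt_gp_alua_supported_states mask before allowing an explicit STPG transition, and rejects Transitioning outright since it is only ever entered internally. The sketch below mirrors that bitmask gate; the flag values are assumed stand-ins for the kernel's ALUA_*_SUP definitions, not copied from target_core_alua.h.

#include <stdio.h>

/* Assumed bit layout for the supported-states mask. */
#define AO_SUP	0x01	/* active/optimized */
#define AN_SUP	0x02	/* active/non-optimized */
#define S_SUP	0x04	/* standby */
#define U_SUP	0x08	/* unavailable */
#define LBD_SUP	0x10	/* LBA dependent */
#define O_SUP	0x40	/* offline */

enum state { AO, AN, STANDBY, UNAVAIL, LBD, OFFLINE, TRANSITION };

static int check_transition(enum state s, int valid)
{
	static const int need[] = { AO_SUP, AN_SUP, S_SUP, U_SUP,
				    LBD_SUP, O_SUP };

	if (s == TRANSITION)	/* set internally, never selectable */
		return -1;
	return (valid & need[s]) ? 0 : -1;
}

int main(void)
{
	int valid = AO_SUP | AN_SUP | S_SUP;	/* what this group supports */

	printf("standby:       %s\n",
	       check_transition(STANDBY, valid) ? "rejected" : "ok");
	printf("offline:       %s\n",
	       check_transition(OFFLINE, valid) ? "rejected" : "ok");
	printf("transitioning: %s\n",
	       check_transition(TRANSITION, valid) ? "rejected" : "ok");
	return 0;
}
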
@@ -653,12 +848,16 @@ static char *core_alua_dump_state(int state)
 		return "Active/Optimized";
 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 		return "Active/NonOptimized";
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		return "LBA Dependent";
 	case ALUA_ACCESS_STATE_STANDBY:
 		return "Standby";
 	case ALUA_ACCESS_STATE_UNAVAILABLE:
 		return "Unavailable";
 	case ALUA_ACCESS_STATE_OFFLINE:
 		return "Offline";
+	case ALUA_ACCESS_STATE_TRANSITION:
+		return "Transitioning";
 	default:
 		return "Unknown";
 	}
@@ -735,58 +934,49 @@ static int core_alua_write_tpg_metadata(
 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
 */
static int core_alua_update_tpg_primary_metadata(
-	struct t10_alua_tg_pt_gp *tg_pt_gp,
-	int primary_state,
-	unsigned char *md_buf)
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
+	unsigned char *md_buf;
 	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
 	char path[ALUA_METADATA_PATH_LEN];
-	int len;
+	int len, rc;
+
+	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+	if (!md_buf) {
+		pr_err("Unable to allocate buf for ALUA metadata\n");
+		return -ENOMEM;
+	}
 
 	memset(path, 0, ALUA_METADATA_PATH_LEN);
 
-	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
 			"tg_pt_gp_id=%hu\n"
 			"alua_access_state=0x%02x\n"
 			"alua_access_status=0x%02x\n",
-			tg_pt_gp->tg_pt_gp_id, primary_state,
+			tg_pt_gp->tg_pt_gp_id,
+			tg_pt_gp->tg_pt_gp_alua_pending_state,
 			tg_pt_gp->tg_pt_gp_alua_access_status);
 
 	snprintf(path, ALUA_METADATA_PATH_LEN,
 		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
 		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
 
-	return core_alua_write_tpg_metadata(path, md_buf, len);
+	rc = core_alua_write_tpg_metadata(path, md_buf, len);
+	kfree(md_buf);
+	return rc;
 }
 
-static int core_alua_do_transition_tg_pt(
-	struct t10_alua_tg_pt_gp *tg_pt_gp,
-	struct se_port *l_port,
-	struct se_node_acl *nacl,
-	unsigned char *md_buf,
-	int new_state,
-	int explicit)
+static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 {
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
+		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 	struct se_dev_entry *se_deve;
 	struct se_lun_acl *lacl;
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp_member *mem;
-	int old_state = 0;
-	/*
-	 * Save the old primary ALUA access state, and set the current state
-	 * to ALUA_ACCESS_STATE_TRANSITION.
-	 */
-	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
-	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-			ALUA_ACCESS_STATE_TRANSITION);
-	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
-				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
-				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
-	/*
-	 * Check for the optional ALUA primary state transition delay
-	 */
-	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
-		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+	bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
+			 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
 
 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
@@ -821,9 +1011,12 @@ static int core_alua_do_transition_tg_pt(
 			if (!lacl)
 				continue;
 
-			if (explicit &&
-			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
-			   (l_port != NULL) && (l_port == port))
+			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
+			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+			    (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
+			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
+			    (tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
+			    (tg_pt_gp->tg_pt_gp_alua_port == port))
 				continue;
 
 			core_scsi3_ua_allocate(lacl->se_lun_nacl,
@@ -851,20 +1044,102 @@ static int core_alua_do_transition_tg_pt(
 	 */
 	if (tg_pt_gp->tg_pt_gp_write_metadata) {
 		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
-		core_alua_update_tpg_primary_metadata(tg_pt_gp,
-				new_state, md_buf);
+		core_alua_update_tpg_primary_metadata(tg_pt_gp);
 		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
 	}
 	/*
 	 * Set the current primary ALUA access state to the requested new state
 	 */
-	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+		   tg_pt_gp->tg_pt_gp_alua_pending_state);
 
 	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
 		" from primary access state %s to %s\n", (explicit) ? "explicit" :
 		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
-		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
-		core_alua_dump_state(new_state));
+		tg_pt_gp->tg_pt_gp_id,
+		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
+		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+	smp_mb__after_atomic_dec();
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+	if (tg_pt_gp->tg_pt_gp_transition_complete)
+		complete(tg_pt_gp->tg_pt_gp_transition_complete);
+}
+
+static int core_alua_do_transition_tg_pt(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	int new_state,
+	int explicit)
+{
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	/* Nothing to be done here */
+	if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
+		return 0;
+
+	if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+		return -EAGAIN;
+
+	/*
+	 * Flush any pending transitions
+	 */
+	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
+	    atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
+	    ALUA_ACCESS_STATE_TRANSITION) {
+		/* Just in case */
+		tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+		flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+		wait_for_completion(&wait);
+		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+		return 0;
+	}
+
+	/*
+	 * Save the old primary ALUA access state, and set the current state
+	 * to ALUA_ACCESS_STATE_TRANSITION.
+	 */
+	tg_pt_gp->tg_pt_gp_alua_previous_state =
+		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+		   ALUA_ACCESS_STATE_TRANSITION);
+	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
+				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
+
+	/*
+	 * Check for the optional ALUA primary state transition delay
+	 */
+	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
1119 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1120
1121 /*
1122 * Take a reference for workqueue item
1123 */
1124 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1125 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1126 smp_mb__after_atomic_inc();
1127 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1128
1129 if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
1130 unsigned long transition_tmo;
1131
1132 transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
1133 queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
1134 &tg_pt_gp->tg_pt_gp_transition_work,
1135 transition_tmo);
1136 } else {
1137 tg_pt_gp->tg_pt_gp_transition_complete = &wait;
1138 queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
1139 &tg_pt_gp->tg_pt_gp_transition_work, 0);
1140 wait_for_completion(&wait);
1141 tg_pt_gp->tg_pt_gp_transition_complete = NULL;
1142 }
868 1143
869 return 0; 1144 return 0;
870} 1145}
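
The rewrite above splits the old synchronous core_alua_do_transition_tg_pt() into a queueing front end and a delayed-work body: implicit transitions are queued with a tg_pt_gp_implicit_trans_secs delay and return immediately, while explicit STPG callers queue with no delay and block on an on-stack completion. A minimal, self-contained sketch of that queue-then-wait pattern follows; all names here are illustrative, not from the patch:

	#include <linux/workqueue.h>
	#include <linux/completion.h>

	struct transition_ctx {
		struct delayed_work work;	/* set up via INIT_DELAYED_WORK(&work, transition_fn) */
		struct completion *done;	/* non-NULL only for synchronous callers */
	};

	static void transition_fn(struct work_struct *work)
	{
		struct transition_ctx *ctx =
			container_of(to_delayed_work(work), struct transition_ctx, work);

		/* ... perform the state change and write metadata ... */

		if (ctx->done)
			complete(ctx->done);
	}

	static int do_transition(struct transition_ctx *ctx, unsigned long delay_secs)
	{
		DECLARE_COMPLETION_ONSTACK(wait);

		if (delay_secs) {
			/* implicit: fire later, do not wait */
			queue_delayed_work(system_wq, &ctx->work, delay_secs * HZ);
			return 0;
		}
		/* explicit: run as soon as possible and wait for the worker */
		ctx->done = &wait;
		queue_delayed_work(system_wq, &ctx->work, 0);
		wait_for_completion(&wait);
		ctx->done = NULL;
		return 0;
	}

The reference taken on tg_pt_gp_ref_cnt before queueing, and dropped at the end of the worker, is what lets core_alua_free_tg_pt_gp() safely flush_delayed_work() before tearing the group down.
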
@@ -878,23 +1153,15 @@ int core_alua_do_port_transition(
878 int explicit) 1153 int explicit)
879{ 1154{
880 struct se_device *dev; 1155 struct se_device *dev;
881 struct se_port *port;
882 struct se_node_acl *nacl;
883 struct t10_alua_lu_gp *lu_gp; 1156 struct t10_alua_lu_gp *lu_gp;
884 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; 1157 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
885 struct t10_alua_tg_pt_gp *tg_pt_gp; 1158 struct t10_alua_tg_pt_gp *tg_pt_gp;
886 unsigned char *md_buf; 1159 int primary, valid_states, rc = 0;
887 int primary;
888 1160
889 if (core_alua_check_transition(new_state, &primary) != 0) 1161 valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
1162 if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
890 return -EINVAL; 1163 return -EINVAL;
891 1164
892 md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
893 if (!md_buf) {
894 pr_err("Unable to allocate buf for ALUA metadata\n");
895 return -ENOMEM;
896 }
897
898 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; 1165 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
899 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); 1166 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
900 lu_gp = local_lu_gp_mem->lu_gp; 1167 lu_gp = local_lu_gp_mem->lu_gp;
@@ -911,12 +1178,13 @@ int core_alua_do_port_transition(
911 * core_alua_do_transition_tg_pt() will always return 1178 * core_alua_do_transition_tg_pt() will always return
912 * success. 1179 * success.
913 */ 1180 */
914 core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, 1181 l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
915 md_buf, new_state, explicit); 1182 l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1183 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
1184 new_state, explicit);
916 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1185 atomic_dec(&lu_gp->lu_gp_ref_cnt);
917 smp_mb__after_atomic_dec(); 1186 smp_mb__after_atomic_dec();
918 kfree(md_buf); 1187 return rc;
919 return 0;
920 } 1188 }
921 /* 1189 /*
922 * For all other LU groups aside from 'default_lu_gp', walk all of 1190 * For all other LU groups aside from 'default_lu_gp', walk all of
@@ -951,11 +1219,11 @@ int core_alua_do_port_transition(
951 continue; 1219 continue;
952 1220
953 if (l_tg_pt_gp == tg_pt_gp) { 1221 if (l_tg_pt_gp == tg_pt_gp) {
954 port = l_port; 1222 tg_pt_gp->tg_pt_gp_alua_port = l_port;
955 nacl = l_nacl; 1223 tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
956 } else { 1224 } else {
957 port = NULL; 1225 tg_pt_gp->tg_pt_gp_alua_port = NULL;
958 nacl = NULL; 1226 tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
959 } 1227 }
960 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1228 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
961 smp_mb__after_atomic_inc(); 1229 smp_mb__after_atomic_inc();
@@ -964,12 +1232,14 @@ int core_alua_do_port_transition(
964 * core_alua_do_transition_tg_pt() will always return 1232 * core_alua_do_transition_tg_pt() will always return
965 * success. 1233 * success.
966 */ 1234 */
967 core_alua_do_transition_tg_pt(tg_pt_gp, port, 1235 rc = core_alua_do_transition_tg_pt(tg_pt_gp,
968 nacl, md_buf, new_state, explicit); 1236 new_state, explicit);
969 1237
970 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1238 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
971 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1239 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
972 smp_mb__after_atomic_dec(); 1240 smp_mb__after_atomic_dec();
1241 if (rc)
1242 break;
973 } 1243 }
974 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1244 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
975 1245
@@ -979,16 +1249,18 @@ int core_alua_do_port_transition(
979 } 1249 }
980 spin_unlock(&lu_gp->lu_gp_lock); 1250 spin_unlock(&lu_gp->lu_gp_lock);
981 1251
982 pr_debug("Successfully processed LU Group: %s all ALUA TG PT" 1252 if (!rc) {
983 " Group IDs: %hu %s transition to primary state: %s\n", 1253 pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
984 config_item_name(&lu_gp->lu_gp_group.cg_item), 1254 " Group IDs: %hu %s transition to primary state: %s\n",
985 l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit", 1255 config_item_name(&lu_gp->lu_gp_group.cg_item),
986 core_alua_dump_state(new_state)); 1256 l_tg_pt_gp->tg_pt_gp_id,
1257 (explicit) ? "explicit" : "implicit",
1258 core_alua_dump_state(new_state));
1259 }
987 1260
988 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1261 atomic_dec(&lu_gp->lu_gp_ref_cnt);
989 smp_mb__after_atomic_dec(); 1262 smp_mb__after_atomic_dec();
990 kfree(md_buf); 1263 return rc;
991 return 0;
992} 1264}
993 1265
994/* 1266/*
@@ -996,13 +1268,18 @@ int core_alua_do_port_transition(
996 */ 1268 */
997static int core_alua_update_tpg_secondary_metadata( 1269static int core_alua_update_tpg_secondary_metadata(
998 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1270 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
999 struct se_port *port, 1271 struct se_port *port)
1000 unsigned char *md_buf,
1001 u32 md_buf_len)
1002{ 1272{
1273 unsigned char *md_buf;
1003 struct se_portal_group *se_tpg = port->sep_tpg; 1274 struct se_portal_group *se_tpg = port->sep_tpg;
1004 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; 1275 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
1005 int len; 1276 int len, rc;
1277
1278 md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
1279 if (!md_buf) {
1280 pr_err("Unable to allocate buf for ALUA metadata\n");
1281 return -ENOMEM;
1282 }
1006 1283
1007 memset(path, 0, ALUA_METADATA_PATH_LEN); 1284 memset(path, 0, ALUA_METADATA_PATH_LEN);
1008 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); 1285 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
@@ -1014,7 +1291,7 @@ static int core_alua_update_tpg_secondary_metadata(
1014 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", 1291 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
1015 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); 1292 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
1016 1293
1017 len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" 1294 len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
1018 "alua_tg_pt_status=0x%02x\n", 1295 "alua_tg_pt_status=0x%02x\n",
1019 atomic_read(&port->sep_tg_pt_secondary_offline), 1296 atomic_read(&port->sep_tg_pt_secondary_offline),
1020 port->sep_tg_pt_secondary_stat); 1297 port->sep_tg_pt_secondary_stat);
@@ -1023,7 +1300,10 @@ static int core_alua_update_tpg_secondary_metadata(
1023 se_tpg->se_tpg_tfo->get_fabric_name(), wwn, 1300 se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
1024 port->sep_lun->unpacked_lun); 1301 port->sep_lun->unpacked_lun);
1025 1302
1026 return core_alua_write_tpg_metadata(path, md_buf, len); 1303 rc = core_alua_write_tpg_metadata(path, md_buf, len);
1304 kfree(md_buf);
1305
1306 return rc;
1027} 1307}
1028 1308
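
The secondary variant follows the same pattern with a two-line file keyed by fabric name, WWN+tag and LUN; illustrative contents (values hypothetical):

	alua_tg_pt_offline=1
	alua_tg_pt_status=0x02
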
1029static int core_alua_set_tg_pt_secondary_state( 1309static int core_alua_set_tg_pt_secondary_state(
@@ -1033,8 +1313,6 @@ static int core_alua_set_tg_pt_secondary_state(
1033 int offline) 1313 int offline)
1034{ 1314{
1035 struct t10_alua_tg_pt_gp *tg_pt_gp; 1315 struct t10_alua_tg_pt_gp *tg_pt_gp;
1036 unsigned char *md_buf;
1037 u32 md_buf_len;
1038 int trans_delay_msecs; 1316 int trans_delay_msecs;
1039 1317
1040 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1318 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1055,7 +1333,6 @@ static int core_alua_set_tg_pt_secondary_state(
1055 else 1333 else
1056 atomic_set(&port->sep_tg_pt_secondary_offline, 0); 1334 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
1057 1335
1058 md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
1059 port->sep_tg_pt_secondary_stat = (explicit) ? 1336 port->sep_tg_pt_secondary_stat = (explicit) ?
1060 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 1337 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1061 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; 1338 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
@@ -1077,23 +1354,115 @@ static int core_alua_set_tg_pt_secondary_state(
1077 * secondary state and status 1354 * secondary state and status
1078 */ 1355 */
1079 if (port->sep_tg_pt_secondary_write_md) { 1356 if (port->sep_tg_pt_secondary_write_md) {
1080 md_buf = kzalloc(md_buf_len, GFP_KERNEL);
1081 if (!md_buf) {
1082 pr_err("Unable to allocate md_buf for"
1083 " secondary ALUA access metadata\n");
1084 return -ENOMEM;
1085 }
1086 mutex_lock(&port->sep_tg_pt_md_mutex); 1357 mutex_lock(&port->sep_tg_pt_md_mutex);
1087 core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, 1358 core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
1088 md_buf, md_buf_len);
1089 mutex_unlock(&port->sep_tg_pt_md_mutex); 1359 mutex_unlock(&port->sep_tg_pt_md_mutex);
1360 }
1361
1362 return 0;
1363}
1364
1365struct t10_alua_lba_map *
1366core_alua_allocate_lba_map(struct list_head *list,
1367 u64 first_lba, u64 last_lba)
1368{
1369 struct t10_alua_lba_map *lba_map;
1370
1371 lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
1372 if (!lba_map) {
1373 pr_err("Unable to allocate struct t10_alua_lba_map\n");
1374 return ERR_PTR(-ENOMEM);
1375 }
1376 INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
1377 lba_map->lba_map_first_lba = first_lba;
1378 lba_map->lba_map_last_lba = last_lba;
1090 1379
1091 kfree(md_buf); 1380 list_add_tail(&lba_map->lba_map_list, list);
1381 return lba_map;
1382}
1383
1384int
1385core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
1386 int pg_id, int state)
1387{
1388 struct t10_alua_lba_map_member *lba_map_mem;
1389
1390 list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
1391 lba_map_mem_list) {
1392 if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
1393 pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
1394 return -EINVAL;
1395 }
1396 }
1397
1398 lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
1399 if (!lba_map_mem) {
1400 pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
1401 return -ENOMEM;
1092 } 1402 }
1403 lba_map_mem->lba_map_mem_alua_state = state;
1404 lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
1093 1405
1406 list_add_tail(&lba_map_mem->lba_map_mem_list,
1407 &lba_map->lba_map_mem_list);
1094 return 0; 1408 return 0;
1095} 1409}
1096 1410
1411void
1412core_alua_free_lba_map(struct list_head *lba_list)
1413{
1414 struct t10_alua_lba_map *lba_map, *lba_map_tmp;
1415 struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
1416
1417 list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
1418 lba_map_list) {
1419 list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
1420 &lba_map->lba_map_mem_list,
1421 lba_map_mem_list) {
1422 list_del(&lba_map_mem->lba_map_mem_list);
1423 kmem_cache_free(t10_alua_lba_map_mem_cache,
1424 lba_map_mem);
1425 }
1426 list_del(&lba_map->lba_map_list);
1427 kmem_cache_free(t10_alua_lba_map_cache, lba_map);
1428 }
1429}
1430
1431void
1432core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
1433 int segment_size, int segment_mult)
1434{
1435 struct list_head old_lba_map_list;
1436 struct t10_alua_tg_pt_gp *tg_pt_gp;
1437 int activate = 0, supported;
1438
1439 INIT_LIST_HEAD(&old_lba_map_list);
1440 spin_lock(&dev->t10_alua.lba_map_lock);
1441 dev->t10_alua.lba_map_segment_size = segment_size;
1442 dev->t10_alua.lba_map_segment_multiplier = segment_mult;
1443 list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
1444 if (lba_map_list) {
1445 list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
1446 activate = 1;
1447 }
1448 spin_unlock(&dev->t10_alua.lba_map_lock);
1449 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1450 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1451 tg_pt_gp_list) {
1452
1453 if (!tg_pt_gp->tg_pt_gp_valid_id)
1454 continue;
1455 supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1456 if (activate)
1457 supported |= ALUA_LBD_SUP;
1458 else
1459 supported &= ~ALUA_LBD_SUP;
1460 tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1461 }
1462 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1463 core_alua_free_lba_map(&old_lba_map_list);
1464}
1465
1097struct t10_alua_lu_gp * 1466struct t10_alua_lu_gp *
1098core_alua_allocate_lu_gp(const char *name, int def_group) 1467core_alua_allocate_lu_gp(const char *name, int def_group)
1099{ 1468{
@@ -1346,8 +1715,9 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1346 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); 1715 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1347 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1716 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1348 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); 1717 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1718 INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
1719 core_alua_do_transition_tg_pt_work);
1349 tg_pt_gp->tg_pt_gp_dev = dev; 1720 tg_pt_gp->tg_pt_gp_dev = dev;
1350 tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
1351 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1721 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1352 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); 1722 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
1353 /* 1723 /*
@@ -1475,6 +1845,8 @@ void core_alua_free_tg_pt_gp(
1475 dev->t10_alua.alua_tg_pt_gps_counter--; 1845 dev->t10_alua.alua_tg_pt_gps_counter--;
1476 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1846 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1477 1847
1848 flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
1849
1478 /* 1850 /*
1479 * Allow a struct t10_alua_tg_pt_gp_member * referenced by 1851 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1480 * core_alua_get_tg_pt_gp_by_name() in 1852 * core_alua_get_tg_pt_gp_by_name() in
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 88e2e835f14a..0a7d65e80404 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -13,12 +13,13 @@
13/* 13/*
14 * ASYMMETRIC ACCESS STATE field 14 * ASYMMETRIC ACCESS STATE field
15 * 15 *
16 * from spc4r17 section 6.27 Table 245 16 * from spc4r36j section 6.37 Table 307
17 */ 17 */
18#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0 18#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0
19#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1 19#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
20#define ALUA_ACCESS_STATE_STANDBY 0x2 20#define ALUA_ACCESS_STATE_STANDBY 0x2
21#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3 21#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
22#define ALUA_ACCESS_STATE_LBA_DEPENDENT 0x4
22#define ALUA_ACCESS_STATE_OFFLINE 0xe 23#define ALUA_ACCESS_STATE_OFFLINE 0xe
23#define ALUA_ACCESS_STATE_TRANSITION 0xf 24#define ALUA_ACCESS_STATE_TRANSITION 0xf
24 25
@@ -78,18 +79,30 @@
78 */ 79 */
79#define ALUA_SECONDARY_METADATA_WWN_LEN 256 80#define ALUA_SECONDARY_METADATA_WWN_LEN 256
80 81
82/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
83#define ALUA_MD_BUF_LEN 1024
84
81extern struct kmem_cache *t10_alua_lu_gp_cache; 85extern struct kmem_cache *t10_alua_lu_gp_cache;
82extern struct kmem_cache *t10_alua_lu_gp_mem_cache; 86extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
83extern struct kmem_cache *t10_alua_tg_pt_gp_cache; 87extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
84extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 88extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
89extern struct kmem_cache *t10_alua_lba_map_cache;
90extern struct kmem_cache *t10_alua_lba_map_mem_cache;
85 91
86extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *); 92extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
87extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *); 93extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
94extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
88extern int core_alua_check_nonop_delay(struct se_cmd *); 95extern int core_alua_check_nonop_delay(struct se_cmd *);
89extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, 96extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
90 struct se_device *, struct se_port *, 97 struct se_device *, struct se_port *,
91 struct se_node_acl *, int, int); 98 struct se_node_acl *, int, int);
92extern char *core_alua_dump_status(int); 99extern char *core_alua_dump_status(int);
100extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
101 struct list_head *, u64, u64);
102extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int);
103extern void core_alua_free_lba_map(struct list_head *);
104extern void core_alua_set_lba_map(struct se_device *, struct list_head *,
105 int, int);
93extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int); 106extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
94extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16); 107extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
95extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *); 108extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 272755d03e5a..f0e85b119692 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -643,6 +643,15 @@ SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);
643DEF_DEV_ATTRIB(emulate_3pc); 643DEF_DEV_ATTRIB(emulate_3pc);
644SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR); 644SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);
645 645
646DEF_DEV_ATTRIB(pi_prot_type);
647SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR);
648
649DEF_DEV_ATTRIB_RO(hw_pi_prot_type);
650SE_DEV_ATTR_RO(hw_pi_prot_type);
651
652DEF_DEV_ATTRIB(pi_prot_format);
653SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR);
654
646DEF_DEV_ATTRIB(enforce_pr_isids); 655DEF_DEV_ATTRIB(enforce_pr_isids);
647SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); 656SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
648 657
@@ -702,6 +711,9 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
702 &target_core_dev_attrib_emulate_tpws.attr, 711 &target_core_dev_attrib_emulate_tpws.attr,
703 &target_core_dev_attrib_emulate_caw.attr, 712 &target_core_dev_attrib_emulate_caw.attr,
704 &target_core_dev_attrib_emulate_3pc.attr, 713 &target_core_dev_attrib_emulate_3pc.attr,
714 &target_core_dev_attrib_pi_prot_type.attr,
715 &target_core_dev_attrib_hw_pi_prot_type.attr,
716 &target_core_dev_attrib_pi_prot_format.attr,
705 &target_core_dev_attrib_enforce_pr_isids.attr, 717 &target_core_dev_attrib_enforce_pr_isids.attr,
706 &target_core_dev_attrib_is_nonrot.attr, 718 &target_core_dev_attrib_is_nonrot.attr,
707 &target_core_dev_attrib_emulate_rest_reord.attr, 719 &target_core_dev_attrib_emulate_rest_reord.attr,
@@ -1741,6 +1753,176 @@ static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
1741 .store = target_core_store_alua_lu_gp, 1753 .store = target_core_store_alua_lu_gp,
1742}; 1754};
1743 1755
1756static ssize_t target_core_show_dev_lba_map(void *p, char *page)
1757{
1758 struct se_device *dev = p;
1759 struct t10_alua_lba_map *map;
1760 struct t10_alua_lba_map_member *mem;
1761 char *b = page;
1762 int bl = 0;
1763 char state;
1764
1765 spin_lock(&dev->t10_alua.lba_map_lock);
1766 if (!list_empty(&dev->t10_alua.lba_map_list))
1767 bl += sprintf(b + bl, "%u %u\n",
1768 dev->t10_alua.lba_map_segment_size,
1769 dev->t10_alua.lba_map_segment_multiplier);
1770 list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
1771 bl += sprintf(b + bl, "%llu %llu",
1772 map->lba_map_first_lba, map->lba_map_last_lba);
1773 list_for_each_entry(mem, &map->lba_map_mem_list,
1774 lba_map_mem_list) {
1775 switch (mem->lba_map_mem_alua_state) {
1776 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
1777 state = 'O';
1778 break;
1779 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
1780 state = 'A';
1781 break;
1782 case ALUA_ACCESS_STATE_STANDBY:
1783 state = 'S';
1784 break;
1785 case ALUA_ACCESS_STATE_UNAVAILABLE:
1786 state = 'U';
1787 break;
1788 default:
1789 state = '.';
1790 break;
1791 }
1792 bl += sprintf(b + bl, " %d:%c",
1793 mem->lba_map_mem_alua_pg_id, state);
1794 }
1795 bl += sprintf(b + bl, "\n");
1796 }
1797 spin_unlock(&dev->t10_alua.lba_map_lock);
1798 return bl;
1799}
1800
1801static ssize_t target_core_store_dev_lba_map(
1802 void *p,
1803 const char *page,
1804 size_t count)
1805{
1806 struct se_device *dev = p;
1807 struct t10_alua_lba_map *lba_map = NULL;
1808 struct list_head lba_list;
1809 char *map_entries, *ptr;
1810 char state;
1811 int pg_num = -1, pg;
1812 int ret = 0, num = 0, pg_id, alua_state;
1813 unsigned long start_lba = -1, end_lba = -1;
1814 unsigned long segment_size = -1, segment_mult = -1;
1815
1816 map_entries = kstrdup(page, GFP_KERNEL);
1817 if (!map_entries)
1818 return -ENOMEM;
1819
1820 INIT_LIST_HEAD(&lba_list);
1821 while ((ptr = strsep(&map_entries, "\n")) != NULL) {
1822 if (!*ptr)
1823 continue;
1824
1825 if (num == 0) {
1826 if (sscanf(ptr, "%lu %lu\n",
1827 &segment_size, &segment_mult) != 2) {
1828 pr_err("Invalid line %d\n", num);
1829 ret = -EINVAL;
1830 break;
1831 }
1832 num++;
1833 continue;
1834 }
1835 if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
1836 pr_err("Invalid line %d\n", num);
1837 ret = -EINVAL;
1838 break;
1839 }
1840 ptr = strchr(ptr, ' ');
1841 if (!ptr) {
1842 pr_err("Invalid line %d, missing end lba\n", num);
1843 ret = -EINVAL;
1844 break;
1845 }
1846 ptr++;
1847 ptr = strchr(ptr, ' ');
1848 if (!ptr) {
1849 pr_err("Invalid line %d, missing state definitions\n",
1850 num);
1851 ret = -EINVAL;
1852 break;
1853 }
1854 ptr++;
1855 lba_map = core_alua_allocate_lba_map(&lba_list,
1856 start_lba, end_lba);
1857 if (IS_ERR(lba_map)) {
1858 ret = PTR_ERR(lba_map);
1859 break;
1860 }
1861 pg = 0;
1862 while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
1863 switch (state) {
1864 case 'O':
1865 alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
1866 break;
1867 case 'A':
1868 alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
1869 break;
1870 case 'S':
1871 alua_state = ALUA_ACCESS_STATE_STANDBY;
1872 break;
1873 case 'U':
1874 alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
1875 break;
1876 default:
1877 pr_err("Invalid ALUA state '%c'\n", state);
1878 ret = -EINVAL;
1879 goto out;
1880 }
1881
1882 ret = core_alua_allocate_lba_map_mem(lba_map,
1883 pg_id, alua_state);
1884 if (ret) {
1885 pr_err("Invalid target descriptor %d:%c "
1886 "at line %d\n",
1887 pg_id, state, num);
1888 break;
1889 }
1890 pg++;
1891 ptr = strchr(ptr, ' ');
1892 if (ptr)
1893 ptr++;
1894 else
1895 break;
1896 }
1897 if (pg_num == -1)
1898 pg_num = pg;
1899 else if (pg != pg_num) {
1900 pr_err("Only %d from %d port groups definitions "
1901 "at line %d\n", pg, pg_num, num);
1902 ret = -EINVAL;
1903 break;
1904 }
1905 num++;
1906 }
1907out:
1908 if (ret) {
1909 core_alua_free_lba_map(&lba_list);
1910 count = ret;
1911 } else
1912 core_alua_set_lba_map(dev, &lba_list,
1913 segment_size, segment_mult);
1914 kfree(map_entries);
1915 return count;
1916}
1917
1918static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
1919 .attr = { .ca_owner = THIS_MODULE,
1920 .ca_name = "lba_map",
1921 .ca_mode = S_IRUGO | S_IWUSR },
1922 .show = target_core_show_dev_lba_map,
1923 .store = target_core_store_dev_lba_map,
1924};
1925
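
Putting the parser and show routine above together, the new lba_map attribute speaks a simple text format: the first line carries segment_size and segment_multiplier, and each following line gives an LBA range followed by pg_id:state pairs, where the state letter is O (active/optimized), A (active/non-optimized), S (standby) or U (unavailable). Every range line must name the same number of port groups (the pg_num check). A hedged example with made-up numbers:

	64 2
	0 4095 0:O 1:A
	4096 8191 0:A 1:O
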
1744static struct configfs_attribute *lio_core_dev_attrs[] = { 1926static struct configfs_attribute *lio_core_dev_attrs[] = {
1745 &target_core_attr_dev_info.attr, 1927 &target_core_attr_dev_info.attr,
1746 &target_core_attr_dev_control.attr, 1928 &target_core_attr_dev_control.attr,
@@ -1748,6 +1930,7 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
1748 &target_core_attr_dev_udev_path.attr, 1930 &target_core_attr_dev_udev_path.attr,
1749 &target_core_attr_dev_enable.attr, 1931 &target_core_attr_dev_enable.attr,
1750 &target_core_attr_dev_alua_lu_gp.attr, 1932 &target_core_attr_dev_alua_lu_gp.attr,
1933 &target_core_attr_dev_lba_map.attr,
1751 NULL, 1934 NULL,
1752}; 1935};
1753 1936
@@ -2054,6 +2237,13 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2054 " transition while TPGS_IMPLICIT_ALUA is disabled\n"); 2237 " transition while TPGS_IMPLICIT_ALUA is disabled\n");
2055 return -EINVAL; 2238 return -EINVAL;
2056 } 2239 }
2240 if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
2241 new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
2242 /* LBA DEPENDENT is only allowed with implicit ALUA */
2243 pr_err("Unable to process implicit configfs ALUA transition"
2244 " while explicit ALUA management is enabled\n");
2245 return -EINVAL;
2246 }
2057 2247
2058 ret = core_alua_do_port_transition(tg_pt_gp, dev, 2248 ret = core_alua_do_port_transition(tg_pt_gp, dev,
2059 NULL, NULL, new_state, 0); 2249 NULL, NULL, new_state, 0);
@@ -2188,7 +2378,7 @@ SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
2188 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); 2378 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
2189SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent, 2379SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
2190 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); 2380 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
2191SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR); 2381SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO);
2192 2382
2193SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable, 2383SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
2194 tg_pt_gp_alua_supported_states, ALUA_U_SUP); 2384 tg_pt_gp_alua_supported_states, ALUA_U_SUP);
@@ -2937,7 +3127,7 @@ static int __init target_core_init_configfs(void)
2937 * and ALUA Logical Unit Group and Target Port Group infrastructure. 3127 * and ALUA Logical Unit Group and Target Port Group infrastructure.
2938 */ 3128 */
2939 target_cg = &subsys->su_group; 3129 target_cg = &subsys->su_group;
2940 target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2, 3130 target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
2941 GFP_KERNEL); 3131 GFP_KERNEL);
2942 if (!target_cg->default_groups) { 3132 if (!target_cg->default_groups) {
2943 pr_err("Unable to allocate target_cg->default_groups\n"); 3133 pr_err("Unable to allocate target_cg->default_groups\n");
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d06de84b069b..65001e133670 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -918,6 +918,90 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
918 return 0; 918 return 0;
919} 919}
920 920
921int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
922{
923 int rc, old_prot = dev->dev_attrib.pi_prot_type;
924
925 if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
926 pr_err("Illegal value %d for pi_prot_type\n", flag);
927 return -EINVAL;
928 }
929 if (flag == 2) {
930 pr_err("DIF TYPE2 protection currently not supported\n");
931 return -ENOSYS;
932 }
933 if (dev->dev_attrib.hw_pi_prot_type) {
934 pr_warn("DIF protection enabled on underlying hardware,"
935 " ignoring\n");
936 return 0;
937 }
938 if (!dev->transport->init_prot || !dev->transport->free_prot) {
939 pr_err("DIF protection not supported by backend: %s\n",
940 dev->transport->name);
941 return -ENOSYS;
942 }
943 if (!(dev->dev_flags & DF_CONFIGURED)) {
944 pr_err("DIF protection requires device to be configured\n");
945 return -ENODEV;
946 }
947 if (dev->export_count) {
948 pr_err("dev[%p]: Unable to change SE Device PROT type while"
949 " export_count is %d\n", dev, dev->export_count);
950 return -EINVAL;
951 }
952
953 dev->dev_attrib.pi_prot_type = flag;
954
955 if (flag && !old_prot) {
956 rc = dev->transport->init_prot(dev);
957 if (rc) {
958 dev->dev_attrib.pi_prot_type = old_prot;
959 return rc;
960 }
961
962 } else if (!flag && old_prot) {
963 dev->transport->free_prot(dev);
964 }
965 pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
966
967 return 0;
968}
969
970int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
971{
972 int rc;
973
974 if (!flag)
975 return 0;
976
977 if (flag != 1) {
978 pr_err("Illegal value %d for pi_prot_format\n", flag);
979 return -EINVAL;
980 }
981 if (!dev->transport->format_prot) {
982 pr_err("DIF protection format not supported by backend %s\n",
983 dev->transport->name);
984 return -ENOSYS;
985 }
986 if (!(dev->dev_flags & DF_CONFIGURED)) {
987 pr_err("DIF protection format requires device to be configured\n");
988 return -ENODEV;
989 }
990 if (dev->export_count) {
991 pr_err("dev[%p]: Unable to format SE Device PROT type while"
992 " export_count is %d\n", dev, dev->export_count);
993 return -EINVAL;
994 }
995
996 rc = dev->transport->format_prot(dev);
997 if (rc)
998 return rc;
999
1000 pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
1001
1002 return 0;
1003}
1004
921int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) 1005int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
922{ 1006{
923 if ((flag != 0) && (flag != 1)) { 1007 if ((flag != 0) && (flag != 1)) {
@@ -1117,23 +1201,23 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1117struct se_lun *core_dev_add_lun( 1201struct se_lun *core_dev_add_lun(
1118 struct se_portal_group *tpg, 1202 struct se_portal_group *tpg,
1119 struct se_device *dev, 1203 struct se_device *dev,
1120 u32 lun) 1204 u32 unpacked_lun)
1121{ 1205{
1122 struct se_lun *lun_p; 1206 struct se_lun *lun;
1123 int rc; 1207 int rc;
1124 1208
1125 lun_p = core_tpg_pre_addlun(tpg, lun); 1209 lun = core_tpg_alloc_lun(tpg, unpacked_lun);
1126 if (IS_ERR(lun_p)) 1210 if (IS_ERR(lun))
1127 return lun_p; 1211 return lun;
1128 1212
1129 rc = core_tpg_post_addlun(tpg, lun_p, 1213 rc = core_tpg_add_lun(tpg, lun,
1130 TRANSPORT_LUNFLAGS_READ_WRITE, dev); 1214 TRANSPORT_LUNFLAGS_READ_WRITE, dev);
1131 if (rc < 0) 1215 if (rc < 0)
1132 return ERR_PTR(rc); 1216 return ERR_PTR(rc);
1133 1217
1134 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" 1218 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1135 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1219 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1136 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, 1220 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1137 tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id); 1221 tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
1138 /* 1222 /*
1139 * Update LUN maps for dynamically added initiators when 1223 * Update LUN maps for dynamically added initiators when
@@ -1154,7 +1238,7 @@ struct se_lun *core_dev_add_lun(
1154 spin_unlock_irq(&tpg->acl_node_lock); 1238 spin_unlock_irq(&tpg->acl_node_lock);
1155 } 1239 }
1156 1240
1157 return lun_p; 1241 return lun;
1158} 1242}
1159 1243
1160/* core_dev_del_lun(): 1244/* core_dev_del_lun():
@@ -1420,6 +1504,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1420 dev->dev_link_magic = SE_DEV_LINK_MAGIC; 1504 dev->dev_link_magic = SE_DEV_LINK_MAGIC;
1421 dev->se_hba = hba; 1505 dev->se_hba = hba;
1422 dev->transport = hba->transport; 1506 dev->transport = hba->transport;
1507 dev->prot_length = sizeof(struct se_dif_v1_tuple);
1423 1508
1424 INIT_LIST_HEAD(&dev->dev_list); 1509 INIT_LIST_HEAD(&dev->dev_list);
1425 INIT_LIST_HEAD(&dev->dev_sep_list); 1510 INIT_LIST_HEAD(&dev->dev_sep_list);
@@ -1444,6 +1529,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1444 spin_lock_init(&dev->t10_pr.aptpl_reg_lock); 1529 spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
1445 INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list); 1530 INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
1446 spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); 1531 spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
1532 INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
1533 spin_lock_init(&dev->t10_alua.lba_map_lock);
1447 1534
1448 dev->t10_wwn.t10_dev = dev; 1535 dev->t10_wwn.t10_dev = dev;
1449 dev->t10_alua.t10_dev = dev; 1536 dev->t10_alua.t10_dev = dev;
@@ -1460,6 +1547,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1460 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; 1547 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
1461 dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; 1548 dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
1462 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; 1549 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
1550 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
1463 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 1551 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
1464 dev->dev_attrib.is_nonrot = DA_IS_NONROT; 1552 dev->dev_attrib.is_nonrot = DA_IS_NONROT;
1465 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; 1553 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
@@ -1588,9 +1676,13 @@ void target_free_device(struct se_device *dev)
1588 } 1676 }
1589 1677
1590 core_alua_free_lu_gp_mem(dev); 1678 core_alua_free_lu_gp_mem(dev);
1679 core_alua_set_lba_map(dev, NULL, 0, 0);
1591 core_scsi3_free_all_registrations(dev); 1680 core_scsi3_free_all_registrations(dev);
1592 se_release_vpd_for_dev(dev); 1681 se_release_vpd_for_dev(dev);
1593 1682
1683 if (dev->transport->free_prot)
1684 dev->transport->free_prot(dev);
1685
1594 dev->transport->free_device(dev); 1686 dev->transport->free_device(dev);
1595} 1687}
1596 1688
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index dae2ad6a669e..7de9f0475d05 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -906,7 +906,7 @@ static struct config_group *target_fabric_make_lun(
906 lun_cg->default_groups[1] = NULL; 906 lun_cg->default_groups[1] = NULL;
907 907
908 port_stat_grp = &lun->port_stat_grps.stat_group; 908 port_stat_grp = &lun->port_stat_grps.stat_group;
909 port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, 909 port_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4,
910 GFP_KERNEL); 910 GFP_KERNEL);
911 if (!port_stat_grp->default_groups) { 911 if (!port_stat_grp->default_groups) {
912 pr_err("Unable to allocate port_stat_grp->default_groups\n"); 912 pr_err("Unable to allocate port_stat_grp->default_groups\n");
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 78241a53b555..cf991a91a8a9 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -257,6 +257,72 @@ static void fd_free_device(struct se_device *dev)
257 kfree(fd_dev); 257 kfree(fd_dev);
258} 258}
259 259
260static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
261 int is_write)
262{
263 struct se_device *se_dev = cmd->se_dev;
264 struct fd_dev *dev = FD_DEV(se_dev);
265 struct file *prot_fd = dev->fd_prot_file;
266 struct scatterlist *sg;
267 loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
268 unsigned char *buf;
269 u32 prot_size, len, size;
270 int rc, ret = 1, i;
271
272 prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
273 se_dev->prot_length;
274
275 if (!is_write) {
276 fd_prot->prot_buf = vzalloc(prot_size);
277 if (!fd_prot->prot_buf) {
278 pr_err("Unable to allocate fd_prot->prot_buf\n");
279 return -ENOMEM;
280 }
281 buf = fd_prot->prot_buf;
282
283 fd_prot->prot_sg_nents = cmd->t_prot_nents;
284 fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
285 fd_prot->prot_sg_nents, GFP_KERNEL);
286 if (!fd_prot->prot_sg) {
287 pr_err("Unable to allocate fd_prot->prot_sg\n");
288 vfree(fd_prot->prot_buf);
289 return -ENOMEM;
290 }
291 size = prot_size;
292
293 for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
294
295 len = min_t(u32, PAGE_SIZE, size);
296 sg_set_buf(sg, buf, len);
297 size -= len;
298 buf += len;
299 }
300 }
301
302 if (is_write) {
303 rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos);
304 if (rc < 0 || prot_size != rc) {
305 pr_err("kernel_write() for fd_do_prot_rw failed:"
306 " %d\n", rc);
307 ret = -EINVAL;
308 }
309 } else {
310 rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size);
311 if (rc < 0) {
312 pr_err("kernel_read() for fd_do_prot_rw failed:"
313 " %d\n", rc);
314 ret = -EINVAL;
315 }
316 }
317
318 if (is_write || ret < 0) {
319 kfree(fd_prot->prot_sg);
320 vfree(fd_prot->prot_buf);
321 }
322
323 return ret;
324}
325
260static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl, 326static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
261 u32 sgl_nents, int is_write) 327 u32 sgl_nents, int is_write)
262{ 328{
@@ -551,6 +617,8 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
551 enum dma_data_direction data_direction) 617 enum dma_data_direction data_direction)
552{ 618{
553 struct se_device *dev = cmd->se_dev; 619 struct se_device *dev = cmd->se_dev;
620 struct fd_prot fd_prot;
621 sense_reason_t rc;
554 int ret = 0; 622 int ret = 0;
555 623
556 /* 624 /*
@@ -558,8 +626,48 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
558 * physical memory addresses to struct iovec virtual memory. 626 * physical memory addresses to struct iovec virtual memory.
559 */ 627 */
560 if (data_direction == DMA_FROM_DEVICE) { 628 if (data_direction == DMA_FROM_DEVICE) {
629 memset(&fd_prot, 0, sizeof(struct fd_prot));
630
631 if (cmd->prot_type) {
632 ret = fd_do_prot_rw(cmd, &fd_prot, false);
633 if (ret < 0)
634 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
635 }
636
561 ret = fd_do_rw(cmd, sgl, sgl_nents, 0); 637 ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
638
639 if (ret > 0 && cmd->prot_type) {
640 u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
641
642 rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors,
643 0, fd_prot.prot_sg, 0);
644 if (rc) {
645 kfree(fd_prot.prot_sg);
646 vfree(fd_prot.prot_buf);
647 return rc;
648 }
649 kfree(fd_prot.prot_sg);
650 vfree(fd_prot.prot_buf);
651 }
562 } else { 652 } else {
653 memset(&fd_prot, 0, sizeof(struct fd_prot));
654
655 if (cmd->prot_type) {
656 u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
657
658 ret = fd_do_prot_rw(cmd, &fd_prot, false);
659 if (ret < 0)
660 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
661
662 rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors,
663 0, fd_prot.prot_sg, 0);
664 if (rc) {
665 kfree(fd_prot.prot_sg);
666 vfree(fd_prot.prot_buf);
667 return rc;
668 }
669 }
670
563 ret = fd_do_rw(cmd, sgl, sgl_nents, 1); 671 ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
564 /* 672 /*
565 * Perform implicit vfs_fsync_range() for fd_do_writev() ops 673 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
@@ -576,10 +684,19 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
576 684
577 vfs_fsync_range(fd_dev->fd_file, start, end, 1); 685 vfs_fsync_range(fd_dev->fd_file, start, end, 1);
578 } 686 }
687
688 if (ret > 0 && cmd->prot_type) {
689 ret = fd_do_prot_rw(cmd, &fd_prot, true);
690 if (ret < 0)
691 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
692 }
579 } 693 }
580 694
581 if (ret < 0) 695 if (ret < 0) {
696 kfree(fd_prot.prot_sg);
697 vfree(fd_prot.prot_buf);
582 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 698 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
699 }
583 700
584 if (ret) 701 if (ret)
585 target_complete_cmd(cmd, SAM_STAT_GOOD); 702 target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -700,6 +817,140 @@ static sector_t fd_get_blocks(struct se_device *dev)
700 dev->dev_attrib.block_size); 817 dev->dev_attrib.block_size);
701} 818}
702 819
820static int fd_init_prot(struct se_device *dev)
821{
822 struct fd_dev *fd_dev = FD_DEV(dev);
823 struct file *prot_file, *file = fd_dev->fd_file;
824 struct inode *inode;
825 int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
826 char buf[FD_MAX_DEV_PROT_NAME];
827
828 if (!file) {
829 pr_err("Unable to locate fd_dev->fd_file\n");
830 return -ENODEV;
831 }
832
833 inode = file->f_mapping->host;
834 if (S_ISBLK(inode->i_mode)) {
835 pr_err("FILEIO Protection emulation only supported on"
836 " !S_ISBLK\n");
837 return -ENOSYS;
838 }
839
840 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
841 flags &= ~O_DSYNC;
842
843 snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
844 fd_dev->fd_dev_name);
845
846 prot_file = filp_open(buf, flags, 0600);
847 if (IS_ERR(prot_file)) {
848 pr_err("filp_open(%s) failed\n", buf);
849 ret = PTR_ERR(prot_file);
850 return ret;
851 }
852 fd_dev->fd_prot_file = prot_file;
853
854 return 0;
855}
856
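
The protection data lives in a sidecar file next to the backing file, created on demand with mode 0600 and O_DSYNC unless buffered WCE is enabled; for a hypothetical fd_dev_name the pairing looks like:

	backing file:    /var/lib/target/disk0.img
	protection file: /var/lib/target/disk0.img.protection
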
857static void fd_init_format_buf(struct se_device *dev, unsigned char *buf,
858 u32 unit_size, u32 *ref_tag, u16 app_tag,
859 bool inc_reftag)
860{
861 unsigned char *p = buf;
862 int i;
863
864 for (i = 0; i < unit_size; i += dev->prot_length) {
865 *((u16 *)&p[0]) = 0xffff;
866 *((__be16 *)&p[2]) = cpu_to_be16(app_tag);
867 *((__be32 *)&p[4]) = cpu_to_be32(*ref_tag);
868
869 if (inc_reftag)
870 (*ref_tag)++;
871
872 p += dev->prot_length;
873 }
874}
875
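
Each dev->prot_length-sized unit the loop above fills is one 8-byte T10 DIF tuple; read off the stores above, and matching the struct se_dif_v1_tuple sizing used in target_alloc_device() later in this diff, the layout is:

	struct se_dif_v1_tuple {
		__be16 guard_tag;	/* 0xffff at format time: no data CRC computed yet */
		__be16 app_tag;		/* cpu_to_be16(app_tag) */
		__be32 ref_tag;		/* incremented per block for Type1/2, 0xffffffff for Type3 */
	};
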
876static int fd_format_prot(struct se_device *dev)
877{
878 struct fd_dev *fd_dev = FD_DEV(dev);
879 struct file *prot_fd = fd_dev->fd_prot_file;
880 sector_t prot_length, prot;
881 unsigned char *buf;
882 loff_t pos = 0;
883 u32 ref_tag = 0;
884 int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
885 int rc, ret = 0, size, len;
886 bool inc_reftag = false;
887
888 if (!dev->dev_attrib.pi_prot_type) {
889 pr_err("Unable to format_prot while pi_prot_type == 0\n");
890 return -ENODEV;
891 }
892 if (!prot_fd) {
893 pr_err("Unable to locate fd_dev->fd_prot_file\n");
894 return -ENODEV;
895 }
896
897 switch (dev->dev_attrib.pi_prot_type) {
898 case TARGET_DIF_TYPE3_PROT:
899 ref_tag = 0xffffffff;
900 break;
901 case TARGET_DIF_TYPE2_PROT:
902 case TARGET_DIF_TYPE1_PROT:
903 inc_reftag = true;
904 break;
905 default:
906 break;
907 }
908
909 buf = vzalloc(unit_size);
910 if (!buf) {
911 pr_err("Unable to allocate FILEIO prot buf\n");
912 return -ENOMEM;
913 }
914
915 prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
916 size = prot_length;
917
918 pr_debug("Using FILEIO prot_length: %llu\n",
919 (unsigned long long)prot_length);
920
921 for (prot = 0; prot < prot_length; prot += unit_size) {
922
923 fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff,
924 inc_reftag);
925
926 len = min(unit_size, size);
927
928 rc = kernel_write(prot_fd, buf, len, pos);
929 if (rc != len) {
930 pr_err("vfs_write to prot file failed: %d\n", rc);
931 ret = -ENODEV;
932 goto out;
933 }
934 pos += len;
935 size -= len;
936 }
937
938out:
939 vfree(buf);
940 return ret;
941}
942
943static void fd_free_prot(struct se_device *dev)
944{
945 struct fd_dev *fd_dev = FD_DEV(dev);
946
947 if (!fd_dev->fd_prot_file)
948 return;
949
950 filp_close(fd_dev->fd_prot_file, NULL);
951 fd_dev->fd_prot_file = NULL;
952}
953
703static struct sbc_ops fd_sbc_ops = { 954static struct sbc_ops fd_sbc_ops = {
704 .execute_rw = fd_execute_rw, 955 .execute_rw = fd_execute_rw,
705 .execute_sync_cache = fd_execute_sync_cache, 956 .execute_sync_cache = fd_execute_sync_cache,
@@ -730,6 +981,9 @@ static struct se_subsystem_api fileio_template = {
730 .show_configfs_dev_params = fd_show_configfs_dev_params, 981 .show_configfs_dev_params = fd_show_configfs_dev_params,
731 .get_device_type = sbc_get_device_type, 982 .get_device_type = sbc_get_device_type,
732 .get_blocks = fd_get_blocks, 983 .get_blocks = fd_get_blocks,
984 .init_prot = fd_init_prot,
985 .format_prot = fd_format_prot,
986 .free_prot = fd_free_prot,
733}; 987};
734 988
735static int __init fileio_module_init(void) 989static int __init fileio_module_init(void)
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index d7772c167685..182cbb295039 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -4,6 +4,7 @@
4#define FD_VERSION "4.0" 4#define FD_VERSION "4.0"
5 5
6#define FD_MAX_DEV_NAME 256 6#define FD_MAX_DEV_NAME 256
7#define FD_MAX_DEV_PROT_NAME FD_MAX_DEV_NAME + 16
7#define FD_DEVICE_QUEUE_DEPTH 32 8#define FD_DEVICE_QUEUE_DEPTH 32
8#define FD_MAX_DEVICE_QUEUE_DEPTH 128 9#define FD_MAX_DEVICE_QUEUE_DEPTH 128
9#define FD_BLOCKSIZE 512 10#define FD_BLOCKSIZE 512
@@ -18,6 +19,13 @@
18#define FBDF_HAS_PATH 0x01 19#define FBDF_HAS_PATH 0x01
19#define FBDF_HAS_SIZE 0x02 20#define FBDF_HAS_SIZE 0x02
20#define FDBD_HAS_BUFFERED_IO_WCE 0x04 21#define FDBD_HAS_BUFFERED_IO_WCE 0x04
22#define FDBD_FORMAT_UNIT_SIZE 2048
23
24struct fd_prot {
25 unsigned char *prot_buf;
26 struct scatterlist *prot_sg;
27 u32 prot_sg_nents;
28};
21 29
22struct fd_dev { 30struct fd_dev {
23 struct se_device dev; 31 struct se_device dev;
@@ -32,6 +40,7 @@ struct fd_dev {
32 u32 fd_block_size; 40 u32 fd_block_size;
33 unsigned long long fd_dev_size; 41 unsigned long long fd_dev_size;
34 struct file *fd_file; 42 struct file *fd_file;
43 struct file *fd_prot_file;
35 /* FILEIO HBA device is connected to */ 44 /* FILEIO HBA device is connected to */
36 struct fd_host *fd_host; 45 struct fd_host *fd_host;
37} ____cacheline_aligned; 46} ____cacheline_aligned;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 2d29356d0c85..554d4f75a75a 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -91,6 +91,7 @@ static int iblock_configure_device(struct se_device *dev)
91 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 91 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
92 struct request_queue *q; 92 struct request_queue *q;
93 struct block_device *bd = NULL; 93 struct block_device *bd = NULL;
94 struct blk_integrity *bi;
94 fmode_t mode; 95 fmode_t mode;
95 int ret = -ENOMEM; 96 int ret = -ENOMEM;
96 97
@@ -155,8 +156,40 @@ static int iblock_configure_device(struct se_device *dev)
155 if (blk_queue_nonrot(q)) 156 if (blk_queue_nonrot(q))
156 dev->dev_attrib.is_nonrot = 1; 157 dev->dev_attrib.is_nonrot = 1;
157 158
159 bi = bdev_get_integrity(bd);
160 if (bi) {
161 struct bio_set *bs = ib_dev->ibd_bio_set;
162
163 if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
164 !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
165 pr_err("IBLOCK export of blk_integrity: %s not"
166 " supported\n", bi->name);
167 ret = -ENOSYS;
168 goto out_blkdev_put;
169 }
170
171 if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
172 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
173 } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
174 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
175 }
176
177 if (dev->dev_attrib.pi_prot_type) {
178 if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
179 pr_err("Unable to allocate bioset for PI\n");
180 ret = -ENOMEM;
181 goto out_blkdev_put;
182 }
183 pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
184 bs->bio_integrity_pool);
185 }
186 dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
187 }
188
158 return 0; 189 return 0;
159 190
191out_blkdev_put:
192 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
160out_free_bioset: 193out_free_bioset:
161 bioset_free(ib_dev->ibd_bio_set); 194 bioset_free(ib_dev->ibd_bio_set);
162 ib_dev->ibd_bio_set = NULL; 195 ib_dev->ibd_bio_set = NULL;
@@ -170,8 +203,10 @@ static void iblock_free_device(struct se_device *dev)
170 203
171 if (ib_dev->ibd_bd != NULL) 204 if (ib_dev->ibd_bd != NULL)
172 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 205 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
173 if (ib_dev->ibd_bio_set != NULL) 206 if (ib_dev->ibd_bio_set != NULL) {
207 bioset_integrity_free(ib_dev->ibd_bio_set);
174 bioset_free(ib_dev->ibd_bio_set); 208 bioset_free(ib_dev->ibd_bio_set);
209 }
175 kfree(ib_dev); 210 kfree(ib_dev);
176} 211}
177 212
@@ -586,13 +621,58 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
586 return bl; 621 return bl;
587} 622}
588 623
624static int
625iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
626{
627 struct se_device *dev = cmd->se_dev;
628 struct blk_integrity *bi;
629 struct bio_integrity_payload *bip;
630 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
631 struct scatterlist *sg;
632 int i, rc;
633
634 bi = bdev_get_integrity(ib_dev->ibd_bd);
635 if (!bi) {
636 pr_err("Unable to locate bio_integrity\n");
637 return -ENODEV;
638 }
639
640 bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
641 if (!bip) {
642 pr_err("Unable to allocate bio_integrity_payload\n");
643 return -ENOMEM;
644 }
645
646 bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
647 dev->prot_length;
648 bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
649
650 pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
651 (unsigned long long)bip->bip_iter.bi_sector);
652
653 for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
654
655 rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
656 sg->offset);
657 if (rc != sg->length) {
658 pr_err("bio_integrity_add_page() failed; %d\n", rc);
659 return -ENOMEM;
660 }
661
662 pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
663 sg_page(sg), sg->length, sg->offset);
664 }
665
666 return 0;
667}
668
589static sense_reason_t 669static sense_reason_t
590iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, 670iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
591 enum dma_data_direction data_direction) 671 enum dma_data_direction data_direction)
592{ 672{
593 struct se_device *dev = cmd->se_dev; 673 struct se_device *dev = cmd->se_dev;
594 struct iblock_req *ibr; 674 struct iblock_req *ibr;
595 struct bio *bio; 675 struct bio *bio, *bio_start;
596 struct bio_list list; 676 struct bio_list list;
597 struct scatterlist *sg; 677 struct scatterlist *sg;
598 u32 sg_num = sgl_nents; 678 u32 sg_num = sgl_nents;
@@ -655,6 +735,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
655 if (!bio) 735 if (!bio)
656 goto fail_free_ibr; 736 goto fail_free_ibr;
657 737
738 bio_start = bio;
658 bio_list_init(&list); 739 bio_list_init(&list);
659 bio_list_add(&list, bio); 740 bio_list_add(&list, bio);
660 741
@@ -688,6 +769,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
688 sg_num--; 769 sg_num--;
689 } 770 }
690 771
772 if (cmd->prot_type) {
773 int rc = iblock_alloc_bip(cmd, bio_start);
774 if (rc)
775 goto fail_put_bios;
776 }
777
691 iblock_submit_bios(&list, rw); 778 iblock_submit_bios(&list, rw);
692 iblock_complete_cmd(cmd); 779 iblock_complete_cmd(cmd);
693 return 0; 780 return 0;
@@ -763,7 +850,7 @@ iblock_parse_cdb(struct se_cmd *cmd)
763 return sbc_parse_cdb(cmd, &iblock_sbc_ops); 850 return sbc_parse_cdb(cmd, &iblock_sbc_ops);
764} 851}
765 852
766bool iblock_get_write_cache(struct se_device *dev) 853static bool iblock_get_write_cache(struct se_device *dev)
767{ 854{
768 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 855 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
769 struct block_device *bd = ib_dev->ibd_bd; 856 struct block_device *bd = ib_dev->ibd_bd;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 47b63b094cdc..de9cab708f45 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -35,6 +35,8 @@ int se_dev_set_emulate_tpu(struct se_device *, int);
 int se_dev_set_emulate_tpws(struct se_device *, int);
 int se_dev_set_emulate_caw(struct se_device *, int);
 int se_dev_set_emulate_3pc(struct se_device *, int);
+int se_dev_set_pi_prot_type(struct se_device *, int);
+int se_dev_set_pi_prot_format(struct se_device *, int);
 int se_dev_set_enforce_pr_isids(struct se_device *, int);
 int se_dev_set_is_nonrot(struct se_device *, int);
 int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
@@ -77,9 +79,9 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tp
		const char *);
 void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
-struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
-int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
-		u32, void *);
+struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
+int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
+		u32, struct se_device *);
 struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
 int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);

diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index ed75cdd32cb0..2ee2936fa0bd 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -43,6 +43,11 @@
 #define PR_APTPL_MAX_IPORT_LEN	256
 #define PR_APTPL_MAX_TPORT_LEN	256

+/*
+ * Function defined in target_core_spc.c
+ */
+void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
+
 extern struct kmem_cache *t10_pr_reg_cache;

 extern void core_pr_dump_initiator_port(struct t10_pr_registration *,
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4ffe5f2ec0e9..66a5aba5a0d9 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -78,23 +78,14 @@ static void rd_detach_hba(struct se_hba *hba)
	hba->hba_ptr = NULL;
 }

-/* rd_release_device_space():
- *
- *
- */
-static void rd_release_device_space(struct rd_dev *rd_dev)
+static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+				u32 sg_table_count)
 {
-	u32 i, j, page_count = 0, sg_per_table;
-	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;
+	u32 i, j, page_count = 0, sg_per_table;

-	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
-		return;
-
-	sg_table = rd_dev->sg_table_array;
-
-	for (i = 0; i < rd_dev->sg_table_count; i++) {
+	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

@@ -105,16 +96,28 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
				page_count++;
			}
		}
-
		kfree(sg);
	}

+	kfree(sg_table);
+	return page_count;
+}
+
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+	u32 page_count;
+
+	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+		return;
+
+	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
+					  rd_dev->sg_table_count);
+
	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

-	kfree(sg_table);
	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
 }
@@ -124,38 +127,15 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
  *
  *
  */
-static int rd_build_device_space(struct rd_dev *rd_dev)
+static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+				 u32 total_sg_needed, unsigned char init_payload)
 {
-	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
-	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;
-
-	if (rd_dev->rd_page_count <= 0) {
-		pr_err("Illegal page count: %u for Ramdisk device\n",
-			rd_dev->rd_page_count);
-		return -EINVAL;
-	}
-
-	/* Don't need backing pages for NULLIO */
-	if (rd_dev->rd_flags & RDF_NULLIO)
-		return 0;
-
-	total_sg_needed = rd_dev->rd_page_count;
-
-	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
-
-	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
-	if (!sg_table) {
-		pr_err("Unable to allocate memory for Ramdisk"
-			" scatterlist tables\n");
-		return -ENOMEM;
-	}
-
-	rd_dev->sg_table_array = sg_table;
-	rd_dev->sg_table_count = sg_tables;
+	unsigned char *p;

	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
@@ -186,16 +166,114 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;
+
+			p = kmap(pg);
+			memset(p, init_payload, PAGE_SIZE);
+			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

+	return 0;
+}
+
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+	struct rd_dev_sg_table *sg_table;
+	u32 sg_tables, total_sg_needed;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	int rc;
+
+	if (rd_dev->rd_page_count <= 0) {
+		pr_err("Illegal page count: %u for Ramdisk device\n",
+			rd_dev->rd_page_count);
+		return -EINVAL;
+	}
+
+	/* Don't need backing pages for NULLIO */
+	if (rd_dev->rd_flags & RDF_NULLIO)
+		return 0;
+
+	total_sg_needed = rd_dev->rd_page_count;
+
+	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+	if (!sg_table) {
+		pr_err("Unable to allocate memory for Ramdisk"
+			" scatterlist tables\n");
+		return -ENOMEM;
+	}
+
+	rd_dev->sg_table_array = sg_table;
+	rd_dev->sg_table_count = sg_tables;
+
+	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
+	if (rc)
+		return rc;
+
	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);
+
+	return 0;
+}
+
+static void rd_release_prot_space(struct rd_dev *rd_dev)
+{
+	u32 page_count;
+
+	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
+		return;
+
+	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
+					  rd_dev->sg_prot_count);
+
+	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
+		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
+		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+	rd_dev->sg_prot_array = NULL;
+	rd_dev->sg_prot_count = 0;
+}
+
+static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
+{
+	struct rd_dev_sg_table *sg_table;
+	u32 total_sg_needed, sg_tables;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	int rc;
+
+	if (rd_dev->rd_flags & RDF_NULLIO)
+		return 0;
+
+	total_sg_needed = rd_dev->rd_page_count / prot_length;
+
+	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+	if (!sg_table) {
+		pr_err("Unable to allocate memory for Ramdisk protection"
+			" scatterlist tables\n");
+		return -ENOMEM;
+	}
+
+	rd_dev->sg_prot_array = sg_table;
+	rd_dev->sg_prot_count = sg_tables;
+
+	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
+	if (rc)
+		return rc;
+
+	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
+		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+		rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
 }
@@ -278,6 +356,26 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
	return NULL;
 }

+static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
+{
+	struct rd_dev_sg_table *sg_table;
+	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+
+	i = page / sg_per_table;
+	if (i < rd_dev->sg_prot_count) {
+		sg_table = &rd_dev->sg_prot_array[i];
+		if ((sg_table->page_start_offset <= page) &&
+		    (sg_table->page_end_offset >= page))
+			return sg_table;
+	}
+
+	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
+			page);
+
+	return NULL;
+}
+
 static sense_reason_t
 rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
@@ -292,6 +390,7 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	u32 rd_page;
	u32 src_len;
	u64 tmp;
+	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -314,6 +413,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
		cmd->t_task_lba, rd_size, rd_page, rd_offset);

+	if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
+		struct rd_dev_sg_table *prot_table;
+		struct scatterlist *prot_sg;
+		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+		u32 prot_offset, prot_page;
+
+		tmp = cmd->t_task_lba * se_dev->prot_length;
+		prot_offset = do_div(tmp, PAGE_SIZE);
+		prot_page = tmp;
+
+		prot_table = rd_get_prot_table(dev, prot_page);
+		if (!prot_table)
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
+
+		rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
+					  prot_sg, prot_offset);
+		if (rc)
+			return rc;
+	}
+
	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
@@ -375,6 +496,28 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	}
	sg_miter_stop(&m);

+	if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
+		struct rd_dev_sg_table *prot_table;
+		struct scatterlist *prot_sg;
+		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+		u32 prot_offset, prot_page;
+
+		tmp = cmd->t_task_lba * se_dev->prot_length;
+		prot_offset = do_div(tmp, PAGE_SIZE);
+		prot_page = tmp;
+
+		prot_table = rd_get_prot_table(dev, prot_page);
+		if (!prot_table)
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
+
+		rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
+					 prot_sg, prot_offset);
+		if (rc)
+			return rc;
+	}
+
	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
 }
@@ -456,6 +599,23 @@ static sector_t rd_get_blocks(struct se_device *dev)
	return blocks_long;
 }

+static int rd_init_prot(struct se_device *dev)
+{
+	struct rd_dev *rd_dev = RD_DEV(dev);
+
+	if (!dev->dev_attrib.pi_prot_type)
+		return 0;
+
+	return rd_build_prot_space(rd_dev, dev->prot_length);
+}
+
+static void rd_free_prot(struct se_device *dev)
+{
+	struct rd_dev *rd_dev = RD_DEV(dev);
+
+	rd_release_prot_space(rd_dev);
+}
+
 static struct sbc_ops rd_sbc_ops = {
	.execute_rw = rd_execute_rw,
 };
@@ -481,6 +641,8 @@ static struct se_subsystem_api rd_mcp_template = {
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type = sbc_get_device_type,
	.get_blocks = rd_get_blocks,
+	.init_prot = rd_init_prot,
+	.free_prot = rd_free_prot,
 };

 int __init rd_module_init(void)
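
rd_execute_rw() finds the PI tuples for a command with plain integer arithmetic: the byte offset of the first tuple is t_task_lba * prot_length, split by do_div() into a protection page number and an intra-page offset, and rd_get_prot_table() then maps that page number onto one of the preallocated SG tables. A standalone sketch of the same arithmetic (illustration only; the 4 KiB page size, 8-byte DIF tuple, and 32-byte struct scatterlist are assumptions):

#include <stdio.h>

#define PAGE_SIZE		4096
#define PROT_LENGTH		8	/* sizeof(struct se_dif_v1_tuple) */
#define RD_MAX_ALLOCATION_SIZE	65536
#define SG_SIZE			32	/* assumed sizeof(struct scatterlist) */

int main(void)
{
	unsigned long long lba = 123456;
	unsigned long long tmp = lba * PROT_LENGTH;

	/* Same split as do_div(tmp, PAGE_SIZE) in rd_execute_rw() */
	unsigned int prot_offset = tmp % PAGE_SIZE;
	unsigned int prot_page = tmp / PAGE_SIZE;

	/* Same table lookup rd_get_prot_table() performs */
	unsigned int sg_per_table = RD_MAX_ALLOCATION_SIZE / SG_SIZE;
	unsigned int table = prot_page / sg_per_table;

	printf("LBA %llu -> prot page %u (offset %u) in sg_prot_array[%u]\n",
	       lba, prot_page, prot_offset, table);
	return 0;
}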
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 1789d1e14395..cc46a6a89b38 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -33,8 +33,12 @@ struct rd_dev {
	u32 rd_page_count;
	/* Number of SG tables in sg_table_array */
	u32 sg_table_count;
+	/* Number of SG tables in sg_prot_array */
+	u32 sg_prot_count;
	/* Array of rd_dev_sg_table_t containing scatterlists */
	struct rd_dev_sg_table *sg_table_array;
+	/* Array of rd_dev_sg_table containing protection scatterlists */
+	struct rd_dev_sg_table *sg_prot_array;
	/* Ramdisk HBA device is connected to */
	struct rd_host *rd_host;
 } ____cacheline_aligned;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 52ae54e60105..fa3cae393e13 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/ratelimit.h>
+#include <linux/crc-t10dif.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_tcq.h>
@@ -33,7 +34,7 @@

 #include "target_core_internal.h"
 #include "target_core_ua.h"
-
+#include "target_core_alua.h"

 static sense_reason_t
 sbc_emulate_readcapacity(struct se_cmd *cmd)
@@ -105,6 +106,11 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
+	/*
+	 * Set P_TYPE and PROT_EN bits for DIF support
+	 */
+	if (dev->dev_attrib.pi_prot_type)
+		buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
@@ -563,6 +569,44 @@ sbc_compare_and_write(struct se_cmd *cmd)
	return TCM_NO_SENSE;
 }

+static bool
+sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
+	       u32 sectors)
+{
+	if (!cmd->t_prot_sg || !cmd->t_prot_nents)
+		return true;
+
+	switch (dev->dev_attrib.pi_prot_type) {
+	case TARGET_DIF_TYPE3_PROT:
+		if (!(cdb[1] & 0xe0))
+			return true;
+
+		cmd->reftag_seed = 0xffffffff;
+		break;
+	case TARGET_DIF_TYPE2_PROT:
+		if (cdb[1] & 0xe0)
+			return false;
+
+		cmd->reftag_seed = cmd->t_task_lba;
+		break;
+	case TARGET_DIF_TYPE1_PROT:
+		if (!(cdb[1] & 0xe0))
+			return true;
+
+		cmd->reftag_seed = cmd->t_task_lba;
+		break;
+	case TARGET_DIF_TYPE0_PROT:
+	default:
+		return true;
+	}
+
+	cmd->prot_type = dev->dev_attrib.pi_prot_type;
+	cmd->prot_length = dev->prot_length * sectors;
+	cmd->prot_handover = PROT_SEPERATED;
+
+	return true;
+}
+
 sense_reason_t
 sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 {
@@ -583,6 +627,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
@@ -590,6 +638,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
@@ -597,6 +649,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
@@ -612,6 +668,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -621,6 +681,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -630,6 +694,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
+
+		if (!sbc_check_prot(dev, cmd, cdb, sectors))
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -731,6 +799,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
+		case SAI_REPORT_REFERRALS:
+			cmd->execute_cmd = target_emulate_report_referrals;
+			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
@@ -959,3 +1030,182 @@ err:
	return ret;
 }
 EXPORT_SYMBOL(sbc_execute_unmap);
+
+static sense_reason_t
+sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
+		  const void *p, sector_t sector, unsigned int ei_lba)
+{
+	int block_size = dev->dev_attrib.block_size;
+	__be16 csum;
+
+	csum = cpu_to_be16(crc_t10dif(p, block_size));
+
+	if (sdt->guard_tag != csum) {
+		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
+			" csum 0x%04x\n", (unsigned long long)sector,
+			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+	}
+
+	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
+	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
+		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
+			" sector MSB: 0x%08x\n", (unsigned long long)sector,
+			be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
+		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+	}
+
+	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
+	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
+		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
+			" ei_lba: 0x%08x\n", (unsigned long long)sector,
+			be32_to_cpu(sdt->ref_tag), ei_lba);
+		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+	}
+
+	return 0;
+}
+
+static void
+sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
+		  struct scatterlist *sg, int sg_off)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct scatterlist *psg;
+	void *paddr, *addr;
+	unsigned int i, len, left;
+
+	left = sectors * dev->prot_length;
+
+	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
+
+		len = min(psg->length, left);
+		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+		addr = kmap_atomic(sg_page(sg)) + sg_off;
+
+		if (read)
+			memcpy(paddr, addr, len);
+		else
+			memcpy(addr, paddr, len);
+
+		left -= len;
+		kunmap_atomic(paddr);
+		kunmap_atomic(addr);
+	}
+}
+
+sense_reason_t
+sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_dif_v1_tuple *sdt;
+	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+	sector_t sector = start;
+	void *daddr, *paddr;
+	int i, j, offset = 0;
+	sense_reason_t rc;
+
+	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+			if (offset >= psg->length) {
+				kunmap_atomic(paddr);
+				psg = sg_next(psg);
+				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+				offset = 0;
+			}
+
+			sdt = paddr + offset;
+
+			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
+				 " app_tag: 0x%04x ref_tag: %u\n",
+				 (unsigned long long)sector, sdt->guard_tag,
+				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+					       ei_lba);
+			if (rc) {
+				kunmap_atomic(paddr);
+				kunmap_atomic(daddr);
+				cmd->bad_sector = sector;
+				return rc;
+			}
+
+			sector++;
+			ei_lba++;
+			offset += sizeof(struct se_dif_v1_tuple);
+		}
+
+		kunmap_atomic(paddr);
+		kunmap_atomic(daddr);
+	}
+	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
+
+	return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify_write);
+
+sense_reason_t
+sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_dif_v1_tuple *sdt;
+	struct scatterlist *dsg;
+	sector_t sector = start;
+	void *daddr, *paddr;
+	int i, j, offset = sg_off;
+	sense_reason_t rc;
+
+	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+		paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+
+		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+			if (offset >= sg->length) {
+				kunmap_atomic(paddr);
+				sg = sg_next(sg);
+				paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+				offset = 0;
+			}
+
+			sdt = paddr + offset;
+
+			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
+				 " app_tag: 0x%04x ref_tag: %u\n",
+				 (unsigned long long)sector, sdt->guard_tag,
+				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+			if (sdt->app_tag == cpu_to_be16(0xffff)) {
+				sector++;
+				offset += sizeof(struct se_dif_v1_tuple);
+				continue;
+			}
+
+			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+					       ei_lba);
+			if (rc) {
+				kunmap_atomic(paddr);
+				kunmap_atomic(daddr);
+				cmd->bad_sector = sector;
+				return rc;
+			}

+			sector++;
+			ei_lba++;
+			offset += sizeof(struct se_dif_v1_tuple);
+		}
+
+		kunmap_atomic(paddr);
+		kunmap_atomic(daddr);
+	}
+	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
+
+	return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify_read);
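
sbc_dif_v1_verify() recomputes the CRC16 guard over each logical block and, for Type 1, compares the reference tag against the low 32 bits of the LBA. A self-contained userspace sketch of the same checks for one 512-byte block (illustration only; crc16_t10dif() below is a bitwise CRC-16/T10-DIF with polynomial 0x8bb7 standing in for the kernel's crc_t10dif(), and htons()/htonl() stand in for cpu_to_be16/32()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct dif_v1_tuple {
	uint16_t guard_tag;	/* big-endian CRC of the data block */
	uint16_t app_tag;	/* 0xffff means "don't check" on read */
	uint32_t ref_tag;	/* low 32 bits of the LBA for Type 1 */
};

static uint16_t crc16_t10dif(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;
	size_t i;
	int j;

	for (i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (j = 0; j < 8; j++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7 : crc << 1;
	}
	return crc;
}

int main(void)
{
	uint8_t block[512];
	struct dif_v1_tuple sdt;
	uint64_t lba = 42;

	memset(block, 0xab, sizeof(block));

	/* What an initiator (or generate-side code) would emit */
	sdt.guard_tag = htons(crc16_t10dif(block, sizeof(block)));
	sdt.app_tag = 0;
	sdt.ref_tag = htonl((uint32_t)lba);	/* Type 1 seeds with the LBA */

	/* The two checks sbc_dif_v1_verify() applies for Type 1 */
	if (sdt.guard_tag != htons(crc16_t10dif(block, sizeof(block))))
		printf("guard tag mismatch\n");
	else if (ntohl(sdt.ref_tag) != (uint32_t)lba)
		printf("ref tag mismatch\n");
	else
		printf("tuple verifies for LBA %llu\n", (unsigned long long)lba);
	return 0;
}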
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 021c3f4a4f00..43c5ca9878bc 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -100,6 +100,11 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
	 */
	if (dev->dev_attrib.emulate_3pc)
		buf[5] |= 0x8;
+	/*
+	 * Set Protection (PROTECT) bit when DIF has been enabled.
+	 */
+	if (dev->dev_attrib.pi_prot_type)
+		buf[5] |= 0x1;

	buf[7] = 0x2; /* CmdQue=1 */

@@ -267,7 +272,7 @@ check_t10_vend_desc:
	port = lun->lun_sep;
	if (port) {
		struct t10_alua_lu_gp *lu_gp;
-		u32 padding, scsi_name_len;
+		u32 padding, scsi_name_len, scsi_target_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;
@@ -365,16 +370,6 @@ check_lu_gp:
	 * section 7.5.1 Table 362
	 */
 check_scsi_name:
-	scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
-	/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
-	scsi_name_len += 10;
-	/* Check for 4-byte padding */
-	padding = ((-scsi_name_len) & 3);
-	if (padding != 0)
-		scsi_name_len += padding;
-	/* Header size + Designation descriptor */
-	scsi_name_len += 4;
-
	buf[off] =
		(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
	buf[off++] |= 0x3; /* CODE SET == UTF-8 */
@@ -402,13 +397,57 @@ check_scsi_name:
	 * shall be no larger than 256 and shall be a multiple
	 * of four.
	 */
+	padding = ((-scsi_name_len) & 3);
	if (padding)
		scsi_name_len += padding;
+	if (scsi_name_len > 256)
+		scsi_name_len = 256;

	buf[off-1] = scsi_name_len;
	off += scsi_name_len;
	/* Header size + Designation descriptor */
	len += (scsi_name_len + 4);
+
+	/*
+	 * Target device designator
+	 */
+	buf[off] =
+		(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+	buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+	buf[off] = 0x80; /* Set PIV=1 */
+	/* Set ASSOCIATION == target device: 10b */
+	buf[off] |= 0x20;
+	/* DESIGNATOR TYPE == SCSI name string */
+	buf[off++] |= 0x8;
+	off += 2; /* Skip over Reserved and length */
+	/*
+	 * SCSI name string identifer containing, $FABRIC_MOD
+	 * dependent information. For LIO-Target and iSCSI
+	 * Target Port, this means "<iSCSI name>" in
+	 * UTF-8 encoding.
+	 */
+	scsi_target_len = sprintf(&buf[off], "%s",
+				  tpg->se_tpg_tfo->tpg_get_wwn(tpg));
+	scsi_target_len += 1 /* Include NULL terminator */;
+	/*
+	 * The null-terminated, null-padded (see 4.4.2) SCSI
+	 * NAME STRING field contains a UTF-8 format string.
+	 * The number of bytes in the SCSI NAME STRING field
+	 * (i.e., the value in the DESIGNATOR LENGTH field)
+	 * shall be no larger than 256 and shall be a multiple
+	 * of four.
+	 */
+	padding = ((-scsi_target_len) & 3);
+	if (padding)
+		scsi_target_len += padding;
+	if (scsi_name_len > 256)
+		scsi_name_len = 256;
+
+	buf[off-1] = scsi_target_len;
+	off += scsi_target_len;
+
+	/* Header size + Designation descriptor */
+	len += (scsi_target_len + 4);
	}
	buf[2] = ((len >> 8) & 0xff);
	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
@@ -436,12 +475,26 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
	struct se_device *dev = cmd->se_dev;

	buf[3] = 0x3c;
+	/*
+	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
+	 * only for TYPE3 protection.
+	 */
+	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+		buf[4] = 0x5;
+	else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
+		buf[4] = 0x4;
+
	/* Set HEADSUP, ORDSUP, SIMPSUP */
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
	if (spc_check_dev_wce(dev))
		buf[6] = 0x01;
+	/* If an LBA map is present set R_SUP */
+	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
+	if (!list_empty(&dev->t10_alua.lba_map_list))
+		buf[8] = 0x10;
+	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
	return 0;
 }

@@ -600,6 +653,20 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
	return 0;
 }

+/* Referrals VPD page */
+static sense_reason_t
+spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[3] = 0x0c;
+	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
+	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);
+
+	return 0;
+}
+
 static sense_reason_t
 spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);

@@ -614,6 +681,7 @@ static struct {
	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
+	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
 };

 /* supported vital product data pages */
@@ -643,11 +711,15 @@ spc_emulate_inquiry(struct se_cmd *cmd)
	struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
	unsigned char *rbuf;
	unsigned char *cdb = cmd->t_task_cdb;
-	unsigned char buf[SE_INQUIRY_BUF];
+	unsigned char *buf;
	sense_reason_t ret;
	int p;

-	memset(buf, 0, SE_INQUIRY_BUF);
+	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
+	if (!buf) {
+		pr_err("Unable to allocate response buffer for INQUIRY\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}

	if (dev == tpg->tpg_virt_lun0.lun_se_dev)
		buf[0] = 0x3f; /* Not connected */
@@ -680,9 +752,10 @@ spc_emulate_inquiry(struct se_cmd *cmd)
 out:
	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
-		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}
+	kfree(buf);

	if (!ret)
		target_complete_cmd(cmd, GOOD);
@@ -785,6 +858,19 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
	 * status (see SAM-4).
	 */
	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
+	/*
+	 * From spc4r30, section 7.5.7 Control mode page
+	 *
+	 * Application Tag Owner (ATO) bit set to one.
+	 *
+	 * If the ATO bit is set to one the device server shall not modify the
+	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
+	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
+	 * TAG field.
+	 */
+	if (dev->dev_attrib.pi_prot_type)
+		p[5] |= 0x80;
+
	p[8] = 0xff;
	p[9] = 0xff;
	p[11] = 30;
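
Two of the hunks above advertise protection capability bits that initiators probe before sending protected I/O. A standalone sketch of the bytes produced for a Type 1 formatted device (illustration only; the bit positions follow the comments in the hunks and SPC-4/SBC-3):

#include <stdio.h>

int main(void)
{
	int pi_prot_type = 1;	/* TARGET_DIF_TYPE1_PROT */

	/* READ CAPACITY (16), byte 12: P_TYPE in bits 3:1, PROT_EN in bit 0 */
	unsigned char rc16_byte12 = ((pi_prot_type - 1) << 1) | 0x1;

	/* Extended INQUIRY VPD (86h), byte 4: GRD_CHK is bit 2, REF_CHK bit 0,
	 * so Type 1 advertises 0x5 and Type 3 (no ref tag check) 0x4 */
	unsigned char vpd86_byte4 = (pi_prot_type == 1) ? 0x5 :
				    (pi_prot_type == 3) ? 0x4 : 0x0;

	printf("RC16 byte 12: 0x%02x, VPD 86h byte 4: 0x%02x\n",
	       rc16_byte12, vpd86_byte4);
	return 0;
}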
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 2a573de19a9f..c036595b17cf 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -656,7 +656,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
	spin_lock_init(&lun->lun_sep_lock);
	init_completion(&lun->lun_ref_comp);

-	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
+	ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

@@ -781,7 +781,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
 }
 EXPORT_SYMBOL(core_tpg_deregister);

-struct se_lun *core_tpg_pre_addlun(
+struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
 {
@@ -811,11 +811,11 @@ struct se_lun *core_tpg_pre_addlun(
	return lun;
 }

-int core_tpg_post_addlun(
+int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
-	void *lun_ptr)
+	struct se_device *dev)
 {
	int ret;

@@ -823,7 +823,7 @@ int core_tpg_post_addlun(
	if (ret < 0)
		return ret;

-	ret = core_dev_export(lun_ptr, tpg, lun);
+	ret = core_dev_export(dev, tpg, lun);
	if (ret < 0) {
		percpu_ref_cancel_init(&lun->lun_ref);
		return ret;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 91953da0f623..c50fd9f11aab 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -62,6 +62,8 @@ struct kmem_cache *t10_alua_lu_gp_cache;
 struct kmem_cache *t10_alua_lu_gp_mem_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+struct kmem_cache *t10_alua_lba_map_cache;
+struct kmem_cache *t10_alua_lba_map_mem_cache;

 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
@@ -128,14 +130,36 @@ int init_se_kmem_caches(void)
			"mem_t failed\n");
		goto out_free_tg_pt_gp_cache;
	}
+	t10_alua_lba_map_cache = kmem_cache_create(
+			"t10_alua_lba_map_cache",
+			sizeof(struct t10_alua_lba_map),
+			__alignof__(struct t10_alua_lba_map), 0, NULL);
+	if (!t10_alua_lba_map_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lba_map_"
+			"cache failed\n");
+		goto out_free_tg_pt_gp_mem_cache;
+	}
+	t10_alua_lba_map_mem_cache = kmem_cache_create(
+			"t10_alua_lba_map_mem_cache",
+			sizeof(struct t10_alua_lba_map_member),
+			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
+	if (!t10_alua_lba_map_mem_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
+			"cache failed\n");
+		goto out_free_lba_map_cache;
+	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
-		goto out_free_tg_pt_gp_mem_cache;
+		goto out_free_lba_map_mem_cache;

	return 0;

+out_free_lba_map_mem_cache:
+	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
+out_free_lba_map_cache:
+	kmem_cache_destroy(t10_alua_lba_map_cache);
 out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
 out_free_tg_pt_gp_cache:
@@ -164,6 +188,8 @@ void release_se_kmem_caches(void)
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+	kmem_cache_destroy(t10_alua_lba_map_cache);
+	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 }

 /* This code ensures unique mib indexes are handed out. */
@@ -568,10 +594,11 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 {
	struct se_lun *lun = cmd->se_lun;

-	if (!lun || !cmd->lun_ref_active)
+	if (!lun)
		return;

-	percpu_ref_put(&lun->lun_ref);
+	if (cmpxchg(&cmd->lun_ref_active, true, false))
+		percpu_ref_put(&lun->lun_ref);
 }

 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
@@ -1284,6 +1311,8 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
+ * @sgl_prot: struct scatterlist memory protection information
+ * @sgl_prot_count: scatterlist count for protection information
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
@@ -1296,7 +1325,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
-		struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
+		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+		struct scatterlist *sgl_prot, u32 sgl_prot_count)
 {
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
@@ -1338,6 +1368,14 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
		target_put_sess_cmd(se_sess, se_cmd);
		return 0;
	}
+	/*
+	 * Save pointers for SGLs containing protection information,
+	 * if present.
+	 */
+	if (sgl_prot_count) {
+		se_cmd->t_prot_sg = sgl_prot;
+		se_cmd->t_prot_nents = sgl_prot_count;
+	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
@@ -1380,6 +1418,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
			return 0;
		}
	}
+
	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
@@ -1419,7 +1458,7 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
 {
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
-			flags, NULL, 0, NULL, 0);
+			flags, NULL, 0, NULL, 0, NULL, 0);
 }
 EXPORT_SYMBOL(target_submit_cmd);

@@ -2455,6 +2494,19 @@ static int transport_get_sense_codes(
	return 0;
 }

+static
+void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector)
+{
+	/* Place failed LBA in sense data information descriptor 0. */
+	buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
+	buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */
+	buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
+	buffer[SPC_VALIDITY_OFFSET] = 0x80;
+
+	/* Descriptor Information: failing sector */
+	put_unaligned_be64(bad_sector, &buffer[12]);
+}
+
 int
 transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
@@ -2648,6 +2700,39 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
		buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
		break;
+	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK GUARD CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
+	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
+	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
+		buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
+		transport_err_sector_info(buffer, cmd->bad_sector);
+		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
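
transport_err_sector_info() appends the failing LBA as an SPC information sense-data descriptor, using the three offsets added to target_core_base.h in this series. A userspace sketch of the bytes it lays down (illustration only; put_unaligned_be64() is replaced with a manual big-endian store):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SPC_ADD_SENSE_LEN_OFFSET	7
#define SPC_DESC_TYPE_OFFSET		8
#define SPC_ADDITIONAL_DESC_LEN_OFFSET	9
#define SPC_VALIDITY_OFFSET		10

static void err_sector_info(uint8_t *buffer, uint64_t bad_sector)
{
	int i;

	/* Place failed LBA in sense data information descriptor 0 */
	buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
	buffer[SPC_DESC_TYPE_OFFSET] = 0;	/* Information */
	buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
	buffer[SPC_VALIDITY_OFFSET] = 0x80;

	/* Descriptor Information: failing sector, big-endian 64-bit */
	for (i = 0; i < 8; i++)
		buffer[12 + i] = bad_sector >> (56 - 8 * i);
}

int main(void)
{
	uint8_t sense[20];
	int i;

	memset(sense, 0, sizeof(sense));
	err_sector_info(sense, 0x123456ULL);

	for (i = 0; i < 20; i++)
		printf("%02x%c", sense[i], i == 19 ? '\n' : ' ');
	return 0;
}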
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index b04467e7547c..505519b10cb7 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -98,7 +98,6 @@ int core_scsi3_ua_allocate(
		pr_err("Unable to allocate struct se_ua\n");
		return -ENOMEM;
	}
-	INIT_LIST_HEAD(&ua->ua_dev_list);
	INIT_LIST_HEAD(&ua->ua_nacl_list);

	ua->ua_nacl = nacl;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 6b88a9958f61..669c536fd959 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -40,10 +40,6 @@

 static struct workqueue_struct *xcopy_wq = NULL;
 /*
- * From target_core_spc.c
- */
-extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
-/*
  * From target_core_device.c
  */
 extern struct mutex g_device_mutex;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 479ec5621a4e..8b2c1aaf81de 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -438,7 +438,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
	struct se_session *se_sess = sess->se_sess;
	int tag;

-	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		goto busy;

diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index c6932fb53a8d..e879da81ad93 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -267,7 +267,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
	return found;
 }

-struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
+static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
 {
	struct ft_node_acl *acl;

@@ -552,7 +552,7 @@ static struct target_core_fabric_ops ft_fabric_ops = {
	.fabric_drop_nodeacl = &ft_del_acl,
 };

-int ft_register_configfs(void)
+static int ft_register_configfs(void)
 {
	struct target_fabric_configfs *fabric;
	int ret;
@@ -599,7 +599,7 @@ int ft_register_configfs(void)
	return 0;
 }

-void ft_deregister_configfs(void)
+static void ft_deregister_configfs(void)
 {
	if (!ft_configfs)
		return;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 1e4c75c5b36b..0a025b8e2a12 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -728,7 +728,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
	}
	se_sess = tv_nexus->tvn_se_sess;

-	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0) {
		pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
		return ERR_PTR(-ENOMEM);
@@ -889,7 +889,7 @@ static void tcm_vhost_submission_work(struct work_struct *work)
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			cmd->tvc_task_attr, cmd->tvc_data_direction,
			TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
-			sg_bidi_ptr, sg_no_bidi);
+			sg_bidi_ptr, sg_no_bidi, NULL, 0);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
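
vhost-scsi carries no separate protection buffers yet, so it passes NULL, 0 for the two new arguments. A fabric that did have split data/PI scatterlists would hand them through instead; a hedged kernel-style sketch, with every name below hypothetical:

/* Hypothetical fabric submission path with separate protection SGLs. */
static int demo_submit_with_pi(struct se_cmd *se_cmd, struct se_session *se_sess,
			       unsigned char *cdb, unsigned char *sense,
			       u32 unpacked_lun, u32 data_length,
			       struct scatterlist *data_sg, u32 data_nents,
			       struct scatterlist *prot_sg, u32 prot_nents)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, MSG_SIMPLE_TAG,
			DMA_TO_DEVICE, TARGET_SCF_ACK_KREF,
			data_sg, data_nents, NULL, 0,
			prot_sg, prot_nents);
}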
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
index 1900bd0fa639..f5cfdd6a5539 100644
--- a/include/linux/percpu_ida.h
+++ b/include/linux/percpu_ida.h
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/spinlock_types.h>
 #include <linux/wait.h>
 #include <linux/cpumask.h>
@@ -61,7 +62,7 @@ struct percpu_ida {
 /* Max size of percpu freelist, */
 #define IDA_DEFAULT_PCPU_SIZE	((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)

-int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
+int percpu_ida_alloc(struct percpu_ida *pool, int state);
 void percpu_ida_free(struct percpu_ida *pool, unsigned tag);

 void percpu_ida_destroy(struct percpu_ida *pool);
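
The signature change swaps the gfp mask for a task state: TASK_RUNNING keeps the old GFP_NOWAIT-style behaviour of failing fast with a negative tag, while a sleeping state lets the allocator block until a tag is freed (and return -ERESTARTSYS from TASK_INTERRUPTIBLE if a signal arrives). A minimal usage sketch:

#include <linux/percpu_ida.h>

static int grab_tag_atomic(struct percpu_ida *pool)
{
	/* Never sleeps; returns a negative value if the pool is empty. */
	return percpu_ida_alloc(pool, TASK_RUNNING);
}

static int grab_tag_blocking(struct percpu_ida *pool)
{
	/* May sleep until a tag is freed; may return -ERESTARTSYS. */
	return percpu_ida_alloc(pool, TASK_INTERRUPTIBLE);
}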
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 66d42edfb3fc..0a4edfe8af51 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -155,6 +155,7 @@ enum scsi_timeouts {
 /* values for service action in */
 #define	SAI_READ_CAPACITY_16  0x10
 #define SAI_GET_LBA_STATUS    0x12
+#define SAI_REPORT_REFERRALS  0x13
 /* values for VARIABLE_LENGTH_CMD service action codes
 * see spc4r17 Section D.3.5, table D.7 and D.8 */
 #define VLC_SA_RECEIVE_CREDENTIAL 0x1800
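
SAI_REPORT_REFERRALS is a service action of SERVICE ACTION IN(16) (opcode 0x9e), which target_emulate_report_referrals now handles in the target_core_sbc.c hunk above. A sketch of the CDB an initiator would build, assuming the SBC-3 layout (starting LBA in bytes 2..9, allocation length in bytes 10..13):

#include <stdint.h>
#include <string.h>

static void build_report_referrals_cdb(uint8_t cdb[16], uint64_t lba,
				       uint32_t alloc_len)
{
	int i;

	memset(cdb, 0, 16);
	cdb[0] = 0x9e;			/* SERVICE ACTION IN (16) */
	cdb[1] = 0x13;			/* SAI_REPORT_REFERRALS */
	for (i = 0; i < 8; i++)		/* bytes 2..9: STARTING LBA, big-endian */
		cdb[2 + i] = lba >> (56 - 8 * i);
	for (i = 0; i < 4; i++)		/* bytes 10..13: ALLOCATION LENGTH */
		cdb[10 + i] = alloc_len >> (24 - 8 * i);
}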
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index a12589c4ee92..ae5a17111968 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -94,7 +94,7 @@ extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
 /*
 * From iscsi_target_util.c
 */
-extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
 extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
			unsigned char *, __be32);
 extern void iscsit_release_cmd(struct iscsi_cmd *);
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 39e0114d70c5..7020e33e742e 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -41,6 +41,9 @@ struct se_subsystem_api {
	unsigned int (*get_io_opt)(struct se_device *);
	unsigned char *(*get_sense_buffer)(struct se_cmd *);
	bool (*get_write_cache)(struct se_device *);
+	int (*init_prot)(struct se_device *);
+	int (*format_prot)(struct se_device *);
+	void (*free_prot)(struct se_device *);
 };

 struct sbc_ops {
@@ -70,6 +73,10 @@ sense_reason_t sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv,
				      sector_t lba, sector_t nolb),
	void *priv);
+sense_reason_t sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int,
+				    unsigned int, struct scatterlist *, int);
+sense_reason_t sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int,
+				   unsigned int, struct scatterlist *, int);

 void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 321301c0a643..c9c791209cd1 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -37,6 +37,9 @@
37/* Used by transport_send_check_condition_and_sense() */ 37/* Used by transport_send_check_condition_and_sense() */
38#define SPC_SENSE_KEY_OFFSET 2 38#define SPC_SENSE_KEY_OFFSET 2
39#define SPC_ADD_SENSE_LEN_OFFSET 7 39#define SPC_ADD_SENSE_LEN_OFFSET 7
40#define SPC_DESC_TYPE_OFFSET 8
41#define SPC_ADDITIONAL_DESC_LEN_OFFSET 9
42#define SPC_VALIDITY_OFFSET 10
40#define SPC_ASC_KEY_OFFSET 12 43#define SPC_ASC_KEY_OFFSET 12
41#define SPC_ASCQ_KEY_OFFSET 13 44#define SPC_ASCQ_KEY_OFFSET 13
42#define TRANSPORT_IQN_LEN 224 45#define TRANSPORT_IQN_LEN 224
@@ -112,7 +115,7 @@
112/* Queue Algorithm Modifier default for restricted reordering in control mode page */ 115/* Queue Algorithm Modifier default for restricted reordering in control mode page */
113#define DA_EMULATE_REST_REORD 0 116#define DA_EMULATE_REST_REORD 0
114 117
115#define SE_INQUIRY_BUF 512 118#define SE_INQUIRY_BUF 1024
116#define SE_MODE_PAGE_BUF 512 119#define SE_MODE_PAGE_BUF 512
117#define SE_SENSE_BUF 96 120#define SE_SENSE_BUF 96
118 121
@@ -205,6 +208,9 @@ enum tcm_sense_reason_table {
 	TCM_OUT_OF_RESOURCES			= R(0x12),
 	TCM_PARAMETER_LIST_LENGTH_ERROR		= R(0x13),
 	TCM_MISCOMPARE_VERIFY			= R(0x14),
+	TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED	= R(0x15),
+	TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED	= R(0x16),
+	TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED	= R(0x17),
 #undef R
 };
 
@@ -247,10 +253,28 @@ typedef enum {
 
 struct se_cmd;
 
+struct t10_alua_lba_map_member {
+	struct list_head lba_map_mem_list;
+	int lba_map_mem_alua_state;
+	int lba_map_mem_alua_pg_id;
+};
+
+struct t10_alua_lba_map {
+	u64 lba_map_first_lba;
+	u64 lba_map_last_lba;
+	struct list_head lba_map_list;
+	struct list_head lba_map_mem_list;
+};
+
 struct t10_alua {
 	/* ALUA Target Port Group ID */
 	u16	alua_tg_pt_gps_counter;
 	u32	alua_tg_pt_gps_count;
+	/* Referrals support */
+	spinlock_t	lba_map_lock;
+	u32	lba_map_segment_size;
+	u32	lba_map_segment_multiplier;
+	struct list_head lba_map_list;
 	spinlock_t	tg_pt_gps_lock;
 	struct se_device *t10_dev;
 	/* Used for default ALUA Target Port Group */
@@ -284,6 +308,8 @@ struct t10_alua_tg_pt_gp {
 	u16	tg_pt_gp_id;
 	int	tg_pt_gp_valid_id;
 	int	tg_pt_gp_alua_supported_states;
+	int	tg_pt_gp_alua_pending_state;
+	int	tg_pt_gp_alua_previous_state;
 	int	tg_pt_gp_alua_access_status;
 	int	tg_pt_gp_alua_access_type;
 	int	tg_pt_gp_nonop_delay_msecs;
@@ -291,9 +317,6 @@ struct t10_alua_tg_pt_gp {
 	int	tg_pt_gp_implicit_trans_secs;
 	int	tg_pt_gp_pref;
 	int	tg_pt_gp_write_metadata;
-	/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
-#define ALUA_MD_BUF_LEN				1024
-	u32	tg_pt_gp_md_buf_len;
 	u32	tg_pt_gp_members;
 	atomic_t tg_pt_gp_alua_access_state;
 	atomic_t tg_pt_gp_ref_cnt;
@@ -303,6 +326,10 @@ struct t10_alua_tg_pt_gp {
 	struct config_group tg_pt_gp_group;
 	struct list_head tg_pt_gp_list;
 	struct list_head tg_pt_gp_mem_list;
+	struct se_port *tg_pt_gp_alua_port;
+	struct se_node_acl *tg_pt_gp_alua_nacl;
+	struct delayed_work tg_pt_gp_transition_work;
+	struct completion *tg_pt_gp_transition_complete;
 };
 
 struct t10_alua_tg_pt_gp_member {
@@ -414,6 +441,34 @@ struct se_tmr_req {
 	struct list_head tmr_list;
 };
 
+enum target_prot_op {
+	TARGET_PROT_NORMAL = 0,
+	TARGET_PROT_DIN_INSERT,
+	TARGET_PROT_DOUT_INSERT,
+	TARGET_PROT_DIN_STRIP,
+	TARGET_PROT_DOUT_STRIP,
+	TARGET_PROT_DIN_PASS,
+	TARGET_PROT_DOUT_PASS,
+};
+
+enum target_prot_ho {
+	PROT_SEPERATED,
+	PROT_INTERLEAVED,
+};
+
+enum target_prot_type {
+	TARGET_DIF_TYPE0_PROT,
+	TARGET_DIF_TYPE1_PROT,
+	TARGET_DIF_TYPE2_PROT,
+	TARGET_DIF_TYPE3_PROT,
+};
+
+struct se_dif_v1_tuple {
+	__be16	guard_tag;
+	__be16	app_tag;
+	__be32	ref_tag;
+};
+
 struct se_cmd {
 	/* SAM response code being sent to initiator */
 	u8			scsi_status;
@@ -497,14 +552,24 @@ struct se_cmd {
 	void			*priv;
 
 	/* Used for lun->lun_ref counting */
-	bool			lun_ref_active;
+	int			lun_ref_active;
+
+	/* DIF related members */
+	enum target_prot_op	prot_op;
+	enum target_prot_type	prot_type;
+	u32			prot_length;
+	u32			reftag_seed;
+	struct scatterlist	*t_prot_sg;
+	unsigned int		t_prot_nents;
+	enum target_prot_ho	prot_handover;
+	sense_reason_t		pi_err;
+	sector_t		bad_sector;
 };
 
 struct se_ua {
 	u8			ua_asc;
 	u8			ua_ascq;
 	struct se_node_acl	*ua_nacl;
-	struct list_head	ua_dev_list;
 	struct list_head	ua_nacl_list;
 };
 
@@ -605,6 +670,9 @@ struct se_dev_attrib {
 	int		emulate_tpws;
 	int		emulate_caw;
 	int		emulate_3pc;
+	int		pi_prot_format;
+	enum target_prot_type pi_prot_type;
+	enum target_prot_type hw_pi_prot_type;
 	int		enforce_pr_isids;
 	int		is_nonrot;
 	int		emulate_rest_reord;
@@ -736,6 +804,8 @@ struct se_device {
 	/* Linked list for struct se_hba struct se_device list */
 	struct list_head	dev_list;
 	struct se_lun		xcopy_lun;
+	/* Protection Information */
+	int			prot_length;
 };
 
 struct se_hba {
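Note: the se_dif_v1_tuple layout and the TCM_LOGICAL_BLOCK_*_CHECK_FAILED sense codes added above combine roughly as in this illustrative DIF Type 1 verify step. This is a sketch assuming a 512-byte block; crc_t10dif() is the kernel's existing T10 CRC16 helper, while foo_check_type1() is a hypothetical name -- the real checks sit behind sbc_dif_verify_read()/sbc_dif_verify_write() declared earlier:

	#include <linux/crc-t10dif.h>

	static sense_reason_t foo_check_type1(const void *buf,
					      const struct se_dif_v1_tuple *sdt,
					      sector_t lba)
	{
		__u16 crc = crc_t10dif(buf, 512);

		/* guard tag: CRC16 computed over the data block */
		if (sdt->guard_tag != cpu_to_be16(crc))
			return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
		/* Type 1: ref tag carries the low 32 bits of the LBA */
		if (sdt->ref_tag != cpu_to_be32(lba & 0xffffffff))
			return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
		return 0;
	}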
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 4cf4fda404a3..0218d689b3d7 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -105,7 +105,8 @@ sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32);
 sense_reason_t	target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
 int	target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
 		unsigned char *, unsigned char *, u32, u32, int, int, int,
-		struct scatterlist *, u32, struct scatterlist *, u32);
+		struct scatterlist *, u32, struct scatterlist *, u32,
+		struct scatterlist *, u32);
 int	target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
 		unsigned char *, u32, u32, int, int, int);
 int	target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
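Note: target_submit_cmd_map_sgls() grows a trailing protection scatterlist pair, so fabric callers change shape as sketched below (the argument names are placeholders; only the parameter order comes from the header):

	rc = target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense_buf,
					unpacked_lun, data_length, task_attr,
					data_dir, flags,
					sgl, sgl_count,
					sgl_bidi, sgl_bidi_count,
					sgl_prot, sgl_prot_count);	/* new pair */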
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 9d054bf91d0f..7be235f1a70b 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -132,22 +132,22 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
 /**
  * percpu_ida_alloc - allocate a tag
  * @pool: pool to allocate from
- * @gfp: gfp flags
+ * @state: task state for prepare_to_wait
  *
  * Returns a tag - an integer in the range [0..nr_tags) (passed to
  * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
  *
  * Safe to be called from interrupt context (assuming it isn't passed
- * __GFP_WAIT, of course).
+ * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
  *
  * @gfp indicates whether or not to wait until a free id is available (it's not
  * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
  * however long it takes until another thread frees an id (same semantics as a
  * mempool).
  *
- * Will not fail if passed __GFP_WAIT.
+ * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE.
  */
-int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
+int percpu_ida_alloc(struct percpu_ida *pool, int state)
 {
 	DEFINE_WAIT(wait);
 	struct percpu_ida_cpu *tags;
@@ -174,7 +174,8 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
 	 *
 	 * global lock held and irqs disabled, don't need percpu lock
 	 */
-	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
+	if (state != TASK_RUNNING)
+		prepare_to_wait(&pool->wait, &wait, state);
 
 	if (!tags->nr_free)
 		alloc_global_tags(pool, tags);
@@ -191,16 +192,22 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
 		spin_unlock(&pool->lock);
 		local_irq_restore(flags);
 
-		if (tag >= 0 || !(gfp & __GFP_WAIT))
+		if (tag >= 0 || state == TASK_RUNNING)
 			break;
 
+		if (signal_pending_state(state, current)) {
+			tag = -ERESTARTSYS;
+			break;
+		}
+
 		schedule();
 
 		local_irq_save(flags);
 		tags = this_cpu_ptr(pool->tag_cpu);
 	}
+	if (state != TASK_RUNNING)
+		finish_wait(&pool->wait, &wait);
 
-	finish_wait(&pool->wait, &wait);
 	return tag;
 }
 EXPORT_SYMBOL_GPL(percpu_ida_alloc);
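Note: the net effect for percpu_ida_alloc() callers, sketched under assumed names (pool is hypothetical; the GFP_ATOMIC -> TASK_RUNNING and GFP_KERNEL -> sleeping-state mapping follows this series):

	int tag;

	/* was percpu_ida_alloc(&pool, GFP_ATOMIC): never sleeps */
	tag = percpu_ida_alloc(&pool, TASK_RUNNING);

	/* was percpu_ida_alloc(&pool, GFP_KERNEL): may now return -ERESTARTSYS */
	tag = percpu_ida_alloc(&pool, TASK_INTERRUPTIBLE);
	if (tag == -ERESTARTSYS)
		return tag;	/* interrupted by a signal while waiting */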